blob: b22699de6e74948ed5d420d24bc832856a0ceb00 [file] [log] [blame]
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001/*
Stephan Raj Ignatious Durairajba291292018-01-04 13:36:34 +05302 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003 *
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16 * PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include <qdf_types.h>
20#include <qdf_lock.h>
Dhanashri Atre14049172016-11-11 18:32:36 -080021#include <qdf_net_types.h>
Dhanashri Atre0da31222017-03-23 12:30:58 -070022#include <qdf_lro.h>
Tallapragada Kalyan4c183b82017-09-13 23:48:14 +053023#include <qdf_module.h>
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +053024#include <hal_hw_headers.h>
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070025#include <hal_api.h>
26#include <hif.h>
27#include <htt.h>
28#include <wdi_event.h>
29#include <queue.h>
30#include "dp_htt.h"
31#include "dp_types.h"
32#include "dp_internal.h"
Vijay Pamidipatid41d6d62016-10-19 21:19:00 +053033#include "dp_tx.h"
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -070034#include "dp_tx_desc.h"
Leo Chang5ea93a42016-11-03 12:39:49 -070035#include "dp_rx.h"
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080036#include <cdp_txrx_handle.h>
Ravi Joshiaf9ace82017-02-17 12:41:48 -080037#include <wlan_cfg.h>
Ishank Jainbc2d91f2017-01-03 18:14:54 +053038#include "cdp_txrx_cmn_struct.h"
Prathyusha Guduri184b6402018-02-04 23:01:49 +053039#include "cdp_txrx_stats_struct.h"
Adil Saeed Musthafa61a21692018-07-17 20:49:31 -070040#include "cdp_txrx_cmn_reg.h"
Dhanashri Atre14049172016-11-11 18:32:36 -080041#include <qdf_util.h>
Ishank Jain1e7401c2017-02-17 15:38:39 +053042#include "dp_peer.h"
Kai Chen6eca1a62017-01-12 10:17:53 -080043#include "dp_rx_mon.h"
Ishank Jain6290a3c2017-03-21 10:49:39 +053044#include "htt_stats.h"
Keyur Parekhfad6d082017-05-07 08:54:47 -070045#include "qdf_mem.h" /* qdf_mem_malloc,free */
Vivek126db5d2018-07-25 22:05:04 +053046#include "cfg_ucfg_api.h"
Manjunathappa Prakash5f050a82017-07-18 22:00:05 -070047#ifdef QCA_LL_TX_FLOW_CONTROL_V2
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -070048#include "cdp_txrx_flow_ctrl_v2.h"
Manjunathappa Prakash5f050a82017-07-18 22:00:05 -070049#else
/* Stub used when QCA_LL_TX_FLOW_CONTROL_V2 is compiled out: flow-pool
 * dump becomes a no-op so callers need no conditional compilation.
 */
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
}
55#endif
Yun Parkfde6b9e2017-06-26 17:13:11 -070056#include "dp_ipa.h"
Ravi Joshiaf9ace82017-02-17 12:41:48 -080057
Ruchi, Agrawal234753c2018-06-28 14:53:37 +053058#include "dp_cal_client_api.h"
59
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070060#ifdef CONFIG_MCL
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070061#ifndef REMOVE_PKT_LOG
62#include <pktlog_ac_api.h>
63#include <pktlog_ac.h>
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070064#endif
65#endif
66static void dp_pktlogmod_exit(struct dp_pdev *handle);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +053067static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
Akshay Kosigi78eced82018-05-14 14:53:48 +053068 uint8_t *peer_mac_addr,
69 struct cdp_ctrl_objmgr_peer *ctrl_peer);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +053070static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +053071static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
72static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070073
Karunakar Dasineni1d891ed2017-03-29 15:42:02 -070074#define DP_INTR_POLL_TIMER_MS 10
Tallapragada Kalyanfb72b632017-07-07 12:51:58 +053075#define DP_WDS_AGING_TIMER_DEFAULT_MS 120000
Ishank Jainbc2d91f2017-01-03 18:14:54 +053076#define DP_MCS_LENGTH (6*MAX_MCS)
77#define DP_NSS_LENGTH (6*SS_COUNT)
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +053078#define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
79#define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
80#define DP_MAX_MCS_STRING_LEN 30
Ishank Jain6290a3c2017-03-21 10:49:39 +053081#define DP_CURR_FW_STATS_AVAIL 19
82#define DP_HTT_DBG_EXT_STATS_MAX 256
Prathyusha Guduri43bb0562018-02-12 18:30:54 +053083#define DP_MAX_SLEEP_TIME 100
Ishank Jain949674c2017-02-27 17:09:29 +053084
Yun Parkfde6b9e2017-06-26 17:13:11 -070085#ifdef IPA_OFFLOAD
86/* Exclude IPA rings from the interrupt context */
Yun Park601d0d82017-08-28 21:49:31 -070087#define TX_RING_MASK_VAL 0xb
Yun Parkfde6b9e2017-06-26 17:13:11 -070088#define RX_RING_MASK_VAL 0x7
89#else
90#define TX_RING_MASK_VAL 0xF
91#define RX_RING_MASK_VAL 0xF
92#endif
Venkateswara Swamy Bandarued15e74a2017-08-18 19:13:10 +053093
sumedh baikady72b1c712017-08-24 12:11:46 -070094#define STR_MAXLEN 64
Soumya Bhat89647ef2017-11-16 17:23:48 +053095
Soumya Bhat0d6245c2018-02-08 21:02:57 +053096#define DP_PPDU_STATS_CFG_ALL 0xFFFF
97
98/* PPDU stats mask sent to FW to enable enhanced stats */
99#define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
100/* PPDU stats mask sent to FW to support debug sniffer feature */
101#define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
Vinay Adella873dc402018-05-28 12:06:34 +0530102/* PPDU stats mask sent to FW to support BPR feature*/
103#define DP_PPDU_STATS_CFG_BPR 0x2000
104/* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
105#define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
106 DP_PPDU_STATS_CFG_ENH_STATS)
107/* PPDU stats mask sent to FW to support BPR and pcktlog stats feature */
108#define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
109 DP_PPDU_TXLITE_STATS_BITMASK_CFG)
110
Vivek126db5d2018-07-25 22:05:04 +0530111#define RNG_ERR "SRNG setup failed for"
Ishank Jain949674c2017-02-27 17:09:29 +0530112/**
113 * default_dscp_tid_map - Default DSCP-TID mapping
114 *
Pamidipati, Vijayef2cbc62017-09-27 23:09:06 +0530115 * DSCP TID
116 * 000000 0
117 * 001000 1
118 * 010000 2
119 * 011000 3
120 * 100000 4
121 * 101000 5
122 * 110000 6
123 * 111000 7
Ishank Jain949674c2017-02-27 17:09:29 +0530124 */
/* Eight consecutive DSCP values map to one TID: index = DSCP, value = TID */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
};
135
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +0530136/*
137 * struct dp_rate_debug
138 *
139 * @mcs_type: print string for a given mcs
140 * @valid: valid mcs rate?
141 */
struct dp_rate_debug {
	/* human-readable label printed for this MCS index */
	char mcs_type[DP_MAX_MCS_STRING_LEN];
	/* MCS_VALID / MCS_INVALID flag for this index */
	uint8_t valid;
};

/* valid-flag values for dp_rate_debug.valid */
#define MCS_VALID 1
#define MCS_INVALID 0
149
/*
 * dp_rate_string - printable rate names indexed by [preamble][mcs].
 * Row order follows the DOT11_* preamble enum: OFDM, CCK, HT, VHT, HE.
 * NOTE: the final entry of each row is labelled "INVALID" but flagged
 * MCS_VALID on purpose, so the out-of-range/invalid MCS counter is
 * still printed by the stats dump code.
 */
static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {

	{ /* OFDM */
		{"OFDM 48 Mbps", MCS_VALID},
		{"OFDM 24 Mbps", MCS_VALID},
		{"OFDM 12 Mbps", MCS_VALID},
		{"OFDM 6 Mbps ", MCS_VALID},
		{"OFDM 54 Mbps", MCS_VALID},
		{"OFDM 36 Mbps", MCS_VALID},
		{"OFDM 18 Mbps", MCS_VALID},
		{"OFDM 9 Mbps ", MCS_VALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_VALID},
	},
	{ /* CCK */
		{"CCK 11 Mbps Long  ", MCS_VALID},
		{"CCK 5.5 Mbps Long ", MCS_VALID},
		{"CCK 2 Mbps Long   ", MCS_VALID},
		{"CCK 1 Mbps Long   ", MCS_VALID},
		{"CCK 11 Mbps Short ", MCS_VALID},
		{"CCK 5.5 Mbps Short", MCS_VALID},
		{"CCK 2 Mbps Short  ", MCS_VALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_VALID},
	},
	{ /* HT (802.11n) */
		{"HT MCS 0 (BPSK 1/2)  ", MCS_VALID},
		{"HT MCS 1 (QPSK 1/2)  ", MCS_VALID},
		{"HT MCS 2 (QPSK 3/4)  ", MCS_VALID},
		{"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
		{"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
		{"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
		{"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
		{"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_VALID},
	},
	{ /* VHT (802.11ac) */
		{"VHT MCS 0 (BPSK 1/2)     ", MCS_VALID},
		{"VHT MCS 1 (QPSK 1/2)     ", MCS_VALID},
		{"VHT MCS 2 (QPSK 3/4)     ", MCS_VALID},
		{"VHT MCS 3 (16-QAM 1/2)   ", MCS_VALID},
		{"VHT MCS 4 (16-QAM 3/4)   ", MCS_VALID},
		{"VHT MCS 5 (64-QAM 2/3)   ", MCS_VALID},
		{"VHT MCS 6 (64-QAM 3/4)   ", MCS_VALID},
		{"VHT MCS 7 (64-QAM 5/6)   ", MCS_VALID},
		{"VHT MCS 8 (256-QAM 3/4)  ", MCS_VALID},
		{"VHT MCS 9 (256-QAM 5/6)  ", MCS_VALID},
		{"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
		{"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
		{"INVALID ", MCS_VALID},
	},
	{ /* HE (802.11ax) */
		{"HE MCS 0 (BPSK 1/2)     ", MCS_VALID},
		{"HE MCS 1 (QPSK 1/2)     ", MCS_VALID},
		{"HE MCS 2 (QPSK 3/4)     ", MCS_VALID},
		{"HE MCS 3 (16-QAM 1/2)   ", MCS_VALID},
		{"HE MCS 4 (16-QAM 3/4)   ", MCS_VALID},
		{"HE MCS 5 (64-QAM 2/3)   ", MCS_VALID},
		{"HE MCS 6 (64-QAM 3/4)   ", MCS_VALID},
		{"HE MCS 7 (64-QAM 5/6)   ", MCS_VALID},
		{"HE MCS 8 (256-QAM 3/4)  ", MCS_VALID},
		{"HE MCS 9 (256-QAM 5/6)  ", MCS_VALID},
		{"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
		{"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
		{"INVALID ", MCS_VALID},
	}
};
228
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700229/**
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +0530230 * @brief Cpu ring map types
231 */
/* Selector for a row of dp_cpu_ring_map below */
enum dp_cpu_ring_map_types {
	DP_DEFAULT_MAP,                     /* no radio offloaded to NSS */
	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,   /* radio 0 offloaded to NSS */
	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,  /* radio 1 offloaded to NSS */
	DP_NSS_ALL_RADIO_OFFLOADED_MAP,     /* all radios offloaded to NSS */
	DP_CPU_RING_MAP_MAX
};
239
240/**
241 * @brief Cpu to tx ring map
242 */
/*
 * dp_cpu_ring_map - per interrupt-context TCL TX ring assignment,
 * indexed by [dp_cpu_ring_map_types][interrupt context].
 */
static uint8_t dp_cpu_ring_map[DP_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
	{0x0, 0x1, 0x2, 0x0},
	{0x1, 0x2, 0x1, 0x2},
	{0x0, 0x2, 0x0, 0x2},
	{0x2, 0x2, 0x2, 0x2}
};
249
250/**
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -0800251 * @brief Select the type of statistics
252 */
/* Column selector into dp_stats_mapping_table: FW vs host statistics */
enum dp_stats_type {
	STATS_FW = 0,
	STATS_HOST = 1,
	STATS_TYPE_MAX = 2,
};
258
259/**
260 * @brief General Firmware statistics options
261 *
262 */
/* Sentinel placed in the FW column when a row has no FW counterpart */
enum dp_fw_stats {
	TXRX_FW_STATS_INVALID	= -1,
};
266
267/**
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +0530268 * dp_stats_mapping_table - Firmware and Host statistics
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -0800269 * currently supported
270 */
/*
 * Each row pairs an HTT FW stats id (column STATS_FW) with a host stats
 * id (column STATS_HOST); the row index is the user-visible stats id.
 * TXRX_FW_STATS_INVALID / TXRX_HOST_STATS_INVALID mark the half that
 * does not apply to that row.
 */
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
	/* Last ENUM for HTT FW STATS */
	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
};
303
Mohit Khannadba82f22018-07-12 10:59:17 -0700304/* MCL specific functions */
305#ifdef CONFIG_MCL
306/**
307 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
308 * @soc: pointer to dp_soc handle
309 * @intr_ctx_num: interrupt context number for which mon mask is needed
310 *
311 * For MCL, monitor mode rings are being processed in timer contexts (polled).
312 * This function is returning 0, since in interrupt mode(softirq based RX),
313 * we donot want to process monitor mode rings in a softirq.
314 *
315 * So, in case packet log is enabled for SAP/STA/P2P modes,
316 * regular interrupt processing will not process monitor mode rings. It would be
317 * done in a separate timer context.
318 *
319 * Return: 0
320 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	/* MCL: monitor rings are reaped from a polling timer
	 * (dp_service_mon_rings), never from the interrupt path.
	 */
	return 0;
}
326
327/*
328 * dp_service_mon_rings()- timer to reap monitor rings
329 * reqd as we are not getting ppdu end interrupts
330 * @arg: SoC Handle
331 *
332 * Return:
333 *
334 */
static void dp_service_mon_rings(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	int ring = 0, work_done, mac_id;
	struct dp_pdev *pdev = NULL;

	/* Reap the monitor ring of every mac on every attached pdev */
	for (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
		pdev = soc->pdev_list[ring];
		if (!pdev)
			continue;
		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			/* translate per-pdev mac index to SoC mac id */
			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
								  pdev->pdev_id);
			work_done = dp_mon_process(soc, mac_for_pdev,
						   QCA_NAPI_BUDGET);

			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				  FL("Reaped %d descs from Monitor rings"),
				  work_done);
		}
	}

	/* self re-arm: keep polling every DP_INTR_POLL_TIMER_MS */
	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
}
359
360#ifndef REMOVE_PKT_LOG
361/**
362 * dp_pkt_log_init() - API to initialize packet log
363 * @ppdev: physical device handle
364 * @scn: HIF context
365 *
366 * Return: none
367 */
368void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
369{
370 struct dp_pdev *handle = (struct dp_pdev *)ppdev;
371
372 if (handle->pkt_log_init) {
373 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
374 "%s: Packet log not initialized", __func__);
375 return;
376 }
377
378 pktlog_sethandle(&handle->pl_dev, scn);
379 pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION);
380
381 if (pktlogmod_init(scn)) {
382 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
383 "%s: pktlogmod_init failed", __func__);
384 handle->pkt_log_init = false;
385 } else {
386 handle->pkt_log_init = true;
387 }
388}
389
/**
 * dp_pkt_log_con_service() - connect packet log service
 * @ppdev: physical device handle
 * @scn: device context
 *
 * Initializes packet log for @ppdev and attaches it to the HTC layer.
 *
 * Return: none
 */
static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
{
	/* ppdev is already the opaque handle dp_pkt_log_init() expects;
	 * the original's dp_pdev cast round-trip was a no-op.
	 */
	dp_pkt_log_init(ppdev, scn);
	pktlog_htc_attach();
}
404
405/**
406 * dp_pktlogmod_exit() - API to cleanup pktlog info
407 * @handle: Pdev handle
408 *
409 * Return: none
410 */
static void dp_pktlogmod_exit(struct dp_pdev *handle)
{
	/* pktlogmod_exit() needs the HIF context owned by the soc */
	void *scn = (void *)handle->soc->hif_handle;

	if (!scn) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid hif(scn) handle", __func__);
		return;
	}

	pktlogmod_exit(scn);
	/* allow a later dp_pkt_log_init() to initialize again */
	handle->pkt_log_init = false;
}
424#endif
425#else
/* Packet log unsupported in this build config: cleanup is a no-op */
static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
427
428/**
429 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
430 * @soc: pointer to dp_soc handle
431 * @intr_ctx_num: interrupt context number for which mon mask is needed
432 *
433 * Return: mon mask value
434 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	/* Non-MCL: monitor rings are serviced from the interrupt context,
	 * so return the configured rx-mon mask for this context.
	 */
	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}
440#endif
441
Mohit Khanna7ac554b2018-05-24 11:58:13 -0700442/**
443 * dp_get_dp_vdev_from_cdp_vdev() - get dp_vdev from cdp_vdev by type-casting
444 * @cdp_opaque_vdev: pointer to cdp_vdev
445 *
446 * Return: pointer to dp_vdev
447 */
static
struct dp_vdev *dp_get_dp_vdev_from_cdp_vdev(struct cdp_vdev *cdp_opaque_vdev)
{
	/* the cdp handle is simply a type-erased dp_vdev pointer */
	struct dp_vdev *vdev = (struct dp_vdev *)cdp_opaque_vdev;

	return vdev;
}
453
454
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530455static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
456 struct cdp_peer *peer_hdl,
457 uint8_t *mac_addr,
458 enum cdp_txrx_ast_entry_type type,
459 uint32_t flags)
460{
461
462 return dp_peer_add_ast((struct dp_soc *)soc_hdl,
463 (struct dp_peer *)peer_hdl,
464 mac_addr,
465 type,
466 flags);
467}
468
469static void dp_peer_del_ast_wifi3(struct cdp_soc_t *soc_hdl,
470 void *ast_entry_hdl)
471{
472 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
473 qdf_spin_lock_bh(&soc->ast_lock);
474 dp_peer_del_ast((struct dp_soc *)soc_hdl,
475 (struct dp_ast_entry *)ast_entry_hdl);
476 qdf_spin_unlock_bh(&soc->ast_lock);
477}
478
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530479
/*
 * dp_peer_update_ast_wifi3() - update an existing AST entry for a peer
 * @soc_hdl: opaque soc handle
 * @peer_hdl: opaque peer handle; its vdev's pdev scopes the AST lookup
 * @wds_macaddr: MAC address of the AST entry to update
 * @flags: update flags passed through to dp_peer_update_ast()
 *
 * Return: dp_peer_update_ast() status, or -1 if no entry was found
 */
static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
			struct cdp_peer *peer_hdl,
			uint8_t *wds_macaddr,
			uint32_t flags)
{
	int status = -1;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_peer *peer = (struct dp_peer *)peer_hdl;

	/* lookup and update must happen atomically under ast_lock */
	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
						    peer->vdev->pdev->pdev_id);

	if (ast_entry) {
		status = dp_peer_update_ast(soc,
					    peer,
					    ast_entry, flags);
	}

	qdf_spin_unlock_bh(&soc->ast_lock);

	return status;
}
504
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530505/*
506 * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530507 * @soc_handle: Datapath SOC handle
508 * @wds_macaddr: WDS entry MAC Address
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530509 * Return: None
510 */
511static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530512 uint8_t *wds_macaddr, void *vdev_handle)
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530513{
514 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
515 struct dp_ast_entry *ast_entry = NULL;
Chaithanya Garrepallicf347d12018-09-18 14:28:55 +0530516 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530517
518 qdf_spin_lock_bh(&soc->ast_lock);
Chaithanya Garrepallicf347d12018-09-18 14:28:55 +0530519 ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
520 vdev->pdev->pdev_id);
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530521
phadiman0381f562018-06-29 15:40:52 +0530522 if (ast_entry) {
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530523 if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
Radha krishna Simha Jiguru27340792018-09-06 15:08:12 +0530524 (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
525 (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS)) {
phadiman0381f562018-06-29 15:40:52 +0530526 ast_entry->is_active = TRUE;
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530527 }
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530528 }
phadiman0381f562018-06-29 15:40:52 +0530529
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530530 qdf_spin_unlock_bh(&soc->ast_lock);
531}
532
533/*
534 * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entry
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530535 * @soc: Datapath SOC handle
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530536 *
537 * Return: None
538 */
static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
					 void *vdev_hdl)
{
	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *temp_ase;
	int i;

	qdf_spin_lock_bh(&soc->ast_lock);

	/* walk every AST entry of every peer on every vdev/pdev */
	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		/* vdev list is protected by its own lock, nested in ast_lock */
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
					/* STATIC/SELF/STA_BSS entries are
					 * left untouched by the reset
					 */
					if ((ase->type ==
						CDP_TXRX_AST_TYPE_STATIC) ||
						(ase->type ==
						 CDP_TXRX_AST_TYPE_SELF) ||
						(ase->type ==
						 CDP_TXRX_AST_TYPE_STA_BSS))
						continue;
					ase->is_active = TRUE;
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
}
573
574/*
575 * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry
576 * @soc: Datapath SOC handle
577 *
578 * Return: None
579 */
static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
{
	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *temp_ase;
	int i;

	qdf_spin_lock_bh(&soc->ast_lock);

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				/* temp_ase keeps iteration safe while the
				 * current entry is deleted below
				 */
				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
					/* never delete STATIC/SELF/STA_BSS
					 * entries; flush only learned ones
					 */
					if ((ase->type ==
						CDP_TXRX_AST_TYPE_STATIC) ||
						(ase->type ==
						 CDP_TXRX_AST_TYPE_SELF) ||
						(ase->type ==
						 CDP_TXRX_AST_TYPE_STA_BSS))
						continue;
					dp_peer_del_ast(soc, ase);
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
}
613
Chaithanya Garrepallicf347d12018-09-18 14:28:55 +0530614static void *dp_peer_ast_hash_find_soc_wifi3(struct cdp_soc_t *soc_hdl,
615 uint8_t *ast_mac_addr)
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530616{
617 struct dp_ast_entry *ast_entry;
618 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
Chaithanya Garrepallicf347d12018-09-18 14:28:55 +0530619
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530620 qdf_spin_lock_bh(&soc->ast_lock);
Chaithanya Garrepallicf347d12018-09-18 14:28:55 +0530621 ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
622 qdf_spin_unlock_bh(&soc->ast_lock);
623 return (void *)ast_entry;
624}
625
626static void *dp_peer_ast_hash_find_by_pdevid_wifi3(struct cdp_soc_t *soc_hdl,
627 uint8_t *ast_mac_addr,
628 uint8_t pdev_id)
629{
630 struct dp_ast_entry *ast_entry;
631 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
632
633 qdf_spin_lock_bh(&soc->ast_lock);
634 ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr, pdev_id);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530635 qdf_spin_unlock_bh(&soc->ast_lock);
636 return (void *)ast_entry;
637}
638
639static uint8_t dp_peer_ast_get_pdev_id_wifi3(struct cdp_soc_t *soc_hdl,
640 void *ast_entry_hdl)
641{
642 return dp_peer_ast_get_pdev_id((struct dp_soc *)soc_hdl,
643 (struct dp_ast_entry *)ast_entry_hdl);
644}
645
646static uint8_t dp_peer_ast_get_next_hop_wifi3(struct cdp_soc_t *soc_hdl,
647 void *ast_entry_hdl)
648{
649 return dp_peer_ast_get_next_hop((struct dp_soc *)soc_hdl,
650 (struct dp_ast_entry *)ast_entry_hdl);
651}
652
653static void dp_peer_ast_set_type_wifi3(
654 struct cdp_soc_t *soc_hdl,
655 void *ast_entry_hdl,
656 enum cdp_txrx_ast_entry_type type)
657{
658 dp_peer_ast_set_type((struct dp_soc *)soc_hdl,
659 (struct dp_ast_entry *)ast_entry_hdl,
660 type);
661}
662
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530663static enum cdp_txrx_ast_entry_type dp_peer_ast_get_type_wifi3(
664 struct cdp_soc_t *soc_hdl,
665 void *ast_entry_hdl)
666{
667 return ((struct dp_ast_entry *)ast_entry_hdl)->type;
668}
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530669
Kiran Venkatappaed35f442018-07-19 22:22:29 +0530670#if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND)
671void dp_peer_ast_set_cp_ctx_wifi3(struct cdp_soc_t *soc_handle,
672 void *ast_entry,
673 void *cp_ctx)
674{
675 struct dp_soc *soc = (struct dp_soc *)soc_handle;
676
677 qdf_spin_lock_bh(&soc->ast_lock);
678 dp_peer_ast_set_cp_ctx(soc,
679 (struct dp_ast_entry *)ast_entry, cp_ctx);
680 qdf_spin_unlock_bh(&soc->ast_lock);
681}
682
683void *dp_peer_ast_get_cp_ctx_wifi3(struct cdp_soc_t *soc_handle,
684 void *ast_entry)
685{
686 struct dp_soc *soc = (struct dp_soc *)soc_handle;
687 void *cp_ctx = NULL;
688
689 qdf_spin_lock_bh(&soc->ast_lock);
690 cp_ctx = dp_peer_ast_get_cp_ctx(soc,
691 (struct dp_ast_entry *)ast_entry);
692 qdf_spin_unlock_bh(&soc->ast_lock);
693
694 return cp_ctx;
695}
696
697bool dp_peer_ast_get_wmi_sent_wifi3(struct cdp_soc_t *soc_handle,
698 void *ast_entry)
699{
700 struct dp_soc *soc = (struct dp_soc *)soc_handle;
701 bool wmi_sent = false;
702
703 qdf_spin_lock_bh(&soc->ast_lock);
704 wmi_sent = dp_peer_ast_get_wmi_sent(soc,
705 (struct dp_ast_entry *)ast_entry);
706 qdf_spin_unlock_bh(&soc->ast_lock);
707
708 return wmi_sent;
709}
710
711void dp_peer_ast_free_entry_wifi3(struct cdp_soc_t *soc_handle,
712 void *ast_entry)
713{
714 struct dp_soc *soc = (struct dp_soc *)soc_handle;
715
716 qdf_spin_lock_bh(&soc->ast_lock);
717 dp_peer_ast_free_entry(soc, (struct dp_ast_entry *)ast_entry);
718 qdf_spin_unlock_bh(&soc->ast_lock);
719}
720#endif
721
Chaithanya Garrepallicf347d12018-09-18 14:28:55 +0530722static struct cdp_peer *dp_peer_ast_get_peer_wifi3(
723 struct cdp_soc_t *soc_hdl,
724 void *ast_entry_hdl)
725{
726 return (struct cdp_peer *)((struct dp_ast_entry *)ast_entry_hdl)->peer;
727}
728
729static uint32_t dp_peer_ast_get_nexhop_peer_id_wifi3(
730 struct cdp_soc_t *soc_hdl,
731 void *ast_entry_hdl)
732{
733 return ((struct dp_ast_entry *)ast_entry_hdl)->peer->peer_ids[0];
734}
Houston Hoffman648a9182017-05-21 23:27:50 -0700735/**
736 * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
737 * @ring_num: ring num of the ring being queried
738 * @grp_mask: the grp_mask array for the ring type in question.
739 *
740 * The grp_mask array is indexed by group number and the bit fields correspond
741 * to ring numbers. We are finding which interrupt group a ring belongs to.
742 *
743 * Return: the index in the grp_mask array with the ring number.
744 * -QDF_STATUS_E_NOENT if no entry is found
745 */
746static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
747{
748 int ext_group_num;
749 int mask = 1 << ring_num;
750
751 for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
752 ext_group_num++) {
753 if (mask & grp_mask[ext_group_num])
754 return ext_group_num;
755 }
756
757 return -QDF_STATUS_E_NOENT;
758}
759
/*
 * dp_srng_calculate_msi_group() - map a ring to its interrupt (MSI) group
 * @soc: DP SoC handle, provides the wlan_cfg per-ring-type group masks
 * @ring_type: HAL ring type being configured
 * @ring_num: ring number within that type; may be remapped for lookup
 *
 * Selects the wlan_cfg group-mask array covering @ring_type, then finds
 * which ext_group services @ring_num within it.
 *
 * Return: ext_group index on success; -QDF_STATUS_E_NOENT for ring types
 * that take no group interrupt (SW-to-HW rings, CE rings, monitor buf).
 */
static int dp_srng_calculate_msi_group(struct dp_soc *soc,
	enum hal_ring_type ring_type, int ring_num)
{
	int *grp_mask;

	switch (ring_type) {
	case WBM2SW_RELEASE:
		/* dp_tx_comp_handler - soc->tx_comp_ring */
		if (ring_num < 3)
			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];

		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
		else if (ring_num == 3) {
			/* sw treats this as a separate ring type */
			grp_mask = &soc->wlan_cfg_ctx->
				int_rx_wbm_rel_ring_mask[0];
			/* the rel ring is bit 0 of its own mask */
			ring_num = 0;
		} else {
			qdf_assert(0);
			return -QDF_STATUS_E_NOENT;
		}
	break;

	case REO_EXCEPTION:
		/* dp_rx_err_process - &soc->reo_exception_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
	break;

	case REO_DST:
		/* dp_rx_process - soc->reo_dest_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
	break;

	case REO_STATUS:
		/* dp_reo_status_ring_handler - soc->reo_status_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
	break;

	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
	case RXDMA_MONITOR_STATUS:
	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
	case RXDMA_MONITOR_DST:
		/* dp_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
	break;
	case RXDMA_DST:
		/* dp_rxdma_err_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
	break;

	case RXDMA_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
	break;

	case RXDMA_MONITOR_BUF:
		/* TODO: support low_thresh interrupt */
		return -QDF_STATUS_E_NOENT;
	break;

	case TCL_DATA:
	case TCL_CMD:
	case REO_CMD:
	case SW2WBM_RELEASE:
	case WBM_IDLE_LINK:
		/* normally empty SW_TO_HW rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case TCL_STATUS:
	case REO_REINJECT:
		/* misc unused rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case CE_SRC:
	case CE_DST:
	case CE_DST_STATUS:
		/* CE_rings - currently handled by hif */
	default:
		return -QDF_STATUS_E_NOENT;
	break;
	}

	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
}
846
847static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
848 *ring_params, int ring_type, int ring_num)
849{
850 int msi_group_number;
851 int msi_data_count;
852 int ret;
853 uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
854
855 ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
856 &msi_data_count, &msi_data_start,
857 &msi_irq_start);
858
859 if (ret)
860 return;
861
862 msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
863 ring_num);
864 if (msi_group_number < 0) {
Houston Hoffman41b912c2017-08-30 14:27:51 -0700865 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
Houston Hoffman648a9182017-05-21 23:27:50 -0700866 FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
867 ring_type, ring_num);
868 ring_params->msi_addr = 0;
869 ring_params->msi_data = 0;
870 return;
871 }
872
873 if (msi_group_number > msi_data_count) {
874 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
875 FL("2 msi_groups will share an msi; msi_group_num %d"),
876 msi_group_number);
877
878 QDF_ASSERT(0);
879 }
880
881 pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
882
883 ring_params->msi_addr = addr_low;
884 ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
885 ring_params->msi_data = (msi_group_number % msi_data_count)
886 + msi_data_start;
887 ring_params->flags |= HAL_SRNG_MSI_INTR;
888}
889
/**
 * dp_print_ast_stats() - Dump AST table contents
 * @soc: Datapath soc handle
 *
 * Return: void
 */
#ifdef FEATURE_AST
static void dp_print_ast_stats(struct dp_soc *soc)
{
	uint8_t pdev_idx;
	uint8_t num_entries = 0;
	struct dp_vdev *vdev;
	struct dp_pdev *pdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *tmp_ase;
	/* printable names indexed by the AST entry type enum */
	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
		"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS",
		"DA", "HMWDS_SEC"};

	DP_PRINT_STATS("AST Stats:");
	DP_PRINT_STATS(" Entries Added = %d", soc->stats.ast.added);
	DP_PRINT_STATS(" Entries Deleted = %d", soc->stats.ast.deleted);
	DP_PRINT_STATS(" Entries Agedout = %d", soc->stats.ast.aged_out);
	DP_PRINT_STATS("AST Table:");

	/* ast_lock protects the per-peer AST lists for the whole dump */
	qdf_spin_lock_bh(&soc->ast_lock);
	for (pdev_idx = 0;
	     pdev_idx < MAX_PDEV_CNT && soc->pdev_list[pdev_idx];
	     pdev_idx++) {
		pdev = soc->pdev_list[pdev_idx];
		/* vdev_list_lock protects vdev iteration within this pdev */
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
					DP_PRINT_STATS("%6d mac_addr = %pM"
						" peer_mac_addr = %pM"
						" type = %s"
						" next_hop = %d"
						" is_active = %d"
						" is_bss = %d"
						" ast_idx = %d"
						" ast_hash = %d"
						" pdev_id = %d"
						" vdev_id = %d",
						++num_entries,
						ase->mac_addr.raw,
						ase->peer->mac_addr.raw,
						type[ase->type],
						ase->next_hop,
						ase->is_active,
						ase->is_bss,
						ase->ast_idx,
						ase->ast_hash_value,
						ase->pdev_id,
						ase->vdev_id);
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
}
#else
static void dp_print_ast_stats(struct dp_soc *soc)
{
	DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
}
#endif
957
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530958static void dp_print_peer_table(struct dp_vdev *vdev)
959{
960 struct dp_peer *peer = NULL;
961
962 DP_PRINT_STATS("Dumping Peer Table Stats:");
963 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
964 if (!peer) {
965 DP_PRINT_STATS("Invalid Peer");
966 return;
967 }
968 DP_PRINT_STATS(" peer_mac_addr = %pM"
969 " nawds_enabled = %d"
970 " bss_peer = %d"
971 " wapi = %d"
972 " wds_enabled = %d"
973 " delete in progress = %d",
974 peer->mac_addr.raw,
975 peer->nawds_enabled,
976 peer->bss_peer,
977 peer->wapi,
978 peer->wds_enabled,
979 peer->delete_in_progress);
980 }
981}
982
Pamidipati, Vijay899e7752017-07-25 22:09:28 +0530983/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700984 * dp_setup_srng - Internal function to setup SRNG rings used by data path
985 */
986static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
Dhanashri Atred4032ab2017-01-17 15:05:41 -0800987 int ring_type, int ring_num, int mac_id, uint32_t num_entries)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700988{
989 void *hal_soc = soc->hal_soc;
990 uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
991 /* TODO: See if we should get align size from hal */
992 uint32_t ring_base_align = 8;
993 struct hal_srng_params ring_params;
Karunakar Dasinenid0ea21f2017-01-31 22:58:15 -0800994 uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700995
Houston Hoffman648a9182017-05-21 23:27:50 -0700996 /* TODO: Currently hal layer takes care of endianness related settings.
997 * See if these settings need to passed from DP layer
998 */
999 ring_params.flags = 0;
1000
Karunakar Dasinenid0ea21f2017-01-31 22:58:15 -08001001 num_entries = (num_entries > max_entries) ? max_entries : num_entries;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001002 srng->hal_srng = NULL;
1003 srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001004 srng->num_entries = num_entries;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001005 srng->base_vaddr_unaligned = qdf_mem_alloc_consistent(
Dhanashri Atre57e420d2016-10-25 21:13:54 -07001006 soc->osdev, soc->osdev->dev, srng->alloc_size,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001007 &(srng->base_paddr_unaligned));
1008
1009 if (!srng->base_vaddr_unaligned) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301010 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1011 FL("alloc failed - ring_type: %d, ring_num %d"),
1012 ring_type, ring_num);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001013 return QDF_STATUS_E_NOMEM;
1014 }
1015
1016 ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
1017 ((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
1018 ring_params.ring_base_paddr = srng->base_paddr_unaligned +
1019 ((unsigned long)(ring_params.ring_base_vaddr) -
1020 (unsigned long)srng->base_vaddr_unaligned);
1021 ring_params.num_entries = num_entries;
1022
Mohit Khanna81179cb2018-08-16 20:50:43 -07001023 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
1024 FL("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u"),
1025 ring_type, ring_num, (void *)ring_params.ring_base_vaddr,
1026 (void *)ring_params.ring_base_paddr, ring_params.num_entries);
1027
psimhac983d7e2017-07-26 15:20:07 -07001028 if (soc->intr_mode == DP_INTR_MSI) {
1029 dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
Aditya Sathishded018e2018-07-02 16:25:21 +05301030 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1031 FL("Using MSI for ring_type: %d, ring_num %d"),
1032 ring_type, ring_num);
psimhac983d7e2017-07-26 15:20:07 -07001033
1034 } else {
1035 ring_params.msi_data = 0;
1036 ring_params.msi_addr = 0;
Aditya Sathishded018e2018-07-02 16:25:21 +05301037 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1038 FL("Skipping MSI for ring_type: %d, ring_num %d"),
1039 ring_type, ring_num);
psimhac983d7e2017-07-26 15:20:07 -07001040 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001041
Pamidipati, Vijay45b1df22017-06-21 03:20:25 +05301042 /*
1043 * Setup interrupt timer and batch counter thresholds for
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001044 * interrupt mitigation based on ring type
1045 */
Pamidipati, Vijay45b1df22017-06-21 03:20:25 +05301046 if (ring_type == REO_DST) {
1047 ring_params.intr_timer_thres_us =
1048 wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1049 ring_params.intr_batch_cntr_thres_entries =
1050 wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1051 } else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
1052 ring_params.intr_timer_thres_us =
1053 wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
1054 ring_params.intr_batch_cntr_thres_entries =
1055 wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
1056 } else {
1057 ring_params.intr_timer_thres_us =
1058 wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1059 ring_params.intr_batch_cntr_thres_entries =
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001060 wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
Pamidipati, Vijay45b1df22017-06-21 03:20:25 +05301061 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001062
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001063 /* Enable low threshold interrupts for rx buffer rings (regular and
1064 * monitor buffer rings.
1065 * TODO: See if this is required for any other ring
1066 */
Karunakar Dasineni37995ac2018-02-06 12:37:30 -08001067 if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
1068 (ring_type == RXDMA_MONITOR_STATUS)) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001069 /* TODO: Setting low threshold to 1/8th of ring size
1070 * see if this needs to be configurable
1071 */
1072 ring_params.low_threshold = num_entries >> 3;
1073 ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
Karunakar Dasinenibef3b1b2018-03-28 22:23:57 -07001074 ring_params.intr_timer_thres_us =
1075 wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1076 ring_params.intr_batch_cntr_thres_entries = 0;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001077 }
1078
1079 srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
Dhanashri Atred4032ab2017-01-17 15:05:41 -08001080 mac_id, &ring_params);
Manoj Ekbote376116e2017-12-19 10:44:41 -08001081
1082 if (!srng->hal_srng) {
1083 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1084 srng->alloc_size,
1085 srng->base_vaddr_unaligned,
1086 srng->base_paddr_unaligned, 0);
1087 }
1088
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001089 return 0;
1090}
1091
1092/**
1093 * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
1094 * Any buffers allocated and attached to ring entries are expected to be freed
1095 * before calling this function.
1096 */
1097static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
1098 int ring_type, int ring_num)
1099{
1100 if (!srng->hal_srng) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301101 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1102 FL("Ring type: %d, num:%d not setup"),
1103 ring_type, ring_num);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001104 return;
1105 }
1106
1107 hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
1108
Dhanashri Atre57e420d2016-10-25 21:13:54 -07001109 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001110 srng->alloc_size,
1111 srng->base_vaddr_unaligned,
1112 srng->base_paddr_unaligned, 0);
Manoj Ekbote525bcab2017-09-01 17:23:32 -07001113 srng->hal_srng = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001114}
1115
1116/* TODO: Need this interface from HIF */
1117void *hif_get_hal_handle(void *hif_handle);
1118
1119/*
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301120 * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
1121 * @dp_ctx: DP SOC handle
1122 * @budget: Number of frames/descriptors that can be processed in one shot
1123 *
1124 * Return: remaining budget/quota for the soc device
1125 */
Jeff Johnsonf1352572017-01-10 14:24:10 -08001126static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301127{
1128 struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
1129 struct dp_soc *soc = int_ctx->soc;
1130 int ring = 0;
1131 uint32_t work_done = 0;
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301132 int budget = dp_budget;
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301133 uint8_t tx_mask = int_ctx->tx_ring_mask;
1134 uint8_t rx_mask = int_ctx->rx_ring_mask;
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301135 uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
1136 uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001137 uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301138 uint32_t remaining_quota = dp_budget;
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001139 struct dp_pdev *pdev = NULL;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08001140 int mac_id;
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301141
1142 /* Process Tx completion interrupts first to return back buffers */
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301143 while (tx_mask) {
1144 if (tx_mask & 0x1) {
Houston Hoffmanae850c62017-08-11 16:47:50 -07001145 work_done = dp_tx_comp_handler(soc,
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301146 soc->tx_comp_ring[ring].hal_srng,
1147 remaining_quota);
1148
Houston Hoffmanae850c62017-08-11 16:47:50 -07001149 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1150 "tx mask 0x%x ring %d, budget %d, work_done %d",
1151 tx_mask, ring, budget, work_done);
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301152
1153 budget -= work_done;
1154 if (budget <= 0)
1155 goto budget_done;
1156
1157 remaining_quota = budget;
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301158 }
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301159 tx_mask = tx_mask >> 1;
1160 ring++;
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301161 }
1162
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301163
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301164 /* Process REO Exception ring interrupt */
1165 if (rx_err_mask) {
1166 work_done = dp_rx_err_process(soc,
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301167 soc->reo_exception_ring.hal_srng,
1168 remaining_quota);
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301169
Houston Hoffmanae850c62017-08-11 16:47:50 -07001170 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1171 "REO Exception Ring: work_done %d budget %d",
1172 work_done, budget);
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301173
1174 budget -= work_done;
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301175 if (budget <= 0) {
1176 goto budget_done;
1177 }
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301178 remaining_quota = budget;
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301179 }
1180
1181 /* Process Rx WBM release ring interrupt */
1182 if (rx_wbm_rel_mask) {
1183 work_done = dp_rx_wbm_err_process(soc,
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301184 soc->rx_rel_ring.hal_srng, remaining_quota);
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301185
Houston Hoffmanae850c62017-08-11 16:47:50 -07001186 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1187 "WBM Release Ring: work_done %d budget %d",
1188 work_done, budget);
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301189
1190 budget -= work_done;
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301191 if (budget <= 0) {
1192 goto budget_done;
1193 }
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301194 remaining_quota = budget;
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301195 }
1196
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301197 /* Process Rx interrupts */
1198 if (rx_mask) {
1199 for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
1200 if (rx_mask & (1 << ring)) {
Houston Hoffmanae850c62017-08-11 16:47:50 -07001201 work_done = dp_rx_process(int_ctx,
Leo Chang5ea93a42016-11-03 12:39:49 -07001202 soc->reo_dest_ring[ring].hal_srng,
Mohit Khanna7ac554b2018-05-24 11:58:13 -07001203 ring,
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301204 remaining_quota);
1205
Houston Hoffmanae850c62017-08-11 16:47:50 -07001206 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1207 "rx mask 0x%x ring %d, work_done %d budget %d",
1208 rx_mask, ring, work_done, budget);
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301209
1210 budget -= work_done;
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301211 if (budget <= 0)
1212 goto budget_done;
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301213 remaining_quota = budget;
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301214 }
1215 }
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08001216 for (ring = 0; ring < MAX_RX_MAC_RINGS; ring++) {
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08001217 work_done = dp_rxdma_err_process(soc, ring,
1218 remaining_quota);
1219 budget -= work_done;
1220 }
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301221 }
1222
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001223 if (reo_status_mask)
1224 dp_reo_status_ring_handler(soc);
1225
Karunakar Dasineni10185472017-06-19 16:32:06 -07001226 /* Process LMAC interrupts */
Kai Chen6eca1a62017-01-12 10:17:53 -08001227 for (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001228 pdev = soc->pdev_list[ring];
1229 if (pdev == NULL)
Karunakar Dasineni10185472017-06-19 16:32:06 -07001230 continue;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08001231 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
1232 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
1233 pdev->pdev_id);
1234
1235 if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
1236 work_done = dp_mon_process(soc, mac_for_pdev,
1237 remaining_quota);
1238 budget -= work_done;
1239 if (budget <= 0)
1240 goto budget_done;
1241 remaining_quota = budget;
1242 }
Pramod Simhae382ff82017-06-05 18:09:26 -07001243
chenguocd0f3132018-02-28 15:53:50 -08001244 if (int_ctx->rxdma2host_ring_mask &
1245 (1 << mac_for_pdev)) {
1246 work_done = dp_rxdma_err_process(soc,
1247 mac_for_pdev,
1248 remaining_quota);
1249 budget -= work_done;
1250 if (budget <= 0)
1251 goto budget_done;
1252 remaining_quota = budget;
1253 }
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001254
chenguocd0f3132018-02-28 15:53:50 -08001255 if (int_ctx->host2rxdma_ring_mask &
1256 (1 << mac_for_pdev)) {
1257 union dp_rx_desc_list_elem_t *desc_list = NULL;
1258 union dp_rx_desc_list_elem_t *tail = NULL;
1259 struct dp_srng *rx_refill_buf_ring =
1260 &pdev->rx_refill_buf_ring;
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001261
chenguocd0f3132018-02-28 15:53:50 -08001262 DP_STATS_INC(pdev, replenish.low_thresh_intrs,
1263 1);
1264 dp_rx_buffers_replenish(soc, mac_for_pdev,
1265 rx_refill_buf_ring,
1266 &soc->rx_desc_buf[mac_for_pdev], 0,
Venkata Sharath Chandra Manchala16fcceb2018-01-03 11:27:15 -08001267 &desc_list, &tail);
chenguocd0f3132018-02-28 15:53:50 -08001268 }
Pramod Simhae382ff82017-06-05 18:09:26 -07001269 }
Kai Chen6eca1a62017-01-12 10:17:53 -08001270 }
1271
Dhanashri Atre0da31222017-03-23 12:30:58 -07001272 qdf_lro_flush(int_ctx->lro_ctx);
1273
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301274budget_done:
1275 return dp_budget - budget;
1276}
1277
1278/* dp_interrupt_timer()- timer poll for interrupts
1279 *
1280 * @arg: SoC Handle
1281 *
1282 * Return:
1283 *
1284 */
Jeff Johnsonf1352572017-01-10 14:24:10 -08001285static void dp_interrupt_timer(void *arg)
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301286{
1287 struct dp_soc *soc = (struct dp_soc *) arg;
1288 int i;
1289
Ravi Joshi86e98262017-03-01 13:47:03 -08001290 if (qdf_atomic_read(&soc->cmn_init_done)) {
1291 for (i = 0;
1292 i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
1293 dp_service_srngs(&soc->intr_ctx[i], 0xffff);
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301294
Ravi Joshi86e98262017-03-01 13:47:03 -08001295 qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
1296 }
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301297}
1298
1299/*
psimhac983d7e2017-07-26 15:20:07 -07001300 * dp_soc_interrupt_attach_poll() - Register handlers for DP interrupts
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301301 * @txrx_soc: DP SOC handle
1302 *
1303 * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
1304 * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
1305 * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1306 *
1307 * Return: 0 for success. nonzero for failure.
1308 */
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05301309static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301310{
1311 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1312 int i;
1313
psimhac983d7e2017-07-26 15:20:07 -07001314 soc->intr_mode = DP_INTR_POLL;
1315
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301316 for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
Houston Hoffman648a9182017-05-21 23:27:50 -07001317 soc->intr_ctx[i].dp_intr_id = i;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07001318 soc->intr_ctx[i].tx_ring_mask =
1319 wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1320 soc->intr_ctx[i].rx_ring_mask =
1321 wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1322 soc->intr_ctx[i].rx_mon_ring_mask =
1323 wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1324 soc->intr_ctx[i].rx_err_ring_mask =
1325 wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1326 soc->intr_ctx[i].rx_wbm_rel_ring_mask =
1327 wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1328 soc->intr_ctx[i].reo_status_ring_mask =
1329 wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1330 soc->intr_ctx[i].rxdma2host_ring_mask =
1331 wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301332 soc->intr_ctx[i].soc = soc;
Dhanashri Atre0da31222017-03-23 12:30:58 -07001333 soc->intr_ctx[i].lro_ctx = qdf_lro_init();
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301334 }
1335
1336 qdf_timer_init(soc->osdev, &soc->int_timer,
1337 dp_interrupt_timer, (void *)soc,
1338 QDF_TIMER_TYPE_WAKE_APPS);
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301339
1340 return QDF_STATUS_SUCCESS;
1341}
D Harilakshmi5da9ee72017-10-04 16:14:12 +05301342
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05301343static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
Venkata Sharath Chandra Manchala3e8add82017-07-10 11:59:54 -07001344#if defined(CONFIG_MCL)
D Harilakshmi5da9ee72017-10-04 16:14:12 +05301345extern int con_mode_monitor;
D Harilakshmi5da9ee72017-10-04 16:14:12 +05301346/*
1347 * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
1348 * @txrx_soc: DP SOC handle
1349 *
1350 * Call the appropriate attach function based on the mode of operation.
1351 * This is a WAR for enabling monitor mode.
1352 *
1353 * Return: 0 for success. nonzero for failure.
1354 */
1355static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1356{
Venkata Sharath Chandra Manchala3e8add82017-07-10 11:59:54 -07001357 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1358
1359 if (!(soc->wlan_cfg_ctx->napi_enabled) ||
1360 con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
Mohit Khanna9a6fdd52017-12-12 10:55:48 +08001361 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1362 "%s: Poll mode", __func__);
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05301363 return dp_soc_attach_poll(txrx_soc);
D Harilakshmi5da9ee72017-10-04 16:14:12 +05301364 } else {
Venkata Sharath Chandra Manchala3e8add82017-07-10 11:59:54 -07001365
Mohit Khanna9a6fdd52017-12-12 10:55:48 +08001366 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1367 "%s: Interrupt mode", __func__);
D Harilakshmi5da9ee72017-10-04 16:14:12 +05301368 return dp_soc_interrupt_attach(txrx_soc);
1369 }
1370}
1371#else
Venkateswara Swamy Bandaru37ce7092018-08-09 17:00:30 +05301372#if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
1373static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1374{
1375 return dp_soc_attach_poll(txrx_soc);
1376}
1377#else
D Harilakshmi5da9ee72017-10-04 16:14:12 +05301378static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1379{
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05301380 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1381
1382 if (hif_is_polled_mode_enabled(soc->hif_handle))
1383 return dp_soc_attach_poll(txrx_soc);
1384 else
1385 return dp_soc_interrupt_attach(txrx_soc);
D Harilakshmi5da9ee72017-10-04 16:14:12 +05301386}
1387#endif
Venkateswara Swamy Bandaru37ce7092018-08-09 17:00:30 +05301388#endif
Houston Hoffman648a9182017-05-21 23:27:50 -07001389
1390static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
1391 int intr_ctx_num, int *irq_id_map, int *num_irq_r)
1392{
1393 int j;
1394 int num_irq = 0;
1395
1396 int tx_mask =
1397 wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1398 int rx_mask =
1399 wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1400 int rx_mon_mask =
1401 wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1402 int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1403 soc->wlan_cfg_ctx, intr_ctx_num);
1404 int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1405 soc->wlan_cfg_ctx, intr_ctx_num);
1406 int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1407 soc->wlan_cfg_ctx, intr_ctx_num);
Karunakar Dasineniea027c52017-09-20 16:27:46 -07001408 int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1409 soc->wlan_cfg_ctx, intr_ctx_num);
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001410 int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
1411 soc->wlan_cfg_ctx, intr_ctx_num);
Houston Hoffman648a9182017-05-21 23:27:50 -07001412
1413 for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
1414
1415 if (tx_mask & (1 << j)) {
1416 irq_id_map[num_irq++] =
1417 (wbm2host_tx_completions_ring1 - j);
1418 }
1419
1420 if (rx_mask & (1 << j)) {
1421 irq_id_map[num_irq++] =
1422 (reo2host_destination_ring1 - j);
1423 }
1424
Karunakar Dasineniea027c52017-09-20 16:27:46 -07001425 if (rxdma2host_ring_mask & (1 << j)) {
1426 irq_id_map[num_irq++] =
1427 rxdma2host_destination_ring_mac1 -
1428 wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1429 }
1430
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001431 if (host2rxdma_ring_mask & (1 << j)) {
1432 irq_id_map[num_irq++] =
1433 host2rxdma_host_buf_ring_mac1 -
1434 wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1435 }
1436
Houston Hoffman648a9182017-05-21 23:27:50 -07001437 if (rx_mon_mask & (1 << j)) {
1438 irq_id_map[num_irq++] =
Karunakar Dasineniea027c52017-09-20 16:27:46 -07001439 ppdu_end_interrupts_mac1 -
1440 wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
Karunakar Dasineni37995ac2018-02-06 12:37:30 -08001441 irq_id_map[num_irq++] =
1442 rxdma2host_monitor_status_ring_mac1 -
1443 wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
Houston Hoffman648a9182017-05-21 23:27:50 -07001444 }
Karunakar Dasineniea027c52017-09-20 16:27:46 -07001445
Houston Hoffman648a9182017-05-21 23:27:50 -07001446 if (rx_wbm_rel_ring_mask & (1 << j))
1447 irq_id_map[num_irq++] = wbm2host_rx_release;
1448
1449 if (rx_err_ring_mask & (1 << j))
1450 irq_id_map[num_irq++] = reo2host_exception;
1451
1452 if (reo_status_ring_mask & (1 << j))
1453 irq_id_map[num_irq++] = reo2host_status;
1454
1455 }
1456 *num_irq_r = num_irq;
1457}
1458
1459static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
1460 int intr_ctx_num, int *irq_id_map, int *num_irq_r,
1461 int msi_vector_count, int msi_vector_start)
1462{
1463 int tx_mask = wlan_cfg_get_tx_ring_mask(
1464 soc->wlan_cfg_ctx, intr_ctx_num);
1465 int rx_mask = wlan_cfg_get_rx_ring_mask(
1466 soc->wlan_cfg_ctx, intr_ctx_num);
1467 int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1468 soc->wlan_cfg_ctx, intr_ctx_num);
1469 int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1470 soc->wlan_cfg_ctx, intr_ctx_num);
1471 int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1472 soc->wlan_cfg_ctx, intr_ctx_num);
1473 int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1474 soc->wlan_cfg_ctx, intr_ctx_num);
Karunakar Dasineniea027c52017-09-20 16:27:46 -07001475 int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1476 soc->wlan_cfg_ctx, intr_ctx_num);
Houston Hoffman648a9182017-05-21 23:27:50 -07001477
1478 unsigned int vector =
1479 (intr_ctx_num % msi_vector_count) + msi_vector_start;
1480 int num_irq = 0;
1481
psimhac983d7e2017-07-26 15:20:07 -07001482 soc->intr_mode = DP_INTR_MSI;
1483
Houston Hoffman648a9182017-05-21 23:27:50 -07001484 if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
Karunakar Dasineniea027c52017-09-20 16:27:46 -07001485 rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
Houston Hoffman648a9182017-05-21 23:27:50 -07001486 irq_id_map[num_irq++] =
1487 pld_get_msi_irq(soc->osdev->dev, vector);
1488
1489 *num_irq_r = num_irq;
1490}
1491
1492static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1493 int *irq_id_map, int *num_irq)
1494{
1495 int msi_vector_count, ret;
1496 uint32_t msi_base_data, msi_vector_start;
1497
1498 ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1499 &msi_vector_count,
1500 &msi_base_data,
1501 &msi_vector_start);
1502 if (ret)
1503 return dp_soc_interrupt_map_calculate_integrated(soc,
1504 intr_ctx_num, irq_id_map, num_irq);
1505
1506 else
1507 dp_soc_interrupt_map_calculate_msi(soc,
1508 intr_ctx_num, irq_id_map, num_irq,
1509 msi_vector_count, msi_vector_start);
1510}
1511
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301512/*
1513 * dp_soc_interrupt_attach() - Register handlers for DP interrupts
1514 * @txrx_soc: DP SOC handle
1515 *
1516 * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
1517 * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
1518 * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1519 *
1520 * Return: 0 for success. nonzero for failure.
1521 */
Jeff Johnsonf1352572017-01-10 14:24:10 -08001522static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301523{
1524 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1525
1526 int i = 0;
1527 int num_irq = 0;
1528
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301529 for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
Leo Chang5ea93a42016-11-03 12:39:49 -07001530 int ret = 0;
1531
1532 /* Map of IRQ ids registered with one interrupt context */
1533 int irq_id_map[HIF_MAX_GRP_IRQ];
1534
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301535 int tx_mask =
1536 wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1537 int rx_mask =
1538 wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1539 int rx_mon_mask =
Mohit Khannadba82f22018-07-12 10:59:17 -07001540 dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
Nandha Kishore Easwaran82ac62e2017-06-20 17:55:07 +05301541 int rx_err_ring_mask =
1542 wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1543 int rx_wbm_rel_ring_mask =
1544 wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1545 int reo_status_ring_mask =
1546 wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
Karunakar Dasineni10185472017-06-19 16:32:06 -07001547 int rxdma2host_ring_mask =
1548 wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001549 int host2rxdma_ring_mask =
1550 wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1551
Pamidipati, Vijay3d8e1e82017-05-29 14:29:31 +05301552
Houston Hoffman648a9182017-05-21 23:27:50 -07001553 soc->intr_ctx[i].dp_intr_id = i;
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301554 soc->intr_ctx[i].tx_ring_mask = tx_mask;
1555 soc->intr_ctx[i].rx_ring_mask = rx_mask;
1556 soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
Pamidipati, Vijay3d8e1e82017-05-29 14:29:31 +05301557 soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
Karunakar Dasineni10185472017-06-19 16:32:06 -07001558 soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001559 soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
Pamidipati, Vijay3d8e1e82017-05-29 14:29:31 +05301560 soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
1561 soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
1562
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301563 soc->intr_ctx[i].soc = soc;
1564
1565 num_irq = 0;
1566
Houston Hoffman648a9182017-05-21 23:27:50 -07001567 dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
1568 &num_irq);
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301569
Houston Hoffmandef86a32017-04-21 20:23:45 -07001570 ret = hif_register_ext_group(soc->hif_handle,
1571 num_irq, irq_id_map, dp_service_srngs,
1572 &soc->intr_ctx[i], "dp_intr",
chenguof2548862017-11-08 16:33:25 +08001573 HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301574
1575 if (ret) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301576 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1577 FL("failed, ret = %d"), ret);
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301578
1579 return QDF_STATUS_E_FAILURE;
1580 }
Dhanashri Atre0da31222017-03-23 12:30:58 -07001581 soc->intr_ctx[i].lro_ctx = qdf_lro_init();
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301582 }
1583
Venkateswara Swamy Bandaru19dc8b22017-03-13 15:09:24 +05301584 hif_configure_ext_group_interrupts(soc->hif_handle);
1585
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301586 return QDF_STATUS_SUCCESS;
1587}
1588
1589/*
1590 * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
1591 * @txrx_soc: DP SOC handle
1592 *
1593 * Return: void
1594 */
Jeff Johnsonf1352572017-01-10 14:24:10 -08001595static void dp_soc_interrupt_detach(void *txrx_soc)
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301596{
1597 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
Leo Chang5ea93a42016-11-03 12:39:49 -07001598 int i;
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301599
psimhac983d7e2017-07-26 15:20:07 -07001600 if (soc->intr_mode == DP_INTR_POLL) {
1601 qdf_timer_stop(&soc->int_timer);
1602 qdf_timer_free(&soc->int_timer);
psimhaa079b8c2017-08-02 17:27:14 -07001603 } else {
1604 hif_deregister_exec_group(soc->hif_handle, "dp_intr");
psimhac983d7e2017-07-26 15:20:07 -07001605 }
1606
Leo Chang5ea93a42016-11-03 12:39:49 -07001607 for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1608 soc->intr_ctx[i].tx_ring_mask = 0;
1609 soc->intr_ctx[i].rx_ring_mask = 0;
1610 soc->intr_ctx[i].rx_mon_ring_mask = 0;
Houston Hoffman648a9182017-05-21 23:27:50 -07001611 soc->intr_ctx[i].rx_err_ring_mask = 0;
1612 soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1613 soc->intr_ctx[i].reo_status_ring_mask = 0;
Karunakar Dasineniea027c52017-09-20 16:27:46 -07001614 soc->intr_ctx[i].rxdma2host_ring_mask = 0;
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001615 soc->intr_ctx[i].host2rxdma_ring_mask = 0;
Houston Hoffman648a9182017-05-21 23:27:50 -07001616
Dhanashri Atre0da31222017-03-23 12:30:58 -07001617 qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
Leo Chang5ea93a42016-11-03 12:39:49 -07001618 }
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301619}
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301620
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001621#define AVG_MAX_MPDUS_PER_TID 128
1622#define AVG_TIDS_PER_CLIENT 2
1623#define AVG_FLOWS_PER_TID 2
1624#define AVG_MSDUS_PER_FLOW 128
1625#define AVG_MSDUS_PER_MPDU 4
1626
/*
 * Allocate and setup link descriptor pool that will be used by HW for
 * various link and queue descriptors and managed by WBM
 *
 * Sizing is derived from configured max clients and the AVG_* heuristics
 * above, rounded up to a power of two, then split into DMA-consistent
 * "banks" no larger than the configured max allocation size. The idle
 * list is handed to HW either directly through the WBM_IDLE_LINK ring
 * (small pools) or via scatter buffers (large pools).
 *
 * Return: 0 on success, QDF_STATUS_E_FAILURE on allocation/setup failure
 * (all partially allocated memory is released on the failure path).
 */
static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
{
	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
	uint32_t num_mpdus_per_link_desc =
		hal_num_mpdus_per_link_desc(soc->hal_soc);
	uint32_t num_msdus_per_link_desc =
		hal_num_msdus_per_link_desc(soc->hal_soc);
	uint32_t num_mpdu_links_per_queue_desc =
		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
	uint32_t total_link_descs, total_mem_size;
	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
	uint32_t num_link_desc_banks;
	uint32_t last_bank_size = 0;
	uint32_t entry_size, num_entries;
	int i;
	/* Running cookie for every descriptor handed to HW */
	uint32_t desc_id = 0;

	/* Only Tx queue descriptors are allocated from common link descriptor
	 * pool Rx queue descriptors are not included in this because (REO queue
	 * extension descriptors) they are expected to be allocated contiguously
	 * with REO queue descriptors
	 */
	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;

	num_mpdu_queue_descs = num_mpdu_link_descs /
		num_mpdu_links_per_queue_desc;

	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
		num_msdus_per_link_desc;

	/* NOTE(review): the divisor 6 here is an undocumented scaling
	 * factor for rx msdu link descriptors — confirm against the
	 * sizing spec before changing.
	 */
	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;

	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
		num_tx_msdu_link_descs + num_rx_msdu_link_descs;

	/* Round up to power of 2 */
	total_link_descs = 1;
	while (total_link_descs < num_entries)
		total_link_descs <<= 1;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		FL("total_link_descs: %u, link_desc_size: %d"),
		total_link_descs, link_desc_size);
	total_mem_size = total_link_descs * link_desc_size;

	/* Extra room so each bank's base can be aligned up */
	total_mem_size += link_desc_align;

	if (total_mem_size <= max_alloc_size) {
		num_link_desc_banks = 0;
		last_bank_size = total_mem_size;
	} else {
		num_link_desc_banks = (total_mem_size) /
			(max_alloc_size - link_desc_align);
		last_bank_size = total_mem_size %
			(max_alloc_size - link_desc_align);
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		FL("total_mem_size: %d, num_link_desc_banks: %u"),
		total_mem_size, num_link_desc_banks);

	/* Allocate the full-size banks; each base is aligned up to
	 * link_desc_align and both vaddr and paddr track that offset.
	 */
	for (i = 0; i < num_link_desc_banks; i++) {
		soc->link_desc_banks[i].base_vaddr_unaligned =
			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
			max_alloc_size,
			&(soc->link_desc_banks[i].base_paddr_unaligned));
		soc->link_desc_banks[i].size = max_alloc_size;

		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
			soc->link_desc_banks[i].base_vaddr_unaligned) +
			((unsigned long)(
			soc->link_desc_banks[i].base_vaddr_unaligned) %
			link_desc_align));

		soc->link_desc_banks[i].base_paddr = (unsigned long)(
			soc->link_desc_banks[i].base_paddr_unaligned) +
			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
			(unsigned long)(
			soc->link_desc_banks[i].base_vaddr_unaligned));

		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("Link descriptor memory alloc failed"));
			goto fail;
		}
	}

	if (last_bank_size) {
		/* Allocate last bank in case total memory required is not exact
		 * multiple of max_alloc_size
		 */
		soc->link_desc_banks[i].base_vaddr_unaligned =
			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
			last_bank_size,
			&(soc->link_desc_banks[i].base_paddr_unaligned));
		soc->link_desc_banks[i].size = last_bank_size;

		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
			(soc->link_desc_banks[i].base_vaddr_unaligned) +
			((unsigned long)(
			soc->link_desc_banks[i].base_vaddr_unaligned) %
			link_desc_align));

		soc->link_desc_banks[i].base_paddr =
			(unsigned long)(
			soc->link_desc_banks[i].base_paddr_unaligned) +
			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
			(unsigned long)(
			soc->link_desc_banks[i].base_vaddr_unaligned));
	}


	/* Allocate and setup link descriptor idle list for HW internal use */
	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
	total_mem_size = entry_size * total_link_descs;

	if (total_mem_size <= max_alloc_size) {
		/* Small pool: publish each descriptor directly into the
		 * WBM_IDLE_LINK ring.
		 */
		void *desc;

		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
			WBM_IDLE_LINK, 0, 0, total_link_descs)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("Link desc idle ring setup failed"));
			goto fail;
		}

		hal_srng_access_start_unlocked(soc->hal_soc,
			soc->wbm_idle_link_ring.hal_srng);

		for (i = 0; i < MAX_LINK_DESC_BANKS &&
			soc->link_desc_banks[i].base_paddr; i++) {
			/* Descriptors that fit in this bank after the
			 * alignment offset is subtracted from its size.
			 */
			uint32_t num_entries = (soc->link_desc_banks[i].size -
				((unsigned long)(
				soc->link_desc_banks[i].base_vaddr) -
				(unsigned long)(
				soc->link_desc_banks[i].base_vaddr_unaligned)))
				/ link_desc_size;
			unsigned long paddr = (unsigned long)(
				soc->link_desc_banks[i].base_paddr);

			while (num_entries && (desc = hal_srng_src_get_next(
				soc->hal_soc,
				soc->wbm_idle_link_ring.hal_srng))) {
				hal_set_link_desc_addr(desc,
					LINK_DESC_COOKIE(desc_id, i), paddr);
				num_entries--;
				desc_id++;
				paddr += link_desc_size;
			}
		}
		hal_srng_access_end_unlocked(soc->hal_soc,
			soc->wbm_idle_link_ring.hal_srng);
	} else {
		/* Large pool: publish descriptors through scatter buffers
		 * and hand the scatter list to HW.
		 */
		uint32_t num_scatter_bufs;
		uint32_t num_entries_per_buf;
		uint32_t rem_entries;
		uint8_t *scatter_buf_ptr;
		uint16_t scatter_buf_num;

		soc->wbm_idle_scatter_buf_size =
			hal_idle_list_scatter_buf_size(soc->hal_soc);
		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
			soc->hal_soc, total_mem_size,
			soc->wbm_idle_scatter_buf_size);

		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("scatter bufs size out of bounds"));
			goto fail;
		}

		for (i = 0; i < num_scatter_bufs; i++) {
			soc->wbm_idle_scatter_buf_base_vaddr[i] =
				qdf_mem_alloc_consistent(soc->osdev,
					soc->osdev->dev,
				soc->wbm_idle_scatter_buf_size,
				&(soc->wbm_idle_scatter_buf_base_paddr[i]));
			if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					FL("Scatter list memory alloc failed"));
				goto fail;
			}
		}

		/* Populate idle list scatter buffers with link descriptor
		 * pointers
		 */
		scatter_buf_num = 0;
		scatter_buf_ptr = (uint8_t *)(
			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
		rem_entries = num_entries_per_buf;

		for (i = 0; i < MAX_LINK_DESC_BANKS &&
			soc->link_desc_banks[i].base_paddr; i++) {
			uint32_t num_link_descs =
				(soc->link_desc_banks[i].size -
				((unsigned long)(
				soc->link_desc_banks[i].base_vaddr) -
				(unsigned long)(
				soc->link_desc_banks[i].base_vaddr_unaligned)))
				/ link_desc_size;
			unsigned long paddr = (unsigned long)(
				soc->link_desc_banks[i].base_paddr);

			while (num_link_descs) {
				hal_set_link_desc_addr((void *)scatter_buf_ptr,
					LINK_DESC_COOKIE(desc_id, i), paddr);
				num_link_descs--;
				desc_id++;
				paddr += link_desc_size;
				rem_entries--;
				if (rem_entries) {
					scatter_buf_ptr += entry_size;
				} else {
					/* Current buffer full: advance to the
					 * next scatter buffer (if any).
					 */
					rem_entries = num_entries_per_buf;
					scatter_buf_num++;

					if (scatter_buf_num >= num_scatter_bufs)
						break;

					scatter_buf_ptr = (uint8_t *)(
						soc->wbm_idle_scatter_buf_base_vaddr[
						scatter_buf_num]);
				}
			}
		}
		/* Setup link descriptor idle list in HW */
		hal_setup_link_idle_list(soc->hal_soc,
			soc->wbm_idle_scatter_buf_base_paddr,
			soc->wbm_idle_scatter_buf_base_vaddr,
			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
			(uint32_t)(scatter_buf_ptr -
			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
			scatter_buf_num-1])), total_link_descs);
	}
	return 0;

fail:
	/* Unwind: release ring, scatter buffers, and banks; NULL each
	 * pointer so a later cleanup pass does not double-free.
	 */
	if (soc->wbm_idle_link_ring.hal_srng) {
		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
			WBM_IDLE_LINK, 0);
	}

	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
				soc->wbm_idle_scatter_buf_size,
				soc->wbm_idle_scatter_buf_base_vaddr[i],
				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
		}
	}

	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
				soc->link_desc_banks[i].size,
				soc->link_desc_banks[i].base_vaddr_unaligned,
				soc->link_desc_banks[i].base_paddr_unaligned,
				0);
			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
		}
	}
	return QDF_STATUS_E_FAILURE;
}
1906
1907/*
1908 * Free link descriptor pool that was setup HW
1909 */
Jeff Johnsonf1352572017-01-10 14:24:10 -08001910static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001911{
1912 int i;
1913
1914 if (soc->wbm_idle_link_ring.hal_srng) {
Manoj Ekbote525bcab2017-09-01 17:23:32 -07001915 dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001916 WBM_IDLE_LINK, 0);
1917 }
1918
1919 for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1920 if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
Dhanashri Atre57e420d2016-10-25 21:13:54 -07001921 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001922 soc->wbm_idle_scatter_buf_size,
1923 soc->wbm_idle_scatter_buf_base_vaddr[i],
1924 soc->wbm_idle_scatter_buf_base_paddr[i], 0);
Karunakar Dasinenib71ad042018-01-22 16:50:20 -08001925 soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001926 }
1927 }
1928
1929 for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1930 if (soc->link_desc_banks[i].base_vaddr_unaligned) {
Dhanashri Atre57e420d2016-10-25 21:13:54 -07001931 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001932 soc->link_desc_banks[i].size,
1933 soc->link_desc_banks[i].base_vaddr_unaligned,
1934 soc->link_desc_banks[i].base_paddr_unaligned,
1935 0);
Karunakar Dasinenib71ad042018-01-22 16:50:20 -08001936 soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001937 }
1938 }
1939}
1940
Mohit Khanna81179cb2018-08-16 20:50:43 -07001941#ifdef IPA_OFFLOAD
1942#define REO_DST_RING_SIZE_QCA6290 1023
1943#ifndef QCA_WIFI_QCA8074_VP
1944#define REO_DST_RING_SIZE_QCA8074 1023
1945#else
1946#define REO_DST_RING_SIZE_QCA8074 8
1947#endif /* QCA_WIFI_QCA8074_VP */
1948
1949#else
1950
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05301951#define REO_DST_RING_SIZE_QCA6290 1024
Pratik Gandhi4cce3e02018-09-05 19:43:11 +05301952#ifndef QCA_WIFI_QCA8074_VP
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05301953#define REO_DST_RING_SIZE_QCA8074 2048
Pratik Gandhi4cce3e02018-09-05 19:43:11 +05301954#else
1955#define REO_DST_RING_SIZE_QCA8074 8
Mohit Khanna81179cb2018-08-16 20:50:43 -07001956#endif /* QCA_WIFI_QCA8074_VP */
1957#endif /* IPA_OFFLOAD */
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001958
/*
 * dp_wds_aging_timer_fn() - Timer callback function for WDS aging
 * @soc_hdl: Datapath SOC handle (opaque; cast to struct dp_soc internally)
 *
 * This is a timer function used to age out stale AST nodes from the
 * AST table. Aging is two-pass: an active entry is first demoted
 * (is_active = FALSE) and only deleted if still inactive on the next
 * timer tick. The timer re-arms itself while common init is done.
 */
#ifdef FEATURE_WDS
static void dp_wds_aging_timer_fn(void *soc_hdl)
{
	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_peer *peer;
	/* temp_ase keeps iteration safe while ase may be deleted */
	struct dp_ast_entry *ase, *temp_ase;
	int i;

	/* AST table walk and deletion are done under ast_lock */
	qdf_spin_lock_bh(&soc->ast_lock);

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
					/*
					 * Do not expire static ast entries
					 * and HM WDS entries
					 */
					if (ase->type != CDP_TXRX_AST_TYPE_WDS)
						continue;

					/* First pass: demote, don't delete */
					if (ase->is_active) {
						ase->is_active = FALSE;
						continue;
					}

					/* Second pass: still inactive — age out */
					DP_STATS_INC(soc, ast.aged_out, 1);
					dp_peer_del_ast(soc, ase);
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	qdf_spin_unlock_bh(&soc->ast_lock);

	/* Re-arm only while the SOC is initialized */
	if (qdf_atomic_read(&soc->cmn_init_done))
		qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
}
2009
Ruchi, Agrawal89219d92018-02-26 16:43:06 +05302010
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302011/*
2012 * dp_soc_wds_attach() - Setup WDS timer and AST table
2013 * @soc: Datapath SOC handle
2014 *
2015 * Return: None
2016 */
2017static void dp_soc_wds_attach(struct dp_soc *soc)
2018{
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302019 qdf_timer_init(soc->osdev, &soc->wds_aging_timer,
2020 dp_wds_aging_timer_fn, (void *)soc,
2021 QDF_TIMER_TYPE_WAKE_APPS);
2022
2023 qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
2024}
2025
2026/*
2027 * dp_soc_wds_detach() - Detach WDS data structures and timers
2028 * @txrx_soc: DP SOC handle
2029 *
2030 * Return: None
2031 */
2032static void dp_soc_wds_detach(struct dp_soc *soc)
2033{
2034 qdf_timer_stop(&soc->wds_aging_timer);
2035 qdf_timer_free(&soc->wds_aging_timer);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302036}
#else
/* FEATURE_WDS disabled: WDS attach/detach are no-op stubs */
static void dp_soc_wds_attach(struct dp_soc *soc)
{
}

static void dp_soc_wds_detach(struct dp_soc *soc)
{
}
#endif
2046
2047/*
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302048 * dp_soc_reset_ring_map() - Reset cpu ring map
2049 * @soc: Datapath soc handler
2050 *
2051 * This api resets the default cpu ring map
2052 */
2053
2054static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
2055{
2056 uint8_t i;
2057 int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2058
2059 for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
2060 if (nss_config == 1) {
2061 /*
2062 * Setting Tx ring map for one nss offloaded radio
2063 */
2064 soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
2065 } else if (nss_config == 2) {
2066 /*
2067 * Setting Tx ring for two nss offloaded radios
2068 */
2069 soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
2070 } else {
2071 /*
2072 * Setting Tx ring map for all nss offloaded radios
2073 */
2074 soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_ALL_RADIO_OFFLOADED_MAP][i];
2075 }
2076 }
2077}
2078
Aniruddha Paule3a03342017-09-19 16:42:10 +05302079/*
2080 * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
2081 * @dp_soc - DP soc handle
2082 * @ring_type - ring type
2083 * @ring_num - ring_num
2084 *
2085 * return 0 or 1
2086 */
2087static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
2088{
2089 uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2090 uint8_t status = 0;
2091
2092 switch (ring_type) {
2093 case WBM2SW_RELEASE:
2094 case REO_DST:
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07002095 case RXDMA_BUF:
Aniruddha Paule3a03342017-09-19 16:42:10 +05302096 status = ((nss_config) & (1 << ring_num));
2097 break;
2098 default:
2099 break;
2100 }
2101
2102 return status;
2103}
2104
2105/*
2106 * dp_soc_reset_intr_mask() - reset interrupt mask
2107 * @dp_soc - DP Soc handle
2108 *
2109 * Return: Return void
2110 */
2111static void dp_soc_reset_intr_mask(struct dp_soc *soc)
2112{
2113 uint8_t j;
2114 int *grp_mask = NULL;
2115 int group_number, mask, num_ring;
2116
2117 /* number of tx ring */
2118 num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
2119
2120 /*
2121 * group mask for tx completion ring.
2122 */
2123 grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
2124
2125 /* loop and reset the mask for only offloaded ring */
2126 for (j = 0; j < num_ring; j++) {
2127 if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
2128 continue;
2129 }
2130
2131 /*
2132 * Group number corresponding to tx offloaded ring.
2133 */
2134 group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2135 if (group_number < 0) {
2136 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07002137 FL("ring not part of any group; ring_type: %d,ring_num %d"),
Aniruddha Paule3a03342017-09-19 16:42:10 +05302138 WBM2SW_RELEASE, j);
2139 return;
2140 }
2141
2142 /* reset the tx mask for offloaded ring */
2143 mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2144 mask &= (~(1 << j));
2145
2146 /*
2147 * reset the interrupt mask for offloaded ring.
2148 */
2149 wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2150 }
2151
2152 /* number of rx rings */
2153 num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2154
2155 /*
2156 * group mask for reo destination ring.
2157 */
2158 grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2159
2160 /* loop and reset the mask for only offloaded ring */
2161 for (j = 0; j < num_ring; j++) {
2162 if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
2163 continue;
2164 }
2165
2166 /*
2167 * Group number corresponding to rx offloaded ring.
2168 */
2169 group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2170 if (group_number < 0) {
2171 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07002172 FL("ring not part of any group; ring_type: %d,ring_num %d"),
Aniruddha Paule3a03342017-09-19 16:42:10 +05302173 REO_DST, j);
2174 return;
2175 }
2176
2177 /* set the interrupt mask for offloaded ring */
2178 mask = wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2179 mask &= (~(1 << j));
2180
2181 /*
2182 * set the interrupt mask to zero for rx offloaded radio.
2183 */
2184 wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2185 }
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07002186
2187 /*
2188 * group mask for Rx buffer refill ring
2189 */
2190 grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2191
2192 /* loop and reset the mask for only offloaded ring */
2193 for (j = 0; j < MAX_PDEV_CNT; j++) {
2194 if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
2195 continue;
2196 }
2197
2198 /*
2199 * Group number corresponding to rx offloaded ring.
2200 */
2201 group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2202 if (group_number < 0) {
2203 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2204 FL("ring not part of any group; ring_type: %d,ring_num %d"),
2205 REO_DST, j);
2206 return;
2207 }
2208
2209 /* set the interrupt mask for offloaded ring */
2210 mask = wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2211 group_number);
2212 mask &= (~(1 << j));
2213
2214 /*
2215 * set the interrupt mask to zero for rx offloaded radio.
2216 */
2217 wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2218 group_number, mask);
2219 }
Aniruddha Paule3a03342017-09-19 16:42:10 +05302220}
2221
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302222#ifdef IPA_OFFLOAD
2223/**
2224 * dp_reo_remap_config() - configure reo remap register value based
2225 * nss configuration.
2226 * based on offload_radio value below remap configuration
2227 * get applied.
2228 * 0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
2229 * 1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
2230 * 2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
2231 * 3 - both Radios handled by NSS (remap not required)
2232 * 4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
2233 *
2234 * @remap1: output parameter indicates reo remap 1 register value
2235 * @remap2: output parameter indicates reo remap 2 register value
2236 * Return: bool type, true if remap is configured else false.
2237 */
2238static bool dp_reo_remap_config(struct dp_soc *soc,
2239 uint32_t *remap1,
2240 uint32_t *remap2)
2241{
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302242 *remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
2243 (0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
2244
2245 *remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
2246 (0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
2247
Mohit Khanna81179cb2018-08-16 20:50:43 -07002248 dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
2249
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302250 return true;
2251}
2252#else
2253static bool dp_reo_remap_config(struct dp_soc *soc,
2254 uint32_t *remap1,
2255 uint32_t *remap2)
2256{
2257 uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2258
2259 switch (offload_radio) {
2260 case 0:
2261 *remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2262 (0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2263 (0x3 << 18) | (0x4 << 21)) << 8;
2264
2265 *remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2266 (0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2267 (0x3 << 18) | (0x4 << 21)) << 8;
2268 break;
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302269 case 1:
2270 *remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
2271 (0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
2272 (0x2 << 18) | (0x3 << 21)) << 8;
2273
2274 *remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
2275 (0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
2276 (0x4 << 18) | (0x2 << 21)) << 8;
2277 break;
2278
2279 case 2:
2280 *remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
2281 (0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
2282 (0x1 << 18) | (0x3 << 21)) << 8;
2283
2284 *remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
2285 (0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
2286 (0x4 << 18) | (0x1 << 21)) << 8;
2287 break;
2288
2289 case 3:
2290 /* return false if both radios are offloaded to NSS */
2291 return false;
2292 }
Mohit Khanna81179cb2018-08-16 20:50:43 -07002293
2294 dp_debug("remap1 %x remap2 %x offload_radio %u",
2295 *remap1, *remap2, offload_radio);
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302296 return true;
2297}
2298#endif
2299
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302300/*
Aniruddha Paul91dfd502018-01-08 11:24:34 +05302301 * dp_reo_frag_dst_set() - configure reo register to set the
2302 * fragment destination ring
2303 * @soc : Datapath soc
2304 * @frag_dst_ring : output parameter to set fragment destination ring
2305 *
 * Based on offload_radio below, the fragment destination ring is selected
2307 * 0 - TCL
2308 * 1 - SW1
2309 * 2 - SW2
2310 * 3 - SW3
2311 * 4 - SW4
2312 * 5 - Release
2313 * 6 - FW
2314 * 7 - alternate select
2315 *
2316 * return: void
2317 */
2318static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2319{
2320 uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2321
2322 switch (offload_radio) {
2323 case 0:
2324 *frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
2325 break;
2326 case 3:
2327 *frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2328 break;
2329 default:
2330 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2331 FL("dp_reo_frag_dst_set invalid offload radio config"));
2332 break;
2333 }
2334}
2335
2336/*
 * dp_soc_cmn_setup() - Common SoC level initialization
2338 * @soc: Datapath SOC handle
2339 *
2340 * This is an internal function used to setup common SOC data structures,
2341 * to be called from PDEV attach after receiving HW mode capabilities from FW
2342 */
static int dp_soc_cmn_setup(struct dp_soc *soc)
{
	int i;
	struct hal_reo_params reo_params;
	int tx_ring_size;
	int tx_comp_ring_size;
	int reo_dst_ring_size;
	uint32_t entries;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;

	/* Common setup already done (flag set at the end of this function);
	 * makes repeat calls from multiple pdev attaches a no-op.
	 */
	if (qdf_atomic_read(&soc->cmn_init_done))
		return 0;

	if (dp_hw_link_desc_pool_setup(soc))
		goto fail1;

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	/* Setup SRNG rings */
	/* Common rings */
	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
		wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for wbm_desc_rel_ring"));
		goto fail1;
	}


	soc->num_tcl_data_rings = 0;
	/* Tx data rings: only created here when TCL rings are common to all
	 * pdevs; otherwise they are created per pdev in pdev attach.
	 */
	if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
		soc->num_tcl_data_rings =
			wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
		tx_comp_ring_size =
			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
		tx_ring_size =
			wlan_cfg_tx_ring_size(soc_cfg_ctx);
		for (i = 0; i < soc->num_tcl_data_rings; i++) {
			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
				TCL_DATA, i, 0, tx_ring_size)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
				goto fail1;
			}
			/*
			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
			 * count
			 */
			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
				WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
				goto fail1;
			}
		}
	} else {
		/* This will be incremented during per pdev ring setup */
		soc->num_tcl_data_rings = 0;
	}

	/* Tx SW descriptor pools */
	if (dp_tx_soc_attach(soc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("dp_tx_soc_attach failed"));
		goto fail1;
	}

	entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
	/* TCL command and status rings */
	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
			  entries)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for tcl_cmd_ring"));
		goto fail1;
	}

	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
			  entries)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for tcl_status_ring"));
		goto fail1;
	}

	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);

	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
	 * descriptors
	 */

	/* Rx data rings: same common-vs-per-pdev split as the TCL rings */
	if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
		soc->num_reo_dest_rings =
			wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
		QDF_TRACE(QDF_MODULE_ID_DP,
			QDF_TRACE_LEVEL_INFO,
			FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
		for (i = 0; i < soc->num_reo_dest_rings; i++) {
			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
					i, 0, reo_dst_ring_size)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL(RNG_ERR "reo_dest_ring [%d]"), i);
				goto fail1;
			}
		}
	} else {
		/* This will be incremented during per pdev ring setup */
		soc->num_reo_dest_rings = 0;
	}

	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
	/* LMAC RxDMA to SW Rings configuration */
	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
		/* Only valid for MCL */
		struct dp_pdev *pdev = soc->pdev_list[0];

		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
			if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
					  RXDMA_DST, 0, i,
					  entries)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL(RNG_ERR "rxdma_err_dst_ring"));
				goto fail1;
			}
		}
	}
	/* TBD: call dp_rx_init to setup Rx SW descriptors */

	/* REO reinjection ring */
	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
			  entries)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_srng_setup failed for reo_reinject_ring"));
		goto fail1;
	}


	/* Rx release ring */
	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
		wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_srng_setup failed for rx_rel_ring"));
		goto fail1;
	}


	/* Rx exception ring */
	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
	if (dp_srng_setup(soc, &soc->reo_exception_ring,
			  REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_srng_setup failed for reo_exception_ring"));
		goto fail1;
	}


	/* REO command and status rings */
	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
		wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for reo_cmd_ring"));
		goto fail1;
	}

	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
	TAILQ_INIT(&soc->rx.reo_cmd_list);
	qdf_spinlock_create(&soc->rx.reo_cmd_lock);

	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
		wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for reo_status_ring"));
		goto fail1;
	}

	qdf_spinlock_create(&soc->ast_lock);
	dp_soc_wds_attach(soc);

	/* Reset the cpu ring map if radio is NSS offloaded */
	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
		dp_soc_reset_cpu_ring_map(soc);
		dp_soc_reset_intr_mask(soc);
	}

	/* Setup HW REO */
	qdf_mem_zero(&reo_params, sizeof(reo_params));

	if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {

		/*
		 * Reo ring remap is not required if both radios
		 * are offloaded to NSS
		 */
		if (!dp_reo_remap_config(soc,
					&reo_params.remap1,
					&reo_params.remap2))
			goto out;

		reo_params.rx_hash_enabled = true;
	}

	/* setup the global rx defrag waitlist */
	/* NOTE(review): the "goto out" above skips this defrag init when
	 * REO remap is not required — confirm that is intentional.
	 */
	TAILQ_INIT(&soc->rx.defrag.waitlist);
	soc->rx.defrag.timeout_ms =
		wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
	soc->rx.flags.defrag_timeout_check =
		wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);

out:
	/*
	 * set the fragment destination ring
	 */
	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);

	hal_reo_setup(soc->hal_soc, &reo_params);

	/* Mark common init done so subsequent pdev attaches skip this path */
	qdf_atomic_set(&soc->cmn_init_done, 1);
	qdf_nbuf_queue_init(&soc->htt_stats.msg);
	return 0;
fail1:
	/*
	 * Cleanup will be done as part of soc_detach, which will
	 * be called on pdev attach failure
	 */
	return QDF_STATUS_E_FAILURE;
}
2573
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002574static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002575
Tallapragada Kalyan16395272018-08-28 12:34:21 +05302576static void dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
Dhanashri Atre14049172016-11-11 18:32:36 -08002577{
2578 struct cdp_lro_hash_config lro_hash;
2579
2580 if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
2581 !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2582 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2583 FL("LRO disabled RX hash disabled"));
2584 return;
2585 }
2586
2587 qdf_mem_zero(&lro_hash, sizeof(lro_hash));
2588
2589 if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx)) {
2590 lro_hash.lro_enable = 1;
2591 lro_hash.tcp_flag = QDF_TCPHDR_ACK;
2592 lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
Houston Hoffman41b912c2017-08-30 14:27:51 -07002593 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
2594 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
Dhanashri Atre14049172016-11-11 18:32:36 -08002595 }
2596
Houston Hoffman41b912c2017-08-30 14:27:51 -07002597 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, FL("enabled"));
2598 qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
Dhanashri Atre14049172016-11-11 18:32:36 -08002599 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2600 LRO_IPV4_SEED_ARR_SZ));
Dhanashri Atre14049172016-11-11 18:32:36 -08002601 qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
2602 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2603 LRO_IPV6_SEED_ARR_SZ));
2604
Houston Hoffman41b912c2017-08-30 14:27:51 -07002605 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
2606 "lro_hash: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
Dhanashri Atre14049172016-11-11 18:32:36 -08002607 lro_hash.lro_enable, lro_hash.tcp_flag,
2608 lro_hash.tcp_flag_mask);
2609
Dhanashri Atre14049172016-11-11 18:32:36 -08002610 qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2611 QDF_TRACE_LEVEL_ERROR,
2612 (void *)lro_hash.toeplitz_hash_ipv4,
2613 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2614 LRO_IPV4_SEED_ARR_SZ));
2615
Dhanashri Atre14049172016-11-11 18:32:36 -08002616 qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2617 QDF_TRACE_LEVEL_ERROR,
2618 (void *)lro_hash.toeplitz_hash_ipv6,
2619 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2620 LRO_IPV6_SEED_ARR_SZ));
2621
2622 qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
2623
2624 if (soc->cdp_soc.ol_ops->lro_hash_config)
2625 (void)soc->cdp_soc.ol_ops->lro_hash_config
Tallapragada Kalyan16395272018-08-28 12:34:21 +05302626 (pdev->ctrl_pdev, &lro_hash);
Dhanashri Atre14049172016-11-11 18:32:36 -08002627}
2628
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002629/*
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002630* dp_rxdma_ring_setup() - configure the RX DMA rings
2631* @soc: data path SoC handle
2632* @pdev: Physical device handle
2633*
2634* Return: 0 - success, > 0 - failure
2635*/
2636#ifdef QCA_HOST2FW_RXBUF_RING
2637static int dp_rxdma_ring_setup(struct dp_soc *soc,
2638 struct dp_pdev *pdev)
2639{
Vivek126db5d2018-07-25 22:05:04 +05302640 struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2641 int max_mac_rings;
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002642 int i;
2643
Vivek126db5d2018-07-25 22:05:04 +05302644 pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2645 max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
2646
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002647 for (i = 0; i < max_mac_rings; i++) {
2648 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +05302649 "%s: pdev_id %d mac_id %d",
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002650 __func__, pdev->pdev_id, i);
2651 if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
Vivek126db5d2018-07-25 22:05:04 +05302652 RXDMA_BUF, 1, i,
2653 wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx))) {
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002654 QDF_TRACE(QDF_MODULE_ID_DP,
2655 QDF_TRACE_LEVEL_ERROR,
2656 FL("failed rx mac ring setup"));
2657 return QDF_STATUS_E_FAILURE;
2658 }
2659 }
2660 return QDF_STATUS_SUCCESS;
2661}
2662#else
/* QCA_HOST2FW_RXBUF_RING disabled: no per-MAC RX buffer rings needed */
static int dp_rxdma_ring_setup(struct dp_soc *soc,
	 struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
2668#endif
Ishank Jain949674c2017-02-27 17:09:29 +05302669
2670/**
2671 * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
2672 * @pdev - DP_PDEV handle
2673 *
2674 * Return: void
2675 */
2676static inline void
2677dp_dscp_tid_map_setup(struct dp_pdev *pdev)
2678{
2679 uint8_t map_id;
Ruchi, Agrawalfea1a842018-08-29 12:14:41 +05302680 struct dp_soc *soc = pdev->soc;
2681
2682 if (!soc)
2683 return;
2684
Ishank Jain949674c2017-02-27 17:09:29 +05302685 for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
Ruchi, Agrawalfea1a842018-08-29 12:14:41 +05302686 qdf_mem_copy(pdev->dscp_tid_map[map_id],
2687 default_dscp_tid_map,
2688 sizeof(default_dscp_tid_map));
Ishank Jain949674c2017-02-27 17:09:29 +05302689 }
Ruchi, Agrawalfea1a842018-08-29 12:14:41 +05302690
2691 for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
2692 hal_tx_set_dscp_tid_map(soc->hal_soc,
2693 default_dscp_tid_map,
2694 map_id);
Ishank Jain949674c2017-02-27 17:09:29 +05302695 }
2696}
2697
Yun Park47e6af82018-01-17 12:15:01 -08002698#ifdef IPA_OFFLOAD
2699/**
2700 * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
2701 * @soc: data path instance
2702 * @pdev: core txrx pdev context
2703 *
2704 * Return: QDF_STATUS_SUCCESS: success
2705 * QDF_STATUS_E_RESOURCES: Error return
2706 */
2707static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2708 struct dp_pdev *pdev)
2709{
Vivek126db5d2018-07-25 22:05:04 +05302710 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2711 int entries;
2712
2713 soc_cfg_ctx = soc->wlan_cfg_ctx;
2714 entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
2715
Yun Park47e6af82018-01-17 12:15:01 -08002716 /* Setup second Rx refill buffer ring */
2717 if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2718 IPA_RX_REFILL_BUF_RING_IDX,
Vivek126db5d2018-07-25 22:05:04 +05302719 pdev->pdev_id,
2720 entries)) {
Yun Park47e6af82018-01-17 12:15:01 -08002721 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2722 FL("dp_srng_setup failed second rx refill ring"));
2723 return QDF_STATUS_E_FAILURE;
2724 }
2725 return QDF_STATUS_SUCCESS;
2726}
2727
2728/**
2729 * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
2730 * @soc: data path instance
2731 * @pdev: core txrx pdev context
2732 *
2733 * Return: void
2734 */
static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					      struct dp_pdev *pdev)
{
	/* Tear down the second Rx refill ring created by
	 * dp_setup_ipa_rx_refill_buf_ring()
	 */
	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
			IPA_RX_REFILL_BUF_RING_IDX);
}
2741
2742#else
Yun Park47e6af82018-01-17 12:15:01 -08002743static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2744 struct dp_pdev *pdev)
2745{
2746 return QDF_STATUS_SUCCESS;
2747}
2748
/* IPA_OFFLOAD disabled: nothing to clean up */
static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					      struct dp_pdev *pdev)
{
}
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002753#endif
Yun Park47e6af82018-01-17 12:15:01 -08002754
Pratik Gandhi4cce3e02018-09-05 19:43:11 +05302755#if !defined(QCA_WIFI_QCA6390) && !defined(DISABLE_MON_CONFIG)
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002756static
2757QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2758{
2759 int mac_id = 0;
2760 int pdev_id = pdev->pdev_id;
Vivek126db5d2018-07-25 22:05:04 +05302761 int entries;
2762 struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2763
2764 pdev_cfg_ctx = pdev->wlan_cfg_ctx;
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002765
2766 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
2767 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
2768
Vivek126db5d2018-07-25 22:05:04 +05302769 entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002770 if (dp_srng_setup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
2771 RXDMA_MONITOR_BUF, 0, mac_for_pdev,
Vivek126db5d2018-07-25 22:05:04 +05302772 entries)) {
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002773 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Vivek126db5d2018-07-25 22:05:04 +05302774 FL(RNG_ERR "rxdma_mon_buf_ring "));
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002775 return QDF_STATUS_E_NOMEM;
2776 }
2777
Vivek126db5d2018-07-25 22:05:04 +05302778 entries = wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002779 if (dp_srng_setup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
2780 RXDMA_MONITOR_DST, 0, mac_for_pdev,
Vivek126db5d2018-07-25 22:05:04 +05302781 entries)) {
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002782 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Vivek126db5d2018-07-25 22:05:04 +05302783 FL(RNG_ERR "rxdma_mon_dst_ring"));
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002784 return QDF_STATUS_E_NOMEM;
2785 }
2786
Vivek126db5d2018-07-25 22:05:04 +05302787 entries = wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002788 if (dp_srng_setup(soc, &pdev->rxdma_mon_status_ring[mac_id],
2789 RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
Vivek126db5d2018-07-25 22:05:04 +05302790 entries)) {
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002791 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Vivek126db5d2018-07-25 22:05:04 +05302792 FL(RNG_ERR "rxdma_mon_status_ring"));
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002793 return QDF_STATUS_E_NOMEM;
2794 }
2795
Vivek126db5d2018-07-25 22:05:04 +05302796 entries = wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002797 if (dp_srng_setup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
2798 RXDMA_MONITOR_DESC, 0, mac_for_pdev,
Vivek126db5d2018-07-25 22:05:04 +05302799 entries)) {
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002800 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Vivek126db5d2018-07-25 22:05:04 +05302801 FL(RNG_ERR "rxdma_mon_desc_ring"));
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002802 return QDF_STATUS_E_NOMEM;
2803 }
2804 }
2805 return QDF_STATUS_SUCCESS;
2806}
2807#else
/* Monitor ring config disabled for this target: nothing to set up */
static QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
Yun Park47e6af82018-01-17 12:15:01 -08002812#endif
2813
/*
 * dp_iterate_update_peer_list() - update peer stats on cal client timer
 * @pdev_hdl: pdev handle
 */
2817#ifdef ATH_SUPPORT_EXT_STAT
void dp_iterate_update_peer_list(void *pdev_hdl)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
	struct dp_vdev *vdev = NULL;
	struct dp_peer *peer = NULL;

	/* Walk every peer of every vdev on this pdev and refresh its
	 * calibration-client stats.
	 * NOTE(review): no vdev/peer list lock is taken here — presumably
	 * the cal client timer context guarantees list stability; confirm.
	 */
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
		DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
			dp_cal_client_update_peer_stats(&peer->stats);
		}
	}
}
2830#else
/* ATH_SUPPORT_EXT_STAT disabled: peer stat iteration is a no-op */
void dp_iterate_update_peer_list(void *pdev_hdl)
{
}
2834#endif
2835
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002836/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002837* dp_pdev_attach_wifi3() - attach txrx pdev
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05302838* @ctrl_pdev: Opaque PDEV object
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002839* @txrx_soc: Datapath SOC handle
2840* @htc_handle: HTC handle for host-target interface
2841* @qdf_osdev: QDF OS device
2842* @pdev_id: PDEV ID
2843*
2844* Return: DP PDEV handle on success, NULL on failure
2845*/
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002846static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
Akshay Kosigi1a9c6d12018-04-26 00:54:23 +05302847 struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
Leo Chang5ea93a42016-11-03 12:39:49 -07002848 HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002849{
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302850 int tx_ring_size;
2851 int tx_comp_ring_size;
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05302852 int reo_dst_ring_size;
Vivek126db5d2018-07-25 22:05:04 +05302853 int entries;
2854 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2855 int nss_cfg;
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302856
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002857 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2858 struct dp_pdev *pdev = qdf_mem_malloc(sizeof(*pdev));
2859
2860 if (!pdev) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302861 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2862 FL("DP PDEV memory allocation failed"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002863 goto fail0;
2864 }
2865
Vivek126db5d2018-07-25 22:05:04 +05302866 soc_cfg_ctx = soc->wlan_cfg_ctx;
2867 pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
Vijay Pamidipatid41d6d62016-10-19 21:19:00 +05302868
2869 if (!pdev->wlan_cfg_ctx) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302870 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2871 FL("pdev cfg_attach failed"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302872
2873 qdf_mem_free(pdev);
Vijay Pamidipatid41d6d62016-10-19 21:19:00 +05302874 goto fail0;
2875 }
2876
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05302877 /*
2878 * set nss pdev config based on soc config
2879 */
Vivek126db5d2018-07-25 22:05:04 +05302880 nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05302881 wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
Vivek126db5d2018-07-25 22:05:04 +05302882 (nss_cfg & (1 << pdev_id)));
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05302883
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002884 pdev->soc = soc;
Akshay Kosigi1a9c6d12018-04-26 00:54:23 +05302885 pdev->ctrl_pdev = ctrl_pdev;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002886 pdev->pdev_id = pdev_id;
Chaithanya Garrepalli1f64b242018-09-21 22:50:23 +05302887 pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002888 soc->pdev_list[pdev_id] = pdev;
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08002889 soc->pdev_count++;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002890
2891 TAILQ_INIT(&pdev->vdev_list);
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05302892 qdf_spinlock_create(&pdev->vdev_list_lock);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002893 pdev->vdev_count = 0;
2894
Pamidipati, Vijay9c9a2872017-05-31 10:06:34 +05302895 qdf_spinlock_create(&pdev->tx_mutex);
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05302896 qdf_spinlock_create(&pdev->neighbour_peer_mutex);
2897 TAILQ_INIT(&pdev->neighbour_peers_list);
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05302898 pdev->neighbour_peers_added = false;
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05302899
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002900 if (dp_soc_cmn_setup(soc)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302901 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2902 FL("dp_soc_cmn_setup failed"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302903 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002904 }
2905
2906 /* Setup per PDEV TCL rings if configured */
2907 if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302908 tx_ring_size =
Vivek126db5d2018-07-25 22:05:04 +05302909 wlan_cfg_tx_ring_size(soc_cfg_ctx);
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302910 tx_comp_ring_size =
Vivek126db5d2018-07-25 22:05:04 +05302911 wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302912
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002913 if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302914 pdev_id, pdev_id, tx_ring_size)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302915 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2916 FL("dp_srng_setup failed for tcl_data_ring"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302917 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002918 }
2919 if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302920 WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302921 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2922 FL("dp_srng_setup failed for tx_comp_ring"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302923 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002924 }
2925 soc->num_tcl_data_rings++;
2926 }
2927
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302928 /* Tx specific init */
2929 if (dp_tx_pdev_attach(pdev)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302930 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2931 FL("dp_tx_pdev_attach failed"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302932 goto fail1;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302933 }
2934
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05302935 reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002936 /* Setup per PDEV REO rings if configured */
Vivek126db5d2018-07-25 22:05:04 +05302937 if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002938 if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05302939 pdev_id, pdev_id, reo_dst_ring_size)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302940 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2941 FL("dp_srng_setup failed for reo_dest_ringn"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302942 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002943 }
2944 soc->num_reo_dest_rings++;
2945
2946 }
Dhanashri Atre7351d172016-10-12 13:08:09 -07002947 if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
Vivek126db5d2018-07-25 22:05:04 +05302948 wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx))) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302949 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2950 FL("dp_srng_setup failed rx refill ring"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302951 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002952 }
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002953
2954 if (dp_rxdma_ring_setup(soc, pdev)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302955 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002956 FL("RXDMA ring config failed"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302957 goto fail1;
Dhanashri Atre7351d172016-10-12 13:08:09 -07002958 }
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002959
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002960 if (dp_mon_rings_setup(soc, pdev)) {
2961 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2962 FL("MONITOR rings setup failed"));
2963 goto fail1;
Kai Chen6eca1a62017-01-12 10:17:53 -08002964 }
2965
Vivek126db5d2018-07-25 22:05:04 +05302966 entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002967 if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2968 if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
Vivek126db5d2018-07-25 22:05:04 +05302969 0, pdev_id,
2970 entries)) {
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002971 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Vivek126db5d2018-07-25 22:05:04 +05302972 FL(RNG_ERR "rxdma_err_dst_ring"));
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002973 goto fail1;
2974 }
Pramod Simhae382ff82017-06-05 18:09:26 -07002975 }
2976
Yun Park47e6af82018-01-17 12:15:01 -08002977 if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
Yun Park601d0d82017-08-28 21:49:31 -07002978 goto fail1;
Yun Park601d0d82017-08-28 21:49:31 -07002979
Yun Parkfde6b9e2017-06-26 17:13:11 -07002980 if (dp_ipa_ring_resource_setup(soc, pdev))
2981 goto fail1;
2982
2983 if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
Yun Park601d0d82017-08-28 21:49:31 -07002984 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2985 FL("dp_ipa_uc_attach failed"));
Yun Parkfde6b9e2017-06-26 17:13:11 -07002986 goto fail1;
2987 }
2988
Leo Chang5ea93a42016-11-03 12:39:49 -07002989 /* Rx specific init */
2990 if (dp_rx_pdev_attach(pdev)) {
Yun Parkfde6b9e2017-06-26 17:13:11 -07002991 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Yun Park601d0d82017-08-28 21:49:31 -07002992 FL("dp_rx_pdev_attach failed"));
Yun Parkfde6b9e2017-06-26 17:13:11 -07002993 goto fail0;
Leo Chang5ea93a42016-11-03 12:39:49 -07002994 }
Ishank Jainbc2d91f2017-01-03 18:14:54 +05302995 DP_STATS_INIT(pdev);
Leo Chang5ea93a42016-11-03 12:39:49 -07002996
nobeljd124b742017-10-16 11:59:12 -07002997 /* Monitor filter init */
2998 pdev->mon_filter_mode = MON_FILTER_ALL;
2999 pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
3000 pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
3001 pdev->fp_data_filter = FILTER_DATA_ALL;
3002 pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
3003 pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
3004 pdev->mo_data_filter = FILTER_DATA_ALL;
3005
Leo Chang5ea93a42016-11-03 12:39:49 -07003006 dp_local_peer_id_pool_init(pdev);
Sravan Kumar Kairamf1e07662018-06-18 21:36:14 +05303007
Ishank Jain949674c2017-02-27 17:09:29 +05303008 dp_dscp_tid_map_setup(pdev);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003009
Kai Chen6eca1a62017-01-12 10:17:53 -08003010 /* Rx monitor mode specific init */
3011 if (dp_rx_pdev_mon_attach(pdev)) {
3012 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +05303013 "dp_rx_pdev_attach failed");
Keyur Parekhfad6d082017-05-07 08:54:47 -07003014 goto fail1;
3015 }
3016
3017 if (dp_wdi_event_attach(pdev)) {
3018 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +05303019 "dp_wdi_evet_attach failed");
Keyur Parekhfad6d082017-05-07 08:54:47 -07003020 goto fail1;
Kai Chen6eca1a62017-01-12 10:17:53 -08003021 }
3022
Om Prakash Tripathia7fb93f2017-06-27 18:41:41 +05303023 /* set the reo destination during initialization */
3024 pdev->reo_dest = pdev->pdev_id + 1;
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05303025
Anish Natarajb9e7d012018-02-16 00:38:10 +05303026 /*
3027 * initialize ppdu tlv list
3028 */
3029 TAILQ_INIT(&pdev->ppdu_info_list);
3030 pdev->tlv_count = 0;
3031 pdev->list_depth = 0;
3032
Ruchi, Agrawal2cbca3b2018-06-20 19:31:03 +05303033 qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
3034
3035 pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
3036 sizeof(struct cdp_tx_sojourn_stats), 0, 4,
3037 TRUE);
3038
Ruchi, Agrawal234753c2018-06-28 14:53:37 +05303039 /* initlialize cal client timer */
3040 dp_cal_client_attach(&pdev->cal_client_ctx, pdev, pdev->soc->osdev,
3041 &dp_iterate_update_peer_list);
3042
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08003043 return (struct cdp_pdev *)pdev;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003044
Vijay Pamidipatib775e132016-10-19 21:19:52 +05303045fail1:
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08003046 dp_pdev_detach_wifi3((struct cdp_pdev *)pdev, 0);
Vijay Pamidipatib775e132016-10-19 21:19:52 +05303047
3048fail0:
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003049 return NULL;
3050}
3051
/*
 * dp_rxdma_ring_cleanup() - Free the per-MAC host2FW RX buffer rings
 * @soc: data path SoC handle
 * @pdev: Physical device handle
 *
 * Counterpart of dp_rxdma_ring_setup(); also releases the monitor reap
 * timer that dp_rxdma_ring_config() initializes on host2FW targets.
 *
 * Return: void
 */
#ifdef QCA_HOST2FW_RXBUF_RING
static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
	 struct dp_pdev *pdev)
{
	int max_mac_rings =
		 wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
	int i;

	/* Clamp to the number of rings this build can provision */
	max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
		 max_mac_rings : MAX_RX_MAC_RINGS;
	/*
	 * Fix: iterate over the clamped ring count. The original loop
	 * bound was MAX_RX_MAC_RINGS, which silently discarded the
	 * max_mac_rings value computed above.
	 */
	for (i = 0; i < max_mac_rings; i++)
		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
			 RXDMA_BUF, 1);

	/* Timer created in dp_rxdma_ring_config(); free it here */
	qdf_timer_free(&soc->mon_reap_timer);
}
#else
static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
	 struct dp_pdev *pdev)
{
	/* No host2FW MAC rings on this target; nothing to free */
}
#endif
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05303081
3082/*
3083 * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
3084 * @pdev: device object
3085 *
3086 * Return: void
3087 */
3088static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
3089{
3090 struct dp_neighbour_peer *peer = NULL;
3091 struct dp_neighbour_peer *temp_peer = NULL;
3092
3093 TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
3094 neighbour_peer_list_elem, temp_peer) {
3095 /* delete this peer from the list */
3096 TAILQ_REMOVE(&pdev->neighbour_peers_list,
3097 peer, neighbour_peer_list_elem);
3098 qdf_mem_free(peer);
3099 }
3100
3101 qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
3102}
3103
Anish Natarajcf526b72018-03-26 15:55:30 +05303104/**
3105* dp_htt_ppdu_stats_detach() - detach stats resources
3106* @pdev: Datapath PDEV handle
3107*
3108* Return: void
3109*/
3110static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
3111{
3112 struct ppdu_info *ppdu_info, *ppdu_info_next;
3113
3114 TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3115 ppdu_info_list_elem, ppdu_info_next) {
3116 if (!ppdu_info)
3117 break;
3118 qdf_assert_always(ppdu_info->nbuf);
3119 qdf_nbuf_free(ppdu_info->nbuf);
3120 qdf_mem_free(ppdu_info);
3121 }
3122}
3123
Pratik Gandhi4cce3e02018-09-05 19:43:11 +05303124#if !defined(QCA_WIFI_QCA6390) && !defined(DISABLE_MON_CONFIG)
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07003125static
3126void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3127 int mac_id)
3128{
3129 dp_srng_cleanup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
3130 RXDMA_MONITOR_BUF, 0);
3131 dp_srng_cleanup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
3132 RXDMA_MONITOR_DST, 0);
3133
3134 dp_srng_cleanup(soc, &pdev->rxdma_mon_status_ring[mac_id],
3135 RXDMA_MONITOR_STATUS, 0);
3136
3137 dp_srng_cleanup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
3138 RXDMA_MONITOR_DESC, 0);
3139 dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
3140 RXDMA_DST, 0);
3141}
3142#else
3143static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3144 int mac_id)
3145{
3146}
3147#endif
3148
/*
* dp_pdev_detach_wifi3() - detach txrx pdev
* @txrx_pdev: Datapath PDEV handle
* @force: Force detach (accepted for API symmetry; not referenced in this
*         body - TODO confirm whether callers rely on it)
*
* Tears down everything dp_pdev_attach_wifi3() created, in roughly the
* reverse order: WDI events, TX, per-pdev TCL/WBM rings, pktlog, RX and
* monitor RX, neighbour peers, IPA, REO/refill/RXDMA rings, monitor
* rings, queued invalid-peer MSDUs, ppdu stats, and finally the pdev
* bookkeeping itself.
*/
static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	struct dp_soc *soc = pdev->soc;
	qdf_nbuf_t curr_nbuf, next_nbuf;
	int mac_id;

	dp_wdi_event_detach(pdev);

	dp_tx_pdev_detach(pdev);

	/* Per-pdev TCL/WBM rings exist only in this configuration */
	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
			TCL_DATA, pdev->pdev_id);
		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
			WBM2SW_RELEASE, pdev->pdev_id);
	}

	dp_pktlogmod_exit(pdev);

	dp_rx_pdev_detach(pdev);
	dp_rx_pdev_mon_detach(pdev);
	dp_neighbour_peers_detach(pdev);
	qdf_spinlock_destroy(&pdev->tx_mutex);
	qdf_spinlock_destroy(&pdev->vdev_list_lock);

	dp_ipa_uc_detach(soc, pdev);

	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);

	/* Cleanup per PDEV REO rings if configured */
	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
			REO_DST, pdev->pdev_id);
	}

	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);

	dp_rxdma_ring_cleanup(soc, pdev);

	/* Monitor and error-destination rings, one set per RXDMA MAC */
	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		dp_mon_ring_deinit(soc, pdev, mac_id);
		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
			RXDMA_DST, 0);
	}

	/* Free any MSDUs still queued for unknown/invalid peers */
	curr_nbuf = pdev->invalid_peer_head_msdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	dp_htt_ppdu_stats_detach(pdev);

	qdf_nbuf_free(pdev->sojourn_buf);

	dp_cal_client_detach(&pdev->cal_client_ctx);
	/* Unhook from the SOC before freeing the pdev memory */
	soc->pdev_list[pdev->pdev_id] = NULL;
	soc->pdev_count--;
	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
	qdf_mem_free(pdev->dp_txrx_handle);
	qdf_mem_free(pdev);
}
3219
/*
 * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
 * @soc: DP SOC handle
 *
 * Drains the deferred-free list under its lock, unmapping and freeing
 * each queued REO HW queue descriptor, then destroys the list and the
 * lock themselves.
 */
static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
{
	struct reo_desc_list_node *desc;
	struct dp_rx_tid *rx_tid;

	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
	/* Pop entries until the freelist reports empty */
	while (qdf_list_remove_front(&soc->reo_desc_freelist,
		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
		rx_tid = &desc->rx_tid;
		/* Undo the DMA mapping of the HW queue descriptor */
		qdf_mem_unmap_nbytes_single(soc->osdev,
			rx_tid->hw_qdesc_paddr,
			QDF_DMA_BIDIRECTIONAL,
			rx_tid->hw_qdesc_alloc_size);
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		qdf_mem_free(desc);
	}
	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
	qdf_list_destroy(&soc->reo_desc_freelist);
	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
}
3244
/*
 * dp_soc_detach_wifi3() - Detach txrx SOC
 * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
 *
 * Full SOC teardown: stops stats work, detaches every remaining pdev,
 * releases all common SRNGs, HTT, REO state, config context and locks,
 * and finally frees the SOC itself.
 */
static void dp_soc_detach_wifi3(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;

	/* Mark common init undone so no further activity is started */
	qdf_atomic_set(&soc->cmn_init_done, 0);

	/* Drain then disable the HTT stats worker before freeing state */
	qdf_flush_work(&soc->htt_stats.work);
	qdf_disable_work(&soc->htt_stats.work);

	/* Free pending htt stats messages */
	qdf_nbuf_queue_free(&soc->htt_stats.msg);

	/* Detach any pdevs still registered (force = 1) */
	for (i = 0; i < MAX_PDEV_CNT; i++) {
		if (soc->pdev_list[i])
			dp_pdev_detach_wifi3(
				(struct cdp_pdev *)soc->pdev_list[i], 1);
	}

	dp_peer_find_detach(soc);

	/* TBD: Call Tx and Rx cleanup functions to free buffers and
	 * SW descriptors
	 */

	/* Free the ring memories */
	/* Common rings */
	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);

	dp_tx_soc_detach(soc);
	/* Tx data rings (only when not allocated per-pdev) */
	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		for (i = 0; i < soc->num_tcl_data_rings; i++) {
			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
				TCL_DATA, i);
			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
				WBM2SW_RELEASE, i);
		}
	}

	/* TCL command and status rings */
	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);

	/* Rx data rings (only when not allocated per-pdev) */
	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
		soc->num_reo_dest_rings =
			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
		for (i = 0; i < soc->num_reo_dest_rings; i++) {
			/* TODO: Get number of rings and ring sizes
			 * from wlan_cfg
			 */
			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
				REO_DST, i);
		}
	}
	/* REO reinjection ring */
	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);

	/* Rx release ring */
	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);

	/* Rx exception ring */
	/* TODO: Better to store ring_type and ring_num in
	 * dp_srng during setup
	 */
	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);

	/* REO command and status rings */
	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
	dp_hw_link_desc_pool_cleanup(soc);

	qdf_spinlock_destroy(&soc->peer_ref_mutex);
	qdf_spinlock_destroy(&soc->htt_stats.lock);

	htt_soc_detach(soc->htt_handle);

	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);

	/* Flush outstanding REO commands before destroying their lock */
	dp_reo_cmdlist_destroy(soc);
	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
	dp_reo_desc_freelist_destroy(soc);

	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);

	dp_soc_wds_detach(soc);
	qdf_spinlock_destroy(&soc->ast_lock);

	qdf_mem_free(soc);
}
3340
#if !defined(QCA_WIFI_QCA6390) && !defined(DISABLE_MON_CONFIG)
/*
 * dp_mon_htt_srng_setup() - Register one MAC's monitor rings with HTT
 * @soc: data path SoC handle
 * @pdev: Physical device handle
 * @mac_id: ring index within the pdev
 * @mac_for_pdev: HTT MAC id this pdev/mac pair maps to
 *
 * Programs the four monitor-mode SRNGs (buf, dst, status, desc) of the
 * given MAC into the target via htt_srng_setup().
 */
static void dp_mon_htt_srng_setup(struct dp_soc *soc,
				  struct dp_pdev *pdev,
				  int mac_id,
				  int mac_for_pdev)
{
	htt_srng_setup(soc->htt_handle, mac_for_pdev,
		       pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
		       RXDMA_MONITOR_BUF);

	htt_srng_setup(soc->htt_handle, mac_for_pdev,
		       pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
		       RXDMA_MONITOR_DST);

	htt_srng_setup(soc->htt_handle, mac_for_pdev,
		       pdev->rxdma_mon_status_ring[mac_id].hal_srng,
		       RXDMA_MONITOR_STATUS);

	htt_srng_setup(soc->htt_handle, mac_for_pdev,
		       pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
		       RXDMA_MONITOR_DESC);
}
#else
/* Monitor config disabled on this target; nothing to program */
static void dp_mon_htt_srng_setup(struct dp_soc *soc,
				  struct dp_pdev *pdev,
				  int mac_id,
				  int mac_for_pdev)
{
}
#endif
/*
 * dp_rxdma_ring_config() - configure the RX DMA rings
 *
 * This function is used to configure the MAC rings.
 * On MCL host provides buffers in Host2FW ring
 * FW refills (copies) buffers to the ring and updates
 * ring_idx in register
 *
 * @soc: data path SoC handle
 *
 * Return: void
 */
#ifdef QCA_HOST2FW_RXBUF_RING
static void dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (pdev) {
			int mac_id;
			bool dbs_enable = 0;
			int max_mac_rings =
				 wlan_cfg_get_num_mac_rings
				(pdev->wlan_cfg_ctx);

			/* Host-refilled buffer ring (MAC id 0) */
			htt_srng_setup(soc->htt_handle, 0,
				 pdev->rx_refill_buf_ring.hal_srng,
				 RXDMA_BUF);

			/* Second refill ring exists only when set up
			 * (e.g. IPA); skip when hal_srng is NULL
			 */
			if (pdev->rx_refill_buf_ring2.hal_srng)
				htt_srng_setup(soc->htt_handle, 0,
					pdev->rx_refill_buf_ring2.hal_srng,
					RXDMA_BUF);

			/* Query control plane for DBS 2x2 capability;
			 * callback is optional
			 */
			if (soc->cdp_soc.ol_ops->
				is_hw_dbs_2x2_capable) {
				dbs_enable = soc->cdp_soc.ol_ops->
					is_hw_dbs_2x2_capable(soc->ctrl_psoc);
			}

			if (dbs_enable) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
				QDF_TRACE_LEVEL_ERROR,
					FL("DBS enabled max_mac_rings %d"),
					 max_mac_rings);
			} else {
				/* Without DBS only one MAC ring is used */
				max_mac_rings = 1;
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					 QDF_TRACE_LEVEL_ERROR,
					 FL("DBS disabled, max_mac_rings %d"),
					 max_mac_rings);
			}

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				 FL("pdev_id %d max_mac_rings %d"),
				 pdev->pdev_id, max_mac_rings);

			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev = dp_get_mac_id_for_pdev(
							mac_id, pdev->pdev_id);

				QDF_TRACE(QDF_MODULE_ID_TXRX,
					 QDF_TRACE_LEVEL_ERROR,
					 FL("mac_id %d"), mac_for_pdev);
				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					 pdev->rx_mac_buf_ring[mac_id]
						.hal_srng,
					 RXDMA_BUF);
				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					pdev->rxdma_err_dst_ring[mac_id]
						.hal_srng,
					RXDMA_DST);

				/* Configure monitor mode rings */
				dp_mon_htt_srng_setup(soc, pdev, mac_id,
						      mac_for_pdev);

			}
		}
	}

	/*
	 * Timer to reap rxdma status rings.
	 * Needed until we enable ppdu end interrupts
	 */
	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
		dp_service_mon_rings, (void *)soc,
		QDF_TIMER_TYPE_WAKE_APPS);
	soc->reap_timer_init = 1;
}
#else
/* This is only for WIN */
static void dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	int mac_id;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (pdev == NULL)
			continue;

		/* One refill/monitor/error ring set per RXDMA MAC */
		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);

			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
#ifndef DISABLE_MON_CONFIG
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
				RXDMA_MONITOR_BUF);
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
				RXDMA_MONITOR_DST);
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rxdma_mon_status_ring[mac_id].hal_srng,
				RXDMA_MONITOR_STATUS);
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
				RXDMA_MONITOR_DESC);
#endif
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rxdma_err_dst_ring[mac_id].hal_srng,
				RXDMA_DST);
		}
	}
}
#endif
3502
/*
 * dp_soc_attach_target_wifi3() - SOC initialization in the target
 * @txrx_soc: Datapath SOC handle
 *
 * Programs the target (HTT attach, RXDMA ring config), resets SOC
 * stats and creates the HTT stats work queue.
 *
 * Return: always 0
 */
static int dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;

	/* NOTE(review): return value of htt_soc_attach_target() is not
	 * checked here - confirm whether failures need propagation
	 */
	htt_soc_attach_target(soc->htt_handle);

	dp_rxdma_ring_config(soc);

	DP_STATS_INIT(soc);

	/* initialize work queue for stats processing */
	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);

	return 0;
}
3522
3523/*
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05303524 * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
3525 * @txrx_soc: Datapath SOC handle
3526 */
3527static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
3528{
3529 struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3530 return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
3531}
3532/*
3533 * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
3534 * @txrx_soc: Datapath SOC handle
3535 * @nss_cfg: nss config
3536 */
3537static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
3538{
3539 struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
Aniruddha Paul9d7dc272018-02-11 19:40:41 +05303540 struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
3541
3542 wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
3543
3544 /*
3545 * TODO: masked out based on the per offloaded radio
3546 */
3547 if (config == dp_nss_cfg_dbdc) {
3548 wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
3549 wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
3550 wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
3551 wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
3552 }
3553
Aditya Sathishded018e2018-07-02 16:25:21 +05303554 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3555 FL("nss-wifi<0> nss config is enabled"));
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05303556}
/*
* dp_vdev_attach_wifi3() - attach txrx vdev
* @txrx_pdev: Datapath PDEV handle
* @vdev_mac_addr: MAC address of the virtual interface
* @vdev_id: VDEV Id
* @wlan_op_mode: VDEV operating mode
*
* Allocates and initializes a DP vdev, links it into the pdev's vdev
* list (except monitor mode, which returns early), sets up TX, LRO and
* poll-timer state, and for STA mode creates the self-peer.
*
* Return: DP VDEV handle on success, NULL on failure
*/
static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("DP VDEV memory allocation failed"));
		goto fail0;
	}

	vdev->pdev = pdev;
	vdev->vdev_id = vdev_id;
	vdev->opmode = op_mode;
	vdev->osdev = soc->osdev;

	/* OS interface callbacks are registered later via
	 * dp_vdev_register_wifi3()
	 */
	vdev->osif_rx = NULL;
	vdev->osif_rsim_rx_decap = NULL;
	vdev->osif_get_key = NULL;
	vdev->osif_rx_mon = NULL;
	vdev->osif_tx_free_ext = NULL;
	vdev->osif_vdev = NULL;

	vdev->delete.pending = 0;
	vdev->safemode = 0;
	/* Drop unencrypted frames by default until security is set */
	vdev->drop_unenc = 1;
	vdev->sec_type = cdp_sec_type_none;
#ifdef notyet
	vdev->filters_num = 0;
#endif

	qdf_mem_copy(
		&vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);

	/* TODO: Initialize default HTT meta data that will be used in
	 * TCL descriptors for packets transmitted from this VDEV
	 */

	TAILQ_INIT(&vdev->peer_list);

	/* Monitor vdevs skip TX/list/LRO setup entirely */
	if (wlan_op_mode_monitor == vdev->opmode)
		return (struct cdp_vdev *)vdev;

	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->dscp_tid_map_id = 0;
	vdev->mcast_enhancement_en = 0;
	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	/* add this vdev into the pdev's list */
	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	pdev->vdev_count++;

	dp_tx_vdev_attach(vdev);


	/* In poll mode, start the interrupt timer with the first vdev */
	if ((soc->intr_mode == DP_INTR_POLL) &&
			wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
		if (pdev->vdev_count == 1)
			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
	}

	/* LRO hash is a per-pdev setup; do it once, on the first vdev */
	if (pdev->vdev_count == 1)
		dp_lro_hash_setup(soc, pdev);

	/* LRO */
	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
		wlan_op_mode_sta == vdev->opmode)
		vdev->lro_enable = true;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		 "LRO: vdev_id %d lro_enable %d", vdev_id, vdev->lro_enable);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
	DP_STATS_INIT(vdev);

	/* STA mode: create the vdev's self-peer */
	if (wlan_op_mode_sta == vdev->opmode)
		dp_peer_create_wifi3((struct cdp_vdev *)vdev,
				     vdev->mac_addr.raw,
				     NULL);

	return (struct cdp_vdev *)vdev;

fail0:
	return NULL;
}
3657
/**
 * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
 * @vdev_handle: Datapath VDEV handle
 * @osif_vdev: OSIF vdev handle, cached for upcalls
 * @ctrl_vdev: UMAC vdev handle
 * @txrx_ops: Tx and Rx operations; rx-side callbacks are consumed here,
 *            tx-side entry points are filled in for the caller
 *
 * Return: None
 */
static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
	void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
	struct ol_txrx_ops *txrx_ops)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	vdev->osif_vdev = osif_vdev;
	vdev->ctrl_vdev = ctrl_vdev;
	vdev->osif_rx = txrx_ops->rx.rx;
	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
	vdev->osif_get_key = txrx_ops->get_key;
	vdev->osif_rx_mon = txrx_ops->rx.mon;
	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
#ifdef notyet
#if ATH_SUPPORT_WAPI
	vdev->osif_check_wai = txrx_ops->rx.wai_check;
#endif
#endif
#ifdef UMAC_SUPPORT_PROXY_ARP
	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
#endif
	vdev->me_convert = txrx_ops->me_convert;

	/* hand the datapath tx entry points back to the caller;
	 * mesh vdevs get the mesh-aware send path
	 */
	if (vdev->mesh_vdev)
		txrx_ops->tx.tx = dp_tx_send_mesh;
	else
		txrx_ops->tx.tx = dp_tx_send;

	txrx_ops->tx.tx_exception = dp_tx_send_exception;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
		"DP Vdev Register success");
}
3701
Vinay Adella4ca1bf62018-02-26 11:03:05 +05303702/**
3703 * dp_vdev_flush_peers() - Forcibily Flush peers of vdev
3704 * @vdev: Datapath VDEV handle
3705 *
3706 * Return: void
3707 */
3708static void dp_vdev_flush_peers(struct dp_vdev *vdev)
3709{
3710 struct dp_pdev *pdev = vdev->pdev;
3711 struct dp_soc *soc = pdev->soc;
3712 struct dp_peer *peer;
3713 uint16_t *peer_ids;
3714 uint8_t i = 0, j = 0;
3715
3716 peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
3717 if (!peer_ids) {
3718 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3719 "DP alloc failure - unable to flush peers");
3720 return;
3721 }
3722
3723 qdf_spin_lock_bh(&soc->peer_ref_mutex);
3724 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3725 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3726 if (peer->peer_ids[i] != HTT_INVALID_PEER)
3727 if (j < soc->max_peers)
3728 peer_ids[j++] = peer->peer_ids[i];
3729 }
3730 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3731
3732 for (i = 0; i < j ; i++)
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05303733 dp_rx_peer_unmap_handler(soc, peer_ids[i], vdev->vdev_id,
3734 NULL, 0);
Vinay Adella4ca1bf62018-02-26 11:03:05 +05303735
3736 qdf_mem_free(peer_ids);
3737
3738 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3739 FL("Flushed peers for vdev object %pK "), vdev);
3740}
3741
/*
 * dp_vdev_detach_wifi3() - Detach txrx vdev
 * @vdev_handle: Datapath VDEV handle
 * @callback: Callback OL_IF on completion of detach
 * @cb_context: Callback context
 *
 * Tears down the vdev and frees it. If peers are still attached, the
 * detach is only recorded as pending (vdev->delete.*) and the stored
 * callback/context are invoked later, once the last peer is gone,
 * instead of from here.
 *
 * Return: void
 */
static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
	ol_txrx_vdev_delete_cb callback, void *cb_context)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_neighbour_peer *peer = NULL;

	/* preconditions */
	qdf_assert(vdev);

	/* monitor vdevs were never linked into pdev->vdev_list at attach
	 * and have no peers; free them directly
	 */
	if (wlan_op_mode_monitor == vdev->opmode)
		goto free_vdev;

	/* a STA vdev owns a self (bss) peer created at attach time */
	if (wlan_op_mode_sta == vdev->opmode)
		dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);

	/*
	 * If Target is hung, flush all peers before detaching vdev
	 * this will free all references held due to missing
	 * unmap commands from Target
	 */
	if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
		dp_vdev_flush_peers(vdev);

	/*
	 * Use peer_ref_mutex while accessing peer_list, in case
	 * a peer is in the process of being removed from the list.
	 */
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	/* check that the vdev has no peers allocated */
	if (!TAILQ_EMPTY(&vdev->peer_list)) {
		/* debug print - will be removed later */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
			FL("not deleting vdev object %pK (%pM)"
			"until deletion finishes for all its peers"),
			vdev, vdev->mac_addr.raw);
		/* defer: record callback/context so the deletion can be
		 * completed when the final peer reference drops
		 */
		vdev->delete.pending = 1;
		vdev->delete.callback = callback;
		vdev->delete.context = cb_context;
		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
		return;
	}
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);

	/* sanity: no neighbour (NAC) entry may still reference this vdev */
	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
		      neighbour_peer_list_elem) {
		QDF_ASSERT(peer->vdev != vdev);
	}
	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

	/* tx teardown and list removal are done atomically w.r.t. other
	 * walkers of pdev->vdev_list
	 */
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	dp_tx_vdev_detach(vdev);
	/* remove the vdev from its parent pdev's list */
	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);

	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
free_vdev:
	qdf_mem_free(vdev);

	if (callback)
		callback(cb_context);
}
3816
/*
 * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
 * @soc - datapath soc handle
 * @peer - datapath peer handle
 *
 * Deletes every AST entry attached to @peer under soc->ast_lock and
 * leaves the peer's AST list re-initialized to empty. The self AST
 * entry is among those deleted, so its cached pointer is cleared too.
 * Compiled to a no-op when FEATURE_AST is disabled.
 */
#ifdef FEATURE_AST
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
		struct dp_peer *peer)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	qdf_spin_lock_bh(&soc->ast_lock);
	/* iterate with a temp pointer so entries can be removed mid-walk */
	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
		dp_peer_del_ast(soc, ast_entry);

	/* self entry was freed above; drop the stale pointer */
	peer->self_ast_entry = NULL;
	TAILQ_INIT(&peer->ast_entry_list);
	qdf_spin_unlock_bh(&soc->ast_lock);
}
#else
/* FEATURE_AST disabled: nothing to clean up */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
		struct dp_peer *peer)
{
}
#endif
3844
#if ATH_SUPPORT_WRAP
/*
 * dp_peer_can_reuse() - find an existing bss peer that can be reused
 * @vdev: Datapath VDEV the peer is being (re)created on
 * @peer_mac_addr: MAC address of the peer being created
 *
 * Looks up @peer_mac_addr in the soc-wide peer hash. When a qualifying
 * bss peer is found it is returned with the reference taken by
 * dp_peer_find_hash_find() still held (handed to the caller); any
 * non-qualifying match has that reference released here.
 *
 * With ATH_SUPPORT_WRAP any bss peer qualifies; in the non-WRAP build
 * below it must also belong to this vdev_id. NOTE(review): presumably
 * WRAP vdevs may share the bss address across vdevs - confirm.
 *
 * Return: reusable peer (one reference held) or NULL
 */
static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
						uint8_t *peer_mac_addr)
{
	struct dp_peer *peer;

	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
				      0, vdev->vdev_id);
	if (!peer)
		return NULL;

	if (peer->bss_peer)
		return peer;

	/* not reusable: release the lookup reference */
	dp_peer_unref_delete(peer);
	return NULL;
}
#else
static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
						uint8_t *peer_mac_addr)
{
	struct dp_peer *peer;

	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
				      0, vdev->vdev_id);
	if (!peer)
		return NULL;

	/* reuse only this vdev's own bss peer */
	if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
		return peer;

	/* not reusable: release the lookup reference */
	dp_peer_unref_delete(peer);
	return NULL;
}
#endif
3880
/*
 * dp_peer_ast_handle_roam_del() - remove a stale WDS AST entry on roam
 * @soc: datapath soc handle
 * @peer_mac_addr: MAC address of the (re)joining peer
 *
 * When a STA roams between a repeater AP and the root AP its address
 * may still exist as a WDS-learned AST entry (next_hop set) with no
 * matching peer. Delete such an entry, under ast_lock, before a fresh
 * peer is created. Compiled out when FEATURE_AST is off or the HKv1
 * workaround is in effect.
 */
#if defined(FEATURE_AST) && !defined(AST_HKV1_WORKAROUND)
static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
		uint8_t *peer_mac_addr)
{
	struct dp_ast_entry *ast_entry;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
	/* next_hop set => the entry was learned as a WDS entry */
	if (ast_entry && ast_entry->next_hop)
		dp_peer_del_ast(soc, ast_entry);
	qdf_spin_unlock_bh(&soc->ast_lock);
}
#else
static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
		uint8_t *peer_mac_addr)
{
}
#endif
3899
/*
 * dp_peer_create_wifi3() - attach txrx peer
 * @vdev_handle: Datapath VDEV handle
 * @peer_mac_addr: Peer MAC address
 * @ctrl_peer: UMAC control-path peer handle, cached in the DP peer
 *
 * Creates (or revives) the datapath peer for @peer_mac_addr on this
 * vdev: allocates and zeroes the peer, installs its AST entry, takes
 * the initial reference, links it into the vdev peer list and the soc
 * peer hash, and allocates a local peer id.
 *
 * Return: DP peer handle on success, NULL on failure
 */
static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
		uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
{
	struct dp_peer *peer;
	int i;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;

	/* preconditions */
	qdf_assert(vdev);
	qdf_assert(peer_mac_addr);

	pdev = vdev->pdev;
	soc = pdev->soc;

	/*
	 * If a peer entry with given MAC address already exists,
	 * reuse the peer and reset the state of peer.
	 */
	peer = dp_peer_can_reuse(vdev, peer_mac_addr);

	if (peer) {
		qdf_atomic_init(&peer->is_default_route_set);
		dp_peer_cleanup(vdev, peer);

		peer->delete_in_progress = false;

		/* drop stale AST state; a fresh entry is added below */
		dp_peer_delete_ast_entries(soc, peer);

		/* the vdev's own MAC on a STA is its self peer */
		if ((vdev->opmode == wlan_op_mode_sta) &&
		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
		     DP_MAC_ADDR_LEN)) {
			ast_type = CDP_TXRX_AST_TYPE_SELF;
		}

		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);

		/*
		 * Control path maintains a node count which is incremented
		 * for every new peer create command. Since new peer is not
		 * being created and earlier reference is reused here,
		 * peer_unref_delete event is sent to control path to
		 * increment the count back.
		 */
		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
				vdev->vdev_id, peer->mac_addr.raw);
		}
		peer->ctrl_peer = ctrl_peer;

		dp_local_peer_id_alloc(pdev, peer);
		DP_STATS_INIT(peer);

		/* reference taken in dp_peer_can_reuse() goes to the caller */
		return (void *)peer;
	} else {
		/*
		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
		 * need to remove the AST entry which was earlier added as a WDS
		 * entry.
		 * If an AST entry exists, but no peer entry exists with a given
		 * MAC addresses, we could deduce it as a WDS entry
		 */
		dp_peer_ast_handle_roam_del(soc, peer_mac_addr);
	}

#ifdef notyet
	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
		soc->mempool_ol_ath_peer);
#else
	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
#endif

	if (!peer)
		return NULL; /* failure */

	qdf_mem_zero(peer, sizeof(struct dp_peer));

	TAILQ_INIT(&peer->ast_entry_list);

	/* store provided params */
	peer->vdev = vdev;
	peer->ctrl_peer = ctrl_peer;

	/* the vdev's own MAC on a STA is its self peer */
	if ((vdev->opmode == wlan_op_mode_sta) &&
	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
	     DP_MAC_ADDR_LEN)) {
		ast_type = CDP_TXRX_AST_TYPE_SELF;
	}

	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);

	qdf_spinlock_create(&peer->peer_info_lock);

	qdf_mem_copy(
		&peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);

	/* TODO: See of rx_opt_proc is really required */
	peer->rx_opt_proc = soc->rx_opt_proc;

	/* no peer ids assigned yet; target supplies them via peer-map */
	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
		peer->peer_ids[i] = HTT_INVALID_PEER;

	qdf_spin_lock_bh(&soc->peer_ref_mutex);

	qdf_atomic_init(&peer->ref_cnt);

	/* keep one reference for attach */
	qdf_atomic_inc(&peer->ref_cnt);

	/* add this peer into the vdev's list; a STA's self peer is kept
	 * at the head
	 */
	if (wlan_op_mode_sta == vdev->opmode)
		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
	else
		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);

	qdf_spin_unlock_bh(&soc->peer_ref_mutex);

	/* TODO: See if hash based search is required */
	dp_peer_find_hash_add(soc, peer);

	/* Initialize the peer state */
	peer->state = OL_TXRX_PEER_STATE_DISC;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		"vdev %pK created peer %pK (%pM) ref_cnt: %d",
		vdev, peer, peer->mac_addr.raw,
		qdf_atomic_read(&peer->ref_cnt));
	/*
	 * For every peer MAp message search and set if bss_peer.
	 * NOTE(review): the literal 6 below should likely be
	 * DP_MAC_ADDR_LEN as used elsewhere in this function - confirm
	 * and unify.
	 */
	if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			"vdev bss_peer!!!!");
		peer->bss_peer = 1;
		vdev->vap_bss_peer = peer;
	}
	for (i = 0; i < DP_MAX_TIDS; i++)
		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);

	dp_local_peer_id_alloc(pdev, peer);
	DP_STATS_INIT(peer);
	return (void *)peer;
}
4053
4054/*
Mohit Khanna81179cb2018-08-16 20:50:43 -07004055 * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
4056 * @vdev: Datapath VDEV handle
4057 * @reo_dest: pointer to default reo_dest ring for vdev to be populated
4058 * @hash_based: pointer to hash value (enabled/disabled) to be populated
4059 *
4060 * Return: None
4061 */
4062static
4063void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
4064 enum cdp_host_reo_dest_ring *reo_dest,
4065 bool *hash_based)
4066{
4067 struct dp_soc *soc;
4068 struct dp_pdev *pdev;
4069
4070 pdev = vdev->pdev;
4071 soc = pdev->soc;
4072 /*
4073 * hash based steering is disabled for Radios which are offloaded
4074 * to NSS
4075 */
4076 if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
4077 *hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
4078
4079 /*
4080 * Below line of code will ensure the proper reo_dest ring is chosen
4081 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
4082 */
4083 *reo_dest = pdev->reo_dest;
4084}
4085
4086#ifdef IPA_OFFLOAD
4087/*
4088 * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
4089 * @vdev: Datapath VDEV handle
4090 * @reo_dest: pointer to default reo_dest ring for vdev to be populated
4091 * @hash_based: pointer to hash value (enabled/disabled) to be populated
4092 *
4093 * If IPA is enabled in ini, for SAP mode, disable hash based
4094 * steering, use default reo_dst ring for RX. Use config values for other modes.
4095 * Return: None
4096 */
4097static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
4098 enum cdp_host_reo_dest_ring *reo_dest,
4099 bool *hash_based)
4100{
4101 struct dp_soc *soc;
4102 struct dp_pdev *pdev;
4103
4104 pdev = vdev->pdev;
4105 soc = pdev->soc;
4106
4107 dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
4108
4109 /*
4110 * If IPA is enabled, disable hash-based flow steering and set
4111 * reo_dest_ring_4 as the REO ring to receive packets on.
4112 * IPA is configured to reap reo_dest_ring_4.
4113 *
4114 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
4115 * value enum value is from 1 - 4.
4116 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
4117 */
4118 if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
4119 if (vdev->opmode == wlan_op_mode_ap) {
4120 *reo_dest = IPA_REO_DEST_RING_IDX + 1;
4121 *hash_based = 0;
4122 }
4123 }
4124}
4125
4126#else
4127
4128/*
4129 * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
4130 * @vdev: Datapath VDEV handle
4131 * @reo_dest: pointer to default reo_dest ring for vdev to be populated
4132 * @hash_based: pointer to hash value (enabled/disabled) to be populated
4133 *
4134 * Use system config values for hash based steering.
4135 * Return: None
4136 */
4137
4138static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
4139 enum cdp_host_reo_dest_ring *reo_dest,
4140 bool *hash_based)
4141{
4142 dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
4143}
4144#endif /* IPA_OFFLOAD */
4145
/*
 * dp_peer_setup_wifi3() - initialize the peer
 * @vdev_hdl: virtual device object
 * @peer_hdl: Peer object
 *
 * Post-create initialization: picks the REO destination ring and
 * hash-based-steering setting for this peer, pushes the default route
 * to the control path (peer_set_default_routing) and initializes the
 * peer's rx state.
 *
 * Return: void
 */
static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
{
	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	bool hash_based = 0;
	enum cdp_host_reo_dest_ring reo_dest;

	/* preconditions */
	qdf_assert(vdev);
	qdf_assert(peer);

	pdev = vdev->pdev;
	soc = pdev->soc;

	/* reset management-frame bookkeeping for a (re)associating peer */
	peer->last_assoc_rcvd = 0;
	peer->last_disassoc_rcvd = 0;
	peer->last_deauth_rcvd = 0;

	dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);

	dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
		pdev->pdev_id, vdev->vdev_id,
		vdev->opmode, hash_based, reo_dest);


	/*
	 * There are corner cases where the AD1 = AD2 = "VAPs address"
	 * i.e both the devices have same MAC address. In these
	 * cases we want such pkts to be processed in NULL Q handler
	 * which is REO2TCL ring. for this reason we should
	 * not setup reo_queues and default route for bss_peer.
	 */
	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap)
		return;

	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
		/* TODO: Check the destination ring number to be passed to FW */
		soc->cdp_soc.ol_ops->peer_set_default_routing(
			pdev->ctrl_pdev, peer->mac_addr.raw,
			peer->vdev->vdev_id, hash_based, reo_dest);
	}

	qdf_atomic_set(&peer->is_default_route_set, 1);

	dp_peer_rx_init(pdev, peer);
	return;
}
4202
4203/*
Kalyan Tallapragada277f45e2017-01-30 14:25:27 +05304204 * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
4205 * @vdev_handle: virtual device object
4206 * @htt_pkt_type: type of pkt
4207 *
4208 * Return: void
4209 */
4210static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
4211 enum htt_cmn_pkt_type val)
4212{
4213 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4214 vdev->tx_encap_type = val;
4215}
4216
4217/*
4218 * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
4219 * @vdev_handle: virtual device object
4220 * @htt_pkt_type: type of pkt
4221 *
4222 * Return: void
4223 */
4224static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
4225 enum htt_cmn_pkt_type val)
4226{
4227 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4228 vdev->rx_decap_type = val;
4229}
4230
4231/*
sumedh baikady1f8f3192018-02-20 17:30:32 -08004232 * dp_set_ba_aging_timeout() - set ba aging timeout per AC
4233 * @txrx_soc: cdp soc handle
4234 * @ac: Access category
4235 * @value: timeout value in millisec
4236 *
4237 * Return: void
4238 */
4239static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
4240 uint8_t ac, uint32_t value)
4241{
4242 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4243
4244 hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
4245}
4246
4247/*
4248 * dp_get_ba_aging_timeout() - get ba aging timeout per AC
4249 * @txrx_soc: cdp soc handle
4250 * @ac: access category
4251 * @value: timeout value in millisec
4252 *
4253 * Return: void
4254 */
4255static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
4256 uint8_t ac, uint32_t *value)
4257{
4258 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4259
4260 hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
4261}
4262
4263/*
Tallapragada Kalyanfd1edcc2017-03-07 19:34:29 +05304264 * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
4265 * @pdev_handle: physical device object
4266 * @val: reo destination ring index (1 - 4)
4267 *
4268 * Return: void
4269 */
4270static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
4271 enum cdp_host_reo_dest_ring val)
4272{
4273 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4274
4275 if (pdev)
4276 pdev->reo_dest = val;
4277}
4278
4279/*
4280 * dp_get_pdev_reo_dest() - get the reo destination for this pdev
4281 * @pdev_handle: physical device object
4282 *
4283 * Return: reo destination ring index
4284 */
4285static enum cdp_host_reo_dest_ring
4286dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
4287{
4288 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4289
4290 if (pdev)
4291 return pdev->reo_dest;
4292 else
4293 return cdp_host_reo_dest_ring_unknown;
4294}
4295
4296/*
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05304297 * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
4298 * @pdev_handle: device object
4299 * @val: value to be set
4300 *
4301 * Return: void
4302 */
4303static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
4304 uint32_t val)
4305{
4306 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4307
4308 /* Enable/Disable smart mesh filtering. This flag will be checked
4309 * during rx processing to check if packets are from NAC clients.
4310 */
4311 pdev->filter_neighbour_peers = val;
4312 return 0;
4313}
4314
/*
 * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
 * address for smart mesh filtering
 * @vdev_handle: virtual device object
 * @cmd: Add/Del command (DP_NAC_PARAM_ADD / DP_NAC_PARAM_DEL)
 * @macaddr: nac client mac address
 *
 * Maintains pdev->neighbour_peers_list under neighbour_peer_mutex and
 * (re)configures the ppdu ring when the list transitions between empty
 * and non-empty.
 *
 * Return: 1 on success, 0 on failure (NULL mac, alloc failure or
 *         unrecognized @cmd)
 */
static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
		uint32_t cmd, uint8_t *macaddr)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_neighbour_peer *peer = NULL;

	if (!macaddr)
		goto fail0;

	/* Store address of NAC (neighbour peer) which will be checked
	 * against TA of received packets.
	 */
	if (cmd == DP_NAC_PARAM_ADD) {
		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
				sizeof(*peer));

		if (!peer) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("DP neighbour peer node memory allocation failed"));
			goto fail0;
		}

		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
			macaddr, DP_MAC_ADDR_LEN);
		peer->vdev = vdev;

		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);

		/* add this neighbour peer into the list */
		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
				neighbour_peer_list_elem);
		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

		/* first neighbour: turn the ppdu ring on */
		if (!pdev->neighbour_peers_added) {
			pdev->neighbour_peers_added = true;
			dp_ppdu_ring_cfg(pdev);
		}
		return 1;

	} else if (cmd == DP_NAC_PARAM_DEL) {
		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
				neighbour_peer_list_elem) {
			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				macaddr, DP_MAC_ADDR_LEN)) {
				/* delete this peer from the list */
				TAILQ_REMOVE(&pdev->neighbour_peers_list,
					peer, neighbour_peer_list_elem);
				qdf_mem_free(peer);
				break;
			}
		}
		/* last neighbour deleted */
		if (TAILQ_EMPTY(&pdev->neighbour_peers_list))
			pdev->neighbour_peers_added = false;

		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

		/* ppdu ring stays configured while any other feature
		 * (m_copy / enhanced stats) still needs it
		 */
		if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
		    !pdev->enhanced_stats_en)
			dp_ppdu_ring_reset(pdev);
		return 1;

	}

fail0:
	return 0;
}
4394
4395/*
Chaitanya Kiran Godavarthi6228e3b2017-06-15 14:28:19 +05304396 * dp_get_sec_type() - Get the security type
4397 * @peer: Datapath peer handle
4398 * @sec_idx: Security id (mcast, ucast)
4399 *
4400 * return sec_type: Security type
4401 */
4402static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
4403{
4404 struct dp_peer *dpeer = (struct dp_peer *)peer;
4405
4406 return dpeer->security[sec_idx].sec_type;
4407}
4408
4409/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004410 * dp_peer_authorize() - authorize txrx peer
4411 * @peer_handle: Datapath peer handle
4412 * @authorize
4413 *
4414 */
c_cgodavbd5b3c22017-06-07 12:31:40 +05304415static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004416{
4417 struct dp_peer *peer = (struct dp_peer *)peer_handle;
4418 struct dp_soc *soc;
4419
4420 if (peer != NULL) {
4421 soc = peer->vdev->pdev->soc;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004422 qdf_spin_lock_bh(&soc->peer_ref_mutex);
4423 peer->authorize = authorize ? 1 : 0;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004424 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4425 }
4426}
4427
Krunal Soni7c4565f2018-09-04 19:02:53 -07004428static void dp_reset_and_release_peer_mem(struct dp_soc *soc,
4429 struct dp_pdev *pdev,
4430 struct dp_peer *peer,
4431 uint32_t vdev_id)
4432{
4433 struct dp_vdev *vdev = NULL;
4434 struct dp_peer *bss_peer = NULL;
4435 uint8_t *m_addr = NULL;
4436
4437 qdf_spin_lock_bh(&pdev->vdev_list_lock);
4438 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4439 if (vdev->vdev_id == vdev_id)
4440 break;
4441 }
4442 if (!vdev) {
4443 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4444 "vdev is NULL");
4445 } else {
4446 if (vdev->vap_bss_peer == peer)
4447 vdev->vap_bss_peer = NULL;
4448 m_addr = peer->mac_addr.raw;
4449 if (soc->cdp_soc.ol_ops->peer_unref_delete)
4450 soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
4451 vdev_id, m_addr);
4452 if (vdev && vdev->vap_bss_peer) {
4453 bss_peer = vdev->vap_bss_peer;
4454 DP_UPDATE_STATS(vdev, peer);
4455 }
4456 }
4457 qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4458 qdf_mem_free(peer);
4459}
4460
/**
 * dp_delete_pending_vdev() - check and process vdev delete
 * @pdev: DP specific pdev pointer
 * @vdev: DP specific vdev pointer (freed on return; caller must not use it)
 * @vdev_id: vdev id corresponding to vdev
 *
 * This API does following:
 * 1) It releases tx flow pools buffers as vdev is
 *    going down and no peers are associated.
 * 2) It also detaches vdev before cleaning vdev (struct dp_vdev) memory
 */
static void dp_delete_pending_vdev(struct dp_pdev *pdev, struct dp_vdev *vdev,
				   uint8_t vdev_id)
{
	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
	void *vdev_delete_context = NULL;

	/* cache the completion callback/context now: the vdev memory that
	 * holds them is freed below, before the callback is invoked
	 */
	vdev_delete_cb = vdev->delete.callback;
	vdev_delete_context = vdev->delete.context;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  FL("deleting vdev object %pK (%pM)- its last peer is done"),
		  vdev, vdev->mac_addr.raw);
	/* all peers are gone, go ahead and delete it */
	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
				      FLOW_TYPE_VDEV, vdev_id);
	dp_tx_vdev_detach(vdev);

	/* unlink the vdev from its pdev under the vdev-list lock */
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  FL("deleting vdev object %pK (%pM)"),
		  vdev, vdev->mac_addr.raw);
	qdf_mem_free(vdev);
	vdev = NULL;	/* defensive: local must not be used after free */

	/* notify the control path only after the vdev is fully torn down */
	if (vdev_delete_cb)
		vdev_delete_cb(vdev_delete_context);
}
4502
/*
 * dp_peer_unref_delete() - unref and delete peer
 * @peer_handle: Datapath peer handle
 *
 * Drops one reference on the peer. When the reference count reaches zero,
 * the peer is removed from the peer-id map, the peer hash table and its
 * vdev's peer list, its AST entry and rx state are cleaned up, and its
 * memory is released. If the parent vdev was waiting on its last peer to
 * be deleted (delete.pending), the vdev teardown is triggered here too.
 */
void dp_peer_unref_delete(void *peer_handle)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_vdev *vdev = peer->vdev;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_peer *tmppeer;
	int found = 0;
	uint16_t peer_id;
	uint16_t vdev_id;
	bool delete_vdev;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: peer %pK ref_cnt(before decrement): %d\n", __func__,
		  peer, qdf_atomic_read(&peer->ref_cnt));
	/*
	 * Hold the lock all the way from checking if the peer ref count
	 * is zero until the peer references are removed from the hash
	 * table and vdev list (if the peer ref count is zero).
	 * This protects against a new HL tx operation starting to use the
	 * peer object just after this function concludes it's done being used.
	 * Furthermore, the lock needs to be held while checking whether the
	 * vdev's list of peers is empty, to make sure that list is not modified
	 * concurrently with the empty check.
	 */
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
		/* snapshot ids before the peer/vdev can go away below */
		peer_id = peer->peer_ids[0];
		vdev_id = vdev->vdev_id;

		/*
		 * Make sure that the reference to the peer in
		 * peer object map is removed
		 */
		if (peer_id != HTT_INVALID_PEER)
			soc->peer_id_to_obj_map[peer_id] = NULL;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			  "Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);

		/* remove the reference to the peer from the hash table */
		dp_peer_find_hash_remove(soc, peer);

		/* release the peer's own (self) AST entry, if any */
		qdf_spin_lock_bh(&soc->ast_lock);
		if (peer->self_ast_entry) {
			dp_peer_del_ast(soc, peer->self_ast_entry);
			peer->self_ast_entry = NULL;
		}
		qdf_spin_unlock_bh(&soc->ast_lock);

		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
			if (tmppeer == peer) {
				found = 1;
				break;
			}
		}

		if (found) {
			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
				peer_list_elem);
		} else {
			/*Ignoring the remove operation as peer not found*/
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "peer:%pK not found in vdev:%pK peerlist:%pK",
				  peer, vdev, &peer->vdev->peer_list);
		}

		/* cleanup the peer data */
		dp_peer_cleanup(vdev, peer);

		/* check whether the parent vdev has no peers left */
		if (TAILQ_EMPTY(&vdev->peer_list)) {
			/*
			 * capture vdev delete pending flag's status
			 * while holding peer_ref_mutex lock
			 */
			delete_vdev = vdev->delete.pending;
			/*
			 * Now that there are no references to the peer, we can
			 * release the peer reference lock.
			 */
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
			/*
			 * Check if the parent vdev was waiting for its peers
			 * to be deleted, in order for it to be deleted too.
			 */
			if (delete_vdev)
				dp_delete_pending_vdev(pdev, vdev, vdev_id);
		} else {
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
		}
		/* finally notify the control path and free the peer memory */
		dp_reset_and_release_peer_mem(soc, pdev, peer, vdev_id);

	} else {
		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	}
}
4605
/*
 * dp_peer_delete_wifi3() - Delete txrx peer
 * @peer_handle: Datapath peer handle
 * @bitmap: bitmap indicating special handling of request.
 *
 * Redirects the peer's rx path to a discard function, frees its local
 * peer id and info lock, then drops the reference taken at peer_attach.
 */
static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;

	/* redirect the peer's rx delivery function to point to a
	 * discard func
	 */

	peer->rx_opt_proc = dp_rx_discard;
	peer->ctrl_peer = NULL;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		FL("peer %pK (%pM)"), peer, peer->mac_addr.raw);

	dp_local_peer_id_free(peer->vdev->pdev, peer);
	qdf_spinlock_destroy(&peer->peer_info_lock);

	/*
	 * Remove the reference added during peer_attach.
	 * The peer will still be left allocated until the
	 * PEER_UNMAP message arrives to remove the other
	 * reference, added by the PEER_MAP message.
	 */
	dp_peer_unref_delete(peer_handle);
}
4637
/*
 * dp_get_vdev_mac_addr_wifi3() - Get the mac address of a vdev
 * @pvdev: Datapath VDEV handle
 *
 * Return: pointer to the vdev's raw mac address bytes
 */
static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
{
	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
	return vdev->mac_addr.raw;
}
4648
/*
 * dp_vdev_set_wds() - Enable/disable WDS on the vdev
 * @vdev_handle: DP VDEV handle
 * @val: 0 to disable WDS, non-zero to enable
 *
 * Return: 0 (always succeeds)
 */
static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	vdev->wds_enabled = val;
	return 0;
}
4663
/*
 * dp_get_vdev_from_vdev_id_wifi3() - Look up a vdev by id on a pdev
 * @dev: Datapath PDEV handle
 * @vdev_id: id of the vdev to find
 *
 * Return: matching vdev handle, or NULL when pdev is invalid or no
 * vdev with the given id is found
 */
static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
		uint8_t vdev_id)
{
	struct dp_pdev *pdev = (struct dp_pdev *)dev;
	struct dp_vdev *vdev = NULL;

	if (qdf_unlikely(!pdev))
		return NULL;

	/* walk the pdev's vdev list under the list lock; vdev is NULL
	 * after the loop if no id matched
	 */
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		if (vdev->vdev_id == vdev_id)
			break;
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	return (struct cdp_vdev *)vdev;
}
4687
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004688static int dp_get_opmode(struct cdp_vdev *vdev_handle)
Leo Chang5ea93a42016-11-03 12:39:49 -07004689{
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004690 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
Leo Chang5ea93a42016-11-03 12:39:49 -07004691
4692 return vdev->opmode;
4693}
4694
/**
 * dp_get_os_rx_handles_from_vdev_wifi3() - fetch OS rx handles of a vdev
 * @pvdev: Datapath VDEV handle
 * @stack_fn_p: out param; set to the vdev's rx-stack delivery function
 * @osif_vdev_p: out param; set to the vdev's OS-interface handle
 *
 * Return: void (results are delivered through the out parameters)
 */
static
void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_vdev *pvdev,
					  ol_txrx_rx_fp *stack_fn_p,
					  ol_osif_vdev_handle *osif_vdev_p)
{
	struct dp_vdev *vdev = dp_get_dp_vdev_from_cdp_vdev(pvdev);

	qdf_assert(vdev);
	*stack_fn_p = vdev->osif_rx_stack;
	*osif_vdev_p = vdev->osif_vdev;
}
4706
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004707static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
Leo Chang5ea93a42016-11-03 12:39:49 -07004708{
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004709 struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
Leo Chang5ea93a42016-11-03 12:39:49 -07004710 struct dp_pdev *pdev = vdev->pdev;
4711
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004712 return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
Leo Chang5ea93a42016-11-03 12:39:49 -07004713}
phadiman7821bf82018-02-06 16:03:54 +05304714
Kai Chen6eca1a62017-01-12 10:17:53 -08004715/**
sumedh baikady84613b02017-09-19 16:36:14 -07004716 * dp_reset_monitor_mode() - Disable monitor mode
4717 * @pdev_handle: Datapath PDEV handle
4718 *
4719 * Return: 0 on success, not 0 on failure
4720 */
4721static int dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
4722{
4723 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4724 struct htt_rx_ring_tlv_filter htt_tlv_filter;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004725 struct dp_soc *soc = pdev->soc;
sumedh baikady84613b02017-09-19 16:36:14 -07004726 uint8_t pdev_id;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004727 int mac_id;
sumedh baikady84613b02017-09-19 16:36:14 -07004728
4729 pdev_id = pdev->pdev_id;
4730 soc = pdev->soc;
4731
Sumedh Baikady12b2b2c2018-03-05 16:50:58 -08004732 qdf_spin_lock_bh(&pdev->mon_lock);
4733
sumedh baikady84613b02017-09-19 16:36:14 -07004734 qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4735
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004736 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4737 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
sumedh baikady84613b02017-09-19 16:36:14 -07004738
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004739 htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4740 pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4741 RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4742
4743 htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4744 pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4745 RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4746 }
sumedh baikady84613b02017-09-19 16:36:14 -07004747
Sumedh Baikady12b2b2c2018-03-05 16:50:58 -08004748 pdev->monitor_vdev = NULL;
4749
4750 qdf_spin_unlock_bh(&pdev->mon_lock);
4751
sumedh baikady84613b02017-09-19 16:36:14 -07004752 return 0;
4753}
phadiman7821bf82018-02-06 16:03:54 +05304754
4755/**
4756 * dp_set_nac() - set peer_nac
4757 * @peer_handle: Datapath PEER handle
4758 *
4759 * Return: void
4760 */
4761static void dp_set_nac(struct cdp_peer *peer_handle)
4762{
4763 struct dp_peer *peer = (struct dp_peer *)peer_handle;
4764
4765 peer->nac = 1;
4766}
4767
4768/**
4769 * dp_get_tx_pending() - read pending tx
4770 * @pdev_handle: Datapath PDEV handle
4771 *
4772 * Return: outstanding tx
4773 */
4774static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
4775{
4776 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4777
4778 return qdf_atomic_read(&pdev->num_tx_outstanding);
4779}
4780
4781/**
4782 * dp_get_peer_mac_from_peer_id() - get peer mac
4783 * @pdev_handle: Datapath PDEV handle
4784 * @peer_id: Peer ID
4785 * @peer_mac: MAC addr of PEER
4786 *
4787 * Return: void
4788 */
4789static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
4790 uint32_t peer_id, uint8_t *peer_mac)
4791{
4792 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4793 struct dp_peer *peer;
4794
4795 if (pdev && peer_mac) {
4796 peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05304797 if (peer) {
Venkata Sharath Chandra Manchalad18887e2018-10-02 18:18:52 -07004798 qdf_mem_copy(peer_mac, peer->mac_addr.raw,
4799 DP_MAC_ADDR_LEN);
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05304800 dp_peer_unref_del_find_by_id(peer);
phadiman7821bf82018-02-06 16:03:54 +05304801 }
4802 }
4803}
4804
/**
 * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
 * @vdev_handle: Datapath VDEV handle
 * @smart_monitor: Flag to denote if its smart monitor mode
 *
 * Attaches the vdev as the pdev's monitor vdev and, unless smart monitor
 * mode is requested, programs two HTT TLV filters: a full-packet filter on
 * the RXDMA monitor buffer ring and a PPDU-status-only filter on the RXDMA
 * monitor status ring, for every mac of the pdev.
 *
 * Return: 0 on success, not 0 on failure
 */
static int dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
		uint8_t smart_monitor)
{
	/* Many monitor VAPs can exists in a system but only one can be up at
	 * anytime
	 */
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev;
	struct htt_rx_ring_tlv_filter htt_tlv_filter;
	struct dp_soc *soc;
	uint8_t pdev_id;
	int mac_id;

	qdf_assert(vdev);

	pdev = vdev->pdev;
	pdev_id = pdev->pdev_id;
	soc = pdev->soc;
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
		pdev, pdev_id, soc, vdev);

	/*Check if current pdev's monitor_vdev exists */
	if (pdev->monitor_vdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"vdev=%pK", vdev);
		qdf_assert(vdev);
	}

	pdev->monitor_vdev = vdev;

	/* If smart monitor mode, do not configure monitor ring */
	if (smart_monitor)
		return QDF_STATUS_SUCCESS;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
		pdev->fp_ctrl_filter, pdev->fp_data_filter,
		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
		pdev->mo_data_filter);

	/* first filter: full packet TLVs for the monitor buffer ring,
	 * gated by the pdev's filter-pass (FP) / monitor-other (MO) config
	 */
	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);

	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 1;
	htt_tlv_filter.packet = 1;
	htt_tlv_filter.msdu_end = 1;
	htt_tlv_filter.mpdu_end = 1;
	htt_tlv_filter.packet_header = 1;
	htt_tlv_filter.attention = 1;
	htt_tlv_filter.ppdu_start = 0;
	htt_tlv_filter.ppdu_end = 0;
	htt_tlv_filter.ppdu_end_user_stats = 0;
	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
	htt_tlv_filter.ppdu_end_status_done = 0;
	htt_tlv_filter.header_per_msdu = 1;
	htt_tlv_filter.enable_fp =
		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo =
		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);

		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
	}

	/* second filter: PPDU status TLVs only, for the status ring
	 * (packet headers added when mcopy mode is on)
	 */
	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);

	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 0;
	htt_tlv_filter.packet = 0;
	htt_tlv_filter.msdu_end = 0;
	htt_tlv_filter.mpdu_end = 0;
	htt_tlv_filter.attention = 0;
	htt_tlv_filter.ppdu_start = 1;
	htt_tlv_filter.ppdu_end = 1;
	htt_tlv_filter.ppdu_end_user_stats = 1;
	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
	htt_tlv_filter.ppdu_end_status_done = 1;
	htt_tlv_filter.enable_fp = 1;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo = 1;
	if (pdev->mcopy_mode) {
		htt_tlv_filter.packet_header = 1;
	}
	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
						pdev->pdev_id);

		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
	}

	return QDF_STATUS_SUCCESS;
}
4926
/**
 * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
 * @pdev_handle: Datapath PDEV handle
 * @filter_val: Flag to select Filter for monitor mode
 *
 * Stores the requested filter mode/type values in the pdev, clears the
 * current RXDMA monitor ring filters, then reprograms the monitor buffer
 * ring with full-packet TLVs and the monitor status ring with
 * PPDU-status-only TLVs for every mac of the pdev.
 *
 * Return: 0 on success, not 0 on failure
 */
static int dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
	struct cdp_monitor_filter *filter_val)
{
	/* Many monitor VAPs can exists in a system but only one can be up at
	 * anytime
	 */
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_vdev *vdev = pdev->monitor_vdev;
	struct htt_rx_ring_tlv_filter htt_tlv_filter;
	struct dp_soc *soc;
	uint8_t pdev_id;
	int mac_id;

	pdev_id = pdev->pdev_id;
	soc = pdev->soc;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
		pdev, pdev_id, soc, vdev);

	/*Check if current pdev's monitor_vdev exists */
	if (!pdev->monitor_vdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"vdev=%pK", vdev);
		qdf_assert(vdev);
	}

	/* update filter mode, type in pdev structure */
	pdev->mon_filter_mode = filter_val->mode;
	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
	pdev->fp_data_filter = filter_val->fp_data;
	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
	pdev->mo_data_filter = filter_val->mo_data;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
		pdev->fp_ctrl_filter, pdev->fp_data_filter,
		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
		pdev->mo_data_filter);

	/* first clear both monitor rings with an all-zero filter */
	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);

		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);

		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
	}

	/* full packet TLVs on the monitor buffer ring, gated by the
	 * newly stored FP/MO filter configuration
	 */
	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 1;
	htt_tlv_filter.packet = 1;
	htt_tlv_filter.msdu_end = 1;
	htt_tlv_filter.mpdu_end = 1;
	htt_tlv_filter.packet_header = 1;
	htt_tlv_filter.attention = 1;
	htt_tlv_filter.ppdu_start = 0;
	htt_tlv_filter.ppdu_end = 0;
	htt_tlv_filter.ppdu_end_user_stats = 0;
	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
	htt_tlv_filter.ppdu_end_status_done = 0;
	htt_tlv_filter.header_per_msdu = 1;
	htt_tlv_filter.enable_fp =
		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo =
		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);

		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
	}

	/* PPDU status TLVs only on the monitor status ring (packet
	 * headers added in mcopy mode)
	 */
	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);

	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 0;
	htt_tlv_filter.packet = 0;
	htt_tlv_filter.msdu_end = 0;
	htt_tlv_filter.mpdu_end = 0;
	htt_tlv_filter.attention = 0;
	htt_tlv_filter.ppdu_start = 1;
	htt_tlv_filter.ppdu_end = 1;
	htt_tlv_filter.ppdu_end_user_stats = 1;
	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
	htt_tlv_filter.ppdu_end_status_done = 1;
	htt_tlv_filter.enable_fp = 1;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo = 1;
	if (pdev->mcopy_mode) {
		htt_tlv_filter.packet_header = 1;
	}
	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
						pdev->pdev_id);

		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
	}

	return QDF_STATUS_SUCCESS;
}
Leo Chang5ea93a42016-11-03 12:39:49 -07005060
nobeljc8eb4d62018-01-04 14:29:32 -08005061/**
phadiman7821bf82018-02-06 16:03:54 +05305062 * dp_get_pdev_id_frm_pdev() - get pdev_id
5063 * @pdev_handle: Datapath PDEV handle
5064 *
5065 * Return: pdev_id
5066 */
5067static
5068uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
5069{
5070 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5071
5072 return pdev->pdev_id;
5073}
5074
5075/**
Adil Saeed Musthafa61a21692018-07-17 20:49:31 -07005076 * dp_pdev_set_chan_noise_floor() - set channel noise floor
5077 * @pdev_handle: Datapath PDEV handle
5078 * @chan_noise_floor: Channel Noise Floor
5079 *
5080 * Return: void
5081 */
5082static
5083void dp_pdev_set_chan_noise_floor(struct cdp_pdev *pdev_handle,
5084 int16_t chan_noise_floor)
5085{
5086 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5087
5088 pdev->chan_noise_floor = chan_noise_floor;
5089}
5090
5091/**
nobeljc8eb4d62018-01-04 14:29:32 -08005092 * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
5093 * @vdev_handle: Datapath VDEV handle
5094 * Return: true on ucast filter flag set
5095 */
5096static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
5097{
5098 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5099 struct dp_pdev *pdev;
5100
5101 pdev = vdev->pdev;
5102
5103 if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
5104 (pdev->mo_data_filter & FILTER_DATA_UCAST))
5105 return true;
5106
5107 return false;
5108}
5109
5110/**
5111 * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
5112 * @vdev_handle: Datapath VDEV handle
5113 * Return: true on mcast filter flag set
5114 */
5115static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
5116{
5117 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5118 struct dp_pdev *pdev;
5119
5120 pdev = vdev->pdev;
5121
5122 if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
5123 (pdev->mo_data_filter & FILTER_DATA_MCAST))
5124 return true;
5125
5126 return false;
5127}
5128
5129/**
5130 * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
5131 * @vdev_handle: Datapath VDEV handle
5132 * Return: true on non data filter flag set
5133 */
5134static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
5135{
5136 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5137 struct dp_pdev *pdev;
5138
5139 pdev = vdev->pdev;
5140
5141 if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
5142 (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
5143 if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
5144 (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
5145 return true;
5146 }
5147 }
5148
5149 return false;
5150}
5151
#ifdef MESH_MODE_SUPPORT
/*
 * dp_peer_set_mesh_mode() - set mesh mode flag on a vdev
 * @vdev_hdl: virtual device object
 * @val: value to be set (non-zero marks the vdev as a mesh vdev)
 *
 * Return: void
 */
void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		FL("val %d"), val);
	vdev->mesh_vdev = val;
}

/*
 * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
 * @vdev_hdl: virtual device object
 * @val: value to be set
 *
 * Return: void
 */
void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		FL("val %d"), val);
	vdev->mesh_rx_filter = val;
}
#endif
5178
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05305179/*
5180 * dp_aggregate_pdev_ctrl_frames_stats()- function to agreegate peer stats
Jeff Johnson2d821eb2018-05-06 16:25:49 -07005181 * Current scope is bar received count
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05305182 *
5183 * @pdev_handle: DP_PDEV handle
5184 *
5185 * Return: void
5186 */
Pratik Gandhi81fe0622018-02-23 12:36:10 +05305187#define STATS_PROC_TIMEOUT (HZ/1000)
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05305188
5189static void
5190dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
5191{
5192 struct dp_vdev *vdev;
5193 struct dp_peer *peer;
5194 uint32_t waitcnt;
5195
5196 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5197 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5198 if (!peer) {
5199 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5200 FL("DP Invalid Peer refernce"));
5201 return;
5202 }
Pratik Gandhi81fe0622018-02-23 12:36:10 +05305203
5204 if (peer->delete_in_progress) {
5205 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5206 FL("DP Peer deletion in progress"));
5207 continue;
5208 }
5209
5210 qdf_atomic_inc(&peer->ref_cnt);
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05305211 waitcnt = 0;
5212 dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
Prathyusha Guduri184b6402018-02-04 23:01:49 +05305213 while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05305214 && waitcnt < 10) {
5215 schedule_timeout_interruptible(
5216 STATS_PROC_TIMEOUT);
5217 waitcnt++;
5218 }
Prathyusha Guduri184b6402018-02-04 23:01:49 +05305219 qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
Pratik Gandhi81fe0622018-02-23 12:36:10 +05305220 dp_peer_unref_delete(peer);
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05305221 }
5222 }
5223}
5224
5225/**
5226 * dp_rx_bar_stats_cb(): BAR received stats callback
5227 * @soc: SOC handle
5228 * @cb_ctxt: Call back context
5229 * @reo_status: Reo status
5230 *
5231 * return: void
5232 */
5233void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
5234 union hal_reo_status *reo_status)
5235{
5236 struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
5237 struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
5238
5239 if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
5240 DP_TRACE_STATS(FATAL, "REO stats failure %d \n",
5241 queue_status->header.status);
Prathyusha Guduri184b6402018-02-04 23:01:49 +05305242 qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05305243 return;
5244 }
5245
5246 pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
Prathyusha Guduri184b6402018-02-04 23:01:49 +05305247 qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05305248
5249}
5250
/**
 * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
 * @vdev: DP VDEV handle
 * @vdev_stats: caller-provided buffer receiving the consolidated stats
 *
 * Copies the vdev's own counters into @vdev_stats and then folds in the
 * stats of every peer attached to the vdev.
 *
 * return: void
 */
void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
	struct cdp_vdev_stats *vdev_stats)
{
	struct dp_peer *peer = NULL;
	struct dp_soc *soc = vdev->pdev->soc;

	/* start from the vdev's own counters ... */
	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));

	/* ... then fold in each attached peer's counters */
	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
		dp_update_vdev_stats(vdev_stats, peer);

	/* NOTE(review): the control-path callback is handed &vdev->stats,
	 * not the freshly aggregated @vdev_stats, so per-peer contributions
	 * are not forwarded here - confirm this is intentional.
	 */
	if (soc->cdp_soc.ol_ops->update_dp_stats)
		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->ctrl_pdev,
			&vdev->stats, (uint16_t) vdev->vdev_id,
			UPDATE_VDEV_STATS);

}
5274
/**
 * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
 * @pdev: DP PDEV handle
 *
 * Zeroes the pdev tx/rx/tx_i counters and rebuilds them by aggregating
 * every vdev (and, through dp_aggregate_vdev_stats(), every peer) under
 * the vdev list lock, then pushes the result to the control path.
 *
 * return: void
 */
static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
{
	struct dp_vdev *vdev = NULL;
	struct dp_soc *soc = pdev->soc;
	/* scratch buffer for per-vdev aggregation; freed before return */
	struct cdp_vdev_stats *vdev_stats =
			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));

	if (!vdev_stats) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "DP alloc failure - unable to get alloc vdev stats");
		return;
	}

	/* counters are recomputed from scratch on every call */
	qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
	qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
	qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

		dp_aggregate_vdev_stats(vdev, vdev_stats);
		dp_update_pdev_stats(pdev, vdev_stats);

		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.nawds_mcast);

		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw.raw_pkt);
		DP_STATS_AGGR(pdev, vdev, tx_i.raw.dma_map_error);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt);
		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host.num);
		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target);
		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_host.num);
		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_target);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.sg.sg_pkt);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.mcast_en.mcast_pkt);
		DP_STATS_AGGR(pdev, vdev,
				tx_i.mcast_en.dropped_map_error);
		DP_STATS_AGGR(pdev, vdev,
				tx_i.mcast_en.dropped_self_mac);
		DP_STATS_AGGR(pdev, vdev,
				tx_i.mcast_en.dropped_send_fail);
		DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast);
		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.dma_error);
		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.ring_full);
		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.enqueue_fail);
		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.desc_na.num);
		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.res_full);
		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.headroom_insufficient);
		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified);
		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified_raw);
		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.exception_fw);
		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.completion_fw);

		/* derived total: sum of the individual host-side drops */
		pdev->stats.tx_i.dropped.dropped_pkt.num =
			pdev->stats.tx_i.dropped.dma_error +
			pdev->stats.tx_i.dropped.ring_full +
			pdev->stats.tx_i.dropped.enqueue_fail +
			pdev->stats.tx_i.dropped.desc_na.num +
			pdev->stats.tx_i.dropped.res_full;

		/* last_ack_rssi / num_seg are overwritten each iteration,
		 * so the last vdev in the list wins
		 */
		pdev->stats.tx.last_ack_rssi =
			vdev->stats.tx.last_ack_rssi;
		pdev->stats.tx_i.tso.num_seg =
			vdev->stats.tx_i.tso.num_seg;
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	qdf_mem_free(vdev_stats);

	if (soc->cdp_soc.ol_ops->update_dp_stats)
		soc->cdp_soc.ol_ops->update_dp_stats(pdev->ctrl_pdev,
			&pdev->stats, pdev->pdev_id, UPDATE_PDEV_STATS);

}
5357
5358/**
Chaithanya Garrepalli2faa46f2018-04-09 12:34:20 +05305359 * dp_vdev_getstats() - get vdev packet level stats
5360 * @vdev_handle: Datapath VDEV handle
5361 * @stats: cdp network device stats structure
5362 *
5363 * Return: void
5364 */
5365static void dp_vdev_getstats(void *vdev_handle,
5366 struct cdp_dev_stats *stats)
5367{
5368 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
Ruchi, Agrawal2a6e6142018-06-01 18:47:55 +05305369 struct cdp_vdev_stats *vdev_stats =
5370 qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
Chaithanya Garrepalli2faa46f2018-04-09 12:34:20 +05305371
Ruchi, Agrawal2a6e6142018-06-01 18:47:55 +05305372 if (!vdev_stats) {
5373 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5374 "DP alloc failure - unable to get alloc vdev stats");
5375 return;
5376 }
5377
5378 dp_aggregate_vdev_stats(vdev, vdev_stats);
5379
5380 stats->tx_packets = vdev_stats->tx_i.rcvd.num;
5381 stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
5382
5383 stats->tx_errors = vdev_stats->tx.tx_failed +
5384 vdev_stats->tx_i.dropped.dropped_pkt.num;
5385 stats->tx_dropped = stats->tx_errors;
5386
5387 stats->rx_packets = vdev_stats->rx.unicast.num +
5388 vdev_stats->rx.multicast.num +
5389 vdev_stats->rx.bcast.num;
5390 stats->rx_bytes = vdev_stats->rx.unicast.bytes +
5391 vdev_stats->rx.multicast.bytes +
5392 vdev_stats->rx.bcast.bytes;
5393
Chaithanya Garrepalli2faa46f2018-04-09 12:34:20 +05305394}
5395
5396
5397/**
Anish Natarajf12b0a32018-03-14 14:27:13 +05305398 * dp_pdev_getstats() - get pdev packet level stats
5399 * @pdev_handle: Datapath PDEV handle
5400 * @stats: cdp network device stats structure
5401 *
5402 * Return: void
5403 */
Chaithanya Garrepalli2faa46f2018-04-09 12:34:20 +05305404static void dp_pdev_getstats(void *pdev_handle,
Anish Natarajf12b0a32018-03-14 14:27:13 +05305405 struct cdp_dev_stats *stats)
5406{
5407 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5408
5409 dp_aggregate_pdev_stats(pdev);
5410
5411 stats->tx_packets = pdev->stats.tx_i.rcvd.num;
5412 stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
5413
5414 stats->tx_errors = pdev->stats.tx.tx_failed +
5415 pdev->stats.tx_i.dropped.dropped_pkt.num;
5416 stats->tx_dropped = stats->tx_errors;
5417
5418 stats->rx_packets = pdev->stats.rx.unicast.num +
Pamidipati, Vijay3b0f9162018-04-16 19:06:20 +05305419 pdev->stats.rx.multicast.num +
5420 pdev->stats.rx.bcast.num;
Anish Natarajf12b0a32018-03-14 14:27:13 +05305421 stats->rx_bytes = pdev->stats.rx.unicast.bytes +
Pamidipati, Vijay3b0f9162018-04-16 19:06:20 +05305422 pdev->stats.rx.multicast.bytes +
5423 pdev->stats.rx.bcast.bytes;
Anish Natarajf12b0a32018-03-14 14:27:13 +05305424}
5425
5426/**
Chaithanya Garrepalli2faa46f2018-04-09 12:34:20 +05305427 * dp_get_device_stats() - get interface level packet stats
5428 * @handle: device handle
5429 * @stats: cdp network device stats structure
5430 * @type: device type pdev/vdev
5431 *
5432 * Return: void
5433 */
5434static void dp_get_device_stats(void *handle,
5435 struct cdp_dev_stats *stats, uint8_t type)
5436{
5437 switch (type) {
5438 case UPDATE_VDEV_STATS:
5439 dp_vdev_getstats(handle, stats);
5440 break;
5441 case UPDATE_PDEV_STATS:
5442 dp_pdev_getstats(handle, stats);
5443 break;
5444 default:
5445 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5446 "apstats cannot be updated for this input "
Aditya Sathishded018e2018-07-02 16:25:21 +05305447 "type %d", type);
Chaithanya Garrepalli2faa46f2018-04-09 12:34:20 +05305448 break;
5449 }
5450
5451}
5452
5453
/**
 * dp_print_pdev_tx_stats(): Print Pdev level TX stats
 * @pdev: DP_PDEV Handle
 *
 * Dumps, via DP_PRINT_STATS, the aggregated tx ingress (tx_i) and tx
 * completion counters of the pdev, section by section. Callers are
 * expected to have refreshed the counters (dp_aggregate_pdev_stats)
 * beforehand.
 *
 * Return:void
 */
static inline void
dp_print_pdev_tx_stats(struct dp_pdev *pdev)
{
	uint8_t index = 0;
	DP_PRINT_STATS("PDEV Tx Stats:\n");
	DP_PRINT_STATS("Received From Stack:");
	DP_PRINT_STATS(" Packets = %d",
			pdev->stats.tx_i.rcvd.num);
	DP_PRINT_STATS(" Bytes = %llu",
			pdev->stats.tx_i.rcvd.bytes);
	DP_PRINT_STATS("Processed:");
	DP_PRINT_STATS(" Packets = %d",
			pdev->stats.tx_i.processed.num);
	DP_PRINT_STATS(" Bytes = %llu",
			pdev->stats.tx_i.processed.bytes);
	DP_PRINT_STATS("Total Completions:");
	DP_PRINT_STATS(" Packets = %u",
			pdev->stats.tx.comp_pkt.num);
	DP_PRINT_STATS(" Bytes = %llu",
			pdev->stats.tx.comp_pkt.bytes);
	DP_PRINT_STATS("Successful Completions:");
	DP_PRINT_STATS(" Packets = %u",
			pdev->stats.tx.tx_success.num);
	DP_PRINT_STATS(" Bytes = %llu",
			pdev->stats.tx.tx_success.bytes);
	/* drop breakdown: host-side (tx_i.dropped) then fw-side (tx.dropped) */
	DP_PRINT_STATS("Dropped:");
	DP_PRINT_STATS(" Total = %d",
			pdev->stats.tx_i.dropped.dropped_pkt.num);
	DP_PRINT_STATS(" Dma_map_error = %d",
			pdev->stats.tx_i.dropped.dma_error);
	DP_PRINT_STATS(" Ring Full = %d",
			pdev->stats.tx_i.dropped.ring_full);
	DP_PRINT_STATS(" Descriptor Not available = %d",
			pdev->stats.tx_i.dropped.desc_na.num);
	DP_PRINT_STATS(" HW enqueue failed= %d",
			pdev->stats.tx_i.dropped.enqueue_fail);
	DP_PRINT_STATS(" Resources Full = %d",
			pdev->stats.tx_i.dropped.res_full);
	DP_PRINT_STATS(" FW removed = %d",
			pdev->stats.tx.dropped.fw_rem);
	DP_PRINT_STATS(" FW removed transmitted = %d",
			pdev->stats.tx.dropped.fw_rem_tx);
	DP_PRINT_STATS(" FW removed untransmitted = %d",
			pdev->stats.tx.dropped.fw_rem_notx);
	DP_PRINT_STATS(" FW removed untransmitted fw_reason1 = %d",
			pdev->stats.tx.dropped.fw_reason1);
	DP_PRINT_STATS(" FW removed untransmitted fw_reason2 = %d",
			pdev->stats.tx.dropped.fw_reason2);
	DP_PRINT_STATS(" FW removed untransmitted fw_reason3 = %d",
			pdev->stats.tx.dropped.fw_reason3);
	DP_PRINT_STATS(" Aged Out from msdu/mpdu queues = %d",
			pdev->stats.tx.dropped.age_out);
	DP_PRINT_STATS(" headroom insufficient = %d",
			pdev->stats.tx_i.dropped.headroom_insufficient);
	DP_PRINT_STATS(" Multicast:");
	DP_PRINT_STATS(" Packets: %u",
		       pdev->stats.tx.mcast.num);
	DP_PRINT_STATS(" Bytes: %llu",
		       pdev->stats.tx.mcast.bytes);
	DP_PRINT_STATS("Scatter Gather:");
	DP_PRINT_STATS(" Packets = %d",
			pdev->stats.tx_i.sg.sg_pkt.num);
	DP_PRINT_STATS(" Bytes = %llu",
			pdev->stats.tx_i.sg.sg_pkt.bytes);
	DP_PRINT_STATS(" Dropped By Host = %d",
			pdev->stats.tx_i.sg.dropped_host.num);
	DP_PRINT_STATS(" Dropped By Target = %d",
			pdev->stats.tx_i.sg.dropped_target);
	DP_PRINT_STATS("TSO:");
	DP_PRINT_STATS(" Number of Segments = %d",
			pdev->stats.tx_i.tso.num_seg);
	DP_PRINT_STATS(" Packets = %d",
			pdev->stats.tx_i.tso.tso_pkt.num);
	DP_PRINT_STATS(" Bytes = %llu",
			pdev->stats.tx_i.tso.tso_pkt.bytes);
	DP_PRINT_STATS(" Dropped By Host = %d",
			pdev->stats.tx_i.tso.dropped_host.num);
	DP_PRINT_STATS("Mcast Enhancement:");
	DP_PRINT_STATS(" Packets = %d",
			pdev->stats.tx_i.mcast_en.mcast_pkt.num);
	DP_PRINT_STATS(" Bytes = %llu",
			pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
	DP_PRINT_STATS(" Dropped: Map Errors = %d",
			pdev->stats.tx_i.mcast_en.dropped_map_error);
	DP_PRINT_STATS(" Dropped: Self Mac = %d",
			pdev->stats.tx_i.mcast_en.dropped_self_mac);
	DP_PRINT_STATS(" Dropped: Send Fail = %d",
			pdev->stats.tx_i.mcast_en.dropped_send_fail);
	DP_PRINT_STATS(" Unicast sent = %d",
			pdev->stats.tx_i.mcast_en.ucast);
	DP_PRINT_STATS("Raw:");
	DP_PRINT_STATS(" Packets = %d",
			pdev->stats.tx_i.raw.raw_pkt.num);
	DP_PRINT_STATS(" Bytes = %llu",
			pdev->stats.tx_i.raw.raw_pkt.bytes);
	DP_PRINT_STATS(" DMA map error = %d",
			pdev->stats.tx_i.raw.dma_map_error);
	DP_PRINT_STATS("Reinjected:");
	DP_PRINT_STATS(" Packets = %d",
			pdev->stats.tx_i.reinject_pkts.num);
	DP_PRINT_STATS(" Bytes = %llu\n",
			pdev->stats.tx_i.reinject_pkts.bytes);
	DP_PRINT_STATS("Inspected:");
	DP_PRINT_STATS(" Packets = %d",
			pdev->stats.tx_i.inspect_pkts.num);
	DP_PRINT_STATS(" Bytes = %llu",
			pdev->stats.tx_i.inspect_pkts.bytes);
	DP_PRINT_STATS("Nawds Multicast:");
	DP_PRINT_STATS(" Packets = %d",
			pdev->stats.tx_i.nawds_mcast.num);
	DP_PRINT_STATS(" Bytes = %llu",
			pdev->stats.tx_i.nawds_mcast.bytes);
	DP_PRINT_STATS("CCE Classified:");
	DP_PRINT_STATS(" CCE Classified Packets: %u",
			pdev->stats.tx_i.cce_classified);
	DP_PRINT_STATS(" RAW CCE Classified Packets: %u",
			pdev->stats.tx_i.cce_classified_raw);
	DP_PRINT_STATS("Mesh stats:");
	DP_PRINT_STATS("	frames to firmware: %u",
			pdev->stats.tx_i.mesh.exception_fw);
	DP_PRINT_STATS("	completions from fw: %u",
			pdev->stats.tx_i.mesh.completion_fw);
	/* one counter per HTT ppdu stats TLV tag */
	DP_PRINT_STATS("PPDU stats counter");
	for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
		DP_PRINT_STATS(" Tag[%d] = %llu", index,
				pdev->stats.ppdu_stats_counter[index]);
	}
}
5588
/**
 * dp_print_pdev_rx_stats(): Print Pdev level RX stats
 * @pdev: DP_PDEV Handle
 *
 * Dumps, via DP_PRINT_STATS, the aggregated rx counters of the pdev,
 * including the per-REO-ring receive counts, replenish stats, drop
 * counters, and error counters, and finally the BAR-received count
 * (which triggers a fresh REO query per peer).
 *
 * Return: void
 */
static inline void
dp_print_pdev_rx_stats(struct dp_pdev *pdev)
{
	DP_PRINT_STATS("PDEV Rx Stats:\n");
	/* receive counts are tracked per REO destination ring (0-3) */
	DP_PRINT_STATS("Received From HW (Per Rx Ring):");
	DP_PRINT_STATS(" Packets = %d %d %d %d",
			pdev->stats.rx.rcvd_reo[0].num,
			pdev->stats.rx.rcvd_reo[1].num,
			pdev->stats.rx.rcvd_reo[2].num,
			pdev->stats.rx.rcvd_reo[3].num);
	DP_PRINT_STATS(" Bytes = %llu %llu %llu %llu",
			pdev->stats.rx.rcvd_reo[0].bytes,
			pdev->stats.rx.rcvd_reo[1].bytes,
			pdev->stats.rx.rcvd_reo[2].bytes,
			pdev->stats.rx.rcvd_reo[3].bytes);
	DP_PRINT_STATS("Replenished:");
	DP_PRINT_STATS(" Packets = %d",
			pdev->stats.replenish.pkts.num);
	DP_PRINT_STATS(" Bytes = %llu",
			pdev->stats.replenish.pkts.bytes);
	DP_PRINT_STATS(" Buffers Added To Freelist = %d",
			pdev->stats.buf_freelist);
	DP_PRINT_STATS(" Low threshold intr = %d",
			pdev->stats.replenish.low_thresh_intrs);
	DP_PRINT_STATS("Dropped:");
	DP_PRINT_STATS(" msdu_not_done = %d",
			pdev->stats.dropped.msdu_not_done);
	DP_PRINT_STATS(" mon_rx_drop = %d",
			pdev->stats.dropped.mon_rx_drop);
	DP_PRINT_STATS(" mec_drop = %d",
			pdev->stats.rx.mec_drop.num);
	DP_PRINT_STATS(" Bytes = %llu",
			pdev->stats.rx.mec_drop.bytes);
	DP_PRINT_STATS("Sent To Stack:");
	DP_PRINT_STATS(" Packets = %d",
			pdev->stats.rx.to_stack.num);
	DP_PRINT_STATS(" Bytes = %llu",
			pdev->stats.rx.to_stack.bytes);
	/* NOTE(review): the label says Multicast/Broadcast but only the
	 * multicast counters are printed here - presumably bcast is folded
	 * into multicast upstream; confirm against the aggregation path.
	 */
	DP_PRINT_STATS("Multicast/Broadcast:");
	DP_PRINT_STATS(" Packets = %d",
			pdev->stats.rx.multicast.num);
	DP_PRINT_STATS(" Bytes = %llu",
			pdev->stats.rx.multicast.bytes);
	DP_PRINT_STATS("Errors:");
	DP_PRINT_STATS(" Rxdma Ring Un-inititalized = %d",
			pdev->stats.replenish.rxdma_err);
	DP_PRINT_STATS(" Desc Alloc Failed: = %d",
			pdev->stats.err.desc_alloc_fail);
	DP_PRINT_STATS(" IP checksum error = %d",
			pdev->stats.err.ip_csum_err);
	DP_PRINT_STATS(" TCP/UDP checksum error = %d",
			pdev->stats.err.tcp_udp_csum_err);

	/* Get bar_recv_cnt */
	dp_aggregate_pdev_ctrl_frames_stats(pdev);
	DP_PRINT_STATS("BAR Received Count: = %d",
			pdev->stats.rx.bar_recv_cnt);

}
5654
5655/**
Kai Chen783e0382018-01-25 16:29:08 -08005656 * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats
5657 * @pdev: DP_PDEV Handle
5658 *
5659 * Return: void
5660 */
5661static inline void
5662dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
5663{
5664 struct cdp_pdev_mon_stats *rx_mon_stats;
5665
5666 rx_mon_stats = &pdev->rx_mon_stats;
5667
5668 DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
5669
5670 dp_rx_mon_print_dbg_ppdu_stats(rx_mon_stats);
5671
5672 DP_PRINT_STATS("status_ppdu_done_cnt = %d",
5673 rx_mon_stats->status_ppdu_done);
5674 DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
5675 rx_mon_stats->dest_ppdu_done);
5676 DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
5677 rx_mon_stats->dest_mpdu_done);
Karunakar Dasinenibb7848e2018-05-07 15:09:46 -07005678 DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
5679 rx_mon_stats->dest_mpdu_drop);
Kai Chen783e0382018-01-25 16:29:08 -08005680}
5681
5682/**
Ishank Jain1e7401c2017-02-17 15:38:39 +05305683 * dp_print_soc_tx_stats(): Print SOC level stats
5684 * @soc DP_SOC Handle
5685 *
5686 * Return: void
5687 */
5688static inline void
5689dp_print_soc_tx_stats(struct dp_soc *soc)
5690{
Soumya Bhatdbb85302018-05-18 11:01:34 +05305691 uint8_t desc_pool_id;
5692 soc->stats.tx.desc_in_use = 0;
5693
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305694 DP_PRINT_STATS("SOC Tx Stats:\n");
Soumya Bhatdbb85302018-05-18 11:01:34 +05305695
5696 for (desc_pool_id = 0;
5697 desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5698 desc_pool_id++)
5699 soc->stats.tx.desc_in_use +=
5700 soc->tx_desc[desc_pool_id].num_allocated;
5701
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305702 DP_PRINT_STATS("Tx Descriptors In Use = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305703 soc->stats.tx.desc_in_use);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305704 DP_PRINT_STATS("Invalid peer:");
5705 DP_PRINT_STATS(" Packets = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05305706 soc->stats.tx.tx_invalid_peer.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305707 DP_PRINT_STATS(" Bytes = %llu",
Ishank Jaine73c4032017-03-16 11:48:15 +05305708 soc->stats.tx.tx_invalid_peer.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305709 DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05305710 soc->stats.tx.tcl_ring_full[0],
5711 soc->stats.tx.tcl_ring_full[1],
5712 soc->stats.tx.tcl_ring_full[2]);
5713
Ishank Jain1e7401c2017-02-17 15:38:39 +05305714}
/**
 * dp_print_soc_rx_stats: Print SOC level Rx stats
 * @soc: DP_SOC Handle
 *
 * Dumps the SOC-level rx error counters, then formats the per-code
 * RXDMA and REO error arrays into single lines.
 *
 * Return:void
 */
static inline void
dp_print_soc_rx_stats(struct dp_soc *soc)
{
	uint32_t i;
	char reo_error[DP_REO_ERR_LENGTH];
	char rxdma_error[DP_RXDMA_ERR_LENGTH];
	/* running offset into the formatted-line buffers below;
	 * NOTE(review): uint8_t assumes the formatted line never exceeds
	 * 255 bytes - confirm against DP_RXDMA_ERR_LENGTH/DP_REO_ERR_LENGTH
	 */
	uint8_t index = 0;

	DP_PRINT_STATS("SOC Rx Stats:\n");
	DP_PRINT_STATS("Fragmented packets: %u",
		       soc->stats.rx.rx_frags);
	DP_PRINT_STATS("Reo reinjected packets: %u",
		       soc->stats.rx.reo_reinject);
	DP_PRINT_STATS("Errors:\n");
	DP_PRINT_STATS("Rx Decrypt Errors = %d",
			(soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
			soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
	DP_PRINT_STATS("Invalid RBM = %d",
			soc->stats.rx.err.invalid_rbm);
	DP_PRINT_STATS("Invalid Vdev = %d",
			soc->stats.rx.err.invalid_vdev);
	DP_PRINT_STATS("Invalid Pdev = %d",
			soc->stats.rx.err.invalid_pdev);
	DP_PRINT_STATS("Invalid Peer = %d",
			soc->stats.rx.err.rx_invalid_peer.num);
	DP_PRINT_STATS("HAL Ring Access Fail = %d",
			soc->stats.rx.err.hal_ring_access_fail);
	/* NOTE(review): rx_frags is printed twice in this function
	 * ("Fragmented packets" above and "RX frags" here) - likely a
	 * leftover duplicate; confirm before removing either line.
	 */
	DP_PRINT_STATS("RX frags: %d", soc->stats.rx.rx_frags);
	DP_PRINT_STATS("RX HP out_of_sync: %d", soc->stats.rx.hp_oos);

	/* one " %d" token per RXDMA error code */
	for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
		index += qdf_snprint(&rxdma_error[index],
				DP_RXDMA_ERR_LENGTH - index,
				" %d", soc->stats.rx.err.rxdma_error[i]);
	}
	DP_PRINT_STATS("RXDMA Error (0-31):%s",
			rxdma_error);

	index = 0;
	/* one " %d" token per REO error code */
	for (i = 0; i < HAL_REO_ERR_MAX; i++) {
		index += qdf_snprint(&reo_error[index],
				DP_REO_ERR_LENGTH - index,
				" %d", soc->stats.rx.err.reo_error[i]);
	}
	DP_PRINT_STATS("REO Error(0-14):%s",
			reo_error);
}
5768
sumedh baikady72b1c712017-08-24 12:11:46 -07005769
5770/**
5771 * dp_print_ring_stat_from_hal(): Print hal level ring stats
5772 * @soc: DP_SOC handle
5773 * @srng: DP_SRNG handle
5774 * @ring_name: SRNG name
5775 *
5776 * Return: void
5777 */
Mohit Khanna81179cb2018-08-16 20:50:43 -07005778static void
sumedh baikady72b1c712017-08-24 12:11:46 -07005779dp_print_ring_stat_from_hal(struct dp_soc *soc, struct dp_srng *srng,
5780 char *ring_name)
5781{
5782 uint32_t tailp;
5783 uint32_t headp;
5784
Mohit Khanna81179cb2018-08-16 20:50:43 -07005785 if (soc && srng && srng->hal_srng) {
sumedh baikady72b1c712017-08-24 12:11:46 -07005786 hal_api_get_tphp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
Mohit Khanna81179cb2018-08-16 20:50:43 -07005787 DP_PRINT_STATS("%s : Head pointer = %d Tail Pointer = %d",
5788 ring_name, headp, tailp);
sumedh baikady72b1c712017-08-24 12:11:46 -07005789 }
5790}
5791
/**
 * dp_print_ring_stats(): Print tail and head pointer
 * @pdev: DP_PDEV handle
 *
 * Dumps the head/tail pointer of every SRNG reachable from this pdev:
 * the soc-level REO/TCL/WBM rings, the per-pdev RX refill rings, the
 * per-MAC monitor rings, and the error-destination and MAC buffer
 * rings. Uninitialized rings are skipped inside
 * dp_print_ring_stat_from_hal() (it checks hal_srng for NULL).
 *
 * Return: void
 */
static inline void
dp_print_ring_stats(struct dp_pdev *pdev)
{
	uint32_t i;
	/* Scratch buffer for per-index ring names, e.g. "Reo Dest Ring 3" */
	char ring_name[STR_MAXLEN + 1];
	int mac_id;

	/* soc-level REO control/exception rings */
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->soc->reo_exception_ring,
			"Reo Exception Ring");
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->soc->reo_reinject_ring,
			"Reo Inject Ring");
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->soc->reo_cmd_ring,
			"Reo Command Ring");
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->soc->reo_status_ring,
			"Reo Status Ring");
	/* soc-level release / TCL control rings */
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->soc->rx_rel_ring,
			"Rx Release ring");
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->soc->tcl_cmd_ring,
			"Tcl command Ring");
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->soc->tcl_status_ring,
			"Tcl Status Ring");
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->soc->wbm_desc_rel_ring,
			"Wbm Desc Rel Ring");
	/* REO destination rings: all slots, initialized or not */
	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
		snprintf(ring_name, STR_MAXLEN, "Reo Dest Ring %d", i);
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->soc->reo_dest_ring[i],
				ring_name);
	}

	/*
	 * TCL data rings are bounded by the configured count, while TX
	 * completion rings iterate the full MAX_TCL_DATA_RINGS range —
	 * NOTE(review): asymmetry looks intentional (uninitialized ones
	 * are skipped by the NULL check in the helper), but confirm.
	 */
	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
		snprintf(ring_name, STR_MAXLEN, "Tcl Data Ring %d", i);
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->soc->tcl_data_ring[i],
				ring_name);
	}
	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
		snprintf(ring_name, STR_MAXLEN, "Tx Comp Ring %d", i);
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->soc->tx_comp_ring[i],
				ring_name);
	}
	/* per-pdev RX buffer refill rings */
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->rx_refill_buf_ring,
			"Rx Refill Buf Ring");

	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->rx_refill_buf_ring2,
			"Second Rx Refill Buf Ring");

	/* per-MAC monitor-mode rings */
	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->rxdma_mon_buf_ring[mac_id],
				"Rxdma Mon Buf Ring");
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->rxdma_mon_dst_ring[mac_id],
				"Rxdma Mon Dst Ring");
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->rxdma_mon_status_ring[mac_id],
				"Rxdma Mon Status Ring");
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->rxdma_mon_desc_ring[mac_id],
				"Rxdma mon desc Ring");
	}

	/* per-MAC RXDMA error destination rings */
	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
		snprintf(ring_name, STR_MAXLEN, "Rxdma err dst ring %d", i);
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->rxdma_err_dst_ring[i],
				ring_name);
	}

	/* per-MAC RX buffer rings */
	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
		snprintf(ring_name, STR_MAXLEN, "Rx mac buf ring %d", i);
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->rx_mac_buf_ring[i],
				ring_name);
	}
}
5885
Ishank Jain1e7401c2017-02-17 15:38:39 +05305886/**
5887 * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
5888 * @vdev: DP_VDEV handle
5889 *
5890 * Return:void
5891 */
5892static inline void
5893dp_txrx_host_stats_clr(struct dp_vdev *vdev)
5894{
5895 struct dp_peer *peer = NULL;
Anish Nataraj28490c42018-01-19 19:34:54 +05305896 struct dp_soc *soc = (struct dp_soc *)vdev->pdev->soc;
5897
Ishank Jain1e7401c2017-02-17 15:38:39 +05305898 DP_STATS_CLR(vdev->pdev);
5899 DP_STATS_CLR(vdev->pdev->soc);
5900 DP_STATS_CLR(vdev);
5901 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5902 if (!peer)
5903 return;
5904 DP_STATS_CLR(peer);
Anish Nataraj28490c42018-01-19 19:34:54 +05305905
5906 if (soc->cdp_soc.ol_ops->update_dp_stats) {
5907 soc->cdp_soc.ol_ops->update_dp_stats(
Akshay Kosigi1a9c6d12018-04-26 00:54:23 +05305908 vdev->pdev->ctrl_pdev,
Anish Nataraj28490c42018-01-19 19:34:54 +05305909 &peer->stats,
5910 peer->peer_ids[0],
5911 UPDATE_PEER_STATS);
5912 }
5913
Ishank Jain1e7401c2017-02-17 15:38:39 +05305914 }
5915
Anish Nataraj28490c42018-01-19 19:34:54 +05305916 if (soc->cdp_soc.ol_ops->update_dp_stats)
Akshay Kosigi1a9c6d12018-04-26 00:54:23 +05305917 soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->ctrl_pdev,
Anish Nataraj28490c42018-01-19 19:34:54 +05305918 &vdev->stats, (uint16_t)vdev->vdev_id,
5919 UPDATE_VDEV_STATS);
Ishank Jain1e7401c2017-02-17 15:38:39 +05305920}
5921
5922/**
chenguo4d877b82018-08-06 14:18:05 +08005923 * dp_print_common_rates_info(): Print common rate for tx or rx
5924 * @pkt_type_array: rate type array contains rate info
5925 *
5926 * Return:void
5927 */
5928static inline void
5929dp_print_common_rates_info(struct cdp_pkt_type *pkt_type_array)
5930{
5931 uint8_t mcs, pkt_type;
5932
5933 for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5934 for (mcs = 0; mcs < MAX_MCS; mcs++) {
5935 if (!dp_rate_string[pkt_type][mcs].valid)
5936 continue;
5937
5938 DP_PRINT_STATS(" %s = %d",
5939 dp_rate_string[pkt_type][mcs].mcs_type,
5940 pkt_type_array[pkt_type].mcs_count[mcs]);
5941 }
5942
5943 DP_PRINT_STATS("\n");
5944 }
5945}
5946
/**
 * dp_print_rx_rates(): Print Rx rate stats
 * @vdev: DP_VDEV handle (only used to reach the owning pdev)
 *
 * Dumps pdev-level RX rate statistics: per-MCS counts, NSS
 * distribution, guard interval, bandwidth, reception type and
 * MPDU/MSDU aggregation counters.
 *
 * Return: void
 */
static inline void
dp_print_rx_rates(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
	uint8_t i;
	uint8_t index = 0;
	char nss[DP_NSS_LENGTH];

	DP_PRINT_STATS("Rx Rate Info:\n");
	dp_print_common_rates_info(pdev->stats.rx.pkt_type);


	/* Build " n0 n1 ..." per-spatial-stream string; each qdf_snprint
	 * returns the chars written so 'index' advances through the buffer.
	 * (The re-assignment is redundant — 'index' is already 0 above.)
	 */
	index = 0;
	for (i = 0; i < SS_COUNT; i++) {
		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
				" %d", pdev->stats.rx.nss[i]);
	}
	DP_PRINT_STATS("NSS(1-8) = %s",
			nss);

	/* Guard-interval buckets: 0=0.8us 1=0.4us 2=1.6us 3=3.2us */
	DP_PRINT_STATS("SGI ="
			" 0.8us %d,"
			" 0.4us %d,"
			" 1.6us %d,"
			" 3.2us %d,",
			pdev->stats.rx.sgi_count[0],
			pdev->stats.rx.sgi_count[1],
			pdev->stats.rx.sgi_count[2],
			pdev->stats.rx.sgi_count[3]);
	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
			pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
			pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
	DP_PRINT_STATS("Reception Type ="
			" SU: %d,"
			" MU_MIMO:%d,"
			" MU_OFDMA:%d,"
			" MU_OFDMA_MIMO:%d\n",
			pdev->stats.rx.reception_type[0],
			pdev->stats.rx.reception_type[1],
			pdev->stats.rx.reception_type[2],
			pdev->stats.rx.reception_type[3]);
	/* MPDU/MSDU aggregation breakdown */
	DP_PRINT_STATS("Aggregation:\n");
	DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
			pdev->stats.rx.ampdu_cnt);
	DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
			pdev->stats.rx.non_ampdu_cnt);
	DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
			pdev->stats.rx.amsdu_cnt);
	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
			pdev->stats.rx.non_amsdu_cnt);
}
6004
6005/**
6006 * dp_print_tx_rates(): Print tx rates
6007 * @vdev: DP_VDEV handle
6008 *
6009 * Return:void
6010 */
6011static inline void
6012dp_print_tx_rates(struct dp_vdev *vdev)
6013{
6014 struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
Venkata Sharath Chandra Manchalafaa0d8b2018-04-09 14:39:43 -07006015 uint8_t index;
6016 char nss[DP_NSS_LENGTH];
6017 int nss_index;
Ishank Jain1e7401c2017-02-17 15:38:39 +05306018
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306019 DP_PRINT_STATS("Tx Rate Info:\n");
chenguo4d877b82018-08-06 14:18:05 +08006020 dp_print_common_rates_info(pdev->stats.tx.pkt_type);
Ishank Jain1e7401c2017-02-17 15:38:39 +05306021
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306022 DP_PRINT_STATS("SGI ="
Ishank Jain57c42a12017-04-12 10:42:22 +05306023 " 0.8us %d"
6024 " 0.4us %d"
6025 " 1.6us %d"
6026 " 3.2us %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05306027 pdev->stats.tx.sgi_count[0],
6028 pdev->stats.tx.sgi_count[1],
6029 pdev->stats.tx.sgi_count[2],
6030 pdev->stats.tx.sgi_count[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306031
6032 DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
chenguoec849832018-04-11 19:14:06 +08006033 pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
6034 pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306035
Venkata Sharath Chandra Manchalafaa0d8b2018-04-09 14:39:43 -07006036 index = 0;
6037 for (nss_index = 0; nss_index < SS_COUNT; nss_index++) {
6038 index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6039 " %d", pdev->stats.tx.nss[nss_index]);
6040 }
6041
6042 DP_PRINT_STATS("NSS(1-8) = %s", nss);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306043 DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
6044 DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
6045 DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
6046 DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
6047 DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
6048
6049 DP_PRINT_STATS("Aggregation:\n");
6050 DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05306051 pdev->stats.tx.amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306052 DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05306053 pdev->stats.tx.non_amsdu_cnt);
Ishank Jain1e7401c2017-02-17 15:38:39 +05306054}
6055
6056/**
6057 * dp_print_peer_stats():print peer stats
6058 * @peer: DP_PEER handle
6059 *
6060 * return void
6061 */
6062static inline void dp_print_peer_stats(struct dp_peer *peer)
6063{
chenguo4d877b82018-08-06 14:18:05 +08006064 uint8_t i;
Ishank Jain1e7401c2017-02-17 15:38:39 +05306065 uint32_t index;
6066 char nss[DP_NSS_LENGTH];
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306067 DP_PRINT_STATS("Node Tx Stats:\n");
6068 DP_PRINT_STATS("Total Packet Completions = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306069 peer->stats.tx.comp_pkt.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306070 DP_PRINT_STATS("Total Bytes Completions = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306071 peer->stats.tx.comp_pkt.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306072 DP_PRINT_STATS("Success Packets = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306073 peer->stats.tx.tx_success.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306074 DP_PRINT_STATS("Success Bytes = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306075 peer->stats.tx.tx_success.bytes);
Pranita Solankefc2ff392017-12-15 19:25:13 +05306076 DP_PRINT_STATS("Unicast Success Packets = %d",
6077 peer->stats.tx.ucast.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306078 DP_PRINT_STATS("Unicast Success Bytes = %llu",
Pranita Solankefc2ff392017-12-15 19:25:13 +05306079 peer->stats.tx.ucast.bytes);
6080 DP_PRINT_STATS("Multicast Success Packets = %d",
6081 peer->stats.tx.mcast.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306082 DP_PRINT_STATS("Multicast Success Bytes = %llu",
Pranita Solankefc2ff392017-12-15 19:25:13 +05306083 peer->stats.tx.mcast.bytes);
Pranita Solankea5a3ae72018-01-18 21:45:27 +05306084 DP_PRINT_STATS("Broadcast Success Packets = %d",
6085 peer->stats.tx.bcast.num);
6086 DP_PRINT_STATS("Broadcast Success Bytes = %llu",
6087 peer->stats.tx.bcast.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306088 DP_PRINT_STATS("Packets Failed = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306089 peer->stats.tx.tx_failed);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306090 DP_PRINT_STATS("Packets In OFDMA = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306091 peer->stats.tx.ofdma);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306092 DP_PRINT_STATS("Packets In STBC = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306093 peer->stats.tx.stbc);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306094 DP_PRINT_STATS("Packets In LDPC = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306095 peer->stats.tx.ldpc);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306096 DP_PRINT_STATS("Packet Retries = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306097 peer->stats.tx.retries);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306098 DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306099 peer->stats.tx.amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306100 DP_PRINT_STATS("Last Packet RSSI = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306101 peer->stats.tx.last_ack_rssi);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306102 DP_PRINT_STATS("Dropped At FW: Removed = %d",
6103 peer->stats.tx.dropped.fw_rem);
6104 DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
6105 peer->stats.tx.dropped.fw_rem_tx);
6106 DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
6107 peer->stats.tx.dropped.fw_rem_notx);
6108 DP_PRINT_STATS("Dropped : Age Out = %d",
6109 peer->stats.tx.dropped.age_out);
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05306110 DP_PRINT_STATS("NAWDS : ");
6111 DP_PRINT_STATS(" Nawds multicast Drop Tx Packet = %d",
6112 peer->stats.tx.nawds_mcast_drop);
6113 DP_PRINT_STATS(" Nawds multicast Tx Packet Count = %d",
6114 peer->stats.tx.nawds_mcast.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306115 DP_PRINT_STATS(" Nawds multicast Tx Packet Bytes = %llu",
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05306116 peer->stats.tx.nawds_mcast.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306117
6118 DP_PRINT_STATS("Rate Info:");
chenguo4d877b82018-08-06 14:18:05 +08006119 dp_print_common_rates_info(peer->stats.tx.pkt_type);
Ishank Jain1e7401c2017-02-17 15:38:39 +05306120
Ishank Jain1e7401c2017-02-17 15:38:39 +05306121
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306122 DP_PRINT_STATS("SGI = "
Ishank Jain57c42a12017-04-12 10:42:22 +05306123 " 0.8us %d"
6124 " 0.4us %d"
6125 " 1.6us %d"
6126 " 3.2us %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306127 peer->stats.tx.sgi_count[0],
6128 peer->stats.tx.sgi_count[1],
6129 peer->stats.tx.sgi_count[2],
6130 peer->stats.tx.sgi_count[3]);
Pranita Solankea5a3ae72018-01-18 21:45:27 +05306131 DP_PRINT_STATS("Excess Retries per AC ");
6132 DP_PRINT_STATS(" Best effort = %d",
6133 peer->stats.tx.excess_retries_per_ac[0]);
6134 DP_PRINT_STATS(" Background= %d",
6135 peer->stats.tx.excess_retries_per_ac[1]);
6136 DP_PRINT_STATS(" Video = %d",
6137 peer->stats.tx.excess_retries_per_ac[2]);
6138 DP_PRINT_STATS(" Voice = %d",
6139 peer->stats.tx.excess_retries_per_ac[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306140 DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
Pranita Solanked7e10ba2017-12-13 15:40:38 +05306141 peer->stats.tx.bw[2], peer->stats.tx.bw[3],
6142 peer->stats.tx.bw[4], peer->stats.tx.bw[5]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306143
Pranita Solankeed0aba62018-01-12 19:14:31 +05306144 index = 0;
6145 for (i = 0; i < SS_COUNT; i++) {
6146 index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6147 " %d", peer->stats.tx.nss[i]);
6148 }
6149 DP_PRINT_STATS("NSS(1-8) = %s",
6150 nss);
6151
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306152 DP_PRINT_STATS("Aggregation:");
6153 DP_PRINT_STATS(" Number of Msdu's Part of Amsdu = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05306154 peer->stats.tx.amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306155 DP_PRINT_STATS(" Number of Msdu's With No Msdu Level Aggregation = %d\n",
Ishank Jaine73c4032017-03-16 11:48:15 +05306156 peer->stats.tx.non_amsdu_cnt);
Ishank Jain1e7401c2017-02-17 15:38:39 +05306157
Ruchi, Agrawal234753c2018-06-28 14:53:37 +05306158 DP_PRINT_STATS("Bytes and Packets transmitted in last one sec:");
6159 DP_PRINT_STATS(" Bytes transmitted in last sec: %d",
6160 peer->stats.tx.tx_byte_rate);
6161 DP_PRINT_STATS(" Data transmitted in last sec: %d",
6162 peer->stats.tx.tx_data_rate);
6163
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306164 DP_PRINT_STATS("Node Rx Stats:");
6165 DP_PRINT_STATS("Packets Sent To Stack = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306166 peer->stats.rx.to_stack.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306167 DP_PRINT_STATS("Bytes Sent To Stack = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306168 peer->stats.rx.to_stack.bytes);
Ishank Jain57c42a12017-04-12 10:42:22 +05306169 for (i = 0; i < CDP_MAX_RX_RINGS; i++) {
Pranita Solankefc2ff392017-12-15 19:25:13 +05306170 DP_PRINT_STATS("Ring Id = %d", i);
6171 DP_PRINT_STATS(" Packets Received = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05306172 peer->stats.rx.rcvd_reo[i].num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306173 DP_PRINT_STATS(" Bytes Received = %llu",
Ishank Jain57c42a12017-04-12 10:42:22 +05306174 peer->stats.rx.rcvd_reo[i].bytes);
6175 }
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306176 DP_PRINT_STATS("Multicast Packets Received = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306177 peer->stats.rx.multicast.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306178 DP_PRINT_STATS("Multicast Bytes Received = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306179 peer->stats.rx.multicast.bytes);
Pranita Solankea5a3ae72018-01-18 21:45:27 +05306180 DP_PRINT_STATS("Broadcast Packets Received = %d",
6181 peer->stats.rx.bcast.num);
6182 DP_PRINT_STATS("Broadcast Bytes Received = %llu",
6183 peer->stats.rx.bcast.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306184 DP_PRINT_STATS("Intra BSS Packets Received = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05306185 peer->stats.rx.intra_bss.pkts.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306186 DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
Ishank Jain57c42a12017-04-12 10:42:22 +05306187 peer->stats.rx.intra_bss.pkts.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306188 DP_PRINT_STATS("Raw Packets Received = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306189 peer->stats.rx.raw.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306190 DP_PRINT_STATS("Raw Bytes Received = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306191 peer->stats.rx.raw.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306192 DP_PRINT_STATS("Errors: MIC Errors = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306193 peer->stats.rx.err.mic_err);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306194 DP_PRINT_STATS("Erros: Decryption Errors = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306195 peer->stats.rx.err.decrypt_err);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306196 DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306197 peer->stats.rx.non_ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306198 DP_PRINT_STATS("Msdu's Recived As Ampdu = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05306199 peer->stats.rx.ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306200 DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306201 peer->stats.rx.non_amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306202 DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306203 peer->stats.rx.amsdu_cnt);
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05306204 DP_PRINT_STATS("NAWDS : ");
6205 DP_PRINT_STATS(" Nawds multicast Drop Rx Packet = %d",
Ruchi, Agrawal27550482018-02-20 19:43:41 +05306206 peer->stats.rx.nawds_mcast_drop);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306207 DP_PRINT_STATS("SGI ="
Ishank Jain57c42a12017-04-12 10:42:22 +05306208 " 0.8us %d"
6209 " 0.4us %d"
6210 " 1.6us %d"
6211 " 3.2us %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306212 peer->stats.rx.sgi_count[0],
6213 peer->stats.rx.sgi_count[1],
6214 peer->stats.rx.sgi_count[2],
6215 peer->stats.rx.sgi_count[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306216 DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306217 peer->stats.rx.bw[0], peer->stats.rx.bw[1],
6218 peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306219 DP_PRINT_STATS("Reception Type ="
Ishank Jain1e7401c2017-02-17 15:38:39 +05306220 " SU %d,"
6221 " MU_MIMO %d,"
6222 " MU_OFDMA %d,"
6223 " MU_OFDMA_MIMO %d",
6224 peer->stats.rx.reception_type[0],
6225 peer->stats.rx.reception_type[1],
6226 peer->stats.rx.reception_type[2],
6227 peer->stats.rx.reception_type[3]);
6228
chenguo4d877b82018-08-06 14:18:05 +08006229 dp_print_common_rates_info(peer->stats.rx.pkt_type);
Ishank Jain1e7401c2017-02-17 15:38:39 +05306230
6231 index = 0;
6232 for (i = 0; i < SS_COUNT; i++) {
6233 index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
Ishank Jain57c42a12017-04-12 10:42:22 +05306234 " %d", peer->stats.rx.nss[i]);
Ishank Jain1e7401c2017-02-17 15:38:39 +05306235 }
Anish Nataraj072d8972018-01-09 18:23:33 +05306236 DP_PRINT_STATS("NSS(1-8) = %s",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306237 nss);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306238
6239 DP_PRINT_STATS("Aggregation:");
6240 DP_PRINT_STATS(" Msdu's Part of Ampdu = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05306241 peer->stats.rx.ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306242 DP_PRINT_STATS(" Msdu's With No Mpdu Level Aggregation = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05306243 peer->stats.rx.non_ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306244 DP_PRINT_STATS(" Msdu's Part of Amsdu = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05306245 peer->stats.rx.amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306246 DP_PRINT_STATS(" Msdu's With No Msdu Level Aggregation = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05306247 peer->stats.rx.non_amsdu_cnt);
Ruchi, Agrawal234753c2018-06-28 14:53:37 +05306248
6249 DP_PRINT_STATS("Bytes and Packets received in last one sec:");
6250 DP_PRINT_STATS(" Bytes received in last sec: %d",
6251 peer->stats.rx.rx_byte_rate);
6252 DP_PRINT_STATS(" Data received in last sec: %d",
6253 peer->stats.rx.rx_data_rate);
Ishank Jain1e7401c2017-02-17 15:38:39 +05306254}
6255
Venkata Sharath Chandra Manchala0cb31982018-03-30 15:55:26 -07006256/*
6257 * dp_get_host_peer_stats()- function to print peer stats
6258 * @pdev_handle: DP_PDEV handle
6259 * @mac_addr: mac address of the peer
6260 *
6261 * Return: void
6262 */
6263static void
6264dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
6265{
6266 struct dp_peer *peer;
6267 uint8_t local_id;
6268
6269 peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
6270 &local_id);
6271
6272 if (!peer) {
6273 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6274 "%s: Invalid peer\n", __func__);
6275 return;
6276 }
6277
6278 dp_print_peer_stats(peer);
6279 dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
6280}
6281
/**
 * dp_txrx_stats_help() - Helper function for Txrx_Stats
 *
 * Prints the txrx_stats iwpriv usage line followed by the list of
 * supported stats_option values. The option lines live in a single
 * static table so the menu stays in one place.
 *
 * Return: None
 */
static void dp_txrx_stats_help(void)
{
	static const char * const stats_options[] = {
		" 1 -- HTT Tx Statistics",
		" 2 -- HTT Rx Statistics",
		" 3 -- HTT Tx HW Queue Statistics",
		" 4 -- HTT Tx HW Sched Statistics",
		" 5 -- HTT Error Statistics",
		" 6 -- HTT TQM Statistics",
		" 7 -- HTT TQM CMDQ Statistics",
		" 8 -- HTT TX_DE_CMN Statistics",
		" 9 -- HTT Tx Rate Statistics",
		" 10 -- HTT Rx Rate Statistics",
		" 11 -- HTT Peer Statistics",
		" 12 -- HTT Tx SelfGen Statistics",
		" 13 -- HTT Tx MU HWQ Statistics",
		" 14 -- HTT RING_IF_INFO Statistics",
		" 15 -- HTT SRNG Statistics",
		" 16 -- HTT SFM Info Statistics",
		" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics",
		" 18 -- HTT Peer List Details",
		" 20 -- Clear Host Statistics",
		" 21 -- Host Rx Rate Statistics",
		" 22 -- Host Tx Rate Statistics",
		" 23 -- Host Tx Statistics",
		" 24 -- Host Rx Statistics",
		" 25 -- Host AST Statistics",
		" 26 -- Host SRNG PTR Statistics",
		" 27 -- Host Mon Statistics",
		" 28 -- Host REO Queue Statistics",
	};
	unsigned int opt;

	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
	dp_info("stats_option:");
	for (opt = 0; opt < sizeof(stats_options) / sizeof(stats_options[0]);
	     opt++)
		dp_info("%s", stats_options[opt]);
}
6319
/**
 * dp_print_host_stats()- Function to print the stats aggregated at host
 * @vdev_handle: DP_VDEV handle
 * @req: stats request; req->stats is mapped through
 *       dp_stats_mapping_table to pick the host stats type, and
 *       req->peer_addr is used for the REO queue stats option
 *
 * Available Stat types
 * TXRX_CLEAR_STATS : Clear the stats
 * TXRX_RX_RATE_STATS: Print Rx Rate Info
 * TXRX_TX_RATE_STATS: Print Tx Rate Info
 * TXRX_TX_HOST_STATS: Print Tx Stats
 * TXRX_RX_HOST_STATS: Print Rx Stats
 * TXRX_AST_STATS: Print AST Stats
 * TXRX_SRNG_PTR_STATS: Print SRNG ring pointer stats
 * TXRX_RX_MON_STATS: Print monitor-mode Rx stats
 * TXRX_REO_QUEUE_STATS: Print peer stats + per-TID REO queue stats
 *
 * Return: 0 always (unrecognized types just print the help text)
 */
static int
dp_print_host_stats(struct cdp_vdev *vdev_handle,
		    struct cdp_txrx_stats_req *req)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
	enum cdp_host_txrx_stats type =
			dp_stats_mapping_table[req->stats][STATS_HOST];

	/* Refresh pdev-level aggregates before any of the dumps below */
	dp_aggregate_pdev_stats(pdev);

	switch (type) {
	case TXRX_CLEAR_STATS:
		dp_txrx_host_stats_clr(vdev);
		break;
	case TXRX_RX_RATE_STATS:
		dp_print_rx_rates(vdev);
		break;
	case TXRX_TX_RATE_STATS:
		dp_print_tx_rates(vdev);
		break;
	case TXRX_TX_HOST_STATS:
		dp_print_pdev_tx_stats(pdev);
		dp_print_soc_tx_stats(pdev->soc);
		break;
	case TXRX_RX_HOST_STATS:
		dp_print_pdev_rx_stats(pdev);
		dp_print_soc_rx_stats(pdev->soc);
		break;
	case TXRX_AST_STATS:
		dp_print_ast_stats(pdev->soc);
		dp_print_peer_table(vdev);
		break;
	case TXRX_SRNG_PTR_STATS:
		dp_print_ring_stats(pdev);
		break;
	case TXRX_RX_MON_STATS:
		dp_print_pdev_rx_mon_stats(pdev);
		break;
	case TXRX_REO_QUEUE_STATS:
		/* peer_addr selects which peer's REO queues to dump */
		dp_get_host_peer_stats((struct cdp_pdev *)pdev, req->peer_addr);
		break;
	default:
		dp_info("Wrong Input For TxRx Host Stats");
		dp_txrx_stats_help();
		break;
	}
	return 0;
}
6385
6386/*
Soumya Bhat7422db82017-12-15 13:48:53 +05306387 * dp_ppdu_ring_reset()- Reset PPDU Stats ring
6388 * @pdev: DP_PDEV handle
6389 *
6390 * Return: void
6391 */
6392static void
6393dp_ppdu_ring_reset(struct dp_pdev *pdev)
6394{
6395 struct htt_rx_ring_tlv_filter htt_tlv_filter;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08006396 int mac_id;
Soumya Bhat7422db82017-12-15 13:48:53 +05306397
6398 qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
6399
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08006400 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6401 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6402 pdev->pdev_id);
Soumya Bhat7422db82017-12-15 13:48:53 +05306403
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08006404 htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
6405 pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6406 RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6407 }
Soumya Bhat7422db82017-12-15 13:48:53 +05306408}
6409
6410/*
Anish Nataraj38a29562017-08-18 19:41:17 +05306411 * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
6412 * @pdev: DP_PDEV handle
6413 *
6414 * Return: void
6415 */
6416static void
6417dp_ppdu_ring_cfg(struct dp_pdev *pdev)
6418{
6419 struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08006420 int mac_id;
Anish Nataraj38a29562017-08-18 19:41:17 +05306421
Soumya Bhat35fc6992018-03-09 18:39:03 +05306422 htt_tlv_filter.mpdu_start = 1;
Anish Nataraj38a29562017-08-18 19:41:17 +05306423 htt_tlv_filter.msdu_start = 0;
6424 htt_tlv_filter.packet = 0;
6425 htt_tlv_filter.msdu_end = 0;
6426 htt_tlv_filter.mpdu_end = 0;
nobelj1c31fee2018-03-21 11:47:05 -07006427 htt_tlv_filter.attention = 0;
Anish Nataraj38a29562017-08-18 19:41:17 +05306428 htt_tlv_filter.ppdu_start = 1;
6429 htt_tlv_filter.ppdu_end = 1;
6430 htt_tlv_filter.ppdu_end_user_stats = 1;
6431 htt_tlv_filter.ppdu_end_user_stats_ext = 1;
6432 htt_tlv_filter.ppdu_end_status_done = 1;
6433 htt_tlv_filter.enable_fp = 1;
6434 htt_tlv_filter.enable_md = 0;
sumedh baikady59a2d332018-05-22 01:50:38 -07006435 if (pdev->neighbour_peers_added &&
6436 pdev->soc->hw_nac_monitor_support) {
6437 htt_tlv_filter.enable_md = 1;
6438 htt_tlv_filter.packet_header = 1;
6439 }
nobelj1c31fee2018-03-21 11:47:05 -07006440 if (pdev->mcopy_mode) {
6441 htt_tlv_filter.packet_header = 1;
Soumya Bhat2f54de22018-02-21 09:54:28 +05306442 htt_tlv_filter.enable_mo = 1;
nobelj1c31fee2018-03-21 11:47:05 -07006443 }
nobeljd124b742017-10-16 11:59:12 -07006444 htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
6445 htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
6446 htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
6447 htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
6448 htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
6449 htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
sumedh baikady59a2d332018-05-22 01:50:38 -07006450 if (pdev->neighbour_peers_added &&
6451 pdev->soc->hw_nac_monitor_support)
6452 htt_tlv_filter.md_data_filter = FILTER_DATA_ALL;
Anish Nataraj38a29562017-08-18 19:41:17 +05306453
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08006454 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6455 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6456 pdev->pdev_id);
6457
6458 htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
6459 pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6460 RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6461 }
Anish Nataraj38a29562017-08-18 19:41:17 +05306462}
6463
6464/*
Alok Singh40a622b2018-06-28 10:47:26 +05306465 * is_ppdu_txrx_capture_enabled() - API to check both pktlog and debug_sniffer
6466 * modes are enabled or not.
6467 * @dp_pdev: dp pdev handle.
6468 *
6469 * Return: bool
6470 */
6471static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
6472{
6473 if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
6474 !pdev->mcopy_mode)
6475 return true;
6476 else
6477 return false;
6478}
6479
6480/*
Vinay Adella873dc402018-05-28 12:06:34 +05306481 *dp_set_bpr_enable() - API to enable/disable bpr feature
6482 *@pdev_handle: DP_PDEV handle.
6483 *@val: Provided value.
6484 *
6485 *Return: void
6486 */
6487static void
6488dp_set_bpr_enable(struct cdp_pdev *pdev_handle, int val)
6489{
6490 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6491
6492 switch (val) {
6493 case CDP_BPR_DISABLE:
6494 pdev->bpr_enable = CDP_BPR_DISABLE;
6495 if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
6496 !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
6497 dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6498 } else if (pdev->enhanced_stats_en &&
6499 !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
6500 !pdev->pktlog_ppdu_stats) {
6501 dp_h2t_cfg_stats_msg_send(pdev,
6502 DP_PPDU_STATS_CFG_ENH_STATS,
6503 pdev->pdev_id);
6504 }
6505 break;
6506 case CDP_BPR_ENABLE:
6507 pdev->bpr_enable = CDP_BPR_ENABLE;
6508 if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
6509 !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
6510 dp_h2t_cfg_stats_msg_send(pdev,
6511 DP_PPDU_STATS_CFG_BPR,
6512 pdev->pdev_id);
6513 } else if (pdev->enhanced_stats_en &&
6514 !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
6515 !pdev->pktlog_ppdu_stats) {
6516 dp_h2t_cfg_stats_msg_send(pdev,
6517 DP_PPDU_STATS_CFG_BPR_ENH,
6518 pdev->pdev_id);
6519 } else if (pdev->pktlog_ppdu_stats) {
6520 dp_h2t_cfg_stats_msg_send(pdev,
6521 DP_PPDU_STATS_CFG_BPR_PKTLOG,
6522 pdev->pdev_id);
6523 }
6524 break;
6525 default:
6526 break;
6527 }
6528}
6529
6530/*
Soumya Bhat6fee59c2017-10-31 13:12:37 +05306531 * dp_config_debug_sniffer()- API to enable/disable debug sniffer
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306532 * @pdev_handle: DP_PDEV handle
6533 * @val: user provided value
6534 *
6535 * Return: void
6536 */
6537static void
Soumya Bhat6fee59c2017-10-31 13:12:37 +05306538dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306539{
6540 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6541
Soumya Bhat89647ef2017-11-16 17:23:48 +05306542 switch (val) {
6543 case 0:
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306544 pdev->tx_sniffer_enable = 0;
Soumya Bhat7422db82017-12-15 13:48:53 +05306545 pdev->mcopy_mode = 0;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306546
Alok Singh40a622b2018-06-28 10:47:26 +05306547 if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
6548 !pdev->bpr_enable) {
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07006549 dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
Soumya Bhat7422db82017-12-15 13:48:53 +05306550 dp_ppdu_ring_reset(pdev);
Alok Singh40a622b2018-06-28 10:47:26 +05306551 } else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
Soumya Bhat0d6245c2018-02-08 21:02:57 +05306552 dp_h2t_cfg_stats_msg_send(pdev,
6553 DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
Alok Singh40a622b2018-06-28 10:47:26 +05306554 } else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
6555 dp_h2t_cfg_stats_msg_send(pdev,
6556 DP_PPDU_STATS_CFG_BPR_ENH,
6557 pdev->pdev_id);
6558 } else {
6559 dp_h2t_cfg_stats_msg_send(pdev,
6560 DP_PPDU_STATS_CFG_BPR,
6561 pdev->pdev_id);
Soumya Bhat7422db82017-12-15 13:48:53 +05306562 }
Soumya Bhat89647ef2017-11-16 17:23:48 +05306563 break;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306564
Soumya Bhat89647ef2017-11-16 17:23:48 +05306565 case 1:
6566 pdev->tx_sniffer_enable = 1;
Soumya Bhat7422db82017-12-15 13:48:53 +05306567 pdev->mcopy_mode = 0;
6568
Soumya Bhat0d6245c2018-02-08 21:02:57 +05306569 if (!pdev->pktlog_ppdu_stats)
Soumya Bhat7422db82017-12-15 13:48:53 +05306570 dp_h2t_cfg_stats_msg_send(pdev,
Soumya Bhat0d6245c2018-02-08 21:02:57 +05306571 DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
Soumya Bhat89647ef2017-11-16 17:23:48 +05306572 break;
6573 case 2:
Soumya Bhat7422db82017-12-15 13:48:53 +05306574 pdev->mcopy_mode = 1;
Soumya Bhat89647ef2017-11-16 17:23:48 +05306575 pdev->tx_sniffer_enable = 0;
Soumya Bhat14b6f262018-06-20 16:33:49 +05306576 dp_ppdu_ring_cfg(pdev);
Soumya Bhat0d6245c2018-02-08 21:02:57 +05306577
6578 if (!pdev->pktlog_ppdu_stats)
Soumya Bhat7422db82017-12-15 13:48:53 +05306579 dp_h2t_cfg_stats_msg_send(pdev,
Soumya Bhat0d6245c2018-02-08 21:02:57 +05306580 DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
Soumya Bhat89647ef2017-11-16 17:23:48 +05306581 break;
6582 default:
6583 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +05306584 "Invalid value");
Soumya Bhat89647ef2017-11-16 17:23:48 +05306585 break;
6586 }
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306587}
6588
6589/*
Pamidipati, Vijaybe379452017-06-21 00:31:06 +05306590 * dp_enable_enhanced_stats()- API to enable enhanced statistcs
6591 * @pdev_handle: DP_PDEV handle
6592 *
6593 * Return: void
6594 */
6595static void
6596dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
6597{
6598 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Ruchi, Agrawal234753c2018-06-28 14:53:37 +05306599
6600 if (pdev->enhanced_stats_en == 0)
6601 dp_cal_client_timer_start(pdev->cal_client_ctx);
6602
Pamidipati, Vijaybe379452017-06-21 00:31:06 +05306603 pdev->enhanced_stats_en = 1;
Anish Nataraj38a29562017-08-18 19:41:17 +05306604
Chaithanya Garrepalli1bbf4f02018-07-20 12:07:38 +05306605 if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
6606 !pdev->monitor_vdev)
Soumya Bhat7422db82017-12-15 13:48:53 +05306607 dp_ppdu_ring_cfg(pdev);
6608
Alok Singh40a622b2018-06-28 10:47:26 +05306609 if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
Soumya Bhat0d6245c2018-02-08 21:02:57 +05306610 dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
Alok Singh40a622b2018-06-28 10:47:26 +05306611 } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
6612 dp_h2t_cfg_stats_msg_send(pdev,
6613 DP_PPDU_STATS_CFG_BPR_ENH,
6614 pdev->pdev_id);
6615 }
Pamidipati, Vijaybe379452017-06-21 00:31:06 +05306616}
6617
6618/*
6619 * dp_disable_enhanced_stats()- API to disable enhanced statistcs
6620 * @pdev_handle: DP_PDEV handle
6621 *
6622 * Return: void
6623 */
6624static void
6625dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
6626{
6627 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306628
Ruchi, Agrawal234753c2018-06-28 14:53:37 +05306629 if (pdev->enhanced_stats_en == 1)
6630 dp_cal_client_timer_stop(pdev->cal_client_ctx);
6631
Pamidipati, Vijaybe379452017-06-21 00:31:06 +05306632 pdev->enhanced_stats_en = 0;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306633
Alok Singh40a622b2018-06-28 10:47:26 +05306634 if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07006635 dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
Alok Singh40a622b2018-06-28 10:47:26 +05306636 } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
6637 dp_h2t_cfg_stats_msg_send(pdev,
6638 DP_PPDU_STATS_CFG_BPR,
6639 pdev->pdev_id);
6640 }
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05306641
Chaithanya Garrepalli1bbf4f02018-07-20 12:07:38 +05306642 if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
6643 !pdev->monitor_vdev)
Soumya Bhat7422db82017-12-15 13:48:53 +05306644 dp_ppdu_ring_reset(pdev);
Pamidipati, Vijaybe379452017-06-21 00:31:06 +05306645}
6646
6647/*
Ishank Jain6290a3c2017-03-21 10:49:39 +05306648 * dp_get_fw_peer_stats()- function to print peer stats
6649 * @pdev_handle: DP_PDEV handle
6650 * @mac_addr: mac address of the peer
6651 * @cap: Type of htt stats requested
6652 *
6653 * Currently Supporting only MAC ID based requests Only
6654 * 1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
6655 * 2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
6656 * 3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
6657 *
6658 * Return: void
6659 */
6660static void
6661dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
6662 uint32_t cap)
6663{
6664 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Pamidipati, Vijayc2cf6692017-11-22 10:17:34 +05306665 int i;
Ishank Jain6290a3c2017-03-21 10:49:39 +05306666 uint32_t config_param0 = 0;
6667 uint32_t config_param1 = 0;
6668 uint32_t config_param2 = 0;
6669 uint32_t config_param3 = 0;
6670
6671 HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
6672 config_param0 |= (1 << (cap + 1));
6673
Pamidipati, Vijayc2cf6692017-11-22 10:17:34 +05306674 for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
6675 config_param1 |= (1 << i);
6676 }
Ishank Jain6290a3c2017-03-21 10:49:39 +05306677
6678 config_param2 |= (mac_addr[0] & 0x000000ff);
6679 config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
6680 config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
6681 config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
6682
6683 config_param3 |= (mac_addr[4] & 0x000000ff);
6684 config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
6685
6686 dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
6687 config_param0, config_param1, config_param2,
Venkata Sharath Chandra Manchalaac863c42018-02-19 22:47:07 -08006688 config_param3, 0, 0, 0);
Karunakar Dasineni93f633c2017-06-02 19:04:46 -07006689
Ishank Jain6290a3c2017-03-21 10:49:39 +05306690}
6691
/* This struct definition will be removed from here
 * once it get added in FW headers*/
struct httstats_cmd_req {
	uint32_t config_param0;	/* forwarded verbatim to dp_h2t_ext_stats_msg_send */
	uint32_t config_param1;	/* forwarded verbatim to dp_h2t_ext_stats_msg_send */
	uint32_t config_param2;	/* forwarded verbatim to dp_h2t_ext_stats_msg_send */
	uint32_t config_param3;	/* forwarded verbatim to dp_h2t_ext_stats_msg_send */
	int cookie;		/* opaque cookie echoed back in the stats response */
	u_int8_t stats_id;	/* HTT stats type requested from FW */
};
6702
6703/*
6704 * dp_get_htt_stats: function to process the httstas request
6705 * @pdev_handle: DP pdev handle
6706 * @data: pointer to request data
6707 * @data_len: length for request data
6708 *
6709 * return: void
6710 */
6711static void
6712dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
6713{
6714 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6715 struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
6716
6717 QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
6718 dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
6719 req->config_param0, req->config_param1,
6720 req->config_param2, req->config_param3,
Venkata Sharath Chandra Manchalaac863c42018-02-19 22:47:07 -08006721 req->cookie, 0, 0);
Chaithanya Garrepalli30927c52017-11-22 14:31:47 +05306722}
Vinay Adella873dc402018-05-28 12:06:34 +05306723
Ishank Jain9f174c62017-03-30 18:37:42 +05306724/*
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306725 * dp_set_pdev_param: function to set parameters in pdev
6726 * @pdev_handle: DP pdev handle
6727 * @param: parameter type to be set
6728 * @val: value of parameter to be set
6729 *
6730 * return: void
6731 */
6732static void dp_set_pdev_param(struct cdp_pdev *pdev_handle,
6733 enum cdp_pdev_param_type param, uint8_t val)
6734{
Pamidipati, Vijay13f5ec22018-08-06 17:34:21 +05306735 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306736 switch (param) {
Soumya Bhat6fee59c2017-10-31 13:12:37 +05306737 case CDP_CONFIG_DEBUG_SNIFFER:
6738 dp_config_debug_sniffer(pdev_handle, val);
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306739 break;
Vinay Adella873dc402018-05-28 12:06:34 +05306740 case CDP_CONFIG_BPR_ENABLE:
6741 dp_set_bpr_enable(pdev_handle, val);
6742 break;
Pamidipati, Vijay13f5ec22018-08-06 17:34:21 +05306743 case CDP_CONFIG_PRIMARY_RADIO:
6744 pdev->is_primary = val;
6745 break;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306746 default:
6747 break;
6748 }
6749}
6750
6751/*
Ishank Jain9f174c62017-03-30 18:37:42 +05306752 * dp_set_vdev_param: function to set parameters in vdev
6753 * @param: parameter type to be set
6754 * @val: value of parameter to be set
6755 *
6756 * return: void
6757 */
6758static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
6759 enum cdp_vdev_param_type param, uint32_t val)
6760{
6761 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
Ishank Jain9f174c62017-03-30 18:37:42 +05306762 switch (param) {
Pamidipati, Vijayc9a13a52017-04-06 17:45:49 +05306763 case CDP_ENABLE_WDS:
Pamidipati, Vijay3756b762018-05-12 11:10:37 +05306764 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6765 "wds_enable %d for vdev(%p) id(%d)\n",
6766 val, vdev, vdev->vdev_id);
Pamidipati, Vijayc9a13a52017-04-06 17:45:49 +05306767 vdev->wds_enabled = val;
6768 break;
Ishank Jain9f174c62017-03-30 18:37:42 +05306769 case CDP_ENABLE_NAWDS:
6770 vdev->nawds_enabled = val;
Ishank Jainb463d9a2017-05-08 14:59:47 +05306771 break;
Ishank Jainc838b132017-02-17 11:08:18 +05306772 case CDP_ENABLE_MCAST_EN:
6773 vdev->mcast_enhancement_en = val;
Pamidipati, Vijayc9a13a52017-04-06 17:45:49 +05306774 break;
6775 case CDP_ENABLE_PROXYSTA:
6776 vdev->proxysta_vdev = val;
6777 break;
Kabilan Kannan56bfd8f2017-04-26 13:26:47 -07006778 case CDP_UPDATE_TDLS_FLAGS:
6779 vdev->tdls_link_connected = val;
Ishank Jainb463d9a2017-05-08 14:59:47 +05306780 break;
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05306781 case CDP_CFG_WDS_AGING_TIMER:
6782 if (val == 0)
6783 qdf_timer_stop(&vdev->pdev->soc->wds_aging_timer);
6784 else if (val != vdev->wds_aging_timer_val)
6785 qdf_timer_mod(&vdev->pdev->soc->wds_aging_timer, val);
6786
6787 vdev->wds_aging_timer_val = val;
6788 break;
URAJ SASAN81d95712017-08-21 20:51:03 +05306789 case CDP_ENABLE_AP_BRIDGE:
6790 if (wlan_op_mode_sta != vdev->opmode)
6791 vdev->ap_bridge_enabled = val;
6792 else
6793 vdev->ap_bridge_enabled = false;
6794 break;
ruchi agrawal45f3ac42017-10-25 09:03:28 +05306795 case CDP_ENABLE_CIPHER:
6796 vdev->sec_type = val;
6797 break;
Nandha Kishore Easwaran47e74162017-12-12 11:54:01 +05306798 case CDP_ENABLE_QWRAP_ISOLATION:
6799 vdev->isolation_vdev = val;
6800 break;
Ishank Jain9f174c62017-03-30 18:37:42 +05306801 default:
6802 break;
6803 }
Pamidipati, Vijayc9a13a52017-04-06 17:45:49 +05306804
6805 dp_tx_vdev_update_search_flags(vdev);
Ishank Jain9f174c62017-03-30 18:37:42 +05306806}
6807
6808/**
6809 * dp_peer_set_nawds: set nawds bit in peer
6810 * @peer_handle: pointer to peer
6811 * @value: enable/disable nawds
6812 *
6813 * return: void
6814 */
c_cgodavbd5b3c22017-06-07 12:31:40 +05306815static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
Ishank Jain9f174c62017-03-30 18:37:42 +05306816{
6817 struct dp_peer *peer = (struct dp_peer *)peer_handle;
6818 peer->nawds_enabled = value;
6819}
Ishank Jain1e7401c2017-02-17 15:38:39 +05306820
Ishank Jain949674c2017-02-27 17:09:29 +05306821/*
6822 * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
6823 * @vdev_handle: DP_VDEV handle
6824 * @map_id:ID of map that needs to be updated
6825 *
6826 * Return: void
6827 */
6828static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
6829 uint8_t map_id)
6830{
6831 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6832 vdev->dscp_tid_map_id = map_id;
6833 return;
6834}
6835
Ruchi, Agrawal2a6e6142018-06-01 18:47:55 +05306836/* dp_txrx_get_peer_stats - will return cdp_peer_stats
6837 * @peer_handle: DP_PEER handle
6838 *
6839 * return : cdp_peer_stats pointer
6840 */
6841static struct cdp_peer_stats*
6842 dp_txrx_get_peer_stats(struct cdp_peer *peer_handle)
6843{
6844 struct dp_peer *peer = (struct dp_peer *)peer_handle;
6845
6846 qdf_assert(peer);
6847
6848 return &peer->stats;
6849}
6850
6851/* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
6852 * @peer_handle: DP_PEER handle
6853 *
6854 * return : void
6855 */
6856static void dp_txrx_reset_peer_stats(struct cdp_peer *peer_handle)
6857{
6858 struct dp_peer *peer = (struct dp_peer *)peer_handle;
6859
6860 qdf_assert(peer);
6861
6862 qdf_mem_set(&peer->stats, sizeof(peer->stats), 0);
6863}
6864
6865/* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
6866 * @vdev_handle: DP_VDEV handle
6867 * @buf: buffer for vdev stats
6868 *
6869 * return : int
6870 */
6871static int dp_txrx_get_vdev_stats(struct cdp_vdev *vdev_handle, void *buf,
6872 bool is_aggregate)
6873{
6874 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6875 struct cdp_vdev_stats *vdev_stats = (struct cdp_vdev_stats *)buf;
6876
6877 if (is_aggregate)
6878 dp_aggregate_vdev_stats(vdev, buf);
6879 else
6880 qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
6881
6882 return 0;
6883}
6884
Prathyusha Guduri184b6402018-02-04 23:01:49 +05306885/*
Pranita Solanke92096e42018-09-11 11:14:51 +05306886 * dp_get_total_per(): get total per
6887 * @pdev_handle: DP_PDEV handle
6888 *
6889 * Return: % error rate using retries per packet and success packets
6890 */
6891static int dp_get_total_per(struct cdp_pdev *pdev_handle)
6892{
6893 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6894
6895 dp_aggregate_pdev_stats(pdev);
6896 if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
6897 return 0;
6898 return ((pdev->stats.tx.retries * 100) /
6899 ((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
6900}
6901
6902/*
Prathyusha Guduri184b6402018-02-04 23:01:49 +05306903 * dp_txrx_stats_publish(): publish pdev stats into a buffer
6904 * @pdev_handle: DP_PDEV handle
6905 * @buf: to hold pdev_stats
6906 *
6907 * Return: int
6908 */
6909static int
6910dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
6911{
6912 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6913 struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
Prathyusha Guduri43bb0562018-02-12 18:30:54 +05306914 struct cdp_txrx_stats_req req = {0,};
Prathyusha Guduri184b6402018-02-04 23:01:49 +05306915
6916 dp_aggregate_pdev_stats(pdev);
Venkata Sharath Chandra Manchalad18887e2018-10-02 18:18:52 -07006917 req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
Prathyusha Guduri43bb0562018-02-12 18:30:54 +05306918 req.cookie_val = 1;
6919 dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
Venkata Sharath Chandra Manchalaac863c42018-02-19 22:47:07 -08006920 req.param1, req.param2, req.param3, 0,
6921 req.cookie_val, 0);
6922
Prathyusha Guduri43bb0562018-02-12 18:30:54 +05306923 msleep(DP_MAX_SLEEP_TIME);
Prathyusha Guduri184b6402018-02-04 23:01:49 +05306924
Venkata Sharath Chandra Manchalad18887e2018-10-02 18:18:52 -07006925 req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
Prathyusha Guduri43bb0562018-02-12 18:30:54 +05306926 req.cookie_val = 1;
6927 dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
Venkata Sharath Chandra Manchalaac863c42018-02-19 22:47:07 -08006928 req.param1, req.param2, req.param3, 0,
6929 req.cookie_val, 0);
6930
Prathyusha Guduri43bb0562018-02-12 18:30:54 +05306931 msleep(DP_MAX_SLEEP_TIME);
Prathyusha Guduri184b6402018-02-04 23:01:49 +05306932 qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
6933
6934 return TXRX_STATS_LEVEL;
6935}
6936
Ishank Jain949674c2017-02-27 17:09:29 +05306937/**
6938 * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
6939 * @pdev: DP_PDEV handle
6940 * @map_id: ID of map that needs to be updated
6941 * @tos: index value in map
6942 * @tid: tid value passed by the user
6943 *
6944 * Return: void
6945 */
6946static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
6947 uint8_t map_id, uint8_t tos, uint8_t tid)
6948{
6949 uint8_t dscp;
6950 struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
Ruchi, Agrawalfea1a842018-08-29 12:14:41 +05306951 struct dp_soc *soc = pdev->soc;
6952
6953 if (!soc)
6954 return;
6955
Ishank Jain949674c2017-02-27 17:09:29 +05306956 dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
6957 pdev->dscp_tid_map[map_id][dscp] = tid;
Ruchi, Agrawalfea1a842018-08-29 12:14:41 +05306958
6959 if (map_id < soc->num_hw_dscp_tid_map)
6960 hal_tx_update_dscp_tid(soc->hal_soc, tid,
6961 map_id, dscp);
Ishank Jain949674c2017-02-27 17:09:29 +05306962 return;
6963}
6964
Ishank Jain6290a3c2017-03-21 10:49:39 +05306965/**
6966 * dp_fw_stats_process(): Process TxRX FW stats request
6967 * @vdev_handle: DP VDEV handle
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306968 * @req: stats request
Ishank Jain6290a3c2017-03-21 10:49:39 +05306969 *
6970 * return: int
6971 */
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306972static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
6973 struct cdp_txrx_stats_req *req)
Ishank Jain6290a3c2017-03-21 10:49:39 +05306974{
6975 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6976 struct dp_pdev *pdev = NULL;
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306977 uint32_t stats = req->stats;
Venkata Sharath Chandra Manchala4face242018-04-23 11:48:28 -07006978 uint8_t mac_id = req->mac_id;
Ishank Jain6290a3c2017-03-21 10:49:39 +05306979
6980 if (!vdev) {
6981 DP_TRACE(NONE, "VDEV not found");
6982 return 1;
6983 }
Ishank Jain6290a3c2017-03-21 10:49:39 +05306984 pdev = vdev->pdev;
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306985
chenguocda25122018-01-24 17:39:38 +08006986 /*
6987 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
6988 * from param0 to param3 according to below rule:
6989 *
6990 * PARAM:
6991 * - config_param0 : start_offset (stats type)
6992 * - config_param1 : stats bmask from start offset
6993 * - config_param2 : stats bmask from start offset + 32
6994 * - config_param3 : stats bmask from start offset + 64
6995 */
6996 if (req->stats == CDP_TXRX_STATS_0) {
6997 req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
6998 req->param1 = 0xFFFFFFFF;
6999 req->param2 = 0xFFFFFFFF;
7000 req->param3 = 0xFFFFFFFF;
Chaithanya Garrepalli32fcc2a2018-08-03 15:09:42 +05307001 } else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
7002 req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
chenguocda25122018-01-24 17:39:38 +08007003 }
7004
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05307005 return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
Venkata Sharath Chandra Manchalaac863c42018-02-19 22:47:07 -08007006 req->param1, req->param2, req->param3,
Venkata Sharath Chandra Manchala4face242018-04-23 11:48:28 -07007007 0, 0, mac_id);
Ishank Jain6290a3c2017-03-21 10:49:39 +05307008}
7009
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05307010/**
7011 * dp_txrx_stats_request - function to map to firmware and host stats
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08007012 * @vdev: virtual handle
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05307013 * @req: stats request
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08007014 *
Venkata Sharath Chandra Manchala599b14c2018-08-06 10:59:11 -07007015 * Return: QDF_STATUS
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08007016 */
Venkata Sharath Chandra Manchala599b14c2018-08-06 10:59:11 -07007017static
7018QDF_STATUS dp_txrx_stats_request(struct cdp_vdev *vdev,
7019 struct cdp_txrx_stats_req *req)
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08007020{
7021 int host_stats;
7022 int fw_stats;
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05307023 enum cdp_stats stats;
Venkata Sharath Chandra Manchala599b14c2018-08-06 10:59:11 -07007024 int num_stats;
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08007025
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05307026 if (!vdev || !req) {
7027 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7028 "Invalid vdev/req instance");
Venkata Sharath Chandra Manchala599b14c2018-08-06 10:59:11 -07007029 return QDF_STATUS_E_INVAL;
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05307030 }
Venkata Sharath Chandra Manchalaac863c42018-02-19 22:47:07 -08007031
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05307032 stats = req->stats;
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08007033 if (stats >= CDP_TXRX_MAX_STATS)
Venkata Sharath Chandra Manchala599b14c2018-08-06 10:59:11 -07007034 return QDF_STATUS_E_INVAL;
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08007035
Ishank Jain6290a3c2017-03-21 10:49:39 +05307036 /*
7037 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
7038 * has to be updated if new FW HTT stats added
7039 */
7040 if (stats > CDP_TXRX_STATS_HTT_MAX)
7041 stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
Venkata Sharath Chandra Manchala599b14c2018-08-06 10:59:11 -07007042
7043 num_stats = QDF_ARRAY_SIZE(dp_stats_mapping_table);
7044
7045 if (stats >= num_stats) {
7046 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7047 "%s: Invalid stats option: %d", __func__, stats);
7048 return QDF_STATUS_E_INVAL;
7049 }
7050
7051 req->stats = stats;
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08007052 fw_stats = dp_stats_mapping_table[stats][STATS_FW];
7053 host_stats = dp_stats_mapping_table[stats][STATS_HOST];
7054
7055 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
Venkata Sharath Chandra Manchala599b14c2018-08-06 10:59:11 -07007056 "stats: %u fw_stats_type: %d host_stats: %d",
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08007057 stats, fw_stats, host_stats);
7058
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05307059 if (fw_stats != TXRX_FW_STATS_INVALID) {
7060 /* update request with FW stats type */
7061 req->stats = fw_stats;
7062 return dp_fw_stats_process(vdev, req);
7063 }
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08007064
Ishank Jain57c42a12017-04-12 10:42:22 +05307065 if ((host_stats != TXRX_HOST_STATS_INVALID) &&
7066 (host_stats <= TXRX_HOST_STATS_MAX))
Venkata Sharath Chandra Manchala0cb31982018-03-30 15:55:26 -07007067 return dp_print_host_stats(vdev, req);
Ishank Jain57c42a12017-04-12 10:42:22 +05307068 else
7069 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7070 "Wrong Input for TxRx Stats");
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08007071
Venkata Sharath Chandra Manchala599b14c2018-08-06 10:59:11 -07007072 return QDF_STATUS_SUCCESS;
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08007073}
7074
/*
 * dp_print_napi_stats(): NAPI stats
 * @soc - soc handle
 *
 * Thin wrapper: delegates to the HIF layer's NAPI stats printer for
 * this soc's hif handle.
 */
static void dp_print_napi_stats(struct dp_soc *soc)
{
	hif_print_napi_stats(soc->hif_handle);
}
7083
7084/*
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07007085 * dp_print_per_ring_stats(): Packet count per ring
7086 * @soc - soc handle
7087 */
7088static void dp_print_per_ring_stats(struct dp_soc *soc)
7089{
chenguo8107b662017-12-13 16:31:13 +08007090 uint8_t ring;
7091 uint16_t core;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07007092 uint64_t total_packets;
7093
Venkata Sharath Chandra Manchala7f30b272018-08-22 16:04:19 -07007094 DP_TRACE_STATS(INFO_HIGH, "Reo packets per ring:");
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07007095 for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
7096 total_packets = 0;
Venkata Sharath Chandra Manchala7f30b272018-08-22 16:04:19 -07007097 DP_TRACE_STATS(INFO_HIGH,
7098 "Packets on ring %u:", ring);
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07007099 for (core = 0; core < NR_CPUS; core++) {
Venkata Sharath Chandra Manchala7f30b272018-08-22 16:04:19 -07007100 DP_TRACE_STATS(INFO_HIGH,
7101 "Packets arriving on core %u: %llu",
7102 core,
7103 soc->stats.rx.ring_packets[core][ring]);
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07007104 total_packets += soc->stats.rx.ring_packets[core][ring];
7105 }
Venkata Sharath Chandra Manchala7f30b272018-08-22 16:04:19 -07007106 DP_TRACE_STATS(INFO_HIGH,
7107 "Total packets on ring %u: %llu",
7108 ring, total_packets);
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07007109 }
7110}
7111
/*
 * dp_txrx_path_stats() - Function to display dump stats
 * @soc - soc handle (statistics for every pdev under this soc are printed)
 *
 * Aggregates per-vdev/per-peer counters into each pdev (via
 * dp_aggregate_pdev_stats) and prints the TX ingress path, host/hardware
 * drop counters, RX delivery path, REO/RXDMA error buckets and the
 * per-interrupt packet histograms through DP_TRACE_STATS.
 *
 * return: none
 */
static void dp_txrx_path_stats(struct dp_soc *soc)
{
	uint8_t error_code;
	uint8_t loop_pdev;
	struct dp_pdev *pdev;
	uint8_t i;

	if (!soc) {
		DP_TRACE(ERROR, "%s: Invalid access",
			 __func__);
		return;
	}

	for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {

		pdev = soc->pdev_list[loop_pdev];
		/* fold vdev/peer counters into pdev->stats before printing */
		dp_aggregate_pdev_stats(pdev);
		DP_TRACE_STATS(INFO_HIGH, "Tx path Statistics:");
		DP_TRACE_STATS(INFO_HIGH, "from stack: %u msdus (%llu bytes)",
			       pdev->stats.tx_i.rcvd.num,
			       pdev->stats.tx_i.rcvd.bytes);
		DP_TRACE_STATS(INFO_HIGH,
			       "processed from host: %u msdus (%llu bytes)",
			       pdev->stats.tx_i.processed.num,
			       pdev->stats.tx_i.processed.bytes);
		DP_TRACE_STATS(INFO_HIGH,
			       "successfully transmitted: %u msdus (%llu bytes)",
			       pdev->stats.tx.tx_success.num,
			       pdev->stats.tx.tx_success.bytes);

		/* drops accounted on the host side of the TX path */
		DP_TRACE_STATS(INFO_HIGH, "Dropped in host:");
		DP_TRACE_STATS(INFO_HIGH, "Total packets dropped: %u,",
			       pdev->stats.tx_i.dropped.dropped_pkt.num);
		DP_TRACE_STATS(INFO_HIGH, "Descriptor not available: %u",
			       pdev->stats.tx_i.dropped.desc_na.num);
		DP_TRACE_STATS(INFO_HIGH, "Ring full: %u",
			       pdev->stats.tx_i.dropped.ring_full);
		DP_TRACE_STATS(INFO_HIGH, "Enqueue fail: %u",
			       pdev->stats.tx_i.dropped.enqueue_fail);
		DP_TRACE_STATS(INFO_HIGH, "DMA Error: %u",
			       pdev->stats.tx_i.dropped.dma_error);

		/* drops reported back by target/hardware completions */
		DP_TRACE_STATS(INFO_HIGH, "Dropped in hardware:");
		DP_TRACE_STATS(INFO_HIGH, "total packets dropped: %u",
			       pdev->stats.tx.tx_failed);
		DP_TRACE_STATS(INFO_HIGH, "mpdu age out: %u",
			       pdev->stats.tx.dropped.age_out);
		DP_TRACE_STATS(INFO_HIGH, "firmware removed: %u",
			       pdev->stats.tx.dropped.fw_rem);
		DP_TRACE_STATS(INFO_HIGH, "firmware removed tx: %u",
			       pdev->stats.tx.dropped.fw_rem_tx);
		DP_TRACE_STATS(INFO_HIGH, "firmware removed notx %u",
			       pdev->stats.tx.dropped.fw_rem_notx);
		DP_TRACE_STATS(INFO_HIGH, "peer_invalid: %u",
			       pdev->soc->stats.tx.tx_invalid_peer.num);

		/* batching histogram: completions reaped per TX interrupt */
		DP_TRACE_STATS(INFO_HIGH, "Tx packets sent per interrupt:");
		DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
			       pdev->stats.tx_comp_histogram.pkts_1);
		DP_TRACE_STATS(INFO_HIGH, "2-20 Packets:  %u",
			       pdev->stats.tx_comp_histogram.pkts_2_20);
		DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
			       pdev->stats.tx_comp_histogram.pkts_21_40);
		DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
			       pdev->stats.tx_comp_histogram.pkts_41_60);
		DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
			       pdev->stats.tx_comp_histogram.pkts_61_80);
		DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
			       pdev->stats.tx_comp_histogram.pkts_81_100);
		DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
			       pdev->stats.tx_comp_histogram.pkts_101_200);
		DP_TRACE_STATS(INFO_HIGH, " 201+ Packets: %u",
			       pdev->stats.tx_comp_histogram.pkts_201_plus);

		DP_TRACE_STATS(INFO_HIGH, "Rx path statistics");

		DP_TRACE_STATS(INFO_HIGH,
			       "delivered %u msdus ( %llu bytes),",
			       pdev->stats.rx.to_stack.num,
			       pdev->stats.rx.to_stack.bytes);
		for (i = 0; i < CDP_MAX_RX_RINGS; i++)
			DP_TRACE_STATS(INFO_HIGH,
				       "received on reo[%d] %u msdus( %llu bytes),",
				       i, pdev->stats.rx.rcvd_reo[i].num,
				       pdev->stats.rx.rcvd_reo[i].bytes);
		DP_TRACE_STATS(INFO_HIGH,
			       "intra-bss packets %u msdus ( %llu bytes),",
			       pdev->stats.rx.intra_bss.pkts.num,
			       pdev->stats.rx.intra_bss.pkts.bytes);
		DP_TRACE_STATS(INFO_HIGH,
			       "intra-bss fails %u msdus ( %llu bytes),",
			       pdev->stats.rx.intra_bss.fail.num,
			       pdev->stats.rx.intra_bss.fail.bytes);
		DP_TRACE_STATS(INFO_HIGH,
			       "raw packets %u msdus ( %llu bytes),",
			       pdev->stats.rx.raw.num,
			       pdev->stats.rx.raw.bytes);
		DP_TRACE_STATS(INFO_HIGH, "dropped: error %u msdus",
			       pdev->stats.rx.err.mic_err);
		DP_TRACE_STATS(INFO_HIGH, "peer invalid %u",
			       pdev->soc->stats.rx.err.rx_invalid_peer.num);

		DP_TRACE_STATS(INFO_HIGH, "Reo Statistics");
		DP_TRACE_STATS(INFO_HIGH, "rbm error: %u msdus",
			       pdev->soc->stats.rx.err.invalid_rbm);
		DP_TRACE_STATS(INFO_HIGH, "hal ring access fail: %u msdus",
			       pdev->soc->stats.rx.err.hal_ring_access_fail);

		/* only print REO error buckets that actually fired */
		for (error_code = 0; error_code < HAL_REO_ERR_MAX;
				error_code++) {
			if (!pdev->soc->stats.rx.err.reo_error[error_code])
				continue;
			DP_TRACE_STATS(INFO_HIGH,
				       "Reo error number (%u): %u msdus",
				       error_code,
				       pdev->soc->stats.rx.err
				       .reo_error[error_code]);
		}

		/* only print RXDMA error buckets that actually fired */
		for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
				error_code++) {
			if (!pdev->soc->stats.rx.err.rxdma_error[error_code])
				continue;
			DP_TRACE_STATS(INFO_HIGH,
				       "Rxdma error number (%u): %u msdus",
				       error_code,
				       pdev->soc->stats.rx.err
				       .rxdma_error[error_code]);
		}

		/* batching histogram: frames reaped per RX interrupt */
		DP_TRACE_STATS(INFO_HIGH, "Rx packets reaped per interrupt:");
		DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
			       pdev->stats.rx_ind_histogram.pkts_1);
		DP_TRACE_STATS(INFO_HIGH, "2-20 Packets:  %u",
			       pdev->stats.rx_ind_histogram.pkts_2_20);
		DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
			       pdev->stats.rx_ind_histogram.pkts_21_40);
		DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
			       pdev->stats.rx_ind_histogram.pkts_41_60);
		DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
			       pdev->stats.rx_ind_histogram.pkts_61_80);
		DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
			       pdev->stats.rx_ind_histogram.pkts_81_100);
		DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
			       pdev->stats.rx_ind_histogram.pkts_101_200);
		DP_TRACE_STATS(INFO_HIGH, " 201+ Packets: %u",
			       pdev->stats.rx_ind_histogram.pkts_201_plus);

		/* snapshot of the relevant ini-driven datapath config */
		DP_TRACE_STATS(INFO_HIGH, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
			       __func__,
			       pdev->soc->wlan_cfg_ctx->tso_enabled,
			       pdev->soc->wlan_cfg_ctx->lro_enabled,
			       pdev->soc->wlan_cfg_ctx->rx_hash,
			       pdev->soc->wlan_cfg_ctx->napi_enabled);
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
		DP_TRACE_STATS(INFO_HIGH, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
			       __func__,
			       pdev->soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold,
			       pdev->soc->wlan_cfg_ctx->tx_flow_start_queue_offset);
#endif
	}
}
7286
7287/*
7288 * dp_txrx_dump_stats() - Dump statistics
7289 * @value - Statistics option
7290 */
Mohit Khanna90d7ebd2017-09-12 21:54:21 -07007291static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
7292 enum qdf_stats_verbosity_level level)
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007293{
7294 struct dp_soc *soc =
7295 (struct dp_soc *)psoc;
7296 QDF_STATUS status = QDF_STATUS_SUCCESS;
7297
7298 if (!soc) {
7299 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7300 "%s: soc is NULL", __func__);
7301 return QDF_STATUS_E_INVAL;
7302 }
7303
7304 switch (value) {
7305 case CDP_TXRX_PATH_STATS:
7306 dp_txrx_path_stats(soc);
7307 break;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07007308
7309 case CDP_RX_RING_STATS:
7310 dp_print_per_ring_stats(soc);
7311 break;
7312
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007313 case CDP_TXRX_TSO_STATS:
7314 /* TODO: NOT IMPLEMENTED */
7315 break;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07007316
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007317 case CDP_DUMP_TX_FLOW_POOL_INFO:
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -07007318 cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007319 break;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07007320
psimha61b1a362017-07-27 15:45:49 -07007321 case CDP_DP_NAPI_STATS:
7322 dp_print_napi_stats(soc);
7323 break;
7324
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007325 case CDP_TXRX_DESC_STATS:
7326 /* TODO: NOT IMPLEMENTED */
7327 break;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07007328
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007329 default:
7330 status = QDF_STATUS_E_INVAL;
7331 break;
7332 }
7333
7334 return status;
7335
7336}
7337
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_update_flow_control_parameters() - API to store datapath
 *                            flow control config parameters
 * @soc: soc handle
 * @params: ini parameter handle carrying the TX flow-control thresholds
 *
 * Copies the stop-queue threshold and start-queue offset from the ini
 * parameters into the soc wlan_cfg context.
 *
 * Return: void
 */
static inline
void dp_update_flow_control_parameters(struct dp_soc *soc,
				struct cdp_config_params *params)
{
	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
					params->tx_flow_stop_queue_threshold;
	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
					params->tx_flow_start_queue_offset;
}
#else
/* Stub: TX flow control compiled out, nothing to store */
static inline
void dp_update_flow_control_parameters(struct dp_soc *soc,
				struct cdp_config_params *params)
{
}
#endif
7363
7364/**
7365 * dp_update_config_parameters() - API to store datapath
7366 * config parameters
7367 * @soc: soc handle
7368 * @cfg: ini parameter handle
7369 *
7370 * Return: status
7371 */
7372static
7373QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
7374 struct cdp_config_params *params)
7375{
7376 struct dp_soc *soc = (struct dp_soc *)psoc;
7377
7378 if (!(soc)) {
7379 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7380 "%s: Invalid handle", __func__);
7381 return QDF_STATUS_E_INVAL;
7382 }
7383
7384 soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
7385 soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
7386 soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
7387 soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
7388 params->tcp_udp_checksumoffload;
7389 soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
Mohit Khanna81179cb2018-08-16 20:50:43 -07007390 soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
7391
Venkata Sharath Chandra Manchala3e8add82017-07-10 11:59:54 -07007392 dp_update_flow_control_parameters(soc, params);
7393
7394 return QDF_STATUS_SUCCESS;
7395}
7396
Tallapragada Kalyan2a5fc622017-12-08 21:07:43 +05307397/**
7398 * dp_txrx_set_wds_rx_policy() - API to store datapath
7399 * config parameters
7400 * @vdev_handle - datapath vdev handle
7401 * @cfg: ini parameter handle
7402 *
7403 * Return: status
7404 */
7405#ifdef WDS_VENDOR_EXTENSION
7406void
7407dp_txrx_set_wds_rx_policy(
7408 struct cdp_vdev *vdev_handle,
7409 u_int32_t val)
7410{
7411 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7412 struct dp_peer *peer;
7413 if (vdev->opmode == wlan_op_mode_ap) {
7414 /* for ap, set it on bss_peer */
7415 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
7416 if (peer->bss_peer) {
7417 peer->wds_ecm.wds_rx_filter = 1;
7418 peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
7419 peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
7420 break;
7421 }
7422 }
7423 } else if (vdev->opmode == wlan_op_mode_sta) {
7424 peer = TAILQ_FIRST(&vdev->peer_list);
7425 peer->wds_ecm.wds_rx_filter = 1;
7426 peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
7427 peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
7428 }
7429}
7430
7431/**
7432 * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
7433 *
7434 * @peer_handle - datapath peer handle
7435 * @wds_tx_ucast: policy for unicast transmission
7436 * @wds_tx_mcast: policy for multicast transmission
7437 *
7438 * Return: void
7439 */
7440void
7441dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
7442 int wds_tx_ucast, int wds_tx_mcast)
7443{
7444 struct dp_peer *peer = (struct dp_peer *)peer_handle;
7445 if (wds_tx_ucast || wds_tx_mcast) {
7446 peer->wds_enabled = 1;
7447 peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
7448 peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
7449 } else {
7450 peer->wds_enabled = 0;
7451 peer->wds_ecm.wds_tx_ucast_4addr = 0;
7452 peer->wds_ecm.wds_tx_mcast_4addr = 0;
7453 }
7454
7455 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7456 FL("Policy Update set to :\
7457 peer->wds_enabled %d\
7458 peer->wds_ecm.wds_tx_ucast_4addr %d\
Aditya Sathishded018e2018-07-02 16:25:21 +05307459 peer->wds_ecm.wds_tx_mcast_4addr %d"),
Tallapragada Kalyan2a5fc622017-12-08 21:07:43 +05307460 peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
7461 peer->wds_ecm.wds_tx_mcast_4addr);
7462 return;
7463}
7464#endif
7465
/* WDS (wireless distribution system) control ops exported via cdp */
static struct cdp_wds_ops dp_ops_wds = {
	.vdev_set_wds = dp_vdev_set_wds,
#ifdef WDS_VENDOR_EXTENSION
	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
#endif
};
7473
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05307474/*
Kabilan Kannan60e3b302017-09-07 20:06:17 -07007475 * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
7476 * @vdev_handle - datapath vdev handle
7477 * @callback - callback function
7478 * @ctxt: callback context
7479 *
7480 */
7481static void
7482dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
7483 ol_txrx_data_tx_cb callback, void *ctxt)
7484{
7485 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7486
7487 vdev->tx_non_std_data_callback.func = callback;
7488 vdev->tx_non_std_data_callback.ctxt = ctxt;
7489}
7490
Santosh Anbu2280e862018-01-03 22:25:53 +05307491/**
7492 * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
7493 * @pdev_hdl: datapath pdev handle
7494 *
7495 * Return: opaque pointer to dp txrx handle
7496 */
7497static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
7498{
7499 struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
7500
7501 return pdev->dp_txrx_handle;
7502}
7503
7504/**
7505 * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
7506 * @pdev_hdl: datapath pdev handle
7507 * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
7508 *
7509 * Return: void
7510 */
7511static void
7512dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
7513{
7514 struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
7515
7516 pdev->dp_txrx_handle = dp_txrx_hdl;
7517}
7518
Pamidipati, Vijayd3478ef2018-02-06 23:52:29 +05307519/**
7520 * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
7521 * @soc_handle: datapath soc handle
7522 *
7523 * Return: opaque pointer to external dp (non-core DP)
7524 */
7525static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
7526{
7527 struct dp_soc *soc = (struct dp_soc *)soc_handle;
7528
7529 return soc->external_txrx_handle;
7530}
7531
7532/**
7533 * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
7534 * @soc_handle: datapath soc handle
7535 * @txrx_handle: opaque pointer to external dp (non-core DP)
7536 *
7537 * Return: void
7538 */
7539static void
7540dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
7541{
7542 struct dp_soc *soc = (struct dp_soc *)soc_handle;
7543
7544 soc->external_txrx_handle = txrx_handle;
7545}
7546
Akshay Kosigia4f6e172018-09-03 21:42:27 +05307547/**
7548 * dp_get_cfg_capabilities() - get dp capabilities
7549 * @soc_handle: datapath soc handle
7550 * @dp_caps: enum for dp capabilities
7551 *
7552 * Return: bool to determine if dp caps is enabled
7553 */
7554static bool
7555dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
7556 enum cdp_capabilities dp_caps)
7557{
7558 struct dp_soc *soc = (struct dp_soc *)soc_handle;
7559
7560 return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
7561}
7562
#ifdef FEATURE_AST
/*
 * dp_peer_teardown_wifi3() - mark a peer for deletion and free its AST
 *	entries, unless it is a still-referenced bss peer
 * @vdev_hdl: datapath vdev handle the peer belongs to
 * @peer_hdl: datapath peer handle being torn down
 */
static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
{
	struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
	struct dp_peer *peer = (struct dp_peer *) peer_hdl;
	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;

	/*
	 * For BSS peer, new peer is not created on alloc_node if the
	 * peer with same address already exists , instead refcnt is
	 * increased for existing peer. Correspondingly in delete path,
	 * only refcnt is decreased; and peer is only deleted , when all
	 * references are deleted. So delete_in_progress should not be set
	 * for bss_peer, unless only 2 reference remains (peer map reference
	 * and peer hash table reference).
	 */
	if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2)) {
		return;
	}

	peer->delete_in_progress = true;
	dp_peer_delete_ast_entries(soc, peer);
}
#endif
7587
Soumya Bhatbc719e62018-02-18 18:21:25 +05307588#ifdef ATH_SUPPORT_NAC_RSSI
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05307589/**
7590 * dp_vdev_get_neighbour_rssi(): Store RSSI for configured NAC
7591 * @vdev_hdl: DP vdev handle
7592 * @rssi: rssi value
7593 *
7594 * Return: 0 for success. nonzero for failure.
7595 */
7596QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
7597 char *mac_addr,
7598 uint8_t *rssi)
7599{
7600 struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
7601 struct dp_pdev *pdev = vdev->pdev;
7602 struct dp_neighbour_peer *peer = NULL;
7603 QDF_STATUS status = QDF_STATUS_E_FAILURE;
7604
7605 *rssi = 0;
7606 qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
7607 TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
7608 neighbour_peer_list_elem) {
7609 if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
7610 mac_addr, DP_MAC_ADDR_LEN) == 0) {
7611 *rssi = peer->rssi;
7612 status = QDF_STATUS_SUCCESS;
7613 break;
7614 }
7615 }
7616 qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
7617 return status;
7618}
7619
Soumya Bhatbc719e62018-02-18 18:21:25 +05307620static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
7621 enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
7622 uint8_t chan_num)
7623{
7624
7625 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7626 struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
7627 struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
7628
7629 pdev->nac_rssi_filtering = 1;
7630 /* Store address of NAC (neighbour peer) which will be checked
7631 * against TA of received packets.
7632 */
7633
7634 if (cmd == CDP_NAC_PARAM_ADD) {
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05307635 dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
7636 client_macaddr);
Soumya Bhatbc719e62018-02-18 18:21:25 +05307637 } else if (cmd == CDP_NAC_PARAM_DEL) {
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05307638 dp_update_filter_neighbour_peers(vdev_handle,
7639 DP_NAC_PARAM_DEL,
7640 client_macaddr);
Soumya Bhatbc719e62018-02-18 18:21:25 +05307641 }
7642
7643 if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
7644 soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
Akshay Kosigi1a9c6d12018-04-26 00:54:23 +05307645 ((void *)vdev->pdev->ctrl_pdev,
7646 vdev->vdev_id, cmd, bssid);
Soumya Bhatbc719e62018-02-18 18:21:25 +05307647
7648 return QDF_STATUS_SUCCESS;
7649}
7650#endif
7651
Chaithanya Garrepalli2f572792018-04-11 17:49:28 +05307652static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t *soc_hdl,
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05307653 uint32_t max_peers,
7654 bool peer_map_unmap_v2)
Chaithanya Garrepalli2f572792018-04-11 17:49:28 +05307655{
7656 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7657
7658 soc->max_peers = max_peers;
7659
7660 qdf_print ("%s max_peers %u\n", __func__, max_peers);
7661
7662 if (dp_peer_find_attach(soc))
7663 return QDF_STATUS_E_FAILURE;
7664
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05307665 soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
7666
Chaithanya Garrepalli2f572792018-04-11 17:49:28 +05307667 return QDF_STATUS_SUCCESS;
7668}
7669
Sravan Kumar Kairam5a6f5902018-07-04 17:32:24 +05307670/**
7671 * dp_pdev_set_ctrl_pdev() - set ctrl pdev handle in dp pdev
7672 * @dp_pdev: dp pdev handle
7673 * @ctrl_pdev: UMAC ctrl pdev handle
7674 *
7675 * Return: void
7676 */
7677static void dp_pdev_set_ctrl_pdev(struct cdp_pdev *dp_pdev,
7678 struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
7679{
7680 struct dp_pdev *pdev = (struct dp_pdev *)dp_pdev;
7681
7682 pdev->ctrl_pdev = ctrl_pdev;
7683}
7684
jitiphil60ac9aa2018-10-05 19:54:04 +05307685/*
7686 * dp_get_cfg() - get dp cfg
7687 * @soc: cdp soc handle
7688 * @cfg: cfg enum
7689 *
7690 * Return: cfg value
7691 */
7692static uint32_t dp_get_cfg(void *soc, enum cdp_dp_cfg cfg)
7693{
7694 struct dp_soc *dpsoc = (struct dp_soc *)soc;
7695 uint32_t value = 0;
7696
7697 switch (cfg) {
7698 case cfg_dp_enable_data_stall:
7699 value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
7700 break;
7701 case cfg_dp_enable_ip_tcp_udp_checksum_offload:
7702 value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
7703 break;
7704 case cfg_dp_tso_enable:
7705 value = dpsoc->wlan_cfg_ctx->tso_enabled;
7706 break;
7707 case cfg_dp_lro_enable:
7708 value = dpsoc->wlan_cfg_ctx->lro_enabled;
7709 break;
7710 case cfg_dp_gro_enable:
7711 value = dpsoc->wlan_cfg_ctx->gro_enabled;
7712 break;
7713 case cfg_dp_tx_flow_start_queue_offset:
7714 value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
7715 break;
7716 case cfg_dp_tx_flow_stop_queue_threshold:
7717 value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
7718 break;
7719 case cfg_dp_disable_intra_bss_fwd:
7720 value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
7721 break;
7722 default:
7723 value = 0;
7724 }
7725
7726 return value;
7727}
7728
/* Common datapath ops table registered with the cdp converged layer */
static struct cdp_cmn_ops dp_ops_cmn = {
	/* soc/pdev/vdev/peer lifecycle */
	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
	.txrx_vdev_attach = dp_vdev_attach_wifi3,
	.txrx_vdev_detach = dp_vdev_detach_wifi3,
	.txrx_pdev_attach = dp_pdev_attach_wifi3,
	.txrx_pdev_detach = dp_pdev_detach_wifi3,
	.txrx_peer_create = dp_peer_create_wifi3,
	.txrx_peer_setup = dp_peer_setup_wifi3,
#ifdef FEATURE_AST
	.txrx_peer_teardown = dp_peer_teardown_wifi3,
#else
	.txrx_peer_teardown = NULL,
#endif
	/* AST (address search table) management */
	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
	.txrx_peer_del_ast = dp_peer_del_ast_wifi3,
	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
	.txrx_peer_ast_hash_find_soc = dp_peer_ast_hash_find_soc_wifi3,
	.txrx_peer_ast_hash_find_by_pdevid =
		dp_peer_ast_hash_find_by_pdevid_wifi3,
	.txrx_peer_ast_get_pdev_id = dp_peer_ast_get_pdev_id_wifi3,
	.txrx_peer_ast_get_next_hop = dp_peer_ast_get_next_hop_wifi3,
	.txrx_peer_ast_set_type = dp_peer_ast_set_type_wifi3,
	.txrx_peer_ast_get_type = dp_peer_ast_get_type_wifi3,
	.txrx_peer_ast_get_peer = dp_peer_ast_get_peer_wifi3,
	.txrx_peer_ast_get_nexthop_peer_id =
		dp_peer_ast_get_nexhop_peer_id_wifi3,
#if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND)
	.txrx_peer_ast_set_cp_ctx = dp_peer_ast_set_cp_ctx_wifi3,
	.txrx_peer_ast_get_cp_ctx = dp_peer_ast_get_cp_ctx_wifi3,
	.txrx_peer_ast_get_wmi_sent = dp_peer_ast_get_wmi_sent_wifi3,
	.txrx_peer_ast_free_entry = dp_peer_ast_free_entry_wifi3,
#endif
	.txrx_peer_delete = dp_peer_delete_wifi3,
	.txrx_vdev_register = dp_vdev_register_wifi3,
	.txrx_soc_detach = dp_soc_detach_wifi3,
	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
	.txrx_ath_getstats = dp_get_device_stats,
	/* block-ack session (ADDBA/DELBA) handling */
	.addba_requestprocess = dp_addba_requestprocess_wifi3,
	.addba_responsesetup = dp_addba_responsesetup_wifi3,
	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
	.delba_process = dp_delba_process_wifi3,
	.set_addba_response = dp_set_addba_response,
	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
	.flush_cache_rx_queue = NULL,
	/* TODO: get API's for dscp-tid need to be added*/
	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
	.txrx_get_total_per = dp_get_total_per,
	.txrx_stats_request = dp_txrx_stats_request,
	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
	.txrx_pdev_set_chan_noise_floor = dp_pdev_set_chan_noise_floor,
	.txrx_set_nac = dp_set_nac,
	.txrx_get_tx_pending = dp_get_tx_pending,
	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
	.display_stats = dp_txrx_dump_stats,
	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
	.txrx_intr_detach = dp_soc_interrupt_detach,
	.set_pn_check = dp_set_pn_check_wifi3,
	.update_config_parameters = dp_update_config_parameters,
	/* TODO: Add other functions */
	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
	.tx_send = dp_tx_send,
	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
	.txrx_pdev_set_ctrl_pdev = dp_pdev_set_ctrl_pdev,
	.txrx_get_os_rx_handles_from_vdev =
		dp_get_os_rx_handles_from_vdev_wifi3,
	.delba_tx_completion = dp_delba_tx_completion_wifi3,
	.get_dp_capabilities = dp_get_cfg_capabilities,
	.txrx_get_cfg = dp_get_cfg,
};
7814
/* Control-path ops table (peer auth, vdev/pdev params, WDI events, NAC) */
static struct cdp_ctrl_ops dp_ops_ctrl = {
	.txrx_peer_authorize = dp_peer_authorize,
	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
#ifdef MESH_MODE_SUPPORT
	.txrx_set_mesh_mode = dp_peer_set_mesh_mode,
	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
#endif
	.txrx_set_vdev_param = dp_set_vdev_param,
	.txrx_peer_set_nawds = dp_peer_set_nawds,
	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
	.txrx_update_filter_neighbour_peers =
		dp_update_filter_neighbour_peers,
	.txrx_get_sec_type = dp_get_sec_type,
	/* TODO: Add other functions */
	.txrx_wdi_event_sub = dp_wdi_event_sub,
	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
#ifdef WDI_EVENT_ENABLE
	.txrx_get_pldev = dp_get_pldev,
#endif
	.txrx_set_pdev_param = dp_set_pdev_param,
#ifdef ATH_SUPPORT_NAC_RSSI
	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
#endif
	.set_key = dp_set_michael_key,
};
7844
/* Multicast enhancement (mcast-to-ucast conversion) ops; IQUE builds only */
static struct cdp_me_ops dp_ops_me = {
#ifdef ATH_SUPPORT_IQUE
	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
#endif
};
7852
/* Monitor-mode ops; set_filter hooks are intentionally unimplemented */
static struct cdp_mon_ops dp_ops_mon = {
	.txrx_monitor_set_filter_ucast_data = NULL,
	.txrx_monitor_set_filter_mcast_data = NULL,
	.txrx_monitor_set_filter_non_data = NULL,
	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
	/* Added support for HK advance filter */
	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
};
7864
/* Host statistics ops: per-peer/vdev stats retrieval, FW/HTT stats
 * requests, and enhanced-stats enable/disable.
 */
static struct cdp_host_stats_ops dp_ops_host_stats = {
	.txrx_per_peer_stats = dp_get_host_peer_stats,
	.get_fw_peer_stats = dp_get_fw_peer_stats,
	.get_htt_stats = dp_get_htt_stats,
	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
	.txrx_stats_publish = dp_txrx_stats_publish,
	.txrx_get_vdev_stats = dp_txrx_get_vdev_stats,
	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
	/* TODO */
};
7877
/* Raw-mode (802.11 frame format) ops — not implemented yet. */
static struct cdp_raw_ops dp_ops_raw = {
	/* TODO */
};
7881
#ifdef CONFIG_WIN
/* Peer-flow control ops (WIN builds only) — not implemented yet. */
static struct cdp_pflow_ops dp_ops_pflow = {
	/* TODO */
};
#endif /* CONFIG_WIN */
7887
Yue Ma245b47b2017-02-21 16:35:31 -08007888#ifdef FEATURE_RUNTIME_PM
7889/**
7890 * dp_runtime_suspend() - ensure DP is ready to runtime suspend
7891 * @opaque_pdev: DP pdev context
7892 *
7893 * DP is ready to runtime suspend if there are no pending TX packets.
7894 *
7895 * Return: QDF_STATUS
7896 */
7897static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
7898{
7899 struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7900 struct dp_soc *soc = pdev->soc;
7901
Yue Maaf4272d2018-08-27 12:35:21 -07007902 /* Abort if there are any pending TX packets */
7903 if (dp_get_tx_pending(opaque_pdev) > 0) {
7904 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7905 FL("Abort suspend due to pending TX packets"));
7906 return QDF_STATUS_E_AGAIN;
7907 }
Yue Ma245b47b2017-02-21 16:35:31 -08007908
7909 if (soc->intr_mode == DP_INTR_POLL)
7910 qdf_timer_stop(&soc->int_timer);
7911
7912 return QDF_STATUS_SUCCESS;
7913}
7914
7915/**
7916 * dp_runtime_resume() - ensure DP is ready to runtime resume
7917 * @opaque_pdev: DP pdev context
7918 *
7919 * Resume DP for runtime PM.
7920 *
7921 * Return: QDF_STATUS
7922 */
7923static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
7924{
7925 struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7926 struct dp_soc *soc = pdev->soc;
7927 void *hal_srng;
7928 int i;
7929
7930 if (soc->intr_mode == DP_INTR_POLL)
7931 qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7932
7933 for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
7934 hal_srng = soc->tcl_data_ring[i].hal_srng;
7935 if (hal_srng) {
7936 /* We actually only need to acquire the lock */
7937 hal_srng_access_start(soc->hal_soc, hal_srng);
7938 /* Update SRC ring head pointer for HW to send
7939 all pending packets */
7940 hal_srng_access_end(soc->hal_soc, hal_srng);
7941 }
7942 }
7943
7944 return QDF_STATUS_SUCCESS;
7945}
7946#endif /* FEATURE_RUNTIME_PM */
7947
Dustin Brown4a3b96b2017-05-10 15:49:38 -07007948static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
7949{
7950 struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7951 struct dp_soc *soc = pdev->soc;
7952
psimhac983d7e2017-07-26 15:20:07 -07007953 if (soc->intr_mode == DP_INTR_POLL)
7954 qdf_timer_stop(&soc->int_timer);
Dustin Brown4a3b96b2017-05-10 15:49:38 -07007955
7956 return QDF_STATUS_SUCCESS;
7957}
7958
7959static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
7960{
7961 struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7962 struct dp_soc *soc = pdev->soc;
7963
psimhac983d7e2017-07-26 15:20:07 -07007964 if (soc->intr_mode == DP_INTR_POLL)
7965 qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
Dustin Brown4a3b96b2017-05-10 15:49:38 -07007966
7967 return QDF_STATUS_SUCCESS;
7968}
Dustin Brown4a3b96b2017-05-10 15:49:38 -07007969
Nandha Kishore Easwaranfd7832e2016-11-20 18:22:48 +05307970#ifndef CONFIG_WIN
/* Miscellaneous MCL-only ops: non-standard TX, opmode query, runtime PM
 * hooks and packet-log service wiring.
 */
static struct cdp_misc_ops dp_ops_misc = {
	.tx_non_std = dp_tx_non_std,
	.get_opmode = dp_get_opmode,
#ifdef FEATURE_RUNTIME_PM
	.runtime_suspend = dp_runtime_suspend,
	.runtime_resume = dp_runtime_resume,
#endif /* FEATURE_RUNTIME_PM */
	.pkt_log_init = dp_pkt_log_init,
	.pkt_log_con_service = dp_pkt_log_con_service,
};
7981
/* TX flow-control (v2 pool-based) ops; empty unless flow control v2 is
 * compiled in.
 */
static struct cdp_flowctl_ops dp_ops_flowctl = {
	/* WIFI 3.0 DP implement as required. */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	.flow_pool_map_handler = dp_tx_flow_pool_map,
	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
	.register_pause_cb = dp_txrx_register_pause_cb,
	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
};
7991
/* Legacy (link-layer) flow-control ops — not implemented for WIFI 3.0. */
static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
7995
Yun Parkfde6b9e2017-06-26 17:13:11 -07007996#ifdef IPA_OFFLOAD
/* IPA (IP Accelerator) offload ops: resource/doorbell setup, pipe
 * enable/disable, interface setup/teardown and perf-level control.
 */
static struct cdp_ipa_ops dp_ops_ipa = {
	.ipa_get_resource = dp_ipa_get_resource,
	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
	.ipa_op_response = dp_ipa_op_response,
	.ipa_register_op_cb = dp_ipa_register_op_cb,
	.ipa_get_stat = dp_ipa_get_stat,
	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
	.ipa_setup = dp_ipa_setup,
	.ipa_cleanup = dp_ipa_cleanup,
	.ipa_setup_iface = dp_ipa_setup_iface,
	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
	.ipa_enable_pipes = dp_ipa_enable_pipes,
	.ipa_disable_pipes = dp_ipa_disable_pipes,
	.ipa_set_perf_level = dp_ipa_set_perf_level
};
Yun Parkfde6b9e2017-06-26 17:13:11 -07008014#endif
Leo Chang5ea93a42016-11-03 12:39:49 -07008015
/* Bus suspend/resume ops (see dp_bus_suspend/dp_bus_resume above). */
static struct cdp_bus_ops dp_ops_bus = {
	.bus_suspend = dp_bus_suspend,
	.bus_resume = dp_bus_resume
};
8020
/* OCB (Outside the Context of a BSS) ops — not implemented for WIFI 3.0. */
static struct cdp_ocb_ops dp_ops_ocb = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
8024
8025
/* TX throttling ops — not implemented for WIFI 3.0. */
static struct cdp_throttle_ops dp_ops_throttle = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
8029
/* Mobile (MCL) statistics ops — not implemented for WIFI 3.0. */
static struct cdp_mob_stats_ops dp_ops_mob_stats = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
8033
/* Config ops — not implemented for WIFI 3.0. */
static struct cdp_cfg_ops dp_ops_cfg = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
8037
Mohit Khannaadfe9082017-11-17 13:11:17 -08008038/*
Sravan Kumar Kairam31ee37a2018-08-14 11:38:19 +05308039 * dp_peer_get_ref_find_by_addr - get peer with addr by ref count inc
Mohit Khannaadfe9082017-11-17 13:11:17 -08008040 * @dev: physical device instance
8041 * @peer_mac_addr: peer mac address
8042 * @local_id: local id for the peer
8043 * @debug_id: to track enum peer access
Sravan Kumar Kairam31ee37a2018-08-14 11:38:19 +05308044 *
Mohit Khannaadfe9082017-11-17 13:11:17 -08008045 * Return: peer instance pointer
8046 */
8047static inline void *
Krunal Sonibe43d552018-10-03 11:20:20 -07008048dp_peer_get_ref_find_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
8049 uint8_t *local_id,
8050 enum peer_debug_id_type debug_id)
Mohit Khannaadfe9082017-11-17 13:11:17 -08008051{
Sravan Kumar Kairam31ee37a2018-08-14 11:38:19 +05308052 struct dp_pdev *pdev = (struct dp_pdev *)dev;
8053 struct dp_peer *peer;
8054
8055 peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
8056
8057 if (!peer)
8058 return NULL;
8059
8060 *local_id = peer->local_id;
8061 DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);
8062
8063 return peer;
8064}
8065
/*
 * dp_peer_release_ref - release peer ref count
 * @peer: peer handle
 * @debug_id: to track enum peer access (unused in this implementation)
 *
 * Counterpart of dp_peer_get_ref_find_by_addr(); drops the reference
 * taken there (may free the peer when the count reaches zero).
 *
 * Return: None
 */
static inline
void dp_peer_release_ref(void *peer, enum peer_debug_id_type debug_id)
{
	dp_peer_unref_delete(peer);
}
8078
/* Peer management ops (MCL builds): registration, lookup by address or
 * local id, reference counting, and peer-state access.
 */
static struct cdp_peer_ops dp_ops_peer = {
	.register_peer = dp_register_peer,
	.clear_peer = dp_clear_peer,
	.find_peer_by_addr = dp_find_peer_by_addr,
	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
	.peer_get_ref_by_addr = dp_peer_get_ref_find_by_addr,
	.peer_release_ref = dp_peer_release_ref,
	.local_peer_id = dp_local_peer_id,
	.peer_find_by_local_id = dp_peer_find_by_local_id,
	.peer_state_update = dp_peer_state_update,
	.get_vdevid = dp_get_vdevid,
	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
	.get_vdev_for_peer = dp_get_vdev_for_peer,
	.get_peer_state = dp_get_peer_state,
};
Nandha Kishore Easwaranfd7832e2016-11-20 18:22:48 +05308095#endif
Leo Chang5ea93a42016-11-03 12:39:49 -07008096
/* Top-level CDP ops table exported to the control path via
 * soc->cdp_soc.ops in dp_soc_attach_wifi3(). WIN and MCL builds wire
 * different subsets of the sub-tables.
 */
static struct cdp_ops dp_txrx_ops = {
	.cmn_drv_ops = &dp_ops_cmn,
	.ctrl_ops = &dp_ops_ctrl,
	.me_ops = &dp_ops_me,
	.mon_ops = &dp_ops_mon,
	.host_stats_ops = &dp_ops_host_stats,
	.wds_ops = &dp_ops_wds,
	.raw_ops = &dp_ops_raw,
#ifdef CONFIG_WIN
	.pflow_ops = &dp_ops_pflow,
#endif /* CONFIG_WIN */
#ifndef CONFIG_WIN
	.misc_ops = &dp_ops_misc,
	.cfg_ops = &dp_ops_cfg,
	.flowctl_ops = &dp_ops_flowctl,
	.l_flowctl_ops = &dp_ops_l_flowctl,
#ifdef IPA_OFFLOAD
	.ipa_ops = &dp_ops_ipa,
#endif
	.bus_ops = &dp_ops_bus,
	.ocb_ops = &dp_ops_ocb,
	.peer_ops = &dp_ops_peer,
	.throttle_ops = &dp_ops_throttle,
	.mob_stats_ops = &dp_ops_mob_stats,
#endif
};
8123
8124/*
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05308125 * dp_soc_set_txrx_ring_map()
8126 * @dp_soc: DP handler for soc
8127 *
8128 * Return: Void
8129 */
8130static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
8131{
8132 uint32_t i;
8133 for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
8134 soc->tx_ring_map[i] = dp_cpu_ring_map[DP_DEFAULT_MAP][i];
8135 }
8136}
8137
Adil Saeed Musthafa61a21692018-07-17 20:49:31 -07008138#ifdef QCA_WIFI_QCA8074
/**
 * dp_soc_attach_wifi3() - Attach txrx SOC
 * @ctrl_psoc: Opaque SOC handle from control plane
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload Operations
 * @device_id: Device ID
 *
 * Allocates the DP soc, attaches HTT and the soc-level wlan_cfg context,
 * applies per-target tuning (REO ring sizes, AST override, raw-mode WAR,
 * DSCP-TID map count), then creates soc locks and the HTT stats work queue.
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
	struct ol_if_ops *ol_ops, uint16_t device_id)
{
	struct dp_soc *soc = qdf_mem_malloc(sizeof(*soc));
	int target_type;

	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("DP SOC memory allocation failed"));
		goto fail0;
	}

	soc->device_id = device_id;
	/* Export the CDP ops table and control-plane callbacks */
	soc->cdp_soc.ops = &dp_txrx_ops;
	soc->cdp_soc.ol_ops = ol_ops;
	soc->ctrl_psoc = ctrl_psoc;
	soc->osdev = qdf_osdev;
	soc->hif_handle = hif_handle;

	soc->hal_soc = hif_get_hal_handle(hif_handle);
	soc->htt_handle = htt_soc_attach(soc, ctrl_psoc, htc_handle,
		soc->hal_soc, qdf_osdev);
	/* Default DSCP-TID map count; overridden per target below */
	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;

	if (!soc->htt_handle) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("HTT attach failed"));
		goto fail1;
	}

	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
	if (!soc->wlan_cfg_ctx) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("wlan_cfg_soc_attach failed"));
		goto fail2;
	}

	/* Per-target capability/tuning configuration */
	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6290:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		soc->ast_override_support = 1;
		break;
#ifdef QCA_WIFI_QCA6390
	case TARGET_TYPE_QCA6390:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
		soc->ast_override_support = 1;
		break;
#endif
	case TARGET_TYPE_QCA8074:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA8074);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
		soc->hw_nac_monitor_support = 1;
		break;
	case TARGET_TYPE_QCA8074V2:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA8074);
		/* v2 does not need the raw-mode workaround */
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
		soc->hw_nac_monitor_support = 1;
		soc->ast_override_support = 1;
		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
		break;
	default:
		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
		qdf_assert_always(0);
		break;
	}

	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
			     cfg_get(ctrl_psoc, CFG_DP_RX_HASH));
	soc->cce_disable = false;

	/* Optional control-plane overrides for max peer id / CCE disable */
	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
				CDP_CFG_MAX_PEER_ID);

		if (ret != -EINVAL) {
			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
		}

		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
				CDP_CFG_CCE_DISABLE);
		if (ret == 1)
			soc->cce_disable = true;
	}

	qdf_spinlock_create(&soc->peer_ref_mutex);

	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);

	/* fill the tx/rx cpu ring map*/
	dp_soc_set_txrx_ring_map(soc);

	qdf_spinlock_create(&soc->htt_stats.lock);
	/* initialize work queue for stats processing */
	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);

	return (void *)soc;

fail2:
	htt_soc_detach(soc->htt_handle);
fail1:
	qdf_mem_free(soc);
fail0:
	return NULL;
}
Adil Saeed Musthafa61a21692018-07-17 20:49:31 -07008261#endif
Keyur Parekhfad6d082017-05-07 08:54:47 -07008262
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08008263/*
8264 * dp_get_pdev_for_mac_id() - Return pdev for mac_id
8265 *
8266 * @soc: handle to DP soc
8267 * @mac_id: MAC id
8268 *
8269 * Return: Return pdev corresponding to MAC
8270 */
8271void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
8272{
8273 if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
8274 return soc->pdev_list[mac_id];
8275
8276 /* Typically for MCL as there only 1 PDEV*/
8277 return soc->pdev_list[0];
8278}
8279
8280/*
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008281 * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
8282 * @soc: DP SoC context
8283 * @max_mac_rings: No of MAC rings
8284 *
8285 * Return: None
8286 */
8287static
8288void dp_is_hw_dbs_enable(struct dp_soc *soc,
8289 int *max_mac_rings)
8290{
8291 bool dbs_enable = false;
8292 if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
8293 dbs_enable = soc->cdp_soc.ol_ops->
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05308294 is_hw_dbs_2x2_capable(soc->ctrl_psoc);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008295
8296 *max_mac_rings = (dbs_enable)?(*max_mac_rings):1;
8297}
8298
/*
 * dp_set_pktlog_wifi3() - enable/disable packet-log capture for a pdev
 * @pdev: Datapath PDEV handle
 * @event: which event's notifications are being subscribed to
 * @enable: WDI event subscribe or not. (True or False)
 *
 * Programs the RXDMA monitor status ring TLV filters (full or lite RX
 * pktlog) and/or the FW PPDU stats config (lite T2H), per MAC ring.
 * When monitor mode is active nothing is changed — monitor mode already
 * owns the status ring configuration.
 *
 * Return: 0 on success (including the monitor-mode no-op cases)
 */
#ifdef WDI_EVENT_ENABLE
int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
	bool enable)
{
	struct dp_soc *soc = pdev->soc;
	/* Zeroed filter also serves as the "disable" configuration below */
	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
	int max_mac_rings = wlan_cfg_get_num_mac_rings
					(pdev->wlan_cfg_ctx);
	uint8_t mac_id = 0;

	/* Clamp to one ring when the target is not DBS 2x2 capable */
	dp_is_hw_dbs_enable(soc, &max_mac_rings);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			FL("Max_mac_rings %d "),
			max_mac_rings);

	if (enable) {
		switch (event) {
		case WDI_EVENT_RX_DESC:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
				/* Full pktlog: capture every TLV plus the
				 * packet header for FP and MO frames of all
				 * mgmt/ctrl/data subtypes.
				 */
				htt_tlv_filter.mpdu_start = 1;
				htt_tlv_filter.msdu_start = 1;
				htt_tlv_filter.msdu_end = 1;
				htt_tlv_filter.mpdu_end = 1;
				htt_tlv_filter.packet_header = 1;
				htt_tlv_filter.attention = 1;
				htt_tlv_filter.ppdu_start = 1;
				htt_tlv_filter.ppdu_end = 1;
				htt_tlv_filter.ppdu_end_user_stats = 1;
				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
				htt_tlv_filter.ppdu_end_status_done = 1;
				htt_tlv_filter.enable_fp = 1;
				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;

				for (mac_id = 0; mac_id < max_mac_rings;
								mac_id++) {
					int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

					htt_h2t_rx_ring_cfg(soc->htt_handle,
					 mac_for_pdev,
					 pdev->rxdma_mon_status_ring[mac_id]
					 .hal_srng,
					 RXDMA_MONITOR_STATUS,
					 RX_BUFFER_SIZE,
					 &htt_tlv_filter);

				}

				if (soc->reap_timer_init)
					qdf_timer_mod(&soc->mon_reap_timer,
					DP_INTR_POLL_TIMER_MS);
			}
			break;

		case WDI_EVENT_LITE_RX:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}

			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;

				/* Lite pktlog: PPDU-level TLVs only, with a
				 * smaller status-ring buffer size.
				 */
				htt_tlv_filter.ppdu_start = 1;
				htt_tlv_filter.ppdu_end = 1;
				htt_tlv_filter.ppdu_end_user_stats = 1;
				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
				htt_tlv_filter.ppdu_end_status_done = 1;
				htt_tlv_filter.mpdu_start = 1;
				htt_tlv_filter.enable_fp = 1;
				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;

				for (mac_id = 0; mac_id < max_mac_rings;
								mac_id++) {
					int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

					htt_h2t_rx_ring_cfg(soc->htt_handle,
					mac_for_pdev,
					pdev->rxdma_mon_status_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_STATUS,
					RX_BUFFER_SIZE_PKTLOG_LITE,
					&htt_tlv_filter);
				}

				if (soc->reap_timer_init)
					qdf_timer_mod(&soc->mon_reap_timer,
					DP_INTR_POLL_TIMER_MS);
			}
			break;

		case WDI_EVENT_LITE_T2H:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}

			/* Ask FW for TX-lite PPDU stats on every MAC ring */
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev = dp_get_mac_id_for_pdev(
							mac_id,	pdev->pdev_id);

				pdev->pktlog_ppdu_stats = true;
				dp_h2t_cfg_stats_msg_send(pdev,
					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
					mac_for_pdev);
			}
			break;

		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	} else {
		switch (event) {
		case WDI_EVENT_RX_DESC:
		case WDI_EVENT_LITE_RX:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;

				/* htt_tlv_filter is still all-zero: this
				 * reconfiguration turns RX pktlog off.
				 */
				for (mac_id = 0; mac_id < max_mac_rings;
								mac_id++) {
					int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

					htt_h2t_rx_ring_cfg(soc->htt_handle,
					  mac_for_pdev,
					  pdev->rxdma_mon_status_ring[mac_id]
					  .hal_srng,
					  RXDMA_MONITOR_STATUS,
					  RX_BUFFER_SIZE,
					  &htt_tlv_filter);
				}

				if (soc->reap_timer_init)
					qdf_timer_stop(&soc->mon_reap_timer);
			}
			break;
		case WDI_EVENT_LITE_T2H:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			/* To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW
			 * passing value 0. Once these macros will define in htt
			 * header file will use proper macros
			 */
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

				pdev->pktlog_ppdu_stats = false;
				/* Keep whatever stats config sniffer/mcopy or
				 * enhanced stats still need; only fully clear
				 * when none of them is active.
				 */
				if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev, 0,
								mac_for_pdev);
				} else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
								mac_for_pdev);
				} else if (pdev->enhanced_stats_en) {
					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
								mac_for_pdev);
				}
			}

			break;
		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	}
	return 0;
}
8513#endif