blob: 6939c0801d048c9ea226528e07b537337522d1e1 [file] [log] [blame]
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001/*
Stephan Raj Ignatious Durairajba291292018-01-04 13:36:34 +05302 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003 *
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16 * PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include <qdf_types.h>
20#include <qdf_lock.h>
Dhanashri Atre14049172016-11-11 18:32:36 -080021#include <qdf_net_types.h>
Dhanashri Atre0da31222017-03-23 12:30:58 -070022#include <qdf_lro.h>
Tallapragada Kalyan4c183b82017-09-13 23:48:14 +053023#include <qdf_module.h>
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +053024#include <hal_hw_headers.h>
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070025#include <hal_api.h>
26#include <hif.h>
27#include <htt.h>
28#include <wdi_event.h>
29#include <queue.h>
30#include "dp_htt.h"
31#include "dp_types.h"
32#include "dp_internal.h"
Vijay Pamidipatid41d6d62016-10-19 21:19:00 +053033#include "dp_tx.h"
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -070034#include "dp_tx_desc.h"
Leo Chang5ea93a42016-11-03 12:39:49 -070035#include "dp_rx.h"
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080036#include <cdp_txrx_handle.h>
Ravi Joshiaf9ace82017-02-17 12:41:48 -080037#include <wlan_cfg.h>
Ishank Jainbc2d91f2017-01-03 18:14:54 +053038#include "cdp_txrx_cmn_struct.h"
Prathyusha Guduri184b6402018-02-04 23:01:49 +053039#include "cdp_txrx_stats_struct.h"
Dhanashri Atre14049172016-11-11 18:32:36 -080040#include <qdf_util.h>
Ishank Jain1e7401c2017-02-17 15:38:39 +053041#include "dp_peer.h"
Kai Chen6eca1a62017-01-12 10:17:53 -080042#include "dp_rx_mon.h"
Ishank Jain6290a3c2017-03-21 10:49:39 +053043#include "htt_stats.h"
Keyur Parekhfad6d082017-05-07 08:54:47 -070044#include "qdf_mem.h" /* qdf_mem_malloc,free */
Vivek126db5d2018-07-25 22:05:04 +053045#include "cfg_ucfg_api.h"
Manjunathappa Prakash5f050a82017-07-18 22:00:05 -070046#ifdef QCA_LL_TX_FLOW_CONTROL_V2
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -070047#include "cdp_txrx_flow_ctrl_v2.h"
Manjunathappa Prakash5f050a82017-07-18 22:00:05 -070048#else
/*
 * Flow-control V2 is compiled out in this build: provide a no-op
 * stand-in so callers can invoke cdp_dump_flow_pool_info()
 * unconditionally.
 */
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
}
54#endif
Yun Parkfde6b9e2017-06-26 17:13:11 -070055#include "dp_ipa.h"
Ravi Joshiaf9ace82017-02-17 12:41:48 -080056
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070057#ifdef CONFIG_MCL
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070058#ifndef REMOVE_PKT_LOG
59#include <pktlog_ac_api.h>
60#include <pktlog_ac.h>
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070061#endif
62#endif
63static void dp_pktlogmod_exit(struct dp_pdev *handle);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +053064static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
Akshay Kosigi78eced82018-05-14 14:53:48 +053065 uint8_t *peer_mac_addr,
66 struct cdp_ctrl_objmgr_peer *ctrl_peer);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +053067static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +053068static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
69static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070070
Karunakar Dasineni1d891ed2017-03-29 15:42:02 -070071#define DP_INTR_POLL_TIMER_MS 10
Tallapragada Kalyanfb72b632017-07-07 12:51:58 +053072#define DP_WDS_AGING_TIMER_DEFAULT_MS 120000
Ishank Jainbc2d91f2017-01-03 18:14:54 +053073#define DP_MCS_LENGTH (6*MAX_MCS)
74#define DP_NSS_LENGTH (6*SS_COUNT)
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +053075#define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
76#define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
77#define DP_MAX_MCS_STRING_LEN 30
Ishank Jain6290a3c2017-03-21 10:49:39 +053078#define DP_CURR_FW_STATS_AVAIL 19
79#define DP_HTT_DBG_EXT_STATS_MAX 256
Prathyusha Guduri43bb0562018-02-12 18:30:54 +053080#define DP_MAX_SLEEP_TIME 100
Ishank Jain949674c2017-02-27 17:09:29 +053081
Yun Parkfde6b9e2017-06-26 17:13:11 -070082#ifdef IPA_OFFLOAD
83/* Exclude IPA rings from the interrupt context */
Yun Park601d0d82017-08-28 21:49:31 -070084#define TX_RING_MASK_VAL 0xb
Yun Parkfde6b9e2017-06-26 17:13:11 -070085#define RX_RING_MASK_VAL 0x7
86#else
87#define TX_RING_MASK_VAL 0xF
88#define RX_RING_MASK_VAL 0xF
89#endif
Venkateswara Swamy Bandarued15e74a2017-08-18 19:13:10 +053090
sumedh baikady72b1c712017-08-24 12:11:46 -070091#define STR_MAXLEN 64
Soumya Bhat89647ef2017-11-16 17:23:48 +053092
Soumya Bhat0d6245c2018-02-08 21:02:57 +053093#define DP_PPDU_STATS_CFG_ALL 0xFFFF
94
95/* PPDU stats mask sent to FW to enable enhanced stats */
96#define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
97/* PPDU stats mask sent to FW to support debug sniffer feature */
98#define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
Vinay Adella873dc402018-05-28 12:06:34 +053099/* PPDU stats mask sent to FW to support BPR feature*/
100#define DP_PPDU_STATS_CFG_BPR 0x2000
101/* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
102#define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
103 DP_PPDU_STATS_CFG_ENH_STATS)
104/* PPDU stats mask sent to FW to support BPR and pcktlog stats feature */
105#define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
106 DP_PPDU_TXLITE_STATS_BITMASK_CFG)
107
Vivek126db5d2018-07-25 22:05:04 +0530108#define RNG_ERR "SRNG setup failed for"
Ishank Jain949674c2017-02-27 17:09:29 +0530109/**
110 * default_dscp_tid_map - Default DSCP-TID mapping
111 *
Pamidipati, Vijayef2cbc62017-09-27 23:09:06 +0530112 * DSCP TID
113 * 000000 0
114 * 001000 1
115 * 010000 2
116 * 011000 3
117 * 100000 4
118 * 101000 5
119 * 110000 6
120 * 111000 7
Ishank Jain949674c2017-02-27 17:09:29 +0530121 */
122static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
123 0, 0, 0, 0, 0, 0, 0, 0,
124 1, 1, 1, 1, 1, 1, 1, 1,
Pamidipati, Vijayef2cbc62017-09-27 23:09:06 +0530125 2, 2, 2, 2, 2, 2, 2, 2,
126 3, 3, 3, 3, 3, 3, 3, 3,
127 4, 4, 4, 4, 4, 4, 4, 4,
Ishank Jain949674c2017-02-27 17:09:29 +0530128 5, 5, 5, 5, 5, 5, 5, 5,
129 6, 6, 6, 6, 6, 6, 6, 6,
Pamidipati, Vijayef2cbc62017-09-27 23:09:06 +0530130 7, 7, 7, 7, 7, 7, 7, 7,
Ishank Jain949674c2017-02-27 17:09:29 +0530131};
132
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +0530133/*
134 * struct dp_rate_debug
135 *
136 * @mcs_type: print string for a given mcs
137 * @valid: valid mcs rate?
138 */
139struct dp_rate_debug {
140 char mcs_type[DP_MAX_MCS_STRING_LEN];
141 uint8_t valid;
142};
143
144#define MCS_VALID 1
145#define MCS_INVALID 0
146
147static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {
Anish Nataraj072d8972018-01-09 18:23:33 +0530148
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +0530149 {
150 {"OFDM 48 Mbps", MCS_VALID},
151 {"OFDM 24 Mbps", MCS_VALID},
152 {"OFDM 12 Mbps", MCS_VALID},
153 {"OFDM 6 Mbps ", MCS_VALID},
154 {"OFDM 54 Mbps", MCS_VALID},
155 {"OFDM 36 Mbps", MCS_VALID},
156 {"OFDM 18 Mbps", MCS_VALID},
157 {"OFDM 9 Mbps ", MCS_VALID},
158 {"INVALID ", MCS_INVALID},
159 {"INVALID ", MCS_INVALID},
160 {"INVALID ", MCS_INVALID},
161 {"INVALID ", MCS_INVALID},
162 {"INVALID ", MCS_VALID},
163 },
164 {
Anish Nataraj072d8972018-01-09 18:23:33 +0530165 {"CCK 11 Mbps Long ", MCS_VALID},
166 {"CCK 5.5 Mbps Long ", MCS_VALID},
167 {"CCK 2 Mbps Long ", MCS_VALID},
168 {"CCK 1 Mbps Long ", MCS_VALID},
169 {"CCK 11 Mbps Short ", MCS_VALID},
170 {"CCK 5.5 Mbps Short", MCS_VALID},
171 {"CCK 2 Mbps Short ", MCS_VALID},
172 {"INVALID ", MCS_INVALID},
173 {"INVALID ", MCS_INVALID},
174 {"INVALID ", MCS_INVALID},
175 {"INVALID ", MCS_INVALID},
176 {"INVALID ", MCS_INVALID},
177 {"INVALID ", MCS_VALID},
178 },
179 {
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +0530180 {"HT MCS 0 (BPSK 1/2) ", MCS_VALID},
181 {"HT MCS 1 (QPSK 1/2) ", MCS_VALID},
182 {"HT MCS 2 (QPSK 3/4) ", MCS_VALID},
183 {"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
184 {"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
185 {"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
186 {"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
187 {"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
188 {"INVALID ", MCS_INVALID},
189 {"INVALID ", MCS_INVALID},
190 {"INVALID ", MCS_INVALID},
191 {"INVALID ", MCS_INVALID},
192 {"INVALID ", MCS_VALID},
193 },
194 {
195 {"VHT MCS 0 (BPSK 1/2) ", MCS_VALID},
196 {"VHT MCS 1 (QPSK 1/2) ", MCS_VALID},
197 {"VHT MCS 2 (QPSK 3/4) ", MCS_VALID},
198 {"VHT MCS 3 (16-QAM 1/2) ", MCS_VALID},
199 {"VHT MCS 4 (16-QAM 3/4) ", MCS_VALID},
200 {"VHT MCS 5 (64-QAM 2/3) ", MCS_VALID},
201 {"VHT MCS 6 (64-QAM 3/4) ", MCS_VALID},
202 {"VHT MCS 7 (64-QAM 5/6) ", MCS_VALID},
203 {"VHT MCS 8 (256-QAM 3/4) ", MCS_VALID},
204 {"VHT MCS 9 (256-QAM 5/6) ", MCS_VALID},
205 {"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
Pamidipati, Vijay57a435a2017-10-17 11:03:39 +0530206 {"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +0530207 {"INVALID ", MCS_VALID},
208 },
209 {
210 {"HE MCS 0 (BPSK 1/2) ", MCS_VALID},
211 {"HE MCS 1 (QPSK 1/2) ", MCS_VALID},
212 {"HE MCS 2 (QPSK 3/4) ", MCS_VALID},
213 {"HE MCS 3 (16-QAM 1/2) ", MCS_VALID},
214 {"HE MCS 4 (16-QAM 3/4) ", MCS_VALID},
215 {"HE MCS 5 (64-QAM 2/3) ", MCS_VALID},
216 {"HE MCS 6 (64-QAM 3/4) ", MCS_VALID},
217 {"HE MCS 7 (64-QAM 5/6) ", MCS_VALID},
218 {"HE MCS 8 (256-QAM 3/4) ", MCS_VALID},
219 {"HE MCS 9 (256-QAM 5/6) ", MCS_VALID},
220 {"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
Pamidipati, Vijay57a435a2017-10-17 11:03:39 +0530221 {"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +0530222 {"INVALID ", MCS_VALID},
223 }
224};
225
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700226/**
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +0530227 * @brief Cpu ring map types
228 */
229enum dp_cpu_ring_map_types {
230 DP_DEFAULT_MAP,
231 DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
232 DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
233 DP_NSS_ALL_RADIO_OFFLOADED_MAP,
234 DP_CPU_RING_MAP_MAX
235};
236
237/**
238 * @brief Cpu to tx ring map
239 */
240static uint8_t dp_cpu_ring_map[DP_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
241 {0x0, 0x1, 0x2, 0x0},
242 {0x1, 0x2, 0x1, 0x2},
243 {0x0, 0x2, 0x0, 0x2},
244 {0x2, 0x2, 0x2, 0x2}
245};
246
247/**
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -0800248 * @brief Select the type of statistics
249 */
250enum dp_stats_type {
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +0530251 STATS_FW = 0,
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -0800252 STATS_HOST = 1,
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +0530253 STATS_TYPE_MAX = 2,
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -0800254};
255
256/**
257 * @brief General Firmware statistics options
258 *
259 */
260enum dp_fw_stats {
261 TXRX_FW_STATS_INVALID = -1,
262};
263
264/**
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +0530265 * dp_stats_mapping_table - Firmware and Host statistics
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -0800266 * currently supported
267 */
268const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
Ishank Jain6290a3c2017-03-21 10:49:39 +0530269 {HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
270 {HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
271 {HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
272 {HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
273 {HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
274 {HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
275 {HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
276 {HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
277 {HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
278 {HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
279 {HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -0800280 {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
Ishank Jain6290a3c2017-03-21 10:49:39 +0530281 {HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
282 {HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
283 {HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
284 {HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
285 {HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
286 {HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
287 {HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
288 /* Last ENUM for HTT FW STATS */
289 {DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -0800290 {TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
Ishank Jain6290a3c2017-03-21 10:49:39 +0530291 {TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
292 {TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
293 {TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -0800294 {TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
Pamidipati, Vijay899e7752017-07-25 22:09:28 +0530295 {TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
sumedh baikady72b1c712017-08-24 12:11:46 -0700296 {TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
Kai Chen783e0382018-01-25 16:29:08 -0800297 {TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
Venkata Sharath Chandra Manchala0cb31982018-03-30 15:55:26 -0700298 {TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -0800299};
300
Mohit Khannadba82f22018-07-12 10:59:17 -0700301/* MCL specific functions */
302#ifdef CONFIG_MCL
303/**
304 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
305 * @soc: pointer to dp_soc handle
306 * @intr_ctx_num: interrupt context number for which mon mask is needed
307 *
308 * For MCL, monitor mode rings are being processed in timer contexts (polled).
309 * This function is returning 0, since in interrupt mode(softirq based RX),
310 * we donot want to process monitor mode rings in a softirq.
311 *
312 * So, in case packet log is enabled for SAP/STA/P2P modes,
313 * regular interrupt processing will not process monitor mode rings. It would be
314 * done in a separate timer context.
315 *
316 * Return: 0
317 */
318static inline
319uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
320{
321 return 0;
322}
323
324/*
325 * dp_service_mon_rings()- timer to reap monitor rings
326 * reqd as we are not getting ppdu end interrupts
327 * @arg: SoC Handle
328 *
329 * Return:
330 *
331 */
332static void dp_service_mon_rings(void *arg)
333{
334 struct dp_soc *soc = (struct dp_soc *)arg;
335 int ring = 0, work_done, mac_id;
336 struct dp_pdev *pdev = NULL;
337
338 for (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
339 pdev = soc->pdev_list[ring];
340 if (!pdev)
341 continue;
342 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
343 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
344 pdev->pdev_id);
345 work_done = dp_mon_process(soc, mac_for_pdev,
346 QCA_NAPI_BUDGET);
347
348 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
349 FL("Reaped %d descs from Monitor rings"),
350 work_done);
351 }
352 }
353
354 qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
355}
356
357#ifndef REMOVE_PKT_LOG
358/**
359 * dp_pkt_log_init() - API to initialize packet log
360 * @ppdev: physical device handle
361 * @scn: HIF context
362 *
363 * Return: none
364 */
365void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
366{
367 struct dp_pdev *handle = (struct dp_pdev *)ppdev;
368
369 if (handle->pkt_log_init) {
370 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
371 "%s: Packet log not initialized", __func__);
372 return;
373 }
374
375 pktlog_sethandle(&handle->pl_dev, scn);
376 pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION);
377
378 if (pktlogmod_init(scn)) {
379 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
380 "%s: pktlogmod_init failed", __func__);
381 handle->pkt_log_init = false;
382 } else {
383 handle->pkt_log_init = true;
384 }
385}
386
387/**
388 * dp_pkt_log_con_service() - connect packet log service
389 * @ppdev: physical device handle
390 * @scn: device context
391 *
392 * Return: none
393 */
/**
 * dp_pkt_log_con_service() - connect packet log service
 * @ppdev: physical device handle
 * @scn: device context
 *
 * Initializes packet log for the pdev and attaches the HTC transport.
 *
 * Return: none
 */
static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
{
	dp_pkt_log_init(ppdev, scn);
	pktlog_htc_attach();
}
401
402/**
403 * dp_pktlogmod_exit() - API to cleanup pktlog info
404 * @handle: Pdev handle
405 *
406 * Return: none
407 */
408static void dp_pktlogmod_exit(struct dp_pdev *handle)
409{
410 void *scn = (void *)handle->soc->hif_handle;
411
412 if (!scn) {
413 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
414 "%s: Invalid hif(scn) handle", __func__);
415 return;
416 }
417
418 pktlogmod_exit(scn);
419 handle->pkt_log_init = false;
420}
421#endif
422#else
423static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
424
425/**
426 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
427 * @soc: pointer to dp_soc handle
428 * @intr_ctx_num: interrupt context number for which mon mask is needed
429 *
430 * Return: mon mask value
431 */
432static inline
433uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
434{
435 return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
436}
437#endif
438
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530439static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
440 struct cdp_peer *peer_hdl,
441 uint8_t *mac_addr,
442 enum cdp_txrx_ast_entry_type type,
443 uint32_t flags)
444{
445
446 return dp_peer_add_ast((struct dp_soc *)soc_hdl,
447 (struct dp_peer *)peer_hdl,
448 mac_addr,
449 type,
450 flags);
451}
452
453static void dp_peer_del_ast_wifi3(struct cdp_soc_t *soc_hdl,
454 void *ast_entry_hdl)
455{
456 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
457 qdf_spin_lock_bh(&soc->ast_lock);
458 dp_peer_del_ast((struct dp_soc *)soc_hdl,
459 (struct dp_ast_entry *)ast_entry_hdl);
460 qdf_spin_unlock_bh(&soc->ast_lock);
461}
462
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530463
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530464static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
465 struct cdp_peer *peer_hdl,
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530466 uint8_t *wds_macaddr,
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530467 uint32_t flags)
468{
phadiman0381f562018-06-29 15:40:52 +0530469 int status = -1;
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530470 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530471 struct dp_ast_entry *ast_entry = NULL;
472
473 qdf_spin_lock_bh(&soc->ast_lock);
474 ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr);
475
phadiman0381f562018-06-29 15:40:52 +0530476 if (ast_entry) {
477 status = dp_peer_update_ast(soc,
478 (struct dp_peer *)peer_hdl,
479 ast_entry, flags);
480 }
481
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530482 qdf_spin_unlock_bh(&soc->ast_lock);
483
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530484 return status;
485}
486
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530487/*
488 * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530489 * @soc_handle: Datapath SOC handle
490 * @wds_macaddr: WDS entry MAC Address
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530491 * Return: None
492 */
493static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530494 uint8_t *wds_macaddr, void *vdev_handle)
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530495{
496 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
497 struct dp_ast_entry *ast_entry = NULL;
498
499 qdf_spin_lock_bh(&soc->ast_lock);
500 ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr);
501
phadiman0381f562018-06-29 15:40:52 +0530502 if (ast_entry) {
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530503 if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
504 (ast_entry->type != CDP_TXRX_AST_TYPE_SELF)) {
phadiman0381f562018-06-29 15:40:52 +0530505 ast_entry->is_active = TRUE;
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530506 }
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530507 }
phadiman0381f562018-06-29 15:40:52 +0530508
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530509 qdf_spin_unlock_bh(&soc->ast_lock);
510}
511
512/*
513 * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entry
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530514 * @soc: Datapath SOC handle
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530515 *
516 * Return: None
517 */
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530518static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t *soc_hdl,
Santosh Anbu76693bc2018-04-23 16:38:54 +0530519 void *vdev_hdl)
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530520{
521 struct dp_soc *soc = (struct dp_soc *) soc_hdl;
522 struct dp_pdev *pdev;
523 struct dp_vdev *vdev;
524 struct dp_peer *peer;
525 struct dp_ast_entry *ase, *temp_ase;
526 int i;
527
528 qdf_spin_lock_bh(&soc->ast_lock);
529
530 for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
531 pdev = soc->pdev_list[i];
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +0530532 qdf_spin_lock_bh(&pdev->vdev_list_lock);
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530533 DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
534 DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
535 DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
536 if (ase->type ==
537 CDP_TXRX_AST_TYPE_STATIC)
538 continue;
539 ase->is_active = TRUE;
540 }
541 }
542 }
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +0530543 qdf_spin_unlock_bh(&pdev->vdev_list_lock);
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530544 }
545
546 qdf_spin_unlock_bh(&soc->ast_lock);
547}
548
549/*
550 * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry
551 * @soc: Datapath SOC handle
552 *
553 * Return: None
554 */
555static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t *soc_hdl)
556{
557 struct dp_soc *soc = (struct dp_soc *) soc_hdl;
558 struct dp_pdev *pdev;
559 struct dp_vdev *vdev;
560 struct dp_peer *peer;
561 struct dp_ast_entry *ase, *temp_ase;
562 int i;
563
564 qdf_spin_lock_bh(&soc->ast_lock);
565
566 for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
567 pdev = soc->pdev_list[i];
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +0530568 qdf_spin_lock_bh(&pdev->vdev_list_lock);
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530569 DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
570 DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
571 DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
572 if (ase->type ==
573 CDP_TXRX_AST_TYPE_STATIC)
574 continue;
575 dp_peer_del_ast(soc, ase);
576 }
577 }
578 }
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +0530579 qdf_spin_unlock_bh(&pdev->vdev_list_lock);
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530580 }
581
582 qdf_spin_unlock_bh(&soc->ast_lock);
583}
584
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530585static void *dp_peer_ast_hash_find_wifi3(struct cdp_soc_t *soc_hdl,
586 uint8_t *ast_mac_addr)
587{
588 struct dp_ast_entry *ast_entry;
589 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
590 qdf_spin_lock_bh(&soc->ast_lock);
591 ast_entry = dp_peer_ast_hash_find(soc, ast_mac_addr);
592 qdf_spin_unlock_bh(&soc->ast_lock);
593 return (void *)ast_entry;
594}
595
596static uint8_t dp_peer_ast_get_pdev_id_wifi3(struct cdp_soc_t *soc_hdl,
597 void *ast_entry_hdl)
598{
599 return dp_peer_ast_get_pdev_id((struct dp_soc *)soc_hdl,
600 (struct dp_ast_entry *)ast_entry_hdl);
601}
602
603static uint8_t dp_peer_ast_get_next_hop_wifi3(struct cdp_soc_t *soc_hdl,
604 void *ast_entry_hdl)
605{
606 return dp_peer_ast_get_next_hop((struct dp_soc *)soc_hdl,
607 (struct dp_ast_entry *)ast_entry_hdl);
608}
609
610static void dp_peer_ast_set_type_wifi3(
611 struct cdp_soc_t *soc_hdl,
612 void *ast_entry_hdl,
613 enum cdp_txrx_ast_entry_type type)
614{
615 dp_peer_ast_set_type((struct dp_soc *)soc_hdl,
616 (struct dp_ast_entry *)ast_entry_hdl,
617 type);
618}
619
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530620static enum cdp_txrx_ast_entry_type dp_peer_ast_get_type_wifi3(
621 struct cdp_soc_t *soc_hdl,
622 void *ast_entry_hdl)
623{
624 return ((struct dp_ast_entry *)ast_entry_hdl)->type;
625}
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530626
Houston Hoffman648a9182017-05-21 23:27:50 -0700627/**
628 * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
629 * @ring_num: ring num of the ring being queried
630 * @grp_mask: the grp_mask array for the ring type in question.
631 *
632 * The grp_mask array is indexed by group number and the bit fields correspond
633 * to ring numbers. We are finding which interrupt group a ring belongs to.
634 *
635 * Return: the index in the grp_mask array with the ring number.
636 * -QDF_STATUS_E_NOENT if no entry is found
637 */
638static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
639{
640 int ext_group_num;
641 int mask = 1 << ring_num;
642
643 for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
644 ext_group_num++) {
645 if (mask & grp_mask[ext_group_num])
646 return ext_group_num;
647 }
648
649 return -QDF_STATUS_E_NOENT;
650}
651
652static int dp_srng_calculate_msi_group(struct dp_soc *soc,
653 enum hal_ring_type ring_type,
654 int ring_num)
655{
656 int *grp_mask;
657
658 switch (ring_type) {
659 case WBM2SW_RELEASE:
660 /* dp_tx_comp_handler - soc->tx_comp_ring */
661 if (ring_num < 3)
662 grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
663
664 /* dp_rx_wbm_err_process - soc->rx_rel_ring */
665 else if (ring_num == 3) {
666 /* sw treats this as a separate ring type */
667 grp_mask = &soc->wlan_cfg_ctx->
668 int_rx_wbm_rel_ring_mask[0];
669 ring_num = 0;
670 } else {
671 qdf_assert(0);
672 return -QDF_STATUS_E_NOENT;
673 }
674 break;
675
676 case REO_EXCEPTION:
677 /* dp_rx_err_process - &soc->reo_exception_ring */
678 grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
679 break;
680
681 case REO_DST:
682 /* dp_rx_process - soc->reo_dest_ring */
683 grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
684 break;
685
686 case REO_STATUS:
687 /* dp_reo_status_ring_handler - soc->reo_status_ring */
688 grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
689 break;
690
691 /* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
692 case RXDMA_MONITOR_STATUS:
693 /* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
694 case RXDMA_MONITOR_DST:
695 /* dp_mon_process */
696 grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
697 break;
Karunakar Dasineniea027c52017-09-20 16:27:46 -0700698 case RXDMA_DST:
699 /* dp_rxdma_err_process */
700 grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
701 break;
Houston Hoffman648a9182017-05-21 23:27:50 -0700702
Houston Hoffman648a9182017-05-21 23:27:50 -0700703 case RXDMA_BUF:
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -0700704 grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
705 break;
706
707 case RXDMA_MONITOR_BUF:
Houston Hoffman648a9182017-05-21 23:27:50 -0700708 /* TODO: support low_thresh interrupt */
709 return -QDF_STATUS_E_NOENT;
710 break;
711
712 case TCL_DATA:
713 case TCL_CMD:
714 case REO_CMD:
715 case SW2WBM_RELEASE:
716 case WBM_IDLE_LINK:
717 /* normally empty SW_TO_HW rings */
718 return -QDF_STATUS_E_NOENT;
719 break;
720
721 case TCL_STATUS:
722 case REO_REINJECT:
Houston Hoffman648a9182017-05-21 23:27:50 -0700723 /* misc unused rings */
724 return -QDF_STATUS_E_NOENT;
725 break;
726
727 case CE_SRC:
728 case CE_DST:
729 case CE_DST_STATUS:
730 /* CE_rings - currently handled by hif */
731 default:
732 return -QDF_STATUS_E_NOENT;
733 break;
734 }
735
736 return dp_srng_find_ring_in_mask(ring_num, grp_mask);
737}
738
739static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
740 *ring_params, int ring_type, int ring_num)
741{
742 int msi_group_number;
743 int msi_data_count;
744 int ret;
745 uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
746
747 ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
748 &msi_data_count, &msi_data_start,
749 &msi_irq_start);
750
751 if (ret)
752 return;
753
754 msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
755 ring_num);
756 if (msi_group_number < 0) {
Houston Hoffman41b912c2017-08-30 14:27:51 -0700757 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
Houston Hoffman648a9182017-05-21 23:27:50 -0700758 FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
759 ring_type, ring_num);
760 ring_params->msi_addr = 0;
761 ring_params->msi_data = 0;
762 return;
763 }
764
765 if (msi_group_number > msi_data_count) {
766 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
767 FL("2 msi_groups will share an msi; msi_group_num %d"),
768 msi_group_number);
769
770 QDF_ASSERT(0);
771 }
772
773 pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
774
775 ring_params->msi_addr = addr_low;
776 ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
777 ring_params->msi_data = (msi_group_number % msi_data_count)
778 + msi_data_start;
779 ring_params->flags |= HAL_SRNG_MSI_INTR;
780}
781
782/**
Pamidipati, Vijay899e7752017-07-25 22:09:28 +0530783 * dp_print_ast_stats() - Dump AST table contents
784 * @soc: Datapath soc handle
785 *
786 * return void
787 */
Tallapragada Kalyan71c46b92018-03-01 13:17:10 +0530788#ifdef FEATURE_AST
Pamidipati, Vijay899e7752017-07-25 22:09:28 +0530789static void dp_print_ast_stats(struct dp_soc *soc)
790{
791 uint8_t i;
792 uint8_t num_entries = 0;
793 struct dp_vdev *vdev;
794 struct dp_pdev *pdev;
795 struct dp_peer *peer;
796 struct dp_ast_entry *ase, *tmp_ase;
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530797 char type[CDP_TXRX_AST_TYPE_MAX][10] = {
798 "NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS"};
Pamidipati, Vijay899e7752017-07-25 22:09:28 +0530799
800 DP_PRINT_STATS("AST Stats:");
801 DP_PRINT_STATS(" Entries Added = %d", soc->stats.ast.added);
802 DP_PRINT_STATS(" Entries Deleted = %d", soc->stats.ast.deleted);
803 DP_PRINT_STATS(" Entries Agedout = %d", soc->stats.ast.aged_out);
804 DP_PRINT_STATS("AST Table:");
805 for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
806 pdev = soc->pdev_list[i];
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +0530807 qdf_spin_lock_bh(&pdev->vdev_list_lock);
Pamidipati, Vijay899e7752017-07-25 22:09:28 +0530808 DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
809 DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
810 DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
811 DP_PRINT_STATS("%6d mac_addr = %pM"
812 " peer_mac_addr = %pM"
Tallapragada Kalyan71c46b92018-03-01 13:17:10 +0530813 " type = %s"
Pamidipati, Vijay899e7752017-07-25 22:09:28 +0530814 " next_hop = %d"
815 " is_active = %d"
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530816 " is_bss = %d"
817 " ast_idx = %d"
818 " pdev_id = %d"
819 " vdev_id = %d",
Pamidipati, Vijay899e7752017-07-25 22:09:28 +0530820 ++num_entries,
821 ase->mac_addr.raw,
822 ase->peer->mac_addr.raw,
Tallapragada Kalyan71c46b92018-03-01 13:17:10 +0530823 type[ase->type],
Pamidipati, Vijay899e7752017-07-25 22:09:28 +0530824 ase->next_hop,
825 ase->is_active,
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530826 ase->is_bss,
827 ase->ast_idx,
828 ase->pdev_id,
829 ase->vdev_id);
Pamidipati, Vijay899e7752017-07-25 22:09:28 +0530830 }
831 }
832 }
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +0530833 qdf_spin_unlock_bh(&pdev->vdev_list_lock);
Pamidipati, Vijay899e7752017-07-25 22:09:28 +0530834 }
835}
836#else
/* Stub for builds without FEATURE_AST.
 * Fix: added the missing space in the user-visible message
 * ("available.Enable" -> "available. Enable") and dropped the
 * redundant trailing return in a void function.
 */
static void dp_print_ast_stats(struct dp_soc *soc)
{
	DP_PRINT_STATS("AST Stats not available. Enable FEATURE_AST");
}
842#endif
843
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530844static void dp_print_peer_table(struct dp_vdev *vdev)
845{
846 struct dp_peer *peer = NULL;
847
848 DP_PRINT_STATS("Dumping Peer Table Stats:");
849 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
850 if (!peer) {
851 DP_PRINT_STATS("Invalid Peer");
852 return;
853 }
854 DP_PRINT_STATS(" peer_mac_addr = %pM"
855 " nawds_enabled = %d"
856 " bss_peer = %d"
857 " wapi = %d"
858 " wds_enabled = %d"
859 " delete in progress = %d",
860 peer->mac_addr.raw,
861 peer->nawds_enabled,
862 peer->bss_peer,
863 peer->wapi,
864 peer->wds_enabled,
865 peer->delete_in_progress);
866 }
867}
868
Pamidipati, Vijay899e7752017-07-25 22:09:28 +0530869/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700870 * dp_setup_srng - Internal function to setup SRNG rings used by data path
871 */
872static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
Dhanashri Atred4032ab2017-01-17 15:05:41 -0800873 int ring_type, int ring_num, int mac_id, uint32_t num_entries)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700874{
875 void *hal_soc = soc->hal_soc;
876 uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
877 /* TODO: See if we should get align size from hal */
878 uint32_t ring_base_align = 8;
879 struct hal_srng_params ring_params;
Karunakar Dasinenid0ea21f2017-01-31 22:58:15 -0800880 uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700881
Houston Hoffman648a9182017-05-21 23:27:50 -0700882 /* TODO: Currently hal layer takes care of endianness related settings.
883 * See if these settings need to passed from DP layer
884 */
885 ring_params.flags = 0;
Houston Hoffman41b912c2017-08-30 14:27:51 -0700886 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
Yun Parkfde6b9e2017-06-26 17:13:11 -0700887 FL("Ring type: %d, num:%d"), ring_type, ring_num);
Houston Hoffman648a9182017-05-21 23:27:50 -0700888
Karunakar Dasinenid0ea21f2017-01-31 22:58:15 -0800889 num_entries = (num_entries > max_entries) ? max_entries : num_entries;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700890 srng->hal_srng = NULL;
891 srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -0700892 srng->num_entries = num_entries;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700893 srng->base_vaddr_unaligned = qdf_mem_alloc_consistent(
Dhanashri Atre57e420d2016-10-25 21:13:54 -0700894 soc->osdev, soc->osdev->dev, srng->alloc_size,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700895 &(srng->base_paddr_unaligned));
896
897 if (!srng->base_vaddr_unaligned) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +0530898 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
899 FL("alloc failed - ring_type: %d, ring_num %d"),
900 ring_type, ring_num);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700901 return QDF_STATUS_E_NOMEM;
902 }
903
904 ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
905 ((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
906 ring_params.ring_base_paddr = srng->base_paddr_unaligned +
907 ((unsigned long)(ring_params.ring_base_vaddr) -
908 (unsigned long)srng->base_vaddr_unaligned);
909 ring_params.num_entries = num_entries;
910
psimhac983d7e2017-07-26 15:20:07 -0700911 if (soc->intr_mode == DP_INTR_MSI) {
912 dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
913 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
914 FL("Using MSI for ring_type: %d, ring_num %d"),
915 ring_type, ring_num);
916
917 } else {
918 ring_params.msi_data = 0;
919 ring_params.msi_addr = 0;
920 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
921 FL("Skipping MSI for ring_type: %d, ring_num %d"),
922 ring_type, ring_num);
923 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700924
Pamidipati, Vijay45b1df22017-06-21 03:20:25 +0530925 /*
926 * Setup interrupt timer and batch counter thresholds for
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700927 * interrupt mitigation based on ring type
928 */
Pamidipati, Vijay45b1df22017-06-21 03:20:25 +0530929 if (ring_type == REO_DST) {
930 ring_params.intr_timer_thres_us =
931 wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
932 ring_params.intr_batch_cntr_thres_entries =
933 wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
934 } else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
935 ring_params.intr_timer_thres_us =
936 wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
937 ring_params.intr_batch_cntr_thres_entries =
938 wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
939 } else {
940 ring_params.intr_timer_thres_us =
941 wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
942 ring_params.intr_batch_cntr_thres_entries =
Karunakar Dasineni25f1b042018-02-15 23:26:17 -0800943 wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
Pamidipati, Vijay45b1df22017-06-21 03:20:25 +0530944 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700945
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700946 /* Enable low threshold interrupts for rx buffer rings (regular and
947 * monitor buffer rings.
948 * TODO: See if this is required for any other ring
949 */
Karunakar Dasineni37995ac2018-02-06 12:37:30 -0800950 if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
951 (ring_type == RXDMA_MONITOR_STATUS)) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700952 /* TODO: Setting low threshold to 1/8th of ring size
953 * see if this needs to be configurable
954 */
955 ring_params.low_threshold = num_entries >> 3;
956 ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
Karunakar Dasinenibef3b1b2018-03-28 22:23:57 -0700957 ring_params.intr_timer_thres_us =
958 wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
959 ring_params.intr_batch_cntr_thres_entries = 0;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700960 }
961
962 srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
Dhanashri Atred4032ab2017-01-17 15:05:41 -0800963 mac_id, &ring_params);
Manoj Ekbote376116e2017-12-19 10:44:41 -0800964
965 if (!srng->hal_srng) {
966 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
967 srng->alloc_size,
968 srng->base_vaddr_unaligned,
969 srng->base_paddr_unaligned, 0);
970 }
971
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700972 return 0;
973}
974
975/**
976 * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
977 * Any buffers allocated and attached to ring entries are expected to be freed
978 * before calling this function.
979 */
980static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
981 int ring_type, int ring_num)
982{
983 if (!srng->hal_srng) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +0530984 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
985 FL("Ring type: %d, num:%d not setup"),
986 ring_type, ring_num);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700987 return;
988 }
989
990 hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
991
Dhanashri Atre57e420d2016-10-25 21:13:54 -0700992 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700993 srng->alloc_size,
994 srng->base_vaddr_unaligned,
995 srng->base_paddr_unaligned, 0);
Manoj Ekbote525bcab2017-09-01 17:23:32 -0700996 srng->hal_srng = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700997}
998
999/* TODO: Need this interface from HIF */
1000void *hif_get_hal_handle(void *hif_handle);
1001
1002/*
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301003 * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
1004 * @dp_ctx: DP SOC handle
1005 * @budget: Number of frames/descriptors that can be processed in one shot
1006 *
1007 * Return: remaining budget/quota for the soc device
1008 */
Jeff Johnsonf1352572017-01-10 14:24:10 -08001009static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301010{
1011 struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
1012 struct dp_soc *soc = int_ctx->soc;
1013 int ring = 0;
1014 uint32_t work_done = 0;
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301015 int budget = dp_budget;
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301016 uint8_t tx_mask = int_ctx->tx_ring_mask;
1017 uint8_t rx_mask = int_ctx->rx_ring_mask;
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301018 uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
1019 uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001020 uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301021 uint32_t remaining_quota = dp_budget;
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001022 struct dp_pdev *pdev = NULL;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08001023 int mac_id;
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301024
1025 /* Process Tx completion interrupts first to return back buffers */
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301026 while (tx_mask) {
1027 if (tx_mask & 0x1) {
Houston Hoffmanae850c62017-08-11 16:47:50 -07001028 work_done = dp_tx_comp_handler(soc,
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301029 soc->tx_comp_ring[ring].hal_srng,
1030 remaining_quota);
1031
Houston Hoffmanae850c62017-08-11 16:47:50 -07001032 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1033 "tx mask 0x%x ring %d, budget %d, work_done %d",
1034 tx_mask, ring, budget, work_done);
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301035
1036 budget -= work_done;
1037 if (budget <= 0)
1038 goto budget_done;
1039
1040 remaining_quota = budget;
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301041 }
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301042 tx_mask = tx_mask >> 1;
1043 ring++;
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301044 }
1045
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301046
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301047 /* Process REO Exception ring interrupt */
1048 if (rx_err_mask) {
1049 work_done = dp_rx_err_process(soc,
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301050 soc->reo_exception_ring.hal_srng,
1051 remaining_quota);
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301052
Houston Hoffmanae850c62017-08-11 16:47:50 -07001053 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1054 "REO Exception Ring: work_done %d budget %d",
1055 work_done, budget);
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301056
1057 budget -= work_done;
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301058 if (budget <= 0) {
1059 goto budget_done;
1060 }
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301061 remaining_quota = budget;
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301062 }
1063
1064 /* Process Rx WBM release ring interrupt */
1065 if (rx_wbm_rel_mask) {
1066 work_done = dp_rx_wbm_err_process(soc,
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301067 soc->rx_rel_ring.hal_srng, remaining_quota);
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301068
Houston Hoffmanae850c62017-08-11 16:47:50 -07001069 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1070 "WBM Release Ring: work_done %d budget %d",
1071 work_done, budget);
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301072
1073 budget -= work_done;
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301074 if (budget <= 0) {
1075 goto budget_done;
1076 }
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301077 remaining_quota = budget;
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301078 }
1079
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301080 /* Process Rx interrupts */
1081 if (rx_mask) {
1082 for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
1083 if (rx_mask & (1 << ring)) {
Houston Hoffmanae850c62017-08-11 16:47:50 -07001084 work_done = dp_rx_process(int_ctx,
Leo Chang5ea93a42016-11-03 12:39:49 -07001085 soc->reo_dest_ring[ring].hal_srng,
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301086 remaining_quota);
1087
Houston Hoffmanae850c62017-08-11 16:47:50 -07001088 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1089 "rx mask 0x%x ring %d, work_done %d budget %d",
1090 rx_mask, ring, work_done, budget);
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301091
1092 budget -= work_done;
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301093 if (budget <= 0)
1094 goto budget_done;
Pamidipati, Vijay9e340252017-08-14 16:24:17 +05301095 remaining_quota = budget;
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301096 }
1097 }
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08001098 for (ring = 0; ring < MAX_RX_MAC_RINGS; ring++) {
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08001099 work_done = dp_rxdma_err_process(soc, ring,
1100 remaining_quota);
1101 budget -= work_done;
1102 }
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301103 }
1104
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001105 if (reo_status_mask)
1106 dp_reo_status_ring_handler(soc);
1107
Karunakar Dasineni10185472017-06-19 16:32:06 -07001108 /* Process LMAC interrupts */
Kai Chen6eca1a62017-01-12 10:17:53 -08001109 for (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001110 pdev = soc->pdev_list[ring];
1111 if (pdev == NULL)
Karunakar Dasineni10185472017-06-19 16:32:06 -07001112 continue;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08001113 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
1114 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
1115 pdev->pdev_id);
1116
1117 if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
1118 work_done = dp_mon_process(soc, mac_for_pdev,
1119 remaining_quota);
1120 budget -= work_done;
1121 if (budget <= 0)
1122 goto budget_done;
1123 remaining_quota = budget;
1124 }
Pramod Simhae382ff82017-06-05 18:09:26 -07001125
chenguocd0f3132018-02-28 15:53:50 -08001126 if (int_ctx->rxdma2host_ring_mask &
1127 (1 << mac_for_pdev)) {
1128 work_done = dp_rxdma_err_process(soc,
1129 mac_for_pdev,
1130 remaining_quota);
1131 budget -= work_done;
1132 if (budget <= 0)
1133 goto budget_done;
1134 remaining_quota = budget;
1135 }
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001136
chenguocd0f3132018-02-28 15:53:50 -08001137 if (int_ctx->host2rxdma_ring_mask &
1138 (1 << mac_for_pdev)) {
1139 union dp_rx_desc_list_elem_t *desc_list = NULL;
1140 union dp_rx_desc_list_elem_t *tail = NULL;
1141 struct dp_srng *rx_refill_buf_ring =
1142 &pdev->rx_refill_buf_ring;
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001143
chenguocd0f3132018-02-28 15:53:50 -08001144 DP_STATS_INC(pdev, replenish.low_thresh_intrs,
1145 1);
1146 dp_rx_buffers_replenish(soc, mac_for_pdev,
1147 rx_refill_buf_ring,
1148 &soc->rx_desc_buf[mac_for_pdev], 0,
Venkata Sharath Chandra Manchala16fcceb2018-01-03 11:27:15 -08001149 &desc_list, &tail);
chenguocd0f3132018-02-28 15:53:50 -08001150 }
Pramod Simhae382ff82017-06-05 18:09:26 -07001151 }
Kai Chen6eca1a62017-01-12 10:17:53 -08001152 }
1153
Dhanashri Atre0da31222017-03-23 12:30:58 -07001154 qdf_lro_flush(int_ctx->lro_ctx);
1155
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301156budget_done:
1157 return dp_budget - budget;
1158}
1159
psimhac983d7e2017-07-26 15:20:07 -07001160#ifdef DP_INTR_POLL_BASED
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301161/* dp_interrupt_timer()- timer poll for interrupts
1162 *
1163 * @arg: SoC Handle
1164 *
1165 * Return:
1166 *
1167 */
Jeff Johnsonf1352572017-01-10 14:24:10 -08001168static void dp_interrupt_timer(void *arg)
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301169{
1170 struct dp_soc *soc = (struct dp_soc *) arg;
1171 int i;
1172
Ravi Joshi86e98262017-03-01 13:47:03 -08001173 if (qdf_atomic_read(&soc->cmn_init_done)) {
1174 for (i = 0;
1175 i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
1176 dp_service_srngs(&soc->intr_ctx[i], 0xffff);
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301177
Ravi Joshi86e98262017-03-01 13:47:03 -08001178 qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
1179 }
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301180}
1181
1182/*
psimhac983d7e2017-07-26 15:20:07 -07001183 * dp_soc_interrupt_attach_poll() - Register handlers for DP interrupts
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301184 * @txrx_soc: DP SOC handle
1185 *
1186 * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
1187 * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
1188 * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1189 *
1190 * Return: 0 for success. nonzero for failure.
1191 */
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05301192static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301193{
1194 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1195 int i;
1196
psimhac983d7e2017-07-26 15:20:07 -07001197 soc->intr_mode = DP_INTR_POLL;
1198
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301199 for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
Houston Hoffman648a9182017-05-21 23:27:50 -07001200 soc->intr_ctx[i].dp_intr_id = i;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07001201 soc->intr_ctx[i].tx_ring_mask =
1202 wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1203 soc->intr_ctx[i].rx_ring_mask =
1204 wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1205 soc->intr_ctx[i].rx_mon_ring_mask =
1206 wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1207 soc->intr_ctx[i].rx_err_ring_mask =
1208 wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1209 soc->intr_ctx[i].rx_wbm_rel_ring_mask =
1210 wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1211 soc->intr_ctx[i].reo_status_ring_mask =
1212 wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1213 soc->intr_ctx[i].rxdma2host_ring_mask =
1214 wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301215 soc->intr_ctx[i].soc = soc;
Dhanashri Atre0da31222017-03-23 12:30:58 -07001216 soc->intr_ctx[i].lro_ctx = qdf_lro_init();
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301217 }
1218
1219 qdf_timer_init(soc->osdev, &soc->int_timer,
1220 dp_interrupt_timer, (void *)soc,
1221 QDF_TIMER_TYPE_WAKE_APPS);
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301222
1223 return QDF_STATUS_SUCCESS;
1224}
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05301225#else
1226static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
1227{
1228 return -QDF_STATUS_E_NOSUPPORT;
1229}
1230#endif
D Harilakshmi5da9ee72017-10-04 16:14:12 +05301231
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05301232static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
Venkata Sharath Chandra Manchala3e8add82017-07-10 11:59:54 -07001233#if defined(CONFIG_MCL)
D Harilakshmi5da9ee72017-10-04 16:14:12 +05301234extern int con_mode_monitor;
D Harilakshmi5da9ee72017-10-04 16:14:12 +05301235/*
1236 * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
1237 * @txrx_soc: DP SOC handle
1238 *
1239 * Call the appropriate attach function based on the mode of operation.
1240 * This is a WAR for enabling monitor mode.
1241 *
1242 * Return: 0 for success. nonzero for failure.
1243 */
1244static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1245{
Venkata Sharath Chandra Manchala3e8add82017-07-10 11:59:54 -07001246 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1247
1248 if (!(soc->wlan_cfg_ctx->napi_enabled) ||
1249 con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
Mohit Khanna9a6fdd52017-12-12 10:55:48 +08001250 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1251 "%s: Poll mode", __func__);
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05301252 return dp_soc_attach_poll(txrx_soc);
D Harilakshmi5da9ee72017-10-04 16:14:12 +05301253 } else {
Venkata Sharath Chandra Manchala3e8add82017-07-10 11:59:54 -07001254
Mohit Khanna9a6fdd52017-12-12 10:55:48 +08001255 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1256 "%s: Interrupt mode", __func__);
D Harilakshmi5da9ee72017-10-04 16:14:12 +05301257 return dp_soc_interrupt_attach(txrx_soc);
1258 }
1259}
1260#else
1261static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1262{
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05301263 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1264
1265 if (hif_is_polled_mode_enabled(soc->hif_handle))
1266 return dp_soc_attach_poll(txrx_soc);
1267 else
1268 return dp_soc_interrupt_attach(txrx_soc);
D Harilakshmi5da9ee72017-10-04 16:14:12 +05301269}
1270#endif
Houston Hoffman648a9182017-05-21 23:27:50 -07001271
1272static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
1273 int intr_ctx_num, int *irq_id_map, int *num_irq_r)
1274{
1275 int j;
1276 int num_irq = 0;
1277
1278 int tx_mask =
1279 wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1280 int rx_mask =
1281 wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1282 int rx_mon_mask =
1283 wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1284 int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1285 soc->wlan_cfg_ctx, intr_ctx_num);
1286 int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1287 soc->wlan_cfg_ctx, intr_ctx_num);
1288 int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1289 soc->wlan_cfg_ctx, intr_ctx_num);
Karunakar Dasineniea027c52017-09-20 16:27:46 -07001290 int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1291 soc->wlan_cfg_ctx, intr_ctx_num);
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001292 int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
1293 soc->wlan_cfg_ctx, intr_ctx_num);
Houston Hoffman648a9182017-05-21 23:27:50 -07001294
1295 for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
1296
1297 if (tx_mask & (1 << j)) {
1298 irq_id_map[num_irq++] =
1299 (wbm2host_tx_completions_ring1 - j);
1300 }
1301
1302 if (rx_mask & (1 << j)) {
1303 irq_id_map[num_irq++] =
1304 (reo2host_destination_ring1 - j);
1305 }
1306
Karunakar Dasineniea027c52017-09-20 16:27:46 -07001307 if (rxdma2host_ring_mask & (1 << j)) {
1308 irq_id_map[num_irq++] =
1309 rxdma2host_destination_ring_mac1 -
1310 wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1311 }
1312
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001313 if (host2rxdma_ring_mask & (1 << j)) {
1314 irq_id_map[num_irq++] =
1315 host2rxdma_host_buf_ring_mac1 -
1316 wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1317 }
1318
Houston Hoffman648a9182017-05-21 23:27:50 -07001319 if (rx_mon_mask & (1 << j)) {
1320 irq_id_map[num_irq++] =
Karunakar Dasineniea027c52017-09-20 16:27:46 -07001321 ppdu_end_interrupts_mac1 -
1322 wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
Karunakar Dasineni37995ac2018-02-06 12:37:30 -08001323 irq_id_map[num_irq++] =
1324 rxdma2host_monitor_status_ring_mac1 -
1325 wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
Houston Hoffman648a9182017-05-21 23:27:50 -07001326 }
Karunakar Dasineniea027c52017-09-20 16:27:46 -07001327
Houston Hoffman648a9182017-05-21 23:27:50 -07001328 if (rx_wbm_rel_ring_mask & (1 << j))
1329 irq_id_map[num_irq++] = wbm2host_rx_release;
1330
1331 if (rx_err_ring_mask & (1 << j))
1332 irq_id_map[num_irq++] = reo2host_exception;
1333
1334 if (reo_status_ring_mask & (1 << j))
1335 irq_id_map[num_irq++] = reo2host_status;
1336
1337 }
1338 *num_irq_r = num_irq;
1339}
1340
1341static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
1342 int intr_ctx_num, int *irq_id_map, int *num_irq_r,
1343 int msi_vector_count, int msi_vector_start)
1344{
1345 int tx_mask = wlan_cfg_get_tx_ring_mask(
1346 soc->wlan_cfg_ctx, intr_ctx_num);
1347 int rx_mask = wlan_cfg_get_rx_ring_mask(
1348 soc->wlan_cfg_ctx, intr_ctx_num);
1349 int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1350 soc->wlan_cfg_ctx, intr_ctx_num);
1351 int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1352 soc->wlan_cfg_ctx, intr_ctx_num);
1353 int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1354 soc->wlan_cfg_ctx, intr_ctx_num);
1355 int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1356 soc->wlan_cfg_ctx, intr_ctx_num);
Karunakar Dasineniea027c52017-09-20 16:27:46 -07001357 int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1358 soc->wlan_cfg_ctx, intr_ctx_num);
Houston Hoffman648a9182017-05-21 23:27:50 -07001359
1360 unsigned int vector =
1361 (intr_ctx_num % msi_vector_count) + msi_vector_start;
1362 int num_irq = 0;
1363
psimhac983d7e2017-07-26 15:20:07 -07001364 soc->intr_mode = DP_INTR_MSI;
1365
Houston Hoffman648a9182017-05-21 23:27:50 -07001366 if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
Karunakar Dasineniea027c52017-09-20 16:27:46 -07001367 rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
Houston Hoffman648a9182017-05-21 23:27:50 -07001368 irq_id_map[num_irq++] =
1369 pld_get_msi_irq(soc->osdev->dev, vector);
1370
1371 *num_irq_r = num_irq;
1372}
1373
1374static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1375 int *irq_id_map, int *num_irq)
1376{
1377 int msi_vector_count, ret;
1378 uint32_t msi_base_data, msi_vector_start;
1379
1380 ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1381 &msi_vector_count,
1382 &msi_base_data,
1383 &msi_vector_start);
1384 if (ret)
1385 return dp_soc_interrupt_map_calculate_integrated(soc,
1386 intr_ctx_num, irq_id_map, num_irq);
1387
1388 else
1389 dp_soc_interrupt_map_calculate_msi(soc,
1390 intr_ctx_num, irq_id_map, num_irq,
1391 msi_vector_count, msi_vector_start);
1392}
1393
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301394/*
1395 * dp_soc_interrupt_attach() - Register handlers for DP interrupts
1396 * @txrx_soc: DP SOC handle
1397 *
1398 * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
1399 * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
1400 * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1401 *
1402 * Return: 0 for success. nonzero for failure.
1403 */
Jeff Johnsonf1352572017-01-10 14:24:10 -08001404static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301405{
1406 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1407
1408 int i = 0;
1409 int num_irq = 0;
1410
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301411 for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
Leo Chang5ea93a42016-11-03 12:39:49 -07001412 int ret = 0;
1413
1414 /* Map of IRQ ids registered with one interrupt context */
1415 int irq_id_map[HIF_MAX_GRP_IRQ];
1416
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301417 int tx_mask =
1418 wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1419 int rx_mask =
1420 wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1421 int rx_mon_mask =
Mohit Khannadba82f22018-07-12 10:59:17 -07001422 dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
Nandha Kishore Easwaran82ac62e2017-06-20 17:55:07 +05301423 int rx_err_ring_mask =
1424 wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1425 int rx_wbm_rel_ring_mask =
1426 wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1427 int reo_status_ring_mask =
1428 wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
Karunakar Dasineni10185472017-06-19 16:32:06 -07001429 int rxdma2host_ring_mask =
1430 wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001431 int host2rxdma_ring_mask =
1432 wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1433
Pamidipati, Vijay3d8e1e82017-05-29 14:29:31 +05301434
Houston Hoffman648a9182017-05-21 23:27:50 -07001435 soc->intr_ctx[i].dp_intr_id = i;
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301436 soc->intr_ctx[i].tx_ring_mask = tx_mask;
1437 soc->intr_ctx[i].rx_ring_mask = rx_mask;
1438 soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
Pamidipati, Vijay3d8e1e82017-05-29 14:29:31 +05301439 soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
Karunakar Dasineni10185472017-06-19 16:32:06 -07001440 soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001441 soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
Pamidipati, Vijay3d8e1e82017-05-29 14:29:31 +05301442 soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
1443 soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
1444
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301445 soc->intr_ctx[i].soc = soc;
1446
1447 num_irq = 0;
1448
Houston Hoffman648a9182017-05-21 23:27:50 -07001449 dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
1450 &num_irq);
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301451
Houston Hoffmandef86a32017-04-21 20:23:45 -07001452 ret = hif_register_ext_group(soc->hif_handle,
1453 num_irq, irq_id_map, dp_service_srngs,
1454 &soc->intr_ctx[i], "dp_intr",
chenguof2548862017-11-08 16:33:25 +08001455 HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301456
1457 if (ret) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301458 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1459 FL("failed, ret = %d"), ret);
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301460
1461 return QDF_STATUS_E_FAILURE;
1462 }
Dhanashri Atre0da31222017-03-23 12:30:58 -07001463 soc->intr_ctx[i].lro_ctx = qdf_lro_init();
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301464 }
1465
Venkateswara Swamy Bandaru19dc8b22017-03-13 15:09:24 +05301466 hif_configure_ext_group_interrupts(soc->hif_handle);
1467
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301468 return QDF_STATUS_SUCCESS;
1469}
1470
1471/*
1472 * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
1473 * @txrx_soc: DP SOC handle
1474 *
1475 * Return: void
1476 */
Jeff Johnsonf1352572017-01-10 14:24:10 -08001477static void dp_soc_interrupt_detach(void *txrx_soc)
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301478{
1479 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
Leo Chang5ea93a42016-11-03 12:39:49 -07001480 int i;
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301481
psimhac983d7e2017-07-26 15:20:07 -07001482 if (soc->intr_mode == DP_INTR_POLL) {
1483 qdf_timer_stop(&soc->int_timer);
1484 qdf_timer_free(&soc->int_timer);
psimhaa079b8c2017-08-02 17:27:14 -07001485 } else {
1486 hif_deregister_exec_group(soc->hif_handle, "dp_intr");
psimhac983d7e2017-07-26 15:20:07 -07001487 }
1488
Leo Chang5ea93a42016-11-03 12:39:49 -07001489 for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1490 soc->intr_ctx[i].tx_ring_mask = 0;
1491 soc->intr_ctx[i].rx_ring_mask = 0;
1492 soc->intr_ctx[i].rx_mon_ring_mask = 0;
Houston Hoffman648a9182017-05-21 23:27:50 -07001493 soc->intr_ctx[i].rx_err_ring_mask = 0;
1494 soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1495 soc->intr_ctx[i].reo_status_ring_mask = 0;
Karunakar Dasineniea027c52017-09-20 16:27:46 -07001496 soc->intr_ctx[i].rxdma2host_ring_mask = 0;
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001497 soc->intr_ctx[i].host2rxdma_ring_mask = 0;
Houston Hoffman648a9182017-05-21 23:27:50 -07001498
Dhanashri Atre0da31222017-03-23 12:30:58 -07001499 qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
Leo Chang5ea93a42016-11-03 12:39:49 -07001500 }
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301501}
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301502
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001503#define AVG_MAX_MPDUS_PER_TID 128
1504#define AVG_TIDS_PER_CLIENT 2
1505#define AVG_FLOWS_PER_TID 2
1506#define AVG_MSDUS_PER_FLOW 128
1507#define AVG_MSDUS_PER_MPDU 4
1508
1509/*
1510 * Allocate and setup link descriptor pool that will be used by HW for
1511 * various link and queue descriptors and managed by WBM
1512 */
1513static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
1514{
1515 int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
1516 int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
1517 uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
1518 uint32_t num_mpdus_per_link_desc =
1519 hal_num_mpdus_per_link_desc(soc->hal_soc);
1520 uint32_t num_msdus_per_link_desc =
1521 hal_num_msdus_per_link_desc(soc->hal_soc);
1522 uint32_t num_mpdu_links_per_queue_desc =
1523 hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
1524 uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1525 uint32_t total_link_descs, total_mem_size;
1526 uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
1527 uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
1528 uint32_t num_link_desc_banks;
1529 uint32_t last_bank_size = 0;
1530 uint32_t entry_size, num_entries;
1531 int i;
Karunakar Dasinenidbaf4be2017-07-19 18:12:43 -07001532 uint32_t desc_id = 0;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001533
1534 /* Only Tx queue descriptors are allocated from common link descriptor
1535 * pool Rx queue descriptors are not included in this because (REO queue
1536 * extension descriptors) they are expected to be allocated contiguously
1537 * with REO queue descriptors
1538 */
1539 num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1540 AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
1541
1542 num_mpdu_queue_descs = num_mpdu_link_descs /
1543 num_mpdu_links_per_queue_desc;
1544
1545 num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1546 AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
1547 num_msdus_per_link_desc;
1548
1549 num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1550 AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
1551
1552 num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
1553 num_tx_msdu_link_descs + num_rx_msdu_link_descs;
1554
1555 /* Round up to power of 2 */
1556 total_link_descs = 1;
1557 while (total_link_descs < num_entries)
1558 total_link_descs <<= 1;
1559
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301560 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1561 FL("total_link_descs: %u, link_desc_size: %d"),
1562 total_link_descs, link_desc_size);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001563 total_mem_size = total_link_descs * link_desc_size;
1564
1565 total_mem_size += link_desc_align;
1566
1567 if (total_mem_size <= max_alloc_size) {
1568 num_link_desc_banks = 0;
1569 last_bank_size = total_mem_size;
1570 } else {
1571 num_link_desc_banks = (total_mem_size) /
1572 (max_alloc_size - link_desc_align);
1573 last_bank_size = total_mem_size %
1574 (max_alloc_size - link_desc_align);
1575 }
1576
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301577 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1578 FL("total_mem_size: %d, num_link_desc_banks: %u"),
1579 total_mem_size, num_link_desc_banks);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001580
1581 for (i = 0; i < num_link_desc_banks; i++) {
1582 soc->link_desc_banks[i].base_vaddr_unaligned =
Dhanashri Atre57e420d2016-10-25 21:13:54 -07001583 qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001584 max_alloc_size,
1585 &(soc->link_desc_banks[i].base_paddr_unaligned));
1586 soc->link_desc_banks[i].size = max_alloc_size;
1587
1588 soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
1589 soc->link_desc_banks[i].base_vaddr_unaligned) +
1590 ((unsigned long)(
1591 soc->link_desc_banks[i].base_vaddr_unaligned) %
1592 link_desc_align));
1593
1594 soc->link_desc_banks[i].base_paddr = (unsigned long)(
1595 soc->link_desc_banks[i].base_paddr_unaligned) +
1596 ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1597 (unsigned long)(
1598 soc->link_desc_banks[i].base_vaddr_unaligned));
1599
1600 if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301601 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1602 FL("Link descriptor memory alloc failed"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001603 goto fail;
1604 }
1605 }
1606
1607 if (last_bank_size) {
1608 /* Allocate last bank in case total memory required is not exact
1609 * multiple of max_alloc_size
1610 */
1611 soc->link_desc_banks[i].base_vaddr_unaligned =
Dhanashri Atre57e420d2016-10-25 21:13:54 -07001612 qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001613 last_bank_size,
1614 &(soc->link_desc_banks[i].base_paddr_unaligned));
1615 soc->link_desc_banks[i].size = last_bank_size;
1616
1617 soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
1618 (soc->link_desc_banks[i].base_vaddr_unaligned) +
1619 ((unsigned long)(
1620 soc->link_desc_banks[i].base_vaddr_unaligned) %
1621 link_desc_align));
1622
1623 soc->link_desc_banks[i].base_paddr =
1624 (unsigned long)(
1625 soc->link_desc_banks[i].base_paddr_unaligned) +
1626 ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1627 (unsigned long)(
1628 soc->link_desc_banks[i].base_vaddr_unaligned));
1629 }
1630
1631
1632 /* Allocate and setup link descriptor idle list for HW internal use */
1633 entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
1634 total_mem_size = entry_size * total_link_descs;
1635
1636 if (total_mem_size <= max_alloc_size) {
1637 void *desc;
1638
1639 if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
1640 WBM_IDLE_LINK, 0, 0, total_link_descs)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301641 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1642 FL("Link desc idle ring setup failed"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001643 goto fail;
1644 }
1645
1646 hal_srng_access_start_unlocked(soc->hal_soc,
1647 soc->wbm_idle_link_ring.hal_srng);
1648
1649 for (i = 0; i < MAX_LINK_DESC_BANKS &&
1650 soc->link_desc_banks[i].base_paddr; i++) {
1651 uint32_t num_entries = (soc->link_desc_banks[i].size -
Karunakar Dasinenic45b01e2017-06-07 11:38:01 -07001652 ((unsigned long)(
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001653 soc->link_desc_banks[i].base_vaddr) -
1654 (unsigned long)(
Karunakar Dasinenic45b01e2017-06-07 11:38:01 -07001655 soc->link_desc_banks[i].base_vaddr_unaligned)))
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001656 / link_desc_size;
1657 unsigned long paddr = (unsigned long)(
1658 soc->link_desc_banks[i].base_paddr);
1659
1660 while (num_entries && (desc = hal_srng_src_get_next(
1661 soc->hal_soc,
1662 soc->wbm_idle_link_ring.hal_srng))) {
Karunakar Dasinenidbaf4be2017-07-19 18:12:43 -07001663 hal_set_link_desc_addr(desc,
1664 LINK_DESC_COOKIE(desc_id, i), paddr);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001665 num_entries--;
Karunakar Dasinenidbaf4be2017-07-19 18:12:43 -07001666 desc_id++;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001667 paddr += link_desc_size;
1668 }
1669 }
1670 hal_srng_access_end_unlocked(soc->hal_soc,
1671 soc->wbm_idle_link_ring.hal_srng);
1672 } else {
1673 uint32_t num_scatter_bufs;
1674 uint32_t num_entries_per_buf;
1675 uint32_t rem_entries;
1676 uint8_t *scatter_buf_ptr;
1677 uint16_t scatter_buf_num;
1678
1679 soc->wbm_idle_scatter_buf_size =
1680 hal_idle_list_scatter_buf_size(soc->hal_soc);
1681 num_entries_per_buf = hal_idle_scatter_buf_num_entries(
1682 soc->hal_soc, soc->wbm_idle_scatter_buf_size);
Pramod Simhaccb15fb2017-06-19 12:21:13 -07001683 num_scatter_bufs = hal_idle_list_num_scatter_bufs(
1684 soc->hal_soc, total_mem_size,
1685 soc->wbm_idle_scatter_buf_size);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001686
Shaakir Mohamed41323bb2018-03-20 15:57:15 -07001687 if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
1688 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1689 FL("scatter bufs size out of bounds"));
1690 goto fail;
1691 }
1692
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001693 for (i = 0; i < num_scatter_bufs; i++) {
1694 soc->wbm_idle_scatter_buf_base_vaddr[i] =
Shaakir Mohamed41323bb2018-03-20 15:57:15 -07001695 qdf_mem_alloc_consistent(soc->osdev,
1696 soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001697 soc->wbm_idle_scatter_buf_size,
1698 &(soc->wbm_idle_scatter_buf_base_paddr[i]));
1699 if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301700 QDF_TRACE(QDF_MODULE_ID_DP,
Shaakir Mohamed41323bb2018-03-20 15:57:15 -07001701 QDF_TRACE_LEVEL_ERROR,
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301702 FL("Scatter list memory alloc failed"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001703 goto fail;
1704 }
1705 }
1706
1707 /* Populate idle list scatter buffers with link descriptor
1708 * pointers
1709 */
1710 scatter_buf_num = 0;
1711 scatter_buf_ptr = (uint8_t *)(
1712 soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
1713 rem_entries = num_entries_per_buf;
1714
1715 for (i = 0; i < MAX_LINK_DESC_BANKS &&
1716 soc->link_desc_banks[i].base_paddr; i++) {
1717 uint32_t num_link_descs =
1718 (soc->link_desc_banks[i].size -
Karunakar Dasinenic45b01e2017-06-07 11:38:01 -07001719 ((unsigned long)(
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001720 soc->link_desc_banks[i].base_vaddr) -
1721 (unsigned long)(
Karunakar Dasinenic45b01e2017-06-07 11:38:01 -07001722 soc->link_desc_banks[i].base_vaddr_unaligned)))
1723 / link_desc_size;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001724 unsigned long paddr = (unsigned long)(
1725 soc->link_desc_banks[i].base_paddr);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001726
Karunakar Dasinenidbaf4be2017-07-19 18:12:43 -07001727 while (num_link_descs) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001728 hal_set_link_desc_addr((void *)scatter_buf_ptr,
Karunakar Dasinenidbaf4be2017-07-19 18:12:43 -07001729 LINK_DESC_COOKIE(desc_id, i), paddr);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001730 num_link_descs--;
Karunakar Dasinenidbaf4be2017-07-19 18:12:43 -07001731 desc_id++;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001732 paddr += link_desc_size;
Pramod Simhaccb15fb2017-06-19 12:21:13 -07001733 rem_entries--;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001734 if (rem_entries) {
Pramod Simhaccb15fb2017-06-19 12:21:13 -07001735 scatter_buf_ptr += entry_size;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001736 } else {
1737 rem_entries = num_entries_per_buf;
1738 scatter_buf_num++;
Pramod Simhaccb15fb2017-06-19 12:21:13 -07001739
1740 if (scatter_buf_num >= num_scatter_bufs)
1741 break;
1742
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001743 scatter_buf_ptr = (uint8_t *)(
1744 soc->wbm_idle_scatter_buf_base_vaddr[
1745 scatter_buf_num]);
1746 }
1747 }
1748 }
1749 /* Setup link descriptor idle list in HW */
1750 hal_setup_link_idle_list(soc->hal_soc,
1751 soc->wbm_idle_scatter_buf_base_paddr,
1752 soc->wbm_idle_scatter_buf_base_vaddr,
1753 num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
Leo Chang5ea93a42016-11-03 12:39:49 -07001754 (uint32_t)(scatter_buf_ptr -
Pramod Simhaccb15fb2017-06-19 12:21:13 -07001755 (uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
1756 scatter_buf_num-1])), total_link_descs);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001757 }
1758 return 0;
1759
1760fail:
1761 if (soc->wbm_idle_link_ring.hal_srng) {
1762 dp_srng_cleanup(soc->hal_soc, &soc->wbm_idle_link_ring,
1763 WBM_IDLE_LINK, 0);
1764 }
1765
1766 for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1767 if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
Dhanashri Atre57e420d2016-10-25 21:13:54 -07001768 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001769 soc->wbm_idle_scatter_buf_size,
1770 soc->wbm_idle_scatter_buf_base_vaddr[i],
1771 soc->wbm_idle_scatter_buf_base_paddr[i], 0);
Karunakar Dasinenib71ad042018-01-22 16:50:20 -08001772 soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001773 }
1774 }
1775
1776 for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1777 if (soc->link_desc_banks[i].base_vaddr_unaligned) {
Dhanashri Atre57e420d2016-10-25 21:13:54 -07001778 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001779 soc->link_desc_banks[i].size,
1780 soc->link_desc_banks[i].base_vaddr_unaligned,
1781 soc->link_desc_banks[i].base_paddr_unaligned,
1782 0);
Karunakar Dasinenib71ad042018-01-22 16:50:20 -08001783 soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001784 }
1785 }
1786 return QDF_STATUS_E_FAILURE;
1787}
1788
1789/*
1790 * Free link descriptor pool that was setup HW
1791 */
Jeff Johnsonf1352572017-01-10 14:24:10 -08001792static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001793{
1794 int i;
1795
1796 if (soc->wbm_idle_link_ring.hal_srng) {
Manoj Ekbote525bcab2017-09-01 17:23:32 -07001797 dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001798 WBM_IDLE_LINK, 0);
1799 }
1800
1801 for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1802 if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
Dhanashri Atre57e420d2016-10-25 21:13:54 -07001803 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001804 soc->wbm_idle_scatter_buf_size,
1805 soc->wbm_idle_scatter_buf_base_vaddr[i],
1806 soc->wbm_idle_scatter_buf_base_paddr[i], 0);
Karunakar Dasinenib71ad042018-01-22 16:50:20 -08001807 soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001808 }
1809 }
1810
1811 for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1812 if (soc->link_desc_banks[i].base_vaddr_unaligned) {
Dhanashri Atre57e420d2016-10-25 21:13:54 -07001813 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001814 soc->link_desc_banks[i].size,
1815 soc->link_desc_banks[i].base_vaddr_unaligned,
1816 soc->link_desc_banks[i].base_paddr_unaligned,
1817 0);
Karunakar Dasinenib71ad042018-01-22 16:50:20 -08001818 soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001819 }
1820 }
1821}
1822
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05301823#define REO_DST_RING_SIZE_QCA6290 1024
1824#define REO_DST_RING_SIZE_QCA8074 2048
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001825
1826/*
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301827 * dp_wds_aging_timer_fn() - Timer callback function for WDS aging
1828 * @soc: Datapath SOC handle
1829 *
Tallapragada Kalyan71c46b92018-03-01 13:17:10 +05301830 * This is a timer function used to age out stale AST nodes from
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301831 * AST table
1832 */
1833#ifdef FEATURE_WDS
1834static void dp_wds_aging_timer_fn(void *soc_hdl)
1835{
1836 struct dp_soc *soc = (struct dp_soc *) soc_hdl;
1837 struct dp_pdev *pdev;
1838 struct dp_vdev *vdev;
1839 struct dp_peer *peer;
Tallapragada Kalyan1c14d5d2017-07-26 00:31:35 +05301840 struct dp_ast_entry *ase, *temp_ase;
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301841 int i;
1842
1843 qdf_spin_lock_bh(&soc->ast_lock);
1844
1845 for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
1846 pdev = soc->pdev_list[i];
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05301847 qdf_spin_lock_bh(&pdev->vdev_list_lock);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301848 DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1849 DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
Tallapragada Kalyan1c14d5d2017-07-26 00:31:35 +05301850 DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301851 /*
1852 * Do not expire static ast entries
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +05301853 * and HM WDS entries
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301854 */
Tallapragada Kalyan1f49bff2018-04-12 19:21:21 +05301855 if (ase->type != CDP_TXRX_AST_TYPE_WDS)
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301856 continue;
1857
1858 if (ase->is_active) {
1859 ase->is_active = FALSE;
1860 continue;
1861 }
1862
Pamidipati, Vijay899e7752017-07-25 22:09:28 +05301863 DP_STATS_INC(soc, ast.aged_out, 1);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301864 dp_peer_del_ast(soc, ase);
1865 }
1866 }
1867 }
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05301868 qdf_spin_unlock_bh(&pdev->vdev_list_lock);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301869 }
1870
1871 qdf_spin_unlock_bh(&soc->ast_lock);
1872
1873 if (qdf_atomic_read(&soc->cmn_init_done))
1874 qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1875}
1876
Ruchi, Agrawal89219d92018-02-26 16:43:06 +05301877
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301878/*
1879 * dp_soc_wds_attach() - Setup WDS timer and AST table
1880 * @soc: Datapath SOC handle
1881 *
1882 * Return: None
1883 */
1884static void dp_soc_wds_attach(struct dp_soc *soc)
1885{
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301886 qdf_timer_init(soc->osdev, &soc->wds_aging_timer,
1887 dp_wds_aging_timer_fn, (void *)soc,
1888 QDF_TIMER_TYPE_WAKE_APPS);
1889
1890 qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1891}
1892
1893/*
1894 * dp_soc_wds_detach() - Detach WDS data structures and timers
1895 * @txrx_soc: DP SOC handle
1896 *
1897 * Return: None
1898 */
1899static void dp_soc_wds_detach(struct dp_soc *soc)
1900{
1901 qdf_timer_stop(&soc->wds_aging_timer);
1902 qdf_timer_free(&soc->wds_aging_timer);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301903}
1904#else
/* WDS disabled: nothing to set up */
static void dp_soc_wds_attach(struct dp_soc *soc)
{
}
1908
/* WDS disabled: nothing to tear down */
static void dp_soc_wds_detach(struct dp_soc *soc)
{
}
1912#endif
1913
1914/*
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05301915 * dp_soc_reset_ring_map() - Reset cpu ring map
1916 * @soc: Datapath soc handler
1917 *
1918 * This api resets the default cpu ring map
1919 */
1920
1921static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
1922{
1923 uint8_t i;
1924 int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1925
1926 for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
1927 if (nss_config == 1) {
1928 /*
1929 * Setting Tx ring map for one nss offloaded radio
1930 */
1931 soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
1932 } else if (nss_config == 2) {
1933 /*
1934 * Setting Tx ring for two nss offloaded radios
1935 */
1936 soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
1937 } else {
1938 /*
1939 * Setting Tx ring map for all nss offloaded radios
1940 */
1941 soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_ALL_RADIO_OFFLOADED_MAP][i];
1942 }
1943 }
1944}
1945
Aniruddha Paule3a03342017-09-19 16:42:10 +05301946/*
1947 * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
1948 * @dp_soc - DP soc handle
1949 * @ring_type - ring type
1950 * @ring_num - ring_num
1951 *
1952 * return 0 or 1
1953 */
1954static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
1955{
1956 uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1957 uint8_t status = 0;
1958
1959 switch (ring_type) {
1960 case WBM2SW_RELEASE:
1961 case REO_DST:
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001962 case RXDMA_BUF:
Aniruddha Paule3a03342017-09-19 16:42:10 +05301963 status = ((nss_config) & (1 << ring_num));
1964 break;
1965 default:
1966 break;
1967 }
1968
1969 return status;
1970}
1971
1972/*
1973 * dp_soc_reset_intr_mask() - reset interrupt mask
1974 * @dp_soc - DP Soc handle
1975 *
1976 * Return: Return void
1977 */
1978static void dp_soc_reset_intr_mask(struct dp_soc *soc)
1979{
1980 uint8_t j;
1981 int *grp_mask = NULL;
1982 int group_number, mask, num_ring;
1983
1984 /* number of tx ring */
1985 num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
1986
1987 /*
1988 * group mask for tx completion ring.
1989 */
1990 grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
1991
1992 /* loop and reset the mask for only offloaded ring */
1993 for (j = 0; j < num_ring; j++) {
1994 if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
1995 continue;
1996 }
1997
1998 /*
1999 * Group number corresponding to tx offloaded ring.
2000 */
2001 group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2002 if (group_number < 0) {
2003 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07002004 FL("ring not part of any group; ring_type: %d,ring_num %d"),
Aniruddha Paule3a03342017-09-19 16:42:10 +05302005 WBM2SW_RELEASE, j);
2006 return;
2007 }
2008
2009 /* reset the tx mask for offloaded ring */
2010 mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2011 mask &= (~(1 << j));
2012
2013 /*
2014 * reset the interrupt mask for offloaded ring.
2015 */
2016 wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2017 }
2018
2019 /* number of rx rings */
2020 num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2021
2022 /*
2023 * group mask for reo destination ring.
2024 */
2025 grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2026
2027 /* loop and reset the mask for only offloaded ring */
2028 for (j = 0; j < num_ring; j++) {
2029 if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
2030 continue;
2031 }
2032
2033 /*
2034 * Group number corresponding to rx offloaded ring.
2035 */
2036 group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2037 if (group_number < 0) {
2038 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07002039 FL("ring not part of any group; ring_type: %d,ring_num %d"),
Aniruddha Paule3a03342017-09-19 16:42:10 +05302040 REO_DST, j);
2041 return;
2042 }
2043
2044 /* set the interrupt mask for offloaded ring */
2045 mask = wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2046 mask &= (~(1 << j));
2047
2048 /*
2049 * set the interrupt mask to zero for rx offloaded radio.
2050 */
2051 wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2052 }
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07002053
2054 /*
2055 * group mask for Rx buffer refill ring
2056 */
2057 grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2058
2059 /* loop and reset the mask for only offloaded ring */
2060 for (j = 0; j < MAX_PDEV_CNT; j++) {
2061 if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
2062 continue;
2063 }
2064
2065 /*
2066 * Group number corresponding to rx offloaded ring.
2067 */
2068 group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2069 if (group_number < 0) {
2070 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2071 FL("ring not part of any group; ring_type: %d,ring_num %d"),
2072 REO_DST, j);
2073 return;
2074 }
2075
2076 /* set the interrupt mask for offloaded ring */
2077 mask = wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2078 group_number);
2079 mask &= (~(1 << j));
2080
2081 /*
2082 * set the interrupt mask to zero for rx offloaded radio.
2083 */
2084 wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2085 group_number, mask);
2086 }
Aniruddha Paule3a03342017-09-19 16:42:10 +05302087}
2088
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302089#ifdef IPA_OFFLOAD
2090/**
2091 * dp_reo_remap_config() - configure reo remap register value based
2092 * nss configuration.
2093 * based on offload_radio value below remap configuration
2094 * get applied.
2095 * 0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
2096 * 1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
2097 * 2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
2098 * 3 - both Radios handled by NSS (remap not required)
2099 * 4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
2100 *
2101 * @remap1: output parameter indicates reo remap 1 register value
2102 * @remap2: output parameter indicates reo remap 2 register value
2103 * Return: bool type, true if remap is configured else false.
2104 */
2105static bool dp_reo_remap_config(struct dp_soc *soc,
2106 uint32_t *remap1,
2107 uint32_t *remap2)
2108{
2109
2110 *remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
2111 (0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
2112
2113 *remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
2114 (0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
2115
2116 return true;
2117}
2118#else
2119static bool dp_reo_remap_config(struct dp_soc *soc,
2120 uint32_t *remap1,
2121 uint32_t *remap2)
2122{
2123 uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2124
2125 switch (offload_radio) {
2126 case 0:
2127 *remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2128 (0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2129 (0x3 << 18) | (0x4 << 21)) << 8;
2130
2131 *remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2132 (0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2133 (0x3 << 18) | (0x4 << 21)) << 8;
2134 break;
2135
2136 case 1:
2137 *remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
2138 (0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
2139 (0x2 << 18) | (0x3 << 21)) << 8;
2140
2141 *remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
2142 (0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
2143 (0x4 << 18) | (0x2 << 21)) << 8;
2144 break;
2145
2146 case 2:
2147 *remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
2148 (0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
2149 (0x1 << 18) | (0x3 << 21)) << 8;
2150
2151 *remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
2152 (0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
2153 (0x4 << 18) | (0x1 << 21)) << 8;
2154 break;
2155
2156 case 3:
2157 /* return false if both radios are offloaded to NSS */
2158 return false;
2159 }
2160 return true;
2161}
2162#endif
2163
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302164/*
Aniruddha Paul91dfd502018-01-08 11:24:34 +05302165 * dp_reo_frag_dst_set() - configure reo register to set the
2166 * fragment destination ring
2167 * @soc : Datapath soc
2168 * @frag_dst_ring : output parameter to set fragment destination ring
2169 *
2170 * Based on offload_radio below fragment destination rings is selected
2171 * 0 - TCL
2172 * 1 - SW1
2173 * 2 - SW2
2174 * 3 - SW3
2175 * 4 - SW4
2176 * 5 - Release
2177 * 6 - FW
2178 * 7 - alternate select
2179 *
2180 * return: void
2181 */
2182static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2183{
2184 uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2185
2186 switch (offload_radio) {
2187 case 0:
2188 *frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
2189 break;
2190 case 3:
2191 *frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2192 break;
2193 default:
2194 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2195 FL("dp_reo_frag_dst_set invalid offload radio config"));
2196 break;
2197 }
2198}
2199
2200/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002201 * dp_soc_cmn_setup() - Common SoC level initializion
2202 * @soc: Datapath SOC handle
2203 *
2204 * This is an internal function used to setup common SOC data structures,
2205 * to be called from PDEV attach after receiving HW mode capabilities from FW
2206 */
2207static int dp_soc_cmn_setup(struct dp_soc *soc)
2208{
2209 int i;
Dhanashri Atre14049172016-11-11 18:32:36 -08002210 struct hal_reo_params reo_params;
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302211 int tx_ring_size;
2212 int tx_comp_ring_size;
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05302213 int reo_dst_ring_size;
Vivek126db5d2018-07-25 22:05:04 +05302214 uint32_t entries;
2215 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002216
Ravi Joshi86e98262017-03-01 13:47:03 -08002217 if (qdf_atomic_read(&soc->cmn_init_done))
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002218 return 0;
2219
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002220 if (dp_hw_link_desc_pool_setup(soc))
2221 goto fail1;
2222
Vivek126db5d2018-07-25 22:05:04 +05302223 soc_cfg_ctx = soc->wlan_cfg_ctx;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002224 /* Setup SRNG rings */
2225 /* Common rings */
2226 if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
Vivek126db5d2018-07-25 22:05:04 +05302227 wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx))) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302228 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2229 FL("dp_srng_setup failed for wbm_desc_rel_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002230 goto fail1;
2231 }
2232
2233
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302234 soc->num_tcl_data_rings = 0;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002235 /* Tx data rings */
Vivek126db5d2018-07-25 22:05:04 +05302236 if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002237 soc->num_tcl_data_rings =
Vivek126db5d2018-07-25 22:05:04 +05302238 wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302239 tx_comp_ring_size =
Vivek126db5d2018-07-25 22:05:04 +05302240 wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302241 tx_ring_size =
Vivek126db5d2018-07-25 22:05:04 +05302242 wlan_cfg_tx_ring_size(soc_cfg_ctx);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002243 for (i = 0; i < soc->num_tcl_data_rings; i++) {
2244 if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302245 TCL_DATA, i, 0, tx_ring_size)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302246 QDF_TRACE(QDF_MODULE_ID_DP,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002247 QDF_TRACE_LEVEL_ERROR,
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302248 FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002249 goto fail1;
2250 }
Yun Parkfde6b9e2017-06-26 17:13:11 -07002251 /*
2252 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
2253 * count
2254 */
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002255 if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302256 WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302257 QDF_TRACE(QDF_MODULE_ID_DP,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002258 QDF_TRACE_LEVEL_ERROR,
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302259 FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002260 goto fail1;
2261 }
2262 }
2263 } else {
2264 /* This will be incremented during per pdev ring setup */
2265 soc->num_tcl_data_rings = 0;
2266 }
2267
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302268 if (dp_tx_soc_attach(soc)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302269 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2270 FL("dp_tx_soc_attach failed"));
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302271 goto fail1;
2272 }
2273
Vivek126db5d2018-07-25 22:05:04 +05302274 entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002275 /* TCL command and status rings */
2276 if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
Vivek126db5d2018-07-25 22:05:04 +05302277 entries)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302278 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2279 FL("dp_srng_setup failed for tcl_cmd_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002280 goto fail1;
2281 }
2282
Vivek126db5d2018-07-25 22:05:04 +05302283 entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002284 if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
Vivek126db5d2018-07-25 22:05:04 +05302285 entries)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302286 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2287 FL("dp_srng_setup failed for tcl_status_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002288 goto fail1;
2289 }
2290
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05302291 reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002292
2293 /* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
2294 * descriptors
2295 */
2296
2297 /* Rx data rings */
Vivek126db5d2018-07-25 22:05:04 +05302298 if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002299 soc->num_reo_dest_rings =
Vivek126db5d2018-07-25 22:05:04 +05302300 wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
Dhanashri Atre14049172016-11-11 18:32:36 -08002301 QDF_TRACE(QDF_MODULE_ID_DP,
2302 QDF_TRACE_LEVEL_ERROR,
2303 FL("num_reo_dest_rings %d\n"), soc->num_reo_dest_rings);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002304 for (i = 0; i < soc->num_reo_dest_rings; i++) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002305 if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05302306 i, 0, reo_dst_ring_size)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302307 QDF_TRACE(QDF_MODULE_ID_DP,
Vivek126db5d2018-07-25 22:05:04 +05302308 QDF_TRACE_LEVEL_ERROR,
2309 FL(RNG_ERR "reo_dest_ring [%d]"), i);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002310 goto fail1;
2311 }
2312 }
2313 } else {
2314 /* This will be incremented during per pdev ring setup */
2315 soc->num_reo_dest_rings = 0;
2316 }
2317
Vivek126db5d2018-07-25 22:05:04 +05302318 entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002319 /* LMAC RxDMA to SW Rings configuration */
Vivek126db5d2018-07-25 22:05:04 +05302320 if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002321 /* Only valid for MCL */
2322 struct dp_pdev *pdev = soc->pdev_list[0];
2323
2324 for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2325 if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
Vivek126db5d2018-07-25 22:05:04 +05302326 RXDMA_DST, 0, i,
2327 entries)) {
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002328 QDF_TRACE(QDF_MODULE_ID_DP,
Vivek126db5d2018-07-25 22:05:04 +05302329 QDF_TRACE_LEVEL_ERROR,
2330 FL(RNG_ERR "rxdma_err_dst_ring"));
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002331 goto fail1;
2332 }
2333 }
2334 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002335 /* TBD: call dp_rx_init to setup Rx SW descriptors */
2336
2337 /* REO reinjection ring */
Vivek126db5d2018-07-25 22:05:04 +05302338 entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002339 if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
Vivek126db5d2018-07-25 22:05:04 +05302340 entries)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302341 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Vivek126db5d2018-07-25 22:05:04 +05302342 FL("dp_srng_setup failed for reo_reinject_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002343 goto fail1;
2344 }
2345
2346
2347 /* Rx release ring */
2348 if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
Vivek126db5d2018-07-25 22:05:04 +05302349 wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx))) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302350 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Vivek126db5d2018-07-25 22:05:04 +05302351 FL("dp_srng_setup failed for rx_rel_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002352 goto fail1;
2353 }
2354
2355
2356 /* Rx exception ring */
Vivek126db5d2018-07-25 22:05:04 +05302357 entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
2358 if (dp_srng_setup(soc, &soc->reo_exception_ring,
2359 REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302360 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Vivek126db5d2018-07-25 22:05:04 +05302361 FL("dp_srng_setup failed for reo_exception_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002362 goto fail1;
2363 }
2364
2365
2366 /* REO command and status rings */
2367 if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
Vivek126db5d2018-07-25 22:05:04 +05302368 wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx))) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302369 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2370 FL("dp_srng_setup failed for reo_cmd_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002371 goto fail1;
2372 }
2373
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07002374 hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
2375 TAILQ_INIT(&soc->rx.reo_cmd_list);
2376 qdf_spinlock_create(&soc->rx.reo_cmd_lock);
2377
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002378 if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
Vivek126db5d2018-07-25 22:05:04 +05302379 wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx))) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302380 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2381 FL("dp_srng_setup failed for reo_status_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002382 goto fail1;
2383 }
2384
Yun Park92af7132017-09-13 16:33:35 -07002385 qdf_spinlock_create(&soc->ast_lock);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302386 dp_soc_wds_attach(soc);
2387
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302388 /* Reset the cpu ring map if radio is NSS offloaded */
Vivek126db5d2018-07-25 22:05:04 +05302389 if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302390 dp_soc_reset_cpu_ring_map(soc);
Aniruddha Paule3a03342017-09-19 16:42:10 +05302391 dp_soc_reset_intr_mask(soc);
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302392 }
2393
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002394 /* Setup HW REO */
Dhanashri Atre14049172016-11-11 18:32:36 -08002395 qdf_mem_zero(&reo_params, sizeof(reo_params));
2396
Vivek126db5d2018-07-25 22:05:04 +05302397 if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {
Dhanashri Atre14049172016-11-11 18:32:36 -08002398
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302399 /*
2400 * Reo ring remap is not required if both radios
2401 * are offloaded to NSS
2402 */
2403 if (!dp_reo_remap_config(soc,
2404 &reo_params.remap1,
2405 &reo_params.remap2))
2406 goto out;
2407
2408 reo_params.rx_hash_enabled = true;
2409 }
2410
psimhafc2f91b2018-01-10 15:30:03 -08002411 /* setup the global rx defrag waitlist */
2412 TAILQ_INIT(&soc->rx.defrag.waitlist);
2413 soc->rx.defrag.timeout_ms =
Vivek126db5d2018-07-25 22:05:04 +05302414 wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
psimhafc2f91b2018-01-10 15:30:03 -08002415 soc->rx.flags.defrag_timeout_check =
Vivek126db5d2018-07-25 22:05:04 +05302416 wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
Lin Baif1c577e2018-05-22 20:45:42 +08002417 qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
psimhafc2f91b2018-01-10 15:30:03 -08002418
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302419out:
Aniruddha Paul91dfd502018-01-08 11:24:34 +05302420 /*
2421 * set the fragment destination ring
2422 */
2423 dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
2424
Dhanashri Atre14049172016-11-11 18:32:36 -08002425 hal_reo_setup(soc->hal_soc, &reo_params);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002426
Ravi Joshi86e98262017-03-01 13:47:03 -08002427 qdf_atomic_set(&soc->cmn_init_done, 1);
Om Prakash Tripathi12126822017-08-03 10:21:24 +05302428 qdf_nbuf_queue_init(&soc->htt_stats.msg);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002429 return 0;
2430fail1:
2431 /*
2432 * Cleanup will be done as part of soc_detach, which will
2433 * be called on pdev attach failure
2434 */
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002435 return QDF_STATUS_E_FAILURE;
2436}
2437
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002438static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002439
Dhanashri Atre14049172016-11-11 18:32:36 -08002440static void dp_lro_hash_setup(struct dp_soc *soc)
2441{
2442 struct cdp_lro_hash_config lro_hash;
2443
2444 if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
2445 !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2446 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2447 FL("LRO disabled RX hash disabled"));
2448 return;
2449 }
2450
2451 qdf_mem_zero(&lro_hash, sizeof(lro_hash));
2452
2453 if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx)) {
2454 lro_hash.lro_enable = 1;
2455 lro_hash.tcp_flag = QDF_TCPHDR_ACK;
2456 lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
Houston Hoffman41b912c2017-08-30 14:27:51 -07002457 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
2458 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
Dhanashri Atre14049172016-11-11 18:32:36 -08002459 }
2460
Houston Hoffman41b912c2017-08-30 14:27:51 -07002461 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, FL("enabled"));
2462 qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
Dhanashri Atre14049172016-11-11 18:32:36 -08002463 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2464 LRO_IPV4_SEED_ARR_SZ));
Dhanashri Atre14049172016-11-11 18:32:36 -08002465 qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
2466 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2467 LRO_IPV6_SEED_ARR_SZ));
2468
Houston Hoffman41b912c2017-08-30 14:27:51 -07002469 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
2470 "lro_hash: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
Dhanashri Atre14049172016-11-11 18:32:36 -08002471 lro_hash.lro_enable, lro_hash.tcp_flag,
2472 lro_hash.tcp_flag_mask);
2473
Dhanashri Atre14049172016-11-11 18:32:36 -08002474 qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2475 QDF_TRACE_LEVEL_ERROR,
2476 (void *)lro_hash.toeplitz_hash_ipv4,
2477 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2478 LRO_IPV4_SEED_ARR_SZ));
2479
Dhanashri Atre14049172016-11-11 18:32:36 -08002480 qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2481 QDF_TRACE_LEVEL_ERROR,
2482 (void *)lro_hash.toeplitz_hash_ipv6,
2483 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2484 LRO_IPV6_SEED_ARR_SZ));
2485
2486 qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
2487
2488 if (soc->cdp_soc.ol_ops->lro_hash_config)
2489 (void)soc->cdp_soc.ol_ops->lro_hash_config
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05302490 (soc->ctrl_psoc, &lro_hash);
Dhanashri Atre14049172016-11-11 18:32:36 -08002491}
2492
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002493/*
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002494* dp_rxdma_ring_setup() - configure the RX DMA rings
2495* @soc: data path SoC handle
2496* @pdev: Physical device handle
2497*
2498* Return: 0 - success, > 0 - failure
2499*/
2500#ifdef QCA_HOST2FW_RXBUF_RING
2501static int dp_rxdma_ring_setup(struct dp_soc *soc,
2502 struct dp_pdev *pdev)
2503{
Vivek126db5d2018-07-25 22:05:04 +05302504 struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2505 int max_mac_rings;
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002506 int i;
2507
Vivek126db5d2018-07-25 22:05:04 +05302508 pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2509 max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
2510
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002511 for (i = 0; i < max_mac_rings; i++) {
2512 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2513 "%s: pdev_id %d mac_id %d\n",
2514 __func__, pdev->pdev_id, i);
2515 if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
Vivek126db5d2018-07-25 22:05:04 +05302516 RXDMA_BUF, 1, i,
2517 wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx))) {
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002518 QDF_TRACE(QDF_MODULE_ID_DP,
2519 QDF_TRACE_LEVEL_ERROR,
2520 FL("failed rx mac ring setup"));
2521 return QDF_STATUS_E_FAILURE;
2522 }
2523 }
2524 return QDF_STATUS_SUCCESS;
2525}
2526#else
/* Stub when QCA_HOST2FW_RXBUF_RING is disabled: no host2fw RX MAC
 * buffer rings are used, so there is nothing to set up.
 */
static int dp_rxdma_ring_setup(struct dp_soc *soc,
	 struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
2532#endif
Ishank Jain949674c2017-02-27 17:09:29 +05302533
2534/**
2535 * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
2536 * @pdev - DP_PDEV handle
2537 *
2538 * Return: void
2539 */
2540static inline void
2541dp_dscp_tid_map_setup(struct dp_pdev *pdev)
2542{
2543 uint8_t map_id;
2544 for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
2545 qdf_mem_copy(pdev->dscp_tid_map[map_id], default_dscp_tid_map,
2546 sizeof(default_dscp_tid_map));
2547 }
2548 for (map_id = 0; map_id < HAL_MAX_HW_DSCP_TID_MAPS; map_id++) {
2549 hal_tx_set_dscp_tid_map(pdev->soc->hal_soc,
2550 pdev->dscp_tid_map[map_id],
2551 map_id);
2552 }
2553}
2554
Stephan Raj Ignatious Durairajba291292018-01-04 13:36:34 +05302555#ifdef QCA_SUPPORT_SON
2556/**
2557 * dp_mark_peer_inact(): Update peer inactivity status
2558 * @peer_handle - datapath peer handle
2559 *
2560 * Return: void
2561 */
2562void dp_mark_peer_inact(void *peer_handle, bool inactive)
2563{
2564 struct dp_peer *peer = (struct dp_peer *)peer_handle;
2565 struct dp_pdev *pdev;
2566 struct dp_soc *soc;
2567 bool inactive_old;
2568
2569 if (!peer)
2570 return;
2571
2572 pdev = peer->vdev->pdev;
2573 soc = pdev->soc;
2574
2575 inactive_old = peer->peer_bs_inact_flag == 1;
2576 if (!inactive)
2577 peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2578 peer->peer_bs_inact_flag = inactive ? 1 : 0;
2579
2580 if (inactive_old != inactive) {
Stephan Raj Ignatious Durairajba291292018-01-04 13:36:34 +05302581 /**
2582 * Note: a node lookup can happen in RX datapath context
2583 * when a node changes from inactive to active (at most once
2584 * per inactivity timeout threshold)
2585 */
2586 if (soc->cdp_soc.ol_ops->record_act_change) {
Akshay Kosigi1a9c6d12018-04-26 00:54:23 +05302587 soc->cdp_soc.ol_ops->record_act_change(
2588 (void *)pdev->ctrl_pdev,
Stephan Raj Ignatious Durairajba291292018-01-04 13:36:34 +05302589 peer->mac_addr.raw, !inactive);
2590 }
2591 }
2592}
2593
/**
 * dp_txrx_peer_find_inact_timeout_handler(): Inactivity timeout function
 * @soc: datapath soc handle (recovered via OS_GET_TIMER_ARG)
 *
 * Periodically checks the inactivity status: walks every pdev/vdev/peer,
 * decrements each authorized AP-peer's inactivity countdown, marks a peer
 * inactive when it reaches zero, then re-arms itself.
 */
static os_timer_func(dp_txrx_peer_find_inact_timeout_handler)
{
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_peer *peer;
	struct dp_soc *soc;
	int i;

	OS_GET_TIMER_ARG(soc, struct dp_soc *);

	/* Hold peer_ref_mutex so peers cannot be freed during the walk */
	qdf_spin_lock(&soc->peer_ref_mutex);

	for (i = 0; i < soc->pdev_count; i++) {
	pdev = soc->pdev_list[i];
	/* vdev_list_lock protects the per-pdev vdev list traversal */
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		/* Inactivity tracking applies to AP vdevs only */
		if (vdev->opmode != wlan_op_mode_ap)
			continue;

		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
			if (!peer->authorize) {
				/**
				 * Inactivity check only interested in
				 * connected node
				 */
				continue;
			}
			if (peer->peer_bs_inact > soc->pdev_bs_inact_reload) {
				/**
				 * This check ensures we do not wait extra long
				 * due to the potential race condition
				 */
				peer->peer_bs_inact = soc->pdev_bs_inact_reload;
			}
			if (peer->peer_bs_inact > 0) {
				/* Do not let it wrap around */
				peer->peer_bs_inact--;
			}
			if (peer->peer_bs_inact == 0)
				dp_mark_peer_inact(peer, true);
		}
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	qdf_spin_unlock(&soc->peer_ref_mutex);
	/* Re-arm the scan; interval is kept in seconds, timer wants ms */
	qdf_timer_mod(&soc->pdev_bs_inact_timer,
		soc->pdev_bs_inact_interval * 1000);
}
Subhranil Choudhuryeea67382018-01-18 20:24:36 +05302648
Stephan Raj Ignatious Durairaje5dd51c2018-02-09 16:39:12 +05302649
/**
 * dp_free_inact_timer(): free inact timer
 * @soc: datapath soc handle that owns the inactivity timer
 *
 * Return: void
 */
void dp_free_inact_timer(struct dp_soc *soc)
{
	qdf_timer_free(&soc->pdev_bs_inact_timer);
}
Subhranil Choudhuryeea67382018-01-18 20:24:36 +05302660#else
2661
/* Stub when QCA_SUPPORT_SON is disabled: peer inactivity tracking is a
 * no-op. (Redundant trailing `return;` removed.)
 */
void dp_mark_peer_inact(void *peer, bool inactive)
{
}
Stephan Raj Ignatious Durairaje5dd51c2018-02-09 16:39:12 +05302666
/* Stub when QCA_SUPPORT_SON is disabled: no inactivity timer exists, so
 * there is nothing to free. (Redundant trailing `return;` removed.)
 */
void dp_free_inact_timer(struct dp_soc *soc)
{
}
2671
Stephan Raj Ignatious Durairajba291292018-01-04 13:36:34 +05302672#endif
2673
Yun Park47e6af82018-01-17 12:15:01 -08002674#ifdef IPA_OFFLOAD
/**
 * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * The second refill ring serves the IPA offload path; its depth is taken
 * from the soc-level rxdma refill ring size config.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *	   QDF_STATUS_E_FAILURE: ring setup failed
 */
static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
	 struct dp_pdev *pdev)
{
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	int entries;

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);

	/* Setup second Rx refill buffer ring */
	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
			  IPA_RX_REFILL_BUF_RING_IDX,
			  pdev->pdev_id,
			  entries)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed second rx refill ring"));
		return QDF_STATUS_E_FAILURE;
	}
	return QDF_STATUS_SUCCESS;
}
2703
/**
 * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Tears down the ring created by dp_setup_ipa_rx_refill_buf_ring().
 *
 * Return: void
 */
static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
	 struct dp_pdev *pdev)
{
	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
			IPA_RX_REFILL_BUF_RING_IDX);
}
2717
2718#else
/* Stub when IPA_OFFLOAD is disabled: the second refill ring is not
 * needed, so setup trivially succeeds.
 */
static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
	 struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
2724
/* Stub when IPA_OFFLOAD is disabled: nothing was set up, nothing to
 * clean up.
 */
static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
	 struct dp_pdev *pdev)
{
}
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002729#endif
Yun Park47e6af82018-01-17 12:15:01 -08002730
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002731#ifndef QCA_WIFI_QCA6390
/**
 * dp_mon_rings_setup() - Set up monitor mode rings for a pdev
 * @soc: data path SoC handle
 * @pdev: physical device handle
 *
 * For each RXDMA ring of the pdev, sets up the four monitor-mode SRNGs
 * (buffer, destination, status and descriptor) with sizes taken from
 * the pdev config context. Stops at the first failure.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM if any
 *	   ring setup fails.
 */
static
QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	int mac_id = 0;
	int pdev_id = pdev->pdev_id;
	int entries;
	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;

	pdev_cfg_ctx = pdev->wlan_cfg_ctx;

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		/* Map the (mac, pdev) pair to the target's mac index */
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);

		entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
		if (dp_srng_setup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
				  RXDMA_MONITOR_BUF, 0, mac_for_pdev,
				  entries)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL(RNG_ERR "rxdma_mon_buf_ring "));
			return QDF_STATUS_E_NOMEM;
		}

		entries = wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
		if (dp_srng_setup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
				  RXDMA_MONITOR_DST, 0, mac_for_pdev,
				  entries)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL(RNG_ERR "rxdma_mon_dst_ring"));
			return QDF_STATUS_E_NOMEM;
		}

		entries = wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
		if (dp_srng_setup(soc, &pdev->rxdma_mon_status_ring[mac_id],
				  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
				  entries)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL(RNG_ERR "rxdma_mon_status_ring"));
			return QDF_STATUS_E_NOMEM;
		}

		entries = wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
		if (dp_srng_setup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
				  RXDMA_MONITOR_DESC, 0, mac_for_pdev,
				  entries)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL(RNG_ERR "rxdma_mon_desc_ring"));
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}
2783#else
/* QCA6390: monitor rings are not used on this target; setup is a no-op */
static QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
Yun Park47e6af82018-01-17 12:15:01 -08002788#endif
2789
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002790/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002791* dp_pdev_attach_wifi3() - attach txrx pdev
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05302792* @ctrl_pdev: Opaque PDEV object
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002793* @txrx_soc: Datapath SOC handle
2794* @htc_handle: HTC handle for host-target interface
2795* @qdf_osdev: QDF OS device
2796* @pdev_id: PDEV ID
2797*
2798* Return: DP PDEV handle on success, NULL on failure
2799*/
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002800static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
Akshay Kosigi1a9c6d12018-04-26 00:54:23 +05302801 struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
Leo Chang5ea93a42016-11-03 12:39:49 -07002802 HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002803{
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302804 int tx_ring_size;
2805 int tx_comp_ring_size;
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05302806 int reo_dst_ring_size;
Vivek126db5d2018-07-25 22:05:04 +05302807 int entries;
2808 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2809 int nss_cfg;
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302810
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002811 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2812 struct dp_pdev *pdev = qdf_mem_malloc(sizeof(*pdev));
2813
2814 if (!pdev) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302815 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2816 FL("DP PDEV memory allocation failed"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002817 goto fail0;
2818 }
2819
Vivek126db5d2018-07-25 22:05:04 +05302820 soc_cfg_ctx = soc->wlan_cfg_ctx;
2821 pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
Vijay Pamidipatid41d6d62016-10-19 21:19:00 +05302822
2823 if (!pdev->wlan_cfg_ctx) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302824 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2825 FL("pdev cfg_attach failed"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302826
2827 qdf_mem_free(pdev);
Vijay Pamidipatid41d6d62016-10-19 21:19:00 +05302828 goto fail0;
2829 }
2830
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05302831 /*
2832 * set nss pdev config based on soc config
2833 */
Vivek126db5d2018-07-25 22:05:04 +05302834 nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05302835 wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
Vivek126db5d2018-07-25 22:05:04 +05302836 (nss_cfg & (1 << pdev_id)));
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05302837
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002838 pdev->soc = soc;
Akshay Kosigi1a9c6d12018-04-26 00:54:23 +05302839 pdev->ctrl_pdev = ctrl_pdev;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002840 pdev->pdev_id = pdev_id;
2841 soc->pdev_list[pdev_id] = pdev;
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08002842 soc->pdev_count++;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002843
2844 TAILQ_INIT(&pdev->vdev_list);
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05302845 qdf_spinlock_create(&pdev->vdev_list_lock);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002846 pdev->vdev_count = 0;
2847
Pamidipati, Vijay9c9a2872017-05-31 10:06:34 +05302848 qdf_spinlock_create(&pdev->tx_mutex);
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05302849 qdf_spinlock_create(&pdev->neighbour_peer_mutex);
2850 TAILQ_INIT(&pdev->neighbour_peers_list);
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05302851 pdev->neighbour_peers_added = false;
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05302852
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002853 if (dp_soc_cmn_setup(soc)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302854 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2855 FL("dp_soc_cmn_setup failed"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302856 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002857 }
2858
2859 /* Setup per PDEV TCL rings if configured */
2860 if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302861 tx_ring_size =
Vivek126db5d2018-07-25 22:05:04 +05302862 wlan_cfg_tx_ring_size(soc_cfg_ctx);
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302863 tx_comp_ring_size =
Vivek126db5d2018-07-25 22:05:04 +05302864 wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302865
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002866 if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302867 pdev_id, pdev_id, tx_ring_size)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302868 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2869 FL("dp_srng_setup failed for tcl_data_ring"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302870 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002871 }
2872 if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302873 WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302874 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2875 FL("dp_srng_setup failed for tx_comp_ring"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302876 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002877 }
2878 soc->num_tcl_data_rings++;
2879 }
2880
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302881 /* Tx specific init */
2882 if (dp_tx_pdev_attach(pdev)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302883 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2884 FL("dp_tx_pdev_attach failed"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302885 goto fail1;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302886 }
2887
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05302888 reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002889 /* Setup per PDEV REO rings if configured */
Vivek126db5d2018-07-25 22:05:04 +05302890 if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002891 if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05302892 pdev_id, pdev_id, reo_dst_ring_size)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302893 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2894 FL("dp_srng_setup failed for reo_dest_ringn"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302895 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002896 }
2897 soc->num_reo_dest_rings++;
2898
2899 }
Dhanashri Atre7351d172016-10-12 13:08:09 -07002900 if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
Vivek126db5d2018-07-25 22:05:04 +05302901 wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx))) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302902 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2903 FL("dp_srng_setup failed rx refill ring"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302904 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002905 }
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002906
2907 if (dp_rxdma_ring_setup(soc, pdev)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302908 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002909 FL("RXDMA ring config failed"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302910 goto fail1;
Dhanashri Atre7351d172016-10-12 13:08:09 -07002911 }
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002912
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002913 if (dp_mon_rings_setup(soc, pdev)) {
2914 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2915 FL("MONITOR rings setup failed"));
2916 goto fail1;
Kai Chen6eca1a62017-01-12 10:17:53 -08002917 }
2918
Vivek126db5d2018-07-25 22:05:04 +05302919 entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002920 if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2921 if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
Vivek126db5d2018-07-25 22:05:04 +05302922 0, pdev_id,
2923 entries)) {
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002924 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Vivek126db5d2018-07-25 22:05:04 +05302925 FL(RNG_ERR "rxdma_err_dst_ring"));
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002926 goto fail1;
2927 }
Pramod Simhae382ff82017-06-05 18:09:26 -07002928 }
2929
Yun Park47e6af82018-01-17 12:15:01 -08002930 if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
Yun Park601d0d82017-08-28 21:49:31 -07002931 goto fail1;
Yun Park601d0d82017-08-28 21:49:31 -07002932
Yun Parkfde6b9e2017-06-26 17:13:11 -07002933 if (dp_ipa_ring_resource_setup(soc, pdev))
2934 goto fail1;
2935
2936 if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
Yun Park601d0d82017-08-28 21:49:31 -07002937 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2938 FL("dp_ipa_uc_attach failed"));
Yun Parkfde6b9e2017-06-26 17:13:11 -07002939 goto fail1;
2940 }
2941
Leo Chang5ea93a42016-11-03 12:39:49 -07002942 /* Rx specific init */
2943 if (dp_rx_pdev_attach(pdev)) {
Yun Parkfde6b9e2017-06-26 17:13:11 -07002944 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Yun Park601d0d82017-08-28 21:49:31 -07002945 FL("dp_rx_pdev_attach failed"));
Yun Parkfde6b9e2017-06-26 17:13:11 -07002946 goto fail0;
Leo Chang5ea93a42016-11-03 12:39:49 -07002947 }
Ishank Jainbc2d91f2017-01-03 18:14:54 +05302948 DP_STATS_INIT(pdev);
Leo Chang5ea93a42016-11-03 12:39:49 -07002949
nobeljd124b742017-10-16 11:59:12 -07002950 /* Monitor filter init */
2951 pdev->mon_filter_mode = MON_FILTER_ALL;
2952 pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
2953 pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
2954 pdev->fp_data_filter = FILTER_DATA_ALL;
2955 pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
2956 pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
2957 pdev->mo_data_filter = FILTER_DATA_ALL;
2958
Leo Chang5ea93a42016-11-03 12:39:49 -07002959 dp_local_peer_id_pool_init(pdev);
Sravan Kumar Kairamf1e07662018-06-18 21:36:14 +05302960
Ishank Jain949674c2017-02-27 17:09:29 +05302961 dp_dscp_tid_map_setup(pdev);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002962
Kai Chen6eca1a62017-01-12 10:17:53 -08002963 /* Rx monitor mode specific init */
2964 if (dp_rx_pdev_mon_attach(pdev)) {
2965 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Keyur Parekhfad6d082017-05-07 08:54:47 -07002966 "dp_rx_pdev_attach failed\n");
2967 goto fail1;
2968 }
2969
2970 if (dp_wdi_event_attach(pdev)) {
2971 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2972 "dp_wdi_evet_attach failed\n");
2973 goto fail1;
Kai Chen6eca1a62017-01-12 10:17:53 -08002974 }
2975
Om Prakash Tripathia7fb93f2017-06-27 18:41:41 +05302976 /* set the reo destination during initialization */
2977 pdev->reo_dest = pdev->pdev_id + 1;
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302978
Anish Natarajb9e7d012018-02-16 00:38:10 +05302979 /*
2980 * initialize ppdu tlv list
2981 */
2982 TAILQ_INIT(&pdev->ppdu_info_list);
2983 pdev->tlv_count = 0;
2984 pdev->list_depth = 0;
2985
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002986 return (struct cdp_pdev *)pdev;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002987
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302988fail1:
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002989 dp_pdev_detach_wifi3((struct cdp_pdev *)pdev, 0);
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302990
2991fail0:
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002992 return NULL;
2993}
2994
/*
 * dp_rxdma_ring_cleanup() - free the per-MAC Host2FW RX buffer rings
 * @soc: data path SoC handle
 * @pdev: Physical device handle
 *
 * Releases the rx_mac_buf_ring SRNGs set up for the Host2FW RX buffer
 * path and frees the monitor status reap timer that
 * dp_rxdma_ring_config() created. Compiled out (empty stub) when
 * QCA_HOST2FW_RXBUF_RING is not set.
 *
 * Return: void
 */
#ifdef QCA_HOST2FW_RXBUF_RING
static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
	 struct dp_pdev *pdev)
{
	int max_mac_rings =
		 wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
	int i;

	/* Clamp to the size of the rx_mac_buf_ring[] array */
	max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
				 max_mac_rings : MAX_RX_MAC_RINGS;
	/*
	 * Iterate only over the rings that were actually configured.
	 * The previous loop ran to MAX_RX_MAC_RINGS, ignoring the
	 * clamped count computed above and "cleaning" rings that were
	 * never set up.
	 */
	for (i = 0; i < max_mac_rings; i++)
		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
			 RXDMA_BUF, 1);

	/* Counterpart of qdf_timer_init() in dp_rxdma_ring_config() */
	qdf_timer_free(&soc->mon_reap_timer);
}
#else
static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
	 struct dp_pdev *pdev)
{
}
#endif
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05303024
3025/*
3026 * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
3027 * @pdev: device object
3028 *
3029 * Return: void
3030 */
3031static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
3032{
3033 struct dp_neighbour_peer *peer = NULL;
3034 struct dp_neighbour_peer *temp_peer = NULL;
3035
3036 TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
3037 neighbour_peer_list_elem, temp_peer) {
3038 /* delete this peer from the list */
3039 TAILQ_REMOVE(&pdev->neighbour_peers_list,
3040 peer, neighbour_peer_list_elem);
3041 qdf_mem_free(peer);
3042 }
3043
3044 qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
3045}
3046
Anish Natarajcf526b72018-03-26 15:55:30 +05303047/**
3048* dp_htt_ppdu_stats_detach() - detach stats resources
3049* @pdev: Datapath PDEV handle
3050*
3051* Return: void
3052*/
3053static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
3054{
3055 struct ppdu_info *ppdu_info, *ppdu_info_next;
3056
3057 TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3058 ppdu_info_list_elem, ppdu_info_next) {
3059 if (!ppdu_info)
3060 break;
3061 qdf_assert_always(ppdu_info->nbuf);
3062 qdf_nbuf_free(ppdu_info->nbuf);
3063 qdf_mem_free(ppdu_info);
3064 }
3065}
3066
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07003067#ifndef QCA_WIFI_QCA6390
3068static
3069void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3070 int mac_id)
3071{
3072 dp_srng_cleanup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
3073 RXDMA_MONITOR_BUF, 0);
3074 dp_srng_cleanup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
3075 RXDMA_MONITOR_DST, 0);
3076
3077 dp_srng_cleanup(soc, &pdev->rxdma_mon_status_ring[mac_id],
3078 RXDMA_MONITOR_STATUS, 0);
3079
3080 dp_srng_cleanup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
3081 RXDMA_MONITOR_DESC, 0);
3082 dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
3083 RXDMA_DST, 0);
3084}
3085#else
3086static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3087 int mac_id)
3088{
3089}
3090#endif
3091
/*
* dp_pdev_detach_wifi3() - detach txrx pdev
* @txrx_pdev: Datapath PDEV handle
* @force: Force detach
*         NOTE(review): @force is currently unused in this body —
*         confirm whether callers expect it to gate anything.
*
* Tears down all per-pdev datapath state in the reverse order of
* attach: WDI/tx/rx/monitor subsystems first, then SRNG memory, then
* pending buffers, and finally the pdev object itself.
*
* Return: void
*/
static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	struct dp_soc *soc = pdev->soc;
	qdf_nbuf_t curr_nbuf, next_nbuf;
	int mac_id;

	dp_wdi_event_detach(pdev);

	dp_tx_pdev_detach(pdev);

	/* Per-pdev TCL/WBM rings exist only in the per-pdev-tx config */
	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
			TCL_DATA, pdev->pdev_id);
		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
			WBM2SW_RELEASE, pdev->pdev_id);
	}

	dp_pktlogmod_exit(pdev);

	dp_rx_pdev_detach(pdev);
	dp_rx_pdev_mon_detach(pdev);
	dp_neighbour_peers_detach(pdev);
	qdf_spinlock_destroy(&pdev->tx_mutex);
	qdf_spinlock_destroy(&pdev->vdev_list_lock);

	dp_ipa_uc_detach(soc, pdev);

	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);

	/* Cleanup per PDEV REO rings if configured */
	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
			REO_DST, pdev->pdev_id);
	}

	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);

	dp_rxdma_ring_cleanup(soc, pdev);

	/*
	 * Monitor rings plus the RXDMA error destination ring for each
	 * MAC. The err_dst cleanup here runs for all targets (the
	 * QCA6390 variant of dp_mon_ring_deinit() is a stub).
	 */
	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		dp_mon_ring_deinit(soc, pdev, mac_id);
		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
			RXDMA_DST, 0);
	}

	/* Free any msdus still parked on the invalid-peer chain */
	curr_nbuf = pdev->invalid_peer_head_msdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	dp_htt_ppdu_stats_detach(pdev);

	/* Unhook from the soc before freeing the pdev memory */
	soc->pdev_list[pdev->pdev_id] = NULL;
	soc->pdev_count--;
	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
	qdf_mem_free(pdev->dp_txrx_handle);
	qdf_mem_free(pdev);
}
3159
/*
 * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
 * @soc: DP SOC handle
 *
 * Pops every node off the REO descriptor freelist under the freelist
 * lock, DMA-unmaps and frees each queue descriptor, then destroys the
 * list and its lock. Must only be called once no more entries can be
 * queued (soc detach path).
 */
static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
{
	struct reo_desc_list_node *desc;
	struct dp_rx_tid *rx_tid;

	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
	while (qdf_list_remove_front(&soc->reo_desc_freelist,
		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
		rx_tid = &desc->rx_tid;
		/* Unmap before freeing the backing queue descriptor */
		qdf_mem_unmap_nbytes_single(soc->osdev,
			rx_tid->hw_qdesc_paddr,
			QDF_DMA_BIDIRECTIONAL,
			rx_tid->hw_qdesc_alloc_size);
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		qdf_mem_free(desc);
	}
	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
	qdf_list_destroy(&soc->reo_desc_freelist);
	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
}
3184
/*
 * dp_soc_detach_wifi3() - Detach txrx SOC
 * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
 *
 * Full teardown of the datapath SOC: stops stats work, detaches every
 * attached pdev, releases all common SRNGs, destroys locks, and frees
 * the soc object. Statement order mirrors the attach sequence in
 * reverse and should not be rearranged.
 */
static void dp_soc_detach_wifi3(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;

	/* Mark common init as undone before tearing anything down */
	qdf_atomic_set(&soc->cmn_init_done, 0);

	/* Drain and stop the HTT stats worker before freeing its queue */
	qdf_flush_work(&soc->htt_stats.work);
	qdf_disable_work(&soc->htt_stats.work);

	/* Free pending htt stats messages */
	qdf_nbuf_queue_free(&soc->htt_stats.msg);

	dp_free_inact_timer(soc);

	/* Detach every pdev still registered (force = 1) */
	for (i = 0; i < MAX_PDEV_CNT; i++) {
		if (soc->pdev_list[i])
			dp_pdev_detach_wifi3(
				(struct cdp_pdev *)soc->pdev_list[i], 1);
	}

	dp_peer_find_detach(soc);

	/* TBD: Call Tx and Rx cleanup functions to free buffers and
	 * SW descriptors
	 */

	/* Free the ring memories */
	/* Common rings */
	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);

	dp_tx_soc_detach(soc);
	/* Tx data rings (soc-level only when not per-pdev) */
	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		for (i = 0; i < soc->num_tcl_data_rings; i++) {
			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
				TCL_DATA, i);
			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
				WBM2SW_RELEASE, i);
		}
	}

	/* TCL command and status rings */
	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);

	/* Rx data rings (soc-level only when not per-pdev) */
	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
		soc->num_reo_dest_rings =
			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
		for (i = 0; i < soc->num_reo_dest_rings; i++) {
			/* TODO: Get number of rings and ring sizes
			 * from wlan_cfg
			 */
			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
				REO_DST, i);
		}
	}
	/* REO reinjection ring */
	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);

	/* Rx release ring */
	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);

	/* Rx exception ring */
	/* TODO: Better to store ring_type and ring_num in
	 * dp_srng during setup
	 */
	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);

	/* REO command and status rings */
	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
	dp_hw_link_desc_pool_cleanup(soc);

	qdf_spinlock_destroy(&soc->peer_ref_mutex);
	qdf_spinlock_destroy(&soc->htt_stats.lock);

	htt_soc_detach(soc->htt_handle);

	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);

	/* REO command list must go before its lock and the freelist */
	dp_reo_cmdlist_destroy(soc);
	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
	dp_reo_desc_freelist_destroy(soc);

	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);

	dp_soc_wds_detach(soc);
	qdf_spinlock_destroy(&soc->ast_lock);

	qdf_mem_free(soc);
}
3282
#ifndef QCA_WIFI_QCA6390
/**
 * dp_mon_htt_srng_setup() - register one MAC's monitor-mode rings
 * with the target over HTT
 * @soc: data path SoC handle
 * @pdev: Physical device handle
 * @mac_id: host-side MAC index for the ring arrays
 * @mac_for_pdev: target-side MAC id used in the HTT message
 *
 * Return: void
 */
static void dp_mon_htt_srng_setup(struct dp_soc *soc,
				  struct dp_pdev *pdev,
				  int mac_id,
				  int mac_for_pdev)
{
	htt_srng_setup(soc->htt_handle, mac_for_pdev,
		       pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
		       RXDMA_MONITOR_BUF);

	htt_srng_setup(soc->htt_handle, mac_for_pdev,
		       pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
		       RXDMA_MONITOR_DST);

	htt_srng_setup(soc->htt_handle, mac_for_pdev,
		       pdev->rxdma_mon_status_ring[mac_id].hal_srng,
		       RXDMA_MONITOR_STATUS);

	htt_srng_setup(soc->htt_handle, mac_for_pdev,
		       pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
		       RXDMA_MONITOR_DESC);
}
#else
/* QCA6390: no monitor rings to register; stub keeps the caller
 * target-agnostic.
 */
static void dp_mon_htt_srng_setup(struct dp_soc *soc,
				  struct dp_pdev *pdev,
				  int mac_id,
				  int mac_for_pdev)
{
}
#endif
/*
 * dp_rxdma_ring_config() - configure the RX DMA rings
 *
 * This function is used to configure the MAC rings.
 * On MCL host provides buffers in Host2FW ring
 * FW refills (copies) buffers to the ring and updates
 * ring_idx in register
 *
 * @soc: data path SoC handle
 *
 * Return: void
 */
#ifdef QCA_HOST2FW_RXBUF_RING
static void dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (pdev) {
			int mac_id;
			bool dbs_enable = 0;
			int max_mac_rings =
				 wlan_cfg_get_num_mac_rings
				(pdev->wlan_cfg_ctx);

			/* Host-owned refill ring, registered on MAC 0 */
			htt_srng_setup(soc->htt_handle, 0,
				 pdev->rx_refill_buf_ring.hal_srng,
				 RXDMA_BUF);

			/* Second refill ring exists only if allocated
			 * (e.g. IPA); see hal_srng NULL check
			 */
			if (pdev->rx_refill_buf_ring2.hal_srng)
				htt_srng_setup(soc->htt_handle, 0,
					pdev->rx_refill_buf_ring2.hal_srng,
					RXDMA_BUF);

			/* Query the control plane for DBS 2x2 support,
			 * if the callback is registered
			 */
			if (soc->cdp_soc.ol_ops->
				is_hw_dbs_2x2_capable) {
				dbs_enable = soc->cdp_soc.ol_ops->
				is_hw_dbs_2x2_capable(soc->ctrl_psoc);
			}

			/* Without DBS only one MAC ring is used */
			if (dbs_enable) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
				QDF_TRACE_LEVEL_ERROR,
					FL("DBS enabled max_mac_rings %d\n"),
					 max_mac_rings);
			} else {
				max_mac_rings = 1;
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					 QDF_TRACE_LEVEL_ERROR,
					 FL("DBS disabled, max_mac_rings %d\n"),
					 max_mac_rings);
			}

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				 FL("pdev_id %d max_mac_rings %d\n"),
				 pdev->pdev_id, max_mac_rings);

			/* Register buffer, error-dst and monitor rings
			 * for each active MAC of this pdev
			 */
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev = dp_get_mac_id_for_pdev(
							mac_id, pdev->pdev_id);

				QDF_TRACE(QDF_MODULE_ID_TXRX,
					 QDF_TRACE_LEVEL_ERROR,
					 FL("mac_id %d\n"), mac_for_pdev);
				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					 pdev->rx_mac_buf_ring[mac_id]
						.hal_srng,
					 RXDMA_BUF);
				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					pdev->rxdma_err_dst_ring[mac_id]
						.hal_srng,
					RXDMA_DST);

				/* Configure monitor mode rings */
				dp_mon_htt_srng_setup(soc, pdev, mac_id,
						      mac_for_pdev);

			}
		}
	}

	/*
	 * Timer to reap rxdma status rings.
	 * Needed until we enable ppdu end interrupts
	 */
	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
			dp_service_mon_rings, (void *)soc,
			QDF_TIMER_TYPE_WAKE_APPS);
	soc->reap_timer_init = 1;
}
#else
/* This is only for WIN */
static void dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	int mac_id;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (pdev == NULL)
			continue;

		/* Register refill, monitor and error-dst rings for
		 * every RXDMA ring of this pdev
		 */
		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);

			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);

			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
				RXDMA_MONITOR_BUF);
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
				RXDMA_MONITOR_DST);
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rxdma_mon_status_ring[mac_id].hal_srng,
				RXDMA_MONITOR_STATUS);
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
				RXDMA_MONITOR_DESC);
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rxdma_err_dst_ring[mac_id].hal_srng,
				RXDMA_DST);
		}
	}
}
#endif
3443
/*
 * dp_soc_attach_target_wifi3() - SOC initialization in the target
 * @txrx_soc: Datapath SOC handle
 *
 * Pushes host-side SOC configuration to the target: HTT-level attach
 * first, then registration of the RXDMA rings, stats init, and the
 * work queue that processes target stats responses.
 *
 * Return: 0 (always)
 */
static int dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;

	/* HTT must be attached to the target before ring registration */
	htt_soc_attach_target(soc->htt_handle);

	dp_rxdma_ring_config(soc);

	DP_STATS_INIT(soc);

	/* initialize work queue for stats processing */
	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);

	return 0;
}
3463
3464/*
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05303465 * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
3466 * @txrx_soc: Datapath SOC handle
3467 */
3468static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
3469{
3470 struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3471 return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
3472}
3473/*
3474 * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
3475 * @txrx_soc: Datapath SOC handle
3476 * @nss_cfg: nss config
3477 */
3478static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
3479{
3480 struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
Aniruddha Paul9d7dc272018-02-11 19:40:41 +05303481 struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
3482
3483 wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
3484
3485 /*
3486 * TODO: masked out based on the per offloaded radio
3487 */
3488 if (config == dp_nss_cfg_dbdc) {
3489 wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
3490 wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
3491 wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
3492 wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
3493 }
3494
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05303495 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3496 FL("nss-wifi<0> nss config is enabled"));
3497}
/*
* dp_vdev_attach_wifi3() - attach txrx vdev
* @txrx_pdev: Datapath PDEV handle
* @vdev_mac_addr: MAC address of the virtual interface
* @vdev_id: VDEV Id
* @wlan_op_mode: VDEV operating mode
*
* Allocates and initializes a dp_vdev, links it into the pdev's vdev
* list, attaches the tx path, and for station mode creates the
* self/bss peer.
*
* Return: DP VDEV handle on success, NULL on failure
*/
static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("DP VDEV memory allocation failed"));
		goto fail0;
	}

	vdev->pdev = pdev;
	vdev->vdev_id = vdev_id;
	vdev->opmode = op_mode;
	vdev->osdev = soc->osdev;

	/* OS-interface callbacks are filled in later by
	 * dp_vdev_register_wifi3()
	 */
	vdev->osif_rx = NULL;
	vdev->osif_rsim_rx_decap = NULL;
	vdev->osif_get_key = NULL;
	vdev->osif_rx_mon = NULL;
	vdev->osif_tx_free_ext = NULL;
	vdev->osif_vdev = NULL;

	vdev->delete.pending = 0;
	vdev->safemode = 0;
	vdev->drop_unenc = 1;	/* drop unencrypted frames by default */
	vdev->sec_type = cdp_sec_type_none;
#ifdef notyet
	vdev->filters_num = 0;
#endif

	qdf_mem_copy(
		&vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);

	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->dscp_tid_map_id = 0;
	vdev->mcast_enhancement_en = 0;

	/* TODO: Initialize default HTT meta data that will be used in
	 * TCL descriptors for packets transmitted from this VDEV
	 */

	TAILQ_INIT(&vdev->peer_list);

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	/* add this vdev into the pdev's list */
	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	pdev->vdev_count++;

	dp_tx_vdev_attach(vdev);


	/* In poll mode, start the interrupt-emulation timer with the
	 * first vdev on the pdev
	 */
	if ((soc->intr_mode == DP_INTR_POLL) &&
			wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
		if (pdev->vdev_count == 1)
			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
	}

	dp_lro_hash_setup(soc);

	/* LRO: enabled only for station mode when configured */
	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
		wlan_op_mode_sta == vdev->opmode)
		vdev->lro_enable = true;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		 "LRO: vdev_id %d lro_enable %d", vdev_id, vdev->lro_enable);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
	DP_STATS_INIT(vdev);

	/* Station mode: create the self peer for this vdev's own MAC */
	if (wlan_op_mode_sta == vdev->opmode)
		dp_peer_create_wifi3((struct cdp_vdev *)vdev,
							vdev->mac_addr.raw,
							NULL);

	return (struct cdp_vdev *)vdev;

fail0:
	return NULL;
}
3593
/**
 * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
 * @vdev: Datapath VDEV handle
 * @osif_vdev: OSIF vdev handle
 * @ctrl_vdev: UMAC vdev handle
 * @txrx_ops: Tx and Rx operations
 *
 * Copies the OS-interface callbacks into the dp_vdev and hands the DP
 * transmit entry points back to the caller through @txrx_ops.
 *
 * Return: void (the original doc's "DP VDEV handle" claim did not
 * match this void signature)
 */
static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
	void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
	struct ol_txrx_ops *txrx_ops)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	vdev->osif_vdev = osif_vdev;
	vdev->ctrl_vdev = ctrl_vdev;
	vdev->osif_rx = txrx_ops->rx.rx;
	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
	vdev->osif_get_key = txrx_ops->get_key;
	vdev->osif_rx_mon = txrx_ops->rx.mon;
	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
#ifdef notyet
#if ATH_SUPPORT_WAPI
	vdev->osif_check_wai = txrx_ops->rx.wai_check;
#endif
#endif
#ifdef UMAC_SUPPORT_PROXY_ARP
	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
#endif
	vdev->me_convert = txrx_ops->me_convert;

	/* Hand the DP transmit entry point back to the OS shim;
	 * mesh vdevs get the mesh-aware send path
	 */
	if (vdev->mesh_vdev)
		txrx_ops->tx.tx = dp_tx_send_mesh;
	else
		txrx_ops->tx.tx = dp_tx_send;

	txrx_ops->tx.tx_exception = dp_tx_send_exception;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
		"DP Vdev Register success");
}
3636
Vinay Adella4ca1bf62018-02-26 11:03:05 +05303637/**
3638 * dp_vdev_flush_peers() - Forcibily Flush peers of vdev
3639 * @vdev: Datapath VDEV handle
3640 *
3641 * Return: void
3642 */
3643static void dp_vdev_flush_peers(struct dp_vdev *vdev)
3644{
3645 struct dp_pdev *pdev = vdev->pdev;
3646 struct dp_soc *soc = pdev->soc;
3647 struct dp_peer *peer;
3648 uint16_t *peer_ids;
3649 uint8_t i = 0, j = 0;
3650
3651 peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
3652 if (!peer_ids) {
3653 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3654 "DP alloc failure - unable to flush peers");
3655 return;
3656 }
3657
3658 qdf_spin_lock_bh(&soc->peer_ref_mutex);
3659 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3660 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3661 if (peer->peer_ids[i] != HTT_INVALID_PEER)
3662 if (j < soc->max_peers)
3663 peer_ids[j++] = peer->peer_ids[i];
3664 }
3665 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3666
3667 for (i = 0; i < j ; i++)
3668 dp_rx_peer_unmap_handler(soc, peer_ids[i]);
3669
3670 qdf_mem_free(peer_ids);
3671
3672 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3673 FL("Flushed peers for vdev object %pK "), vdev);
3674}
3675
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003676/*
3677 * dp_vdev_detach_wifi3() - Detach txrx vdev
3678 * @txrx_vdev: Datapath VDEV handle
3679 * @callback: Callback OL_IF on completion of detach
3680 * @cb_context: Callback context
3681 *
3682 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08003683static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003684 ol_txrx_vdev_delete_cb callback, void *cb_context)
3685{
3686 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3687 struct dp_pdev *pdev = vdev->pdev;
3688 struct dp_soc *soc = pdev->soc;
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05303689 struct dp_neighbour_peer *peer = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003690
3691 /* preconditions */
3692 qdf_assert(vdev);
3693
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05303694 qdf_spin_lock_bh(&pdev->vdev_list_lock);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003695 /* remove the vdev from its parent pdev's list */
3696 TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05303697 qdf_spin_unlock_bh(&pdev->vdev_list_lock);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003698
Tallapragada Kalyan9d9cbb62018-02-26 17:39:12 +05303699 if (wlan_op_mode_sta == vdev->opmode)
3700 dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);
3701
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003702 /*
Vinay Adella4ca1bf62018-02-26 11:03:05 +05303703 * If Target is hung, flush all peers before detaching vdev
3704 * this will free all references held due to missing
3705 * unmap commands from Target
3706 */
3707 if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
3708 dp_vdev_flush_peers(vdev);
3709
3710 /*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003711 * Use peer_ref_mutex while accessing peer_list, in case
3712 * a peer is in the process of being removed from the list.
3713 */
3714 qdf_spin_lock_bh(&soc->peer_ref_mutex);
3715 /* check that the vdev has no peers allocated */
3716 if (!TAILQ_EMPTY(&vdev->peer_list)) {
3717 /* debug print - will be removed later */
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05303718 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
Jeff Johnson3f217e22017-09-18 10:13:35 -07003719 FL("not deleting vdev object %pK (%pM)"
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05303720 "until deletion finishes for all its peers"),
3721 vdev, vdev->mac_addr.raw);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003722 /* indicate that the vdev needs to be deleted */
3723 vdev->delete.pending = 1;
3724 vdev->delete.callback = callback;
3725 vdev->delete.context = cb_context;
3726 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3727 return;
3728 }
3729 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3730
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05303731 qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
3732 TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
3733 neighbour_peer_list_elem) {
3734 QDF_ASSERT(peer->vdev != vdev);
3735 }
3736 qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
3737
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05303738 dp_tx_vdev_detach(vdev);
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05303739 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
Jeff Johnson3f217e22017-09-18 10:13:35 -07003740 FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003741
3742 qdf_mem_free(vdev);
3743
3744 if (callback)
3745 callback(cb_context);
3746}
3747
3748/*
Tallapragada Kalyan46f90ce2018-05-15 15:03:06 +05303749 * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
3750 * @soc - datapath soc handle
3751 * @peer - datapath peer handle
3752 *
3753 * Delete the AST entries belonging to a peer
3754 */
3755#ifdef FEATURE_AST
3756static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
3757 struct dp_peer *peer)
3758{
3759 struct dp_ast_entry *ast_entry, *temp_ast_entry;
3760
3761 qdf_spin_lock_bh(&soc->ast_lock);
3762 DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
3763 dp_peer_del_ast(soc, ast_entry);
3764
Sravan Kumar Kairam8e15ed92018-07-05 19:00:13 +05303765 peer->self_ast_entry = NULL;
Tallapragada Kalyan46f90ce2018-05-15 15:03:06 +05303766 TAILQ_INIT(&peer->ast_entry_list);
3767 qdf_spin_unlock_bh(&soc->ast_lock);
3768}
3769#else
3770static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
3771 struct dp_peer *peer)
3772{
3773}
3774#endif
3775
Pamidipati, Vijay3756b762018-05-12 11:10:37 +05303776#if ATH_SUPPORT_WRAP
3777static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
3778 uint8_t *peer_mac_addr)
3779{
3780 struct dp_peer *peer;
3781
3782 peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
3783 0, vdev->vdev_id);
3784 if (!peer)
3785 return NULL;
3786
3787 if (peer->bss_peer)
3788 return peer;
3789
3790 qdf_atomic_dec(&peer->ref_cnt);
3791 return NULL;
3792}
3793#else
3794static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
3795 uint8_t *peer_mac_addr)
3796{
3797 struct dp_peer *peer;
3798
3799 peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
3800 0, vdev->vdev_id);
3801 if (!peer)
3802 return NULL;
3803
3804 if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
3805 return peer;
3806
3807 qdf_atomic_dec(&peer->ref_cnt);
3808 return NULL;
3809}
3810#endif
3811
Tallapragada Kalyan46f90ce2018-05-15 15:03:06 +05303812/*
Dhanashri Atre6d90ef32016-11-10 16:27:38 -08003813 * dp_peer_create_wifi3() - attach txrx peer
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003814 * @txrx_vdev: Datapath VDEV handle
3815 * @peer_mac_addr: Peer MAC address
3816 *
3817 * Return: DP peeer handle on success, NULL on failure
3818 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08003819static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
Akshay Kosigi78eced82018-05-14 14:53:48 +05303820 uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003821{
3822 struct dp_peer *peer;
3823 int i;
3824 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3825 struct dp_pdev *pdev;
3826 struct dp_soc *soc;
Tallapragada Kalyan46f90ce2018-05-15 15:03:06 +05303827 struct dp_ast_entry *ast_entry;
Pamidipati, Vijay3756b762018-05-12 11:10:37 +05303828 enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003829
3830 /* preconditions */
3831 qdf_assert(vdev);
3832 qdf_assert(peer_mac_addr);
3833
3834 pdev = vdev->pdev;
3835 soc = pdev->soc;
Chaithanya Garrepalli0323f802018-03-14 17:45:21 +05303836
Pamidipati, Vijay3756b762018-05-12 11:10:37 +05303837 /*
3838 * If a peer entry with given MAC address already exists,
3839 * reuse the peer and reset the state of peer.
3840 */
3841 peer = dp_peer_can_reuse(vdev, peer_mac_addr);
Chaithanya Garrepalli0323f802018-03-14 17:45:21 +05303842
3843 if (peer) {
3844 peer->delete_in_progress = false;
Tallapragada Kalyan1f49bff2018-04-12 19:21:21 +05303845
Tallapragada Kalyan46f90ce2018-05-15 15:03:06 +05303846 dp_peer_delete_ast_entries(soc, peer);
Tallapragada Kalyan1f49bff2018-04-12 19:21:21 +05303847
Pamidipati, Vijay3756b762018-05-12 11:10:37 +05303848 if ((vdev->opmode == wlan_op_mode_sta) &&
3849 !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
3850 DP_MAC_ADDR_LEN)) {
3851 ast_type = CDP_TXRX_AST_TYPE_SELF;
3852 }
3853
3854 dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
3855
Ruchi, Agrawal44461ef2018-04-05 15:25:09 +05303856 /*
Pamidipati, Vijay3756b762018-05-12 11:10:37 +05303857 * Control path maintains a node count which is incremented
3858 * for every new peer create command. Since new peer is not being
3859 * created and earlier reference is reused here,
3860 * peer_unref_delete event is sent to control path to
3861 * increment the count back.
3862 */
Ruchi, Agrawal44461ef2018-04-05 15:25:09 +05303863 if (soc->cdp_soc.ol_ops->peer_unref_delete) {
Akshay Kosigi1a9c6d12018-04-26 00:54:23 +05303864 soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
Ruchi, Agrawal44461ef2018-04-05 15:25:09 +05303865 vdev->vdev_id, peer->mac_addr.raw);
3866 }
Akshay Kosigi78eced82018-05-14 14:53:48 +05303867 peer->ctrl_peer = ctrl_peer;
Ruchi, Agrawal44461ef2018-04-05 15:25:09 +05303868
Sravan Kumar Kairamda542172018-06-08 12:51:21 +05303869 dp_local_peer_id_alloc(pdev, peer);
Tallapragada Kalyan1f49bff2018-04-12 19:21:21 +05303870 DP_STATS_INIT(peer);
Pamidipati, Vijay3756b762018-05-12 11:10:37 +05303871
Chaithanya Garrepalli0323f802018-03-14 17:45:21 +05303872 return (void *)peer;
Tallapragada Kalyan46f90ce2018-05-15 15:03:06 +05303873 } else {
3874 /*
3875 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
3876 * need to remove the AST entry which was earlier added as a WDS
3877 * entry.
Pamidipati, Vijay3756b762018-05-12 11:10:37 +05303878 * If an AST entry exists, but no peer entry exists with a given
3879 * MAC addresses, we could deduce it as a WDS entry
Tallapragada Kalyan46f90ce2018-05-15 15:03:06 +05303880 */
3881 ast_entry = dp_peer_ast_hash_find(soc, peer_mac_addr);
3882 if (ast_entry)
3883 dp_peer_del_ast(soc, ast_entry);
Chaithanya Garrepalli0323f802018-03-14 17:45:21 +05303884 }
3885
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003886#ifdef notyet
3887 peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
3888 soc->mempool_ol_ath_peer);
3889#else
3890 peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
3891#endif
3892
3893 if (!peer)
3894 return NULL; /* failure */
3895
Tallapragada57d86602017-03-31 07:53:58 +05303896 qdf_mem_zero(peer, sizeof(struct dp_peer));
3897
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05303898 TAILQ_INIT(&peer->ast_entry_list);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05303899
Tallapragada Kalyanfb72b632017-07-07 12:51:58 +05303900 /* store provided params */
3901 peer->vdev = vdev;
Akshay Kosigi78eced82018-05-14 14:53:48 +05303902 peer->ctrl_peer = ctrl_peer;
Tallapragada Kalyanfb72b632017-07-07 12:51:58 +05303903
Pamidipati, Vijay3756b762018-05-12 11:10:37 +05303904 if ((vdev->opmode == wlan_op_mode_sta) &&
3905 !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
3906 DP_MAC_ADDR_LEN)) {
3907 ast_type = CDP_TXRX_AST_TYPE_SELF;
3908 }
3909
3910 dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05303911
Leo Chang5ea93a42016-11-03 12:39:49 -07003912 qdf_spinlock_create(&peer->peer_info_lock);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003913
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003914 qdf_mem_copy(
3915 &peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3916
3917 /* TODO: See of rx_opt_proc is really required */
3918 peer->rx_opt_proc = soc->rx_opt_proc;
3919
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003920 /* initialize the peer_id */
3921 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3922 peer->peer_ids[i] = HTT_INVALID_PEER;
3923
3924 qdf_spin_lock_bh(&soc->peer_ref_mutex);
3925
3926 qdf_atomic_init(&peer->ref_cnt);
3927
3928 /* keep one reference for attach */
3929 qdf_atomic_inc(&peer->ref_cnt);
3930
3931 /* add this peer into the vdev's list */
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +05303932 if (wlan_op_mode_sta == vdev->opmode)
3933 TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
3934 else
3935 TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
3936
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003937 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3938
3939 /* TODO: See if hash based search is required */
3940 dp_peer_find_hash_add(soc, peer);
3941
Varun Reddy Yeturub9ec57e2017-11-28 11:42:09 -08003942 /* Initialize the peer state */
3943 peer->state = OL_TXRX_PEER_STATE_DISC;
3944
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05303945 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
Jeff Johnson3f217e22017-09-18 10:13:35 -07003946 "vdev %pK created peer %pK (%pM) ref_cnt: %d",
Krishna Kumaar Natarajan71e5b832017-01-26 08:04:13 -08003947 vdev, peer, peer->mac_addr.raw,
3948 qdf_atomic_read(&peer->ref_cnt));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003949 /*
3950 * For every peer MAp message search and set if bss_peer
3951 */
3952 if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05303953 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3954 "vdev bss_peer!!!!");
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003955 peer->bss_peer = 1;
3956 vdev->vap_bss_peer = peer;
3957 }
Sumedh Baikady1c61e062018-02-12 22:25:47 -08003958 for (i = 0; i < DP_MAX_TIDS; i++)
3959 qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05303960
Leo Chang5ea93a42016-11-03 12:39:49 -07003961 dp_local_peer_id_alloc(pdev, peer);
Ishank Jain1e7401c2017-02-17 15:38:39 +05303962 DP_STATS_INIT(peer);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003963 return (void *)peer;
3964}
3965
3966/*
Dhanashri Atre6d90ef32016-11-10 16:27:38 -08003967 * dp_peer_setup_wifi3() - initialize the peer
3968 * @vdev_hdl: virtual device object
3969 * @peer: Peer object
3970 *
3971 * Return: void
3972 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08003973static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
Dhanashri Atre6d90ef32016-11-10 16:27:38 -08003974{
3975 struct dp_peer *peer = (struct dp_peer *)peer_hdl;
3976 struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
3977 struct dp_pdev *pdev;
3978 struct dp_soc *soc;
Dhanashri Atre14049172016-11-11 18:32:36 -08003979 bool hash_based = 0;
Tallapragada Kalyanfd1edcc2017-03-07 19:34:29 +05303980 enum cdp_host_reo_dest_ring reo_dest;
Dhanashri Atre6d90ef32016-11-10 16:27:38 -08003981
3982 /* preconditions */
3983 qdf_assert(vdev);
3984 qdf_assert(peer);
3985
3986 pdev = vdev->pdev;
3987 soc = pdev->soc;
3988
Krishna Kumaar Natarajand684ba22017-01-25 15:48:43 -08003989 peer->last_assoc_rcvd = 0;
3990 peer->last_disassoc_rcvd = 0;
3991 peer->last_deauth_rcvd = 0;
3992
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05303993 /*
3994 * hash based steering is disabled for Radios which are offloaded
3995 * to NSS
3996 */
3997 if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
3998 hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
3999
4000 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4001 FL("hash based steering for pdev: %d is %d\n"),
4002 pdev->pdev_id, hash_based);
Dhanashri Atre14049172016-11-11 18:32:36 -08004003
Tallapragada Kalyan61cb97c2017-09-20 12:42:10 +05304004 /*
Jeff Johnson23dbde82018-05-05 23:55:52 -07004005 * Below line of code will ensure the proper reo_dest ring is chosen
Tallapragada Kalyan61cb97c2017-09-20 12:42:10 +05304006 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
4007 */
4008 reo_dest = pdev->reo_dest;
Tallapragada Kalyanfd1edcc2017-03-07 19:34:29 +05304009
Dhanashri Atre6d90ef32016-11-10 16:27:38 -08004010 if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
4011 /* TODO: Check the destination ring number to be passed to FW */
Dhanashri Atre14049172016-11-11 18:32:36 -08004012 soc->cdp_soc.ol_ops->peer_set_default_routing(
Akshay Kosigi0e7fdae2018-05-17 12:16:57 +05304013 pdev->ctrl_pdev, peer->mac_addr.raw,
4014 peer->vdev->vdev_id, hash_based, reo_dest);
Dhanashri Atre6d90ef32016-11-10 16:27:38 -08004015 }
Ruchi, Agrawal8e2796b2018-02-07 19:07:43 +05304016
4017 dp_peer_rx_init(pdev, peer);
Dhanashri Atre6d90ef32016-11-10 16:27:38 -08004018 return;
4019}
4020
4021/*
Kalyan Tallapragada277f45e2017-01-30 14:25:27 +05304022 * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
4023 * @vdev_handle: virtual device object
4024 * @htt_pkt_type: type of pkt
4025 *
4026 * Return: void
4027 */
4028static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
4029 enum htt_cmn_pkt_type val)
4030{
4031 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4032 vdev->tx_encap_type = val;
4033}
4034
4035/*
4036 * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
4037 * @vdev_handle: virtual device object
4038 * @htt_pkt_type: type of pkt
4039 *
4040 * Return: void
4041 */
4042static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
4043 enum htt_cmn_pkt_type val)
4044{
4045 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4046 vdev->rx_decap_type = val;
4047}
4048
4049/*
Tallapragada Kalyanfd1edcc2017-03-07 19:34:29 +05304050 * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
4051 * @pdev_handle: physical device object
4052 * @val: reo destination ring index (1 - 4)
4053 *
4054 * Return: void
4055 */
4056static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
4057 enum cdp_host_reo_dest_ring val)
4058{
4059 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4060
4061 if (pdev)
4062 pdev->reo_dest = val;
4063}
4064
4065/*
4066 * dp_get_pdev_reo_dest() - get the reo destination for this pdev
4067 * @pdev_handle: physical device object
4068 *
4069 * Return: reo destination ring index
4070 */
4071static enum cdp_host_reo_dest_ring
4072dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
4073{
4074 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4075
4076 if (pdev)
4077 return pdev->reo_dest;
4078 else
4079 return cdp_host_reo_dest_ring_unknown;
4080}
4081
Bharat Bhushan Chakravarty145d3932017-03-20 12:52:16 -07004082#ifdef QCA_SUPPORT_SON
4083static void dp_son_peer_authorize(struct dp_peer *peer)
4084{
4085 struct dp_soc *soc;
4086 soc = peer->vdev->pdev->soc;
4087 peer->peer_bs_inact_flag = 0;
4088 peer->peer_bs_inact = soc->pdev_bs_inact_reload;
4089 return;
4090}
4091#else
4092static void dp_son_peer_authorize(struct dp_peer *peer)
4093{
4094 return;
4095}
4096#endif
Tallapragada Kalyanfd1edcc2017-03-07 19:34:29 +05304097/*
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05304098 * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
4099 * @pdev_handle: device object
4100 * @val: value to be set
4101 *
4102 * Return: void
4103 */
4104static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
4105 uint32_t val)
4106{
4107 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4108
4109 /* Enable/Disable smart mesh filtering. This flag will be checked
4110 * during rx processing to check if packets are from NAC clients.
4111 */
4112 pdev->filter_neighbour_peers = val;
4113 return 0;
4114}
4115
4116/*
4117 * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
4118 * address for smart mesh filtering
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05304119 * @vdev_handle: virtual device object
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05304120 * @cmd: Add/Del command
4121 * @macaddr: nac client mac address
4122 *
4123 * Return: void
4124 */
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05304125static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
4126 uint32_t cmd, uint8_t *macaddr)
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05304127{
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05304128 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4129 struct dp_pdev *pdev = vdev->pdev;
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05304130 struct dp_neighbour_peer *peer = NULL;
4131
4132 if (!macaddr)
4133 goto fail0;
4134
4135 /* Store address of NAC (neighbour peer) which will be checked
4136 * against TA of received packets.
4137 */
4138 if (cmd == DP_NAC_PARAM_ADD) {
4139 peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
4140 sizeof(*peer));
4141
4142 if (!peer) {
4143 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4144 FL("DP neighbour peer node memory allocation failed"));
4145 goto fail0;
4146 }
4147
4148 qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
4149 macaddr, DP_MAC_ADDR_LEN);
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05304150 peer->vdev = vdev;
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05304151
4152 qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05304153
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05304154 /* add this neighbour peer into the list */
4155 TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
4156 neighbour_peer_list_elem);
4157 qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
4158
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05304159 /* first neighbour */
4160 if (!pdev->neighbour_peers_added) {
4161 if (!pdev->mcopy_mode && !pdev->enhanced_stats_en)
4162 dp_ppdu_ring_cfg(pdev);
4163 pdev->neighbour_peers_added = true;
4164 }
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05304165 return 1;
4166
4167 } else if (cmd == DP_NAC_PARAM_DEL) {
4168 qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
4169 TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
4170 neighbour_peer_list_elem) {
4171 if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
4172 macaddr, DP_MAC_ADDR_LEN)) {
4173 /* delete this peer from the list */
4174 TAILQ_REMOVE(&pdev->neighbour_peers_list,
4175 peer, neighbour_peer_list_elem);
4176 qdf_mem_free(peer);
4177 break;
4178 }
4179 }
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05304180 /* last neighbour deleted */
4181 if (TAILQ_EMPTY(&pdev->neighbour_peers_list))
4182 pdev->neighbour_peers_added = false;
4183
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05304184 qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
4185
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05304186 if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
4187 !pdev->enhanced_stats_en)
4188 dp_ppdu_ring_reset(pdev);
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05304189 return 1;
4190
4191 }
4192
4193fail0:
4194 return 0;
4195}
4196
4197/*
Chaitanya Kiran Godavarthi6228e3b2017-06-15 14:28:19 +05304198 * dp_get_sec_type() - Get the security type
4199 * @peer: Datapath peer handle
4200 * @sec_idx: Security id (mcast, ucast)
4201 *
4202 * return sec_type: Security type
4203 */
4204static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
4205{
4206 struct dp_peer *dpeer = (struct dp_peer *)peer;
4207
4208 return dpeer->security[sec_idx].sec_type;
4209}
4210
4211/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004212 * dp_peer_authorize() - authorize txrx peer
4213 * @peer_handle: Datapath peer handle
4214 * @authorize
4215 *
4216 */
c_cgodavbd5b3c22017-06-07 12:31:40 +05304217static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004218{
4219 struct dp_peer *peer = (struct dp_peer *)peer_handle;
4220 struct dp_soc *soc;
4221
4222 if (peer != NULL) {
4223 soc = peer->vdev->pdev->soc;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004224 qdf_spin_lock_bh(&soc->peer_ref_mutex);
Bharat Bhushan Chakravarty145d3932017-03-20 12:52:16 -07004225 dp_son_peer_authorize(peer);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004226 peer->authorize = authorize ? 1 : 0;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004227 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4228 }
4229}
4230
Stephan Raj Ignatious Durairajba291292018-01-04 13:36:34 +05304231#ifdef QCA_SUPPORT_SON
4232/*
4233 * dp_txrx_update_inact_threshold() - Update inact timer threshold
4234 * @pdev_handle: Device handle
4235 * @new_threshold : updated threshold value
4236 *
4237 */
4238static void
4239dp_txrx_update_inact_threshold(struct cdp_pdev *pdev_handle,
4240 u_int16_t new_threshold)
4241{
4242 struct dp_vdev *vdev;
4243 struct dp_peer *peer;
4244 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4245 struct dp_soc *soc = pdev->soc;
4246 u_int16_t old_threshold = soc->pdev_bs_inact_reload;
4247
4248 if (old_threshold == new_threshold)
4249 return;
4250
4251 soc->pdev_bs_inact_reload = new_threshold;
4252
4253 qdf_spin_lock_bh(&soc->peer_ref_mutex);
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05304254 qdf_spin_lock_bh(&pdev->vdev_list_lock);
Stephan Raj Ignatious Durairajba291292018-01-04 13:36:34 +05304255 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4256 if (vdev->opmode != wlan_op_mode_ap)
4257 continue;
4258
4259 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4260 if (!peer->authorize)
4261 continue;
4262
4263 if (old_threshold - peer->peer_bs_inact >=
4264 new_threshold) {
4265 dp_mark_peer_inact((void *)peer, true);
4266 peer->peer_bs_inact = 0;
4267 } else {
4268 peer->peer_bs_inact = new_threshold -
4269 (old_threshold - peer->peer_bs_inact);
4270 }
4271 }
4272 }
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05304273 qdf_spin_unlock_bh(&pdev->vdev_list_lock);
Stephan Raj Ignatious Durairajba291292018-01-04 13:36:34 +05304274 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4275}
4276
4277/**
4278 * dp_txrx_reset_inact_count(): Reset inact count
4279 * @pdev_handle - device handle
4280 *
4281 * Return: void
4282 */
4283static void
4284dp_txrx_reset_inact_count(struct cdp_pdev *pdev_handle)
4285{
4286 struct dp_vdev *vdev = NULL;
4287 struct dp_peer *peer = NULL;
4288 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4289 struct dp_soc *soc = pdev->soc;
4290
4291 qdf_spin_lock_bh(&soc->peer_ref_mutex);
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05304292 qdf_spin_lock_bh(&pdev->vdev_list_lock);
Stephan Raj Ignatious Durairajba291292018-01-04 13:36:34 +05304293 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4294 if (vdev->opmode != wlan_op_mode_ap)
4295 continue;
4296
4297 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4298 if (!peer->authorize)
4299 continue;
4300
4301 peer->peer_bs_inact = soc->pdev_bs_inact_reload;
4302 }
4303 }
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05304304 qdf_spin_unlock_bh(&pdev->vdev_list_lock);
Stephan Raj Ignatious Durairajba291292018-01-04 13:36:34 +05304305 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4306}
4307
4308/**
4309 * dp_set_inact_params(): set inactivity params
4310 * @pdev_handle - device handle
4311 * @inact_check_interval - inactivity interval
4312 * @inact_normal - Inactivity normal
4313 * @inact_overload - Inactivity overload
4314 *
4315 * Return: bool
4316 */
4317bool dp_set_inact_params(struct cdp_pdev *pdev_handle,
4318 u_int16_t inact_check_interval,
4319 u_int16_t inact_normal, u_int16_t inact_overload)
4320{
4321 struct dp_soc *soc;
4322 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4323
4324 if (!pdev)
4325 return false;
4326
4327 soc = pdev->soc;
4328 if (!soc)
4329 return false;
4330
4331 soc->pdev_bs_inact_interval = inact_check_interval;
4332 soc->pdev_bs_inact_normal = inact_normal;
4333 soc->pdev_bs_inact_overload = inact_overload;
4334
4335 dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
4336 soc->pdev_bs_inact_normal);
4337
4338 return true;
4339}
4340
4341/**
4342 * dp_start_inact_timer(): Inactivity timer start
4343 * @pdev_handle - device handle
4344 * @enable - Inactivity timer start/stop
4345 *
4346 * Return: bool
4347 */
4348bool dp_start_inact_timer(struct cdp_pdev *pdev_handle, bool enable)
4349{
4350 struct dp_soc *soc;
4351 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4352
4353 if (!pdev)
4354 return false;
4355
4356 soc = pdev->soc;
4357 if (!soc)
4358 return false;
4359
4360 if (enable) {
4361 dp_txrx_reset_inact_count((struct cdp_pdev *)pdev);
4362 qdf_timer_mod(&soc->pdev_bs_inact_timer,
4363 soc->pdev_bs_inact_interval * 1000);
4364 } else {
4365 qdf_timer_stop(&soc->pdev_bs_inact_timer);
4366 }
4367
4368 return true;
4369}
4370
4371/**
4372 * dp_set_overload(): Set inactivity overload
4373 * @pdev_handle - device handle
4374 * @overload - overload status
4375 *
4376 * Return: void
4377 */
4378void dp_set_overload(struct cdp_pdev *pdev_handle, bool overload)
4379{
4380 struct dp_soc *soc;
4381 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4382
4383 if (!pdev)
4384 return;
4385
4386 soc = pdev->soc;
4387 if (!soc)
4388 return;
4389
4390 dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
4391 overload ? soc->pdev_bs_inact_overload :
4392 soc->pdev_bs_inact_normal);
4393}
4394
4395/**
4396 * dp_peer_is_inact(): check whether peer is inactive
4397 * @peer_handle - datapath peer handle
4398 *
4399 * Return: bool
4400 */
4401bool dp_peer_is_inact(void *peer_handle)
4402{
4403 struct dp_peer *peer = (struct dp_peer *)peer_handle;
4404
4405 if (!peer)
4406 return false;
4407
4408 return peer->peer_bs_inact_flag == 1;
4409}
4410
Stephan Raj Ignatious Durairaje5dd51c2018-02-09 16:39:12 +05304411/**
4412 * dp_init_inact_timer: initialize the inact timer
4413 * @soc - SOC handle
4414 *
4415 * Return: void
4416 */
4417void dp_init_inact_timer(struct dp_soc *soc)
4418{
4419 qdf_timer_init(soc->osdev, &soc->pdev_bs_inact_timer,
4420 dp_txrx_peer_find_inact_timeout_handler,
4421 (void *)soc, QDF_TIMER_TYPE_WAKE_APPS);
4422}
4423
Stephan Raj Ignatious Durairajba291292018-01-04 13:36:34 +05304424#else
4425
4426bool dp_set_inact_params(struct cdp_pdev *pdev, u_int16_t inact_check_interval,
4427 u_int16_t inact_normal, u_int16_t inact_overload)
4428{
4429 return false;
4430}
4431
4432bool dp_start_inact_timer(struct cdp_pdev *pdev, bool enable)
4433{
4434 return false;
4435}
4436
4437void dp_set_overload(struct cdp_pdev *pdev, bool overload)
4438{
4439 return;
4440}
4441
Stephan Raj Ignatious Durairaje5dd51c2018-02-09 16:39:12 +05304442void dp_init_inact_timer(struct dp_soc *soc)
4443{
4444 return;
4445}
4446
Stephan Raj Ignatious Durairajba291292018-01-04 13:36:34 +05304447bool dp_peer_is_inact(void *peer)
4448{
4449 return false;
4450}
Stephan Raj Ignatious Durairajba291292018-01-04 13:36:34 +05304451#endif
4452
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004453/*
4454 * dp_peer_unref_delete() - unref and delete peer
4455 * @peer_handle: Datapath peer handle
4456 *
4457 */
4458void dp_peer_unref_delete(void *peer_handle)
4459{
4460 struct dp_peer *peer = (struct dp_peer *)peer_handle;
Tallapragada Kalyan4f894922018-01-03 14:26:28 +05304461 struct dp_peer *bss_peer = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004462 struct dp_vdev *vdev = peer->vdev;
Kiran Venkatappa9edb9612017-03-16 11:37:35 +05304463 struct dp_pdev *pdev = vdev->pdev;
4464 struct dp_soc *soc = pdev->soc;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004465 struct dp_peer *tmppeer;
4466 int found = 0;
4467 uint16_t peer_id;
Manoj Ekbote4a59d8d2018-02-14 10:13:27 -08004468 uint16_t vdev_id;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004469
4470 /*
4471 * Hold the lock all the way from checking if the peer ref count
4472 * is zero until the peer references are removed from the hash
4473 * table and vdev list (if the peer ref count is zero).
4474 * This protects against a new HL tx operation starting to use the
4475 * peer object just after this function concludes it's done being used.
4476 * Furthermore, the lock needs to be held while checking whether the
4477 * vdev's list of peers is empty, to make sure that list is not modified
4478 * concurrently with the empty check.
4479 */
4480 qdf_spin_lock_bh(&soc->peer_ref_mutex);
Krishna Kumaar Natarajan71e5b832017-01-26 08:04:13 -08004481 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Jeff Johnson3f217e22017-09-18 10:13:35 -07004482 "%s: peer %pK ref_cnt(before decrement): %d\n", __func__,
Krishna Kumaar Natarajan71e5b832017-01-26 08:04:13 -08004483 peer, qdf_atomic_read(&peer->ref_cnt));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004484 if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
4485 peer_id = peer->peer_ids[0];
Manoj Ekbote4a59d8d2018-02-14 10:13:27 -08004486 vdev_id = vdev->vdev_id;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004487
4488 /*
4489 * Make sure that the reference to the peer in
4490 * peer object map is removed
4491 */
4492 if (peer_id != HTT_INVALID_PEER)
4493 soc->peer_id_to_obj_map[peer_id] = NULL;
4494
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05304495 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
Jeff Johnson3f217e22017-09-18 10:13:35 -07004496 "Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004497
4498 /* remove the reference to the peer from the hash table */
4499 dp_peer_find_hash_remove(soc, peer);
4500
Sravan Kumar Kairam8e15ed92018-07-05 19:00:13 +05304501 qdf_spin_lock_bh(&soc->ast_lock);
4502 if (peer->self_ast_entry) {
4503 dp_peer_del_ast(soc, peer->self_ast_entry);
4504 peer->self_ast_entry = NULL;
4505 }
4506 qdf_spin_unlock_bh(&soc->ast_lock);
4507
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004508 TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
4509 if (tmppeer == peer) {
4510 found = 1;
4511 break;
4512 }
4513 }
4514 if (found) {
4515 TAILQ_REMOVE(&peer->vdev->peer_list, peer,
4516 peer_list_elem);
4517 } else {
4518 /*Ignoring the remove operation as peer not found*/
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05304519 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
Jeff Johnson3f217e22017-09-18 10:13:35 -07004520 "peer %pK not found in vdev (%pK)->peer_list:%pK",
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004521 peer, vdev, &peer->vdev->peer_list);
4522 }
4523
Krishna Kumaar Natarajand684ba22017-01-25 15:48:43 -08004524 /* cleanup the peer data */
4525 dp_peer_cleanup(vdev, peer);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004526
4527 /* check whether the parent vdev has no peers left */
4528 if (TAILQ_EMPTY(&vdev->peer_list)) {
4529 /*
4530 * Now that there are no references to the peer, we can
4531 * release the peer reference lock.
4532 */
4533 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4534 /*
4535 * Check if the parent vdev was waiting for its peers
4536 * to be deleted, in order for it to be deleted too.
4537 */
4538 if (vdev->delete.pending) {
4539 ol_txrx_vdev_delete_cb vdev_delete_cb =
4540 vdev->delete.callback;
4541 void *vdev_delete_context =
4542 vdev->delete.context;
4543
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05304544 QDF_TRACE(QDF_MODULE_ID_DP,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004545 QDF_TRACE_LEVEL_INFO_HIGH,
Jeff Johnson3f217e22017-09-18 10:13:35 -07004546 FL("deleting vdev object %pK (%pM)"
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05304547 " - its last peer is done"),
4548 vdev, vdev->mac_addr.raw);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004549 /* all peers are gone, go ahead and delete it */
Manoj Ekbote4a59d8d2018-02-14 10:13:27 -08004550 dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
chenguo7853b792017-12-28 20:59:12 +08004551 FLOW_TYPE_VDEV,
Manoj Ekbote4a59d8d2018-02-14 10:13:27 -08004552 vdev_id);
chenguo7853b792017-12-28 20:59:12 +08004553 dp_tx_vdev_detach(vdev);
4554 QDF_TRACE(QDF_MODULE_ID_DP,
4555 QDF_TRACE_LEVEL_INFO_HIGH,
4556 FL("deleting vdev object %pK (%pM)"),
4557 vdev, vdev->mac_addr.raw);
4558
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004559 qdf_mem_free(vdev);
Manoj Ekbote4a59d8d2018-02-14 10:13:27 -08004560 vdev = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004561 if (vdev_delete_cb)
4562 vdev_delete_cb(vdev_delete_context);
4563 }
4564 } else {
4565 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4566 }
chenguo1dead6f2018-01-08 14:51:44 +08004567
Manoj Ekbote4a59d8d2018-02-14 10:13:27 -08004568 if (vdev) {
4569 if (vdev->vap_bss_peer == peer) {
4570 vdev->vap_bss_peer = NULL;
4571 }
4572 }
4573
chenguo1dead6f2018-01-08 14:51:44 +08004574 if (soc->cdp_soc.ol_ops->peer_unref_delete) {
Akshay Kosigi1a9c6d12018-04-26 00:54:23 +05304575 soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
Manoj Ekbote4a59d8d2018-02-14 10:13:27 -08004576 vdev_id, peer->mac_addr.raw);
chenguo1dead6f2018-01-08 14:51:44 +08004577 }
Manoj Ekbote4a59d8d2018-02-14 10:13:27 -08004578
4579 if (!vdev || !vdev->vap_bss_peer) {
4580 goto free_peer;
4581 }
4582
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004583#ifdef notyet
4584 qdf_mempool_free(soc->osdev, soc->mempool_ol_ath_peer, peer);
4585#else
Tallapragada Kalyan4f894922018-01-03 14:26:28 +05304586 bss_peer = vdev->vap_bss_peer;
4587 DP_UPDATE_STATS(bss_peer, peer);
4588
4589free_peer:
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004590 qdf_mem_free(peer);
Manoj Ekbote4a59d8d2018-02-14 10:13:27 -08004591
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004592#endif
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004593 } else {
4594 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4595 }
4596}
4597
4598/*
4599 * dp_peer_detach_wifi3() – Detach txrx peer
Naveen Rawat761329b2017-09-19 10:30:11 -07004600 * @peer_handle: Datapath peer handle
4601 * @bitmap: bitmap indicating special handling of request.
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004602 *
4603 */
Naveen Rawat761329b2017-09-19 10:30:11 -07004604static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004605{
4606 struct dp_peer *peer = (struct dp_peer *)peer_handle;
4607
4608 /* redirect the peer's rx delivery function to point to a
4609 * discard func
4610 */
Chaithanya Garrepalli0323f802018-03-14 17:45:21 +05304611
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004612 peer->rx_opt_proc = dp_rx_discard;
Akshay Kosigi78eced82018-05-14 14:53:48 +05304613 peer->ctrl_peer = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004614
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05304615 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
Jeff Johnson3f217e22017-09-18 10:13:35 -07004616 FL("peer %pK (%pM)"), peer, peer->mac_addr.raw);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004617
Krishna Kumaar Natarajan604fe162017-01-28 18:37:01 -08004618 dp_local_peer_id_free(peer->vdev->pdev, peer);
Krishna Kumaar Natarajan604fe162017-01-28 18:37:01 -08004619 qdf_spinlock_destroy(&peer->peer_info_lock);
4620
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004621 /*
4622 * Remove the reference added during peer_attach.
4623 * The peer will still be left allocated until the
4624 * PEER_UNMAP message arrives to remove the other
4625 * reference, added by the PEER_MAP message.
4626 */
4627 dp_peer_unref_delete(peer_handle);
Leo Chang5ea93a42016-11-03 12:39:49 -07004628}
4629
4630/*
4631 * dp_get_vdev_mac_addr_wifi3() – Detach txrx peer
4632 * @peer_handle: Datapath peer handle
4633 *
4634 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004635static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
Leo Chang5ea93a42016-11-03 12:39:49 -07004636{
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004637 struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
Leo Chang5ea93a42016-11-03 12:39:49 -07004638 return vdev->mac_addr.raw;
4639}
4640
4641/*
Karunakar Dasinenica792542017-01-16 10:08:58 -08004642 * dp_vdev_set_wds() - Enable per packet stats
4643 * @vdev_handle: DP VDEV handle
4644 * @val: value
4645 *
4646 * Return: none
4647 */
4648static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
4649{
4650 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4651
4652 vdev->wds_enabled = val;
4653 return 0;
4654}
4655
4656/*
Leo Chang5ea93a42016-11-03 12:39:49 -07004657 * dp_get_vdev_from_vdev_id_wifi3() – Detach txrx peer
4658 * @peer_handle: Datapath peer handle
4659 *
4660 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004661static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
4662 uint8_t vdev_id)
Leo Chang5ea93a42016-11-03 12:39:49 -07004663{
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004664 struct dp_pdev *pdev = (struct dp_pdev *)dev;
Leo Chang5ea93a42016-11-03 12:39:49 -07004665 struct dp_vdev *vdev = NULL;
4666
4667 if (qdf_unlikely(!pdev))
4668 return NULL;
4669
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05304670 qdf_spin_lock_bh(&pdev->vdev_list_lock);
Leo Chang5ea93a42016-11-03 12:39:49 -07004671 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4672 if (vdev->vdev_id == vdev_id)
4673 break;
4674 }
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05304675 qdf_spin_unlock_bh(&pdev->vdev_list_lock);
Leo Chang5ea93a42016-11-03 12:39:49 -07004676
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004677 return (struct cdp_vdev *)vdev;
Leo Chang5ea93a42016-11-03 12:39:49 -07004678}
4679
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004680static int dp_get_opmode(struct cdp_vdev *vdev_handle)
Leo Chang5ea93a42016-11-03 12:39:49 -07004681{
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004682 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
Leo Chang5ea93a42016-11-03 12:39:49 -07004683
4684 return vdev->opmode;
4685}
4686
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004687static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
Leo Chang5ea93a42016-11-03 12:39:49 -07004688{
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004689 struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
Leo Chang5ea93a42016-11-03 12:39:49 -07004690 struct dp_pdev *pdev = vdev->pdev;
4691
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004692 return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
Leo Chang5ea93a42016-11-03 12:39:49 -07004693}
phadiman7821bf82018-02-06 16:03:54 +05304694
Kai Chen6eca1a62017-01-12 10:17:53 -08004695/**
sumedh baikady84613b02017-09-19 16:36:14 -07004696 * dp_reset_monitor_mode() - Disable monitor mode
4697 * @pdev_handle: Datapath PDEV handle
4698 *
4699 * Return: 0 on success, not 0 on failure
4700 */
4701static int dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
4702{
4703 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4704 struct htt_rx_ring_tlv_filter htt_tlv_filter;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004705 struct dp_soc *soc = pdev->soc;
sumedh baikady84613b02017-09-19 16:36:14 -07004706 uint8_t pdev_id;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004707 int mac_id;
sumedh baikady84613b02017-09-19 16:36:14 -07004708
4709 pdev_id = pdev->pdev_id;
4710 soc = pdev->soc;
4711
Sumedh Baikady12b2b2c2018-03-05 16:50:58 -08004712 qdf_spin_lock_bh(&pdev->mon_lock);
4713
sumedh baikady84613b02017-09-19 16:36:14 -07004714 qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4715
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004716 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4717 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
sumedh baikady84613b02017-09-19 16:36:14 -07004718
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004719 htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4720 pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4721 RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4722
4723 htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4724 pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4725 RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4726 }
sumedh baikady84613b02017-09-19 16:36:14 -07004727
Sumedh Baikady12b2b2c2018-03-05 16:50:58 -08004728 pdev->monitor_vdev = NULL;
4729
4730 qdf_spin_unlock_bh(&pdev->mon_lock);
4731
sumedh baikady84613b02017-09-19 16:36:14 -07004732 return 0;
4733}
phadiman7821bf82018-02-06 16:03:54 +05304734
4735/**
4736 * dp_set_nac() - set peer_nac
4737 * @peer_handle: Datapath PEER handle
4738 *
4739 * Return: void
4740 */
4741static void dp_set_nac(struct cdp_peer *peer_handle)
4742{
4743 struct dp_peer *peer = (struct dp_peer *)peer_handle;
4744
4745 peer->nac = 1;
4746}
4747
4748/**
4749 * dp_get_tx_pending() - read pending tx
4750 * @pdev_handle: Datapath PDEV handle
4751 *
4752 * Return: outstanding tx
4753 */
4754static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
4755{
4756 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4757
4758 return qdf_atomic_read(&pdev->num_tx_outstanding);
4759}
4760
4761/**
4762 * dp_get_peer_mac_from_peer_id() - get peer mac
4763 * @pdev_handle: Datapath PDEV handle
4764 * @peer_id: Peer ID
4765 * @peer_mac: MAC addr of PEER
4766 *
4767 * Return: void
4768 */
4769static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
4770 uint32_t peer_id, uint8_t *peer_mac)
4771{
4772 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4773 struct dp_peer *peer;
4774
4775 if (pdev && peer_mac) {
4776 peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
4777 if (peer && peer->mac_addr.raw) {
4778 qdf_mem_copy(peer_mac, peer->mac_addr.raw,
4779 DP_MAC_ADDR_LEN);
4780 }
4781 }
4782}
4783
sumedh baikady84613b02017-09-19 16:36:14 -07004784/**
Kai Chen6eca1a62017-01-12 10:17:53 -08004785 * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
4786 * @vdev_handle: Datapath VDEV handle
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05304787 * @smart_monitor: Flag to denote if its smart monitor mode
Kai Chen6eca1a62017-01-12 10:17:53 -08004788 *
4789 * Return: 0 on success, not 0 on failure
4790 */
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05304791static int dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
4792 uint8_t smart_monitor)
Kai Chen6eca1a62017-01-12 10:17:53 -08004793{
4794 /* Many monitor VAPs can exists in a system but only one can be up at
4795 * anytime
4796 */
4797 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4798 struct dp_pdev *pdev;
4799 struct htt_rx_ring_tlv_filter htt_tlv_filter;
4800 struct dp_soc *soc;
4801 uint8_t pdev_id;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004802 int mac_id;
Kai Chen6eca1a62017-01-12 10:17:53 -08004803
4804 qdf_assert(vdev);
4805
4806 pdev = vdev->pdev;
4807 pdev_id = pdev->pdev_id;
4808 soc = pdev->soc;
Kai Chen6eca1a62017-01-12 10:17:53 -08004809 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
Jeff Johnson3f217e22017-09-18 10:13:35 -07004810 "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
Kai Chen6eca1a62017-01-12 10:17:53 -08004811 pdev, pdev_id, soc, vdev);
4812
4813 /*Check if current pdev's monitor_vdev exists */
4814 if (pdev->monitor_vdev) {
4815 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Jeff Johnson3f217e22017-09-18 10:13:35 -07004816 "vdev=%pK\n", vdev);
Kai Chen6eca1a62017-01-12 10:17:53 -08004817 qdf_assert(vdev);
4818 }
4819
4820 pdev->monitor_vdev = vdev;
4821
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05304822 /* If smart monitor mode, do not configure monitor ring */
4823 if (smart_monitor)
4824 return QDF_STATUS_SUCCESS;
4825
nobeljd124b742017-10-16 11:59:12 -07004826 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4827 "MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]\n",
4828 pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4829 pdev->fp_ctrl_filter, pdev->fp_data_filter,
4830 pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4831 pdev->mo_data_filter);
4832
nobelj1c31fee2018-03-21 11:47:05 -07004833 qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4834
Kai Chen6eca1a62017-01-12 10:17:53 -08004835 htt_tlv_filter.mpdu_start = 1;
4836 htt_tlv_filter.msdu_start = 1;
4837 htt_tlv_filter.packet = 1;
4838 htt_tlv_filter.msdu_end = 1;
4839 htt_tlv_filter.mpdu_end = 1;
4840 htt_tlv_filter.packet_header = 1;
4841 htt_tlv_filter.attention = 1;
4842 htt_tlv_filter.ppdu_start = 0;
4843 htt_tlv_filter.ppdu_end = 0;
4844 htt_tlv_filter.ppdu_end_user_stats = 0;
4845 htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4846 htt_tlv_filter.ppdu_end_status_done = 0;
sumedh baikady308ff002017-09-18 16:24:36 -07004847 htt_tlv_filter.header_per_msdu = 1;
nobeljd124b742017-10-16 11:59:12 -07004848 htt_tlv_filter.enable_fp =
4849 (pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
Kai Chen6eca1a62017-01-12 10:17:53 -08004850 htt_tlv_filter.enable_md = 0;
nobeljd124b742017-10-16 11:59:12 -07004851 htt_tlv_filter.enable_mo =
4852 (pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4853 htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4854 htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4855 htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4856 htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4857 htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4858 htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
Kai Chen6eca1a62017-01-12 10:17:53 -08004859
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004860 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4861 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4862
4863 htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4864 pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4865 RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4866 }
Kai Chen6eca1a62017-01-12 10:17:53 -08004867
nobelj1c31fee2018-03-21 11:47:05 -07004868 qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4869
Kai Chen6eca1a62017-01-12 10:17:53 -08004870 htt_tlv_filter.mpdu_start = 1;
nobelj1c31fee2018-03-21 11:47:05 -07004871 htt_tlv_filter.msdu_start = 0;
Kai Chen6eca1a62017-01-12 10:17:53 -08004872 htt_tlv_filter.packet = 0;
nobelj1c31fee2018-03-21 11:47:05 -07004873 htt_tlv_filter.msdu_end = 0;
4874 htt_tlv_filter.mpdu_end = 0;
4875 htt_tlv_filter.attention = 0;
Kai Chen6eca1a62017-01-12 10:17:53 -08004876 htt_tlv_filter.ppdu_start = 1;
4877 htt_tlv_filter.ppdu_end = 1;
4878 htt_tlv_filter.ppdu_end_user_stats = 1;
4879 htt_tlv_filter.ppdu_end_user_stats_ext = 1;
4880 htt_tlv_filter.ppdu_end_status_done = 1;
nobelj1c31fee2018-03-21 11:47:05 -07004881 htt_tlv_filter.enable_fp = 1;
Karunakar Dasineni40555682017-03-26 22:44:39 -07004882 htt_tlv_filter.enable_md = 0;
nobelj1c31fee2018-03-21 11:47:05 -07004883 htt_tlv_filter.enable_mo = 1;
4884 if (pdev->mcopy_mode) {
4885 htt_tlv_filter.packet_header = 1;
4886 }
4887 htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
4888 htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
4889 htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
4890 htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
4891 htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
4892 htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
nobeljd124b742017-10-16 11:59:12 -07004893
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004894 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
nobelj1c31fee2018-03-21 11:47:05 -07004895 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
4896 pdev->pdev_id);
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004897
4898 htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4899 pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4900 RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4901 }
nobeljd124b742017-10-16 11:59:12 -07004902
4903 return QDF_STATUS_SUCCESS;
4904}
4905
4906/**
4907 * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
4908 * @pdev_handle: Datapath PDEV handle
4909 * @filter_val: Flag to select Filter for monitor mode
4910 * Return: 0 on success, not 0 on failure
4911 */
4912static int dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
4913 struct cdp_monitor_filter *filter_val)
4914{
4915 /* Many monitor VAPs can exists in a system but only one can be up at
4916 * anytime
4917 */
4918 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4919 struct dp_vdev *vdev = pdev->monitor_vdev;
4920 struct htt_rx_ring_tlv_filter htt_tlv_filter;
4921 struct dp_soc *soc;
4922 uint8_t pdev_id;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004923 int mac_id;
nobeljd124b742017-10-16 11:59:12 -07004924
4925 pdev_id = pdev->pdev_id;
4926 soc = pdev->soc;
4927
4928 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
4929 "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
4930 pdev, pdev_id, soc, vdev);
4931
4932 /*Check if current pdev's monitor_vdev exists */
4933 if (!pdev->monitor_vdev) {
4934 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4935 "vdev=%pK\n", vdev);
4936 qdf_assert(vdev);
4937 }
4938
4939 /* update filter mode, type in pdev structure */
4940 pdev->mon_filter_mode = filter_val->mode;
4941 pdev->fp_mgmt_filter = filter_val->fp_mgmt;
4942 pdev->fp_ctrl_filter = filter_val->fp_ctrl;
4943 pdev->fp_data_filter = filter_val->fp_data;
4944 pdev->mo_mgmt_filter = filter_val->mo_mgmt;
4945 pdev->mo_ctrl_filter = filter_val->mo_ctrl;
4946 pdev->mo_data_filter = filter_val->mo_data;
4947
4948 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4949 "MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]\n",
4950 pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4951 pdev->fp_ctrl_filter, pdev->fp_data_filter,
4952 pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4953 pdev->mo_data_filter);
4954
4955 qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4956
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004957 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4958 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
nobeljd124b742017-10-16 11:59:12 -07004959
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004960 htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4961 pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4962 RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4963
4964 htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4965 pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4966 RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4967 }
nobeljd124b742017-10-16 11:59:12 -07004968
4969 htt_tlv_filter.mpdu_start = 1;
4970 htt_tlv_filter.msdu_start = 1;
4971 htt_tlv_filter.packet = 1;
4972 htt_tlv_filter.msdu_end = 1;
4973 htt_tlv_filter.mpdu_end = 1;
4974 htt_tlv_filter.packet_header = 1;
4975 htt_tlv_filter.attention = 1;
4976 htt_tlv_filter.ppdu_start = 0;
4977 htt_tlv_filter.ppdu_end = 0;
4978 htt_tlv_filter.ppdu_end_user_stats = 0;
4979 htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4980 htt_tlv_filter.ppdu_end_status_done = 0;
4981 htt_tlv_filter.header_per_msdu = 1;
4982 htt_tlv_filter.enable_fp =
4983 (pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4984 htt_tlv_filter.enable_md = 0;
4985 htt_tlv_filter.enable_mo =
4986 (pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4987 htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4988 htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4989 htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4990 htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4991 htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4992 htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4993
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004994 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4995 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4996
4997 htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4998 pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4999 RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
5000 }
nobeljd124b742017-10-16 11:59:12 -07005001
nobelj1c31fee2018-03-21 11:47:05 -07005002 qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5003
nobeljd124b742017-10-16 11:59:12 -07005004 htt_tlv_filter.mpdu_start = 1;
nobelj1c31fee2018-03-21 11:47:05 -07005005 htt_tlv_filter.msdu_start = 0;
nobeljd124b742017-10-16 11:59:12 -07005006 htt_tlv_filter.packet = 0;
nobelj1c31fee2018-03-21 11:47:05 -07005007 htt_tlv_filter.msdu_end = 0;
5008 htt_tlv_filter.mpdu_end = 0;
5009 htt_tlv_filter.attention = 0;
nobeljd124b742017-10-16 11:59:12 -07005010 htt_tlv_filter.ppdu_start = 1;
5011 htt_tlv_filter.ppdu_end = 1;
5012 htt_tlv_filter.ppdu_end_user_stats = 1;
5013 htt_tlv_filter.ppdu_end_user_stats_ext = 1;
5014 htt_tlv_filter.ppdu_end_status_done = 1;
nobelj1c31fee2018-03-21 11:47:05 -07005015 htt_tlv_filter.enable_fp = 1;
nobeljd124b742017-10-16 11:59:12 -07005016 htt_tlv_filter.enable_md = 0;
nobelj1c31fee2018-03-21 11:47:05 -07005017 htt_tlv_filter.enable_mo = 1;
5018 if (pdev->mcopy_mode) {
5019 htt_tlv_filter.packet_header = 1;
5020 }
5021 htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
5022 htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
5023 htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
5024 htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
5025 htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
5026 htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
Karunakar Dasineni40555682017-03-26 22:44:39 -07005027
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08005028 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
nobelj1c31fee2018-03-21 11:47:05 -07005029 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
5030 pdev->pdev_id);
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08005031
5032 htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5033 pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5034 RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5035 }
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05305036
Kai Chen6eca1a62017-01-12 10:17:53 -08005037 return QDF_STATUS_SUCCESS;
5038}
Leo Chang5ea93a42016-11-03 12:39:49 -07005039
nobeljc8eb4d62018-01-04 14:29:32 -08005040/**
phadiman7821bf82018-02-06 16:03:54 +05305041 * dp_get_pdev_id_frm_pdev() - get pdev_id
5042 * @pdev_handle: Datapath PDEV handle
5043 *
5044 * Return: pdev_id
5045 */
5046static
5047uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
5048{
5049 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5050
5051 return pdev->pdev_id;
5052}
5053
5054/**
nobeljc8eb4d62018-01-04 14:29:32 -08005055 * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
5056 * @vdev_handle: Datapath VDEV handle
5057 * Return: true on ucast filter flag set
5058 */
5059static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
5060{
5061 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5062 struct dp_pdev *pdev;
5063
5064 pdev = vdev->pdev;
5065
5066 if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
5067 (pdev->mo_data_filter & FILTER_DATA_UCAST))
5068 return true;
5069
5070 return false;
5071}
5072
5073/**
5074 * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
5075 * @vdev_handle: Datapath VDEV handle
5076 * Return: true on mcast filter flag set
5077 */
5078static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
5079{
5080 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5081 struct dp_pdev *pdev;
5082
5083 pdev = vdev->pdev;
5084
5085 if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
5086 (pdev->mo_data_filter & FILTER_DATA_MCAST))
5087 return true;
5088
5089 return false;
5090}
5091
5092/**
5093 * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
5094 * @vdev_handle: Datapath VDEV handle
5095 * Return: true on non data filter flag set
5096 */
5097static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
5098{
5099 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5100 struct dp_pdev *pdev;
5101
5102 pdev = vdev->pdev;
5103
5104 if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
5105 (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
5106 if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
5107 (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
5108 return true;
5109 }
5110 }
5111
5112 return false;
5113}
5114
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +05305115#ifdef MESH_MODE_SUPPORT
Venkateswara Swamy Bandaruec4f8e62017-03-07 11:04:28 +05305116void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +05305117{
5118 struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5119
5120 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
Venkateswara Swamy Bandaru5caa83a2017-03-06 11:33:15 +05305121 FL("val %d"), val);
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +05305122 vdev->mesh_vdev = val;
5123}
Venkateswara Swamy Bandaruec4f8e62017-03-07 11:04:28 +05305124
5125/*
5126 * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
5127 * @vdev_hdl: virtual device object
5128 * @val: value to be set
5129 *
5130 * Return: void
5131 */
5132void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
5133{
5134 struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5135
5136 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5137 FL("val %d"), val);
5138 vdev->mesh_rx_filter = val;
5139}
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +05305140#endif
5141
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05305142/*
5143 * dp_aggregate_pdev_ctrl_frames_stats()- function to agreegate peer stats
Jeff Johnson2d821eb2018-05-06 16:25:49 -07005144 * Current scope is bar received count
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05305145 *
5146 * @pdev_handle: DP_PDEV handle
5147 *
5148 * Return: void
5149 */
Pratik Gandhi81fe0622018-02-23 12:36:10 +05305150#define STATS_PROC_TIMEOUT (HZ/1000)
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05305151
5152static void
5153dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
5154{
5155 struct dp_vdev *vdev;
5156 struct dp_peer *peer;
5157 uint32_t waitcnt;
5158
5159 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5160 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5161 if (!peer) {
5162 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5163 FL("DP Invalid Peer refernce"));
5164 return;
5165 }
Pratik Gandhi81fe0622018-02-23 12:36:10 +05305166
5167 if (peer->delete_in_progress) {
5168 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5169 FL("DP Peer deletion in progress"));
5170 continue;
5171 }
5172
5173 qdf_atomic_inc(&peer->ref_cnt);
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05305174 waitcnt = 0;
5175 dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
Prathyusha Guduri184b6402018-02-04 23:01:49 +05305176 while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05305177 && waitcnt < 10) {
5178 schedule_timeout_interruptible(
5179 STATS_PROC_TIMEOUT);
5180 waitcnt++;
5181 }
Prathyusha Guduri184b6402018-02-04 23:01:49 +05305182 qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
Pratik Gandhi81fe0622018-02-23 12:36:10 +05305183 dp_peer_unref_delete(peer);
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05305184 }
5185 }
5186}
5187
5188/**
5189 * dp_rx_bar_stats_cb(): BAR received stats callback
5190 * @soc: SOC handle
5191 * @cb_ctxt: Call back context
5192 * @reo_status: Reo status
5193 *
5194 * return: void
5195 */
5196void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
5197 union hal_reo_status *reo_status)
5198{
5199 struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
5200 struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
5201
5202 if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
5203 DP_TRACE_STATS(FATAL, "REO stats failure %d \n",
5204 queue_status->header.status);
Prathyusha Guduri184b6402018-02-04 23:01:49 +05305205 qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05305206 return;
5207 }
5208
5209 pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
Prathyusha Guduri184b6402018-02-04 23:01:49 +05305210 qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05305211
5212}
5213
Ishank Jain1e7401c2017-02-17 15:38:39 +05305214/**
5215 * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
5216 * @vdev: DP VDEV handle
5217 *
5218 * return: void
5219 */
5220void dp_aggregate_vdev_stats(struct dp_vdev *vdev)
5221{
5222 struct dp_peer *peer = NULL;
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305223 struct dp_soc *soc = vdev->pdev->soc;
Ishank Jain1e7401c2017-02-17 15:38:39 +05305224
5225 qdf_mem_set(&(vdev->stats.tx), sizeof(vdev->stats.tx), 0x0);
5226 qdf_mem_set(&(vdev->stats.rx), sizeof(vdev->stats.rx), 0x0);
5227
Tallapragada Kalyan4f894922018-01-03 14:26:28 +05305228 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
5229 DP_UPDATE_STATS(vdev, peer);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305230
psimhafb49db32017-08-31 15:33:33 -07005231 if (soc->cdp_soc.ol_ops->update_dp_stats)
Akshay Kosigi1a9c6d12018-04-26 00:54:23 +05305232 soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->ctrl_pdev,
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305233 &vdev->stats, (uint16_t) vdev->vdev_id,
5234 UPDATE_VDEV_STATS);
Venkata Sharath Chandra Manchala3e8add82017-07-10 11:59:54 -07005235
Ishank Jain1e7401c2017-02-17 15:38:39 +05305236}
5237
/**
 * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
 * @pdev: DP PDEV handle
 *
 * Walks every vdev attached to the pdev (under the vdev list lock),
 * rolls peer stats up into each vdev, accumulates vdev counters into
 * the pdev, and finally mirrors the result to the control-plane
 * update_dp_stats callback if one is registered.
 *
 * return: void
 */
static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
{
	struct dp_vdev *vdev = NULL;
	struct dp_soc *soc = pdev->soc;

	/* Reset accumulators first; everything below re-adds from vdevs */
	qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
	qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
	qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);

	/* Lock out vdev attach/detach while walking the list */
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

		/* Fold peer-level stats into the vdev before reading it */
		dp_aggregate_vdev_stats(vdev);
		DP_UPDATE_STATS(pdev, vdev);

		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.nawds_mcast);

		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw.raw_pkt);
		DP_STATS_AGGR(pdev, vdev, tx_i.raw.dma_map_error);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt);
		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host.num);
		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target);
		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_host.num);
		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_target);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.sg.sg_pkt);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.mcast_en.mcast_pkt);
		DP_STATS_AGGR(pdev, vdev,
				tx_i.mcast_en.dropped_map_error);
		DP_STATS_AGGR(pdev, vdev,
				tx_i.mcast_en.dropped_self_mac);
		DP_STATS_AGGR(pdev, vdev,
				tx_i.mcast_en.dropped_send_fail);
		DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast);
		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.dma_error);
		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.ring_full);
		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.enqueue_fail);
		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.desc_na.num);
		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.res_full);
		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified);
		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified_raw);
		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.exception_fw);
		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.completion_fw);

		/* Derived total: sum of the individual tx-ingress drop causes */
		pdev->stats.tx_i.dropped.dropped_pkt.num =
			pdev->stats.tx_i.dropped.dma_error +
			pdev->stats.tx_i.dropped.ring_full +
			pdev->stats.tx_i.dropped.enqueue_fail +
			pdev->stats.tx_i.dropped.desc_na.num +
			pdev->stats.tx_i.dropped.res_full;

		/* NOTE(review): plain assignments, not aggregation — the pdev
		 * retains only the value from the LAST vdev iterated. Confirm
		 * this "last writer wins" behavior is intentional.
		 */
		pdev->stats.tx.last_ack_rssi =
			vdev->stats.tx.last_ack_rssi;
		pdev->stats.tx_i.tso.num_seg =
			vdev->stats.tx_i.tso.num_seg;
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	/* Push the consolidated snapshot to the control plane, if hooked */
	if (soc->cdp_soc.ol_ops->update_dp_stats)
		soc->cdp_soc.ol_ops->update_dp_stats(pdev->ctrl_pdev,
			&pdev->stats, pdev->pdev_id, UPDATE_PDEV_STATS);

}
5309
5310/**
Chaithanya Garrepalli2faa46f2018-04-09 12:34:20 +05305311 * dp_vdev_getstats() - get vdev packet level stats
5312 * @vdev_handle: Datapath VDEV handle
5313 * @stats: cdp network device stats structure
5314 *
5315 * Return: void
5316 */
5317static void dp_vdev_getstats(void *vdev_handle,
5318 struct cdp_dev_stats *stats)
5319{
5320 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5321
5322 dp_aggregate_vdev_stats(vdev);
5323}
5324
5325
5326/**
Anish Natarajf12b0a32018-03-14 14:27:13 +05305327 * dp_pdev_getstats() - get pdev packet level stats
5328 * @pdev_handle: Datapath PDEV handle
5329 * @stats: cdp network device stats structure
5330 *
5331 * Return: void
5332 */
Chaithanya Garrepalli2faa46f2018-04-09 12:34:20 +05305333static void dp_pdev_getstats(void *pdev_handle,
Anish Natarajf12b0a32018-03-14 14:27:13 +05305334 struct cdp_dev_stats *stats)
5335{
5336 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5337
5338 dp_aggregate_pdev_stats(pdev);
5339
5340 stats->tx_packets = pdev->stats.tx_i.rcvd.num;
5341 stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
5342
5343 stats->tx_errors = pdev->stats.tx.tx_failed +
5344 pdev->stats.tx_i.dropped.dropped_pkt.num;
5345 stats->tx_dropped = stats->tx_errors;
5346
5347 stats->rx_packets = pdev->stats.rx.unicast.num +
Pamidipati, Vijay3b0f9162018-04-16 19:06:20 +05305348 pdev->stats.rx.multicast.num +
5349 pdev->stats.rx.bcast.num;
Anish Natarajf12b0a32018-03-14 14:27:13 +05305350 stats->rx_bytes = pdev->stats.rx.unicast.bytes +
Pamidipati, Vijay3b0f9162018-04-16 19:06:20 +05305351 pdev->stats.rx.multicast.bytes +
5352 pdev->stats.rx.bcast.bytes;
Anish Natarajf12b0a32018-03-14 14:27:13 +05305353}
5354
5355/**
Chaithanya Garrepalli2faa46f2018-04-09 12:34:20 +05305356 * dp_get_device_stats() - get interface level packet stats
5357 * @handle: device handle
5358 * @stats: cdp network device stats structure
5359 * @type: device type pdev/vdev
5360 *
5361 * Return: void
5362 */
5363static void dp_get_device_stats(void *handle,
5364 struct cdp_dev_stats *stats, uint8_t type)
5365{
5366 switch (type) {
5367 case UPDATE_VDEV_STATS:
5368 dp_vdev_getstats(handle, stats);
5369 break;
5370 case UPDATE_PDEV_STATS:
5371 dp_pdev_getstats(handle, stats);
5372 break;
5373 default:
5374 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5375 "apstats cannot be updated for this input "
5376 "type %d\n", type);
5377 break;
5378 }
5379
5380}
5381
5382
/**
 * dp_print_pdev_tx_stats(): Print Pdev level TX stats
 * @pdev: DP_PDEV Handle
 *
 * Dumps the pdev's TX-ingress (host side), completion, drop and
 * offload (SG/TSO/mcast-enhancement/raw/mesh) counters, plus the
 * per-tag PPDU stats counters.
 *
 * Return:void
 */
static inline void
dp_print_pdev_tx_stats(struct dp_pdev *pdev)
{
	uint8_t index = 0;
	DP_PRINT_STATS("PDEV Tx Stats:\n");
	DP_PRINT_STATS("Received From Stack:");
	DP_PRINT_STATS("	Packets = %d",
			pdev->stats.tx_i.rcvd.num);
	DP_PRINT_STATS("	Bytes = %llu",
			pdev->stats.tx_i.rcvd.bytes);
	DP_PRINT_STATS("Processed:");
	DP_PRINT_STATS("	Packets = %d",
			pdev->stats.tx_i.processed.num);
	DP_PRINT_STATS("	Bytes = %llu",
			pdev->stats.tx_i.processed.bytes);
	DP_PRINT_STATS("Total Completions:");
	DP_PRINT_STATS("	Packets = %u",
			pdev->stats.tx.comp_pkt.num);
	DP_PRINT_STATS("	Bytes = %llu",
			pdev->stats.tx.comp_pkt.bytes);
	DP_PRINT_STATS("Successful Completions:");
	DP_PRINT_STATS("	Packets = %u",
			pdev->stats.tx.tx_success.num);
	DP_PRINT_STATS("	Bytes = %llu",
			pdev->stats.tx.tx_success.bytes);
	DP_PRINT_STATS("Dropped:");
	DP_PRINT_STATS("	Total = %d",
			pdev->stats.tx_i.dropped.dropped_pkt.num);
	DP_PRINT_STATS("	Dma_map_error = %d",
			pdev->stats.tx_i.dropped.dma_error);
	DP_PRINT_STATS("	Ring Full = %d",
			pdev->stats.tx_i.dropped.ring_full);
	DP_PRINT_STATS("	Descriptor Not available = %d",
			pdev->stats.tx_i.dropped.desc_na.num);
	DP_PRINT_STATS("	HW enqueue failed= %d",
			pdev->stats.tx_i.dropped.enqueue_fail);
	DP_PRINT_STATS("	Resources Full = %d",
			pdev->stats.tx_i.dropped.res_full);
	/* fw_rem* counters come from firmware-initiated descriptor release */
	DP_PRINT_STATS("	FW removed = %d",
			pdev->stats.tx.dropped.fw_rem);
	DP_PRINT_STATS("	FW removed transmitted = %d",
			pdev->stats.tx.dropped.fw_rem_tx);
	DP_PRINT_STATS("	FW removed untransmitted = %d",
			pdev->stats.tx.dropped.fw_rem_notx);
	DP_PRINT_STATS("	FW removed untransmitted fw_reason1 = %d",
			pdev->stats.tx.dropped.fw_reason1);
	DP_PRINT_STATS("	FW removed untransmitted fw_reason2 = %d",
			pdev->stats.tx.dropped.fw_reason2);
	DP_PRINT_STATS("	FW removed untransmitted fw_reason3 = %d",
			pdev->stats.tx.dropped.fw_reason3);
	DP_PRINT_STATS("	Aged Out from msdu/mpdu queues = %d",
			pdev->stats.tx.dropped.age_out);
	DP_PRINT_STATS("	Multicast:");
	DP_PRINT_STATS("	Packets: %u",
			pdev->stats.tx.mcast.num);
	DP_PRINT_STATS("	Bytes: %llu",
			pdev->stats.tx.mcast.bytes);
	DP_PRINT_STATS("Scatter Gather:");
	DP_PRINT_STATS("	Packets = %d",
			pdev->stats.tx_i.sg.sg_pkt.num);
	DP_PRINT_STATS("	Bytes = %llu",
			pdev->stats.tx_i.sg.sg_pkt.bytes);
	DP_PRINT_STATS("	Dropped By Host = %d",
			pdev->stats.tx_i.sg.dropped_host.num);
	DP_PRINT_STATS("	Dropped By Target = %d",
			pdev->stats.tx_i.sg.dropped_target);
	DP_PRINT_STATS("TSO:");
	DP_PRINT_STATS("	Number of Segments = %d",
			pdev->stats.tx_i.tso.num_seg);
	DP_PRINT_STATS("	Packets = %d",
			pdev->stats.tx_i.tso.tso_pkt.num);
	DP_PRINT_STATS("	Bytes = %llu",
			pdev->stats.tx_i.tso.tso_pkt.bytes);
	DP_PRINT_STATS("	Dropped By Host = %d",
			pdev->stats.tx_i.tso.dropped_host.num);
	DP_PRINT_STATS("Mcast Enhancement:");
	DP_PRINT_STATS("	Packets = %d",
			pdev->stats.tx_i.mcast_en.mcast_pkt.num);
	DP_PRINT_STATS("	Bytes = %llu",
			pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
	DP_PRINT_STATS("	Dropped: Map Errors = %d",
			pdev->stats.tx_i.mcast_en.dropped_map_error);
	DP_PRINT_STATS("	Dropped: Self Mac = %d",
			pdev->stats.tx_i.mcast_en.dropped_self_mac);
	DP_PRINT_STATS("	Dropped: Send Fail = %d",
			pdev->stats.tx_i.mcast_en.dropped_send_fail);
	DP_PRINT_STATS("	Unicast sent = %d",
			pdev->stats.tx_i.mcast_en.ucast);
	DP_PRINT_STATS("Raw:");
	DP_PRINT_STATS("	Packets = %d",
			pdev->stats.tx_i.raw.raw_pkt.num);
	DP_PRINT_STATS("	Bytes = %llu",
			pdev->stats.tx_i.raw.raw_pkt.bytes);
	DP_PRINT_STATS("	DMA map error = %d",
			pdev->stats.tx_i.raw.dma_map_error);
	DP_PRINT_STATS("Reinjected:");
	DP_PRINT_STATS("	Packets = %d",
			pdev->stats.tx_i.reinject_pkts.num);
	DP_PRINT_STATS("	Bytes = %llu\n",
			pdev->stats.tx_i.reinject_pkts.bytes);
	DP_PRINT_STATS("Inspected:");
	DP_PRINT_STATS("	Packets = %d",
			pdev->stats.tx_i.inspect_pkts.num);
	DP_PRINT_STATS("	Bytes = %llu",
			pdev->stats.tx_i.inspect_pkts.bytes);
	DP_PRINT_STATS("Nawds Multicast:");
	DP_PRINT_STATS("	Packets = %d",
			pdev->stats.tx_i.nawds_mcast.num);
	DP_PRINT_STATS("	Bytes = %llu",
			pdev->stats.tx_i.nawds_mcast.bytes);
	DP_PRINT_STATS("CCE Classified:");
	DP_PRINT_STATS("	CCE Classified Packets: %u",
			pdev->stats.tx_i.cce_classified);
	DP_PRINT_STATS("	RAW CCE Classified Packets: %u",
			pdev->stats.tx_i.cce_classified_raw);
	DP_PRINT_STATS("Mesh stats:");
	DP_PRINT_STATS("	frames to firmware: %u",
			pdev->stats.tx_i.mesh.exception_fw);
	DP_PRINT_STATS("	completions from fw: %u",
			pdev->stats.tx_i.mesh.completion_fw);
	/* One 64-bit counter per PPDU stats TLV tag received from target */
	DP_PRINT_STATS("PPDU stats counter");
	for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
		DP_PRINT_STATS("	Tag[%d] = %llu", index,
				pdev->stats.ppdu_stats_counter[index]);
	}
}
5515
/**
 * dp_print_pdev_rx_stats(): Print Pdev level RX stats
 * @pdev: DP_PDEV Handle
 *
 * Dumps per-REO-ring receive counts, buffer replenish stats, drop and
 * error counters, and the BAR-received count (refreshed from the
 * control-frame aggregation helper just before printing).
 *
 * Return: void
 */
static inline void
dp_print_pdev_rx_stats(struct dp_pdev *pdev)
{
	DP_PRINT_STATS("PDEV Rx Stats:\n");
	/* Four REO destination rings are printed; assumes rcvd_reo[] has
	 * at least 4 entries — indices are hard-coded below. */
	DP_PRINT_STATS("Received From HW (Per Rx Ring):");
	DP_PRINT_STATS("	Packets = %d %d %d %d",
			pdev->stats.rx.rcvd_reo[0].num,
			pdev->stats.rx.rcvd_reo[1].num,
			pdev->stats.rx.rcvd_reo[2].num,
			pdev->stats.rx.rcvd_reo[3].num);
	DP_PRINT_STATS("	Bytes = %llu %llu %llu %llu",
			pdev->stats.rx.rcvd_reo[0].bytes,
			pdev->stats.rx.rcvd_reo[1].bytes,
			pdev->stats.rx.rcvd_reo[2].bytes,
			pdev->stats.rx.rcvd_reo[3].bytes);
	DP_PRINT_STATS("Replenished:");
	DP_PRINT_STATS("	Packets = %d",
			pdev->stats.replenish.pkts.num);
	DP_PRINT_STATS("	Bytes = %llu",
			pdev->stats.replenish.pkts.bytes);
	DP_PRINT_STATS("	Buffers Added To Freelist = %d",
			pdev->stats.buf_freelist);
	DP_PRINT_STATS("	Low threshold intr = %d",
			pdev->stats.replenish.low_thresh_intrs);
	DP_PRINT_STATS("Dropped:");
	DP_PRINT_STATS("	msdu_not_done = %d",
			pdev->stats.dropped.msdu_not_done);
	DP_PRINT_STATS("	mon_rx_drop = %d",
			pdev->stats.dropped.mon_rx_drop);
	DP_PRINT_STATS("Sent To Stack:");
	DP_PRINT_STATS("	Packets = %d",
			pdev->stats.rx.to_stack.num);
	DP_PRINT_STATS("	Bytes = %llu",
			pdev->stats.rx.to_stack.bytes);
	DP_PRINT_STATS("Multicast/Broadcast:");
	DP_PRINT_STATS("	Packets = %d",
			(pdev->stats.rx.multicast.num +
			pdev->stats.rx.bcast.num));
	DP_PRINT_STATS("	Bytes = %llu",
			(pdev->stats.rx.multicast.bytes +
			pdev->stats.rx.bcast.bytes));
	DP_PRINT_STATS("Errors:");
	DP_PRINT_STATS("	Rxdma Ring Un-inititalized = %d",
			pdev->stats.replenish.rxdma_err);
	DP_PRINT_STATS("	Desc Alloc Failed: = %d",
			pdev->stats.err.desc_alloc_fail);
	DP_PRINT_STATS("	IP checksum error = %d",
			pdev->stats.err.ip_csum_err);
	DP_PRINT_STATS("	TCP/UDP checksum error = %d",
			pdev->stats.err.tcp_udp_csum_err);

	/* Get bar_recv_cnt: refresh the counter before printing it */
	dp_aggregate_pdev_ctrl_frames_stats(pdev);
	DP_PRINT_STATS("BAR Received Count: = %d",
			pdev->stats.rx.bar_recv_cnt);

}
5579
5580/**
Kai Chen783e0382018-01-25 16:29:08 -08005581 * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats
5582 * @pdev: DP_PDEV Handle
5583 *
5584 * Return: void
5585 */
5586static inline void
5587dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
5588{
5589 struct cdp_pdev_mon_stats *rx_mon_stats;
5590
5591 rx_mon_stats = &pdev->rx_mon_stats;
5592
5593 DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
5594
5595 dp_rx_mon_print_dbg_ppdu_stats(rx_mon_stats);
5596
5597 DP_PRINT_STATS("status_ppdu_done_cnt = %d",
5598 rx_mon_stats->status_ppdu_done);
5599 DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
5600 rx_mon_stats->dest_ppdu_done);
5601 DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
5602 rx_mon_stats->dest_mpdu_done);
Karunakar Dasinenibb7848e2018-05-07 15:09:46 -07005603 DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
5604 rx_mon_stats->dest_mpdu_drop);
Kai Chen783e0382018-01-25 16:29:08 -08005605}
5606
5607/**
Ishank Jain1e7401c2017-02-17 15:38:39 +05305608 * dp_print_soc_tx_stats(): Print SOC level stats
5609 * @soc DP_SOC Handle
5610 *
5611 * Return: void
5612 */
5613static inline void
5614dp_print_soc_tx_stats(struct dp_soc *soc)
5615{
Soumya Bhatdbb85302018-05-18 11:01:34 +05305616 uint8_t desc_pool_id;
5617 soc->stats.tx.desc_in_use = 0;
5618
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305619 DP_PRINT_STATS("SOC Tx Stats:\n");
Soumya Bhatdbb85302018-05-18 11:01:34 +05305620
5621 for (desc_pool_id = 0;
5622 desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5623 desc_pool_id++)
5624 soc->stats.tx.desc_in_use +=
5625 soc->tx_desc[desc_pool_id].num_allocated;
5626
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305627 DP_PRINT_STATS("Tx Descriptors In Use = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305628 soc->stats.tx.desc_in_use);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305629 DP_PRINT_STATS("Invalid peer:");
5630 DP_PRINT_STATS(" Packets = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05305631 soc->stats.tx.tx_invalid_peer.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305632 DP_PRINT_STATS(" Bytes = %llu",
Ishank Jaine73c4032017-03-16 11:48:15 +05305633 soc->stats.tx.tx_invalid_peer.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305634 DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05305635 soc->stats.tx.tcl_ring_full[0],
5636 soc->stats.tx.tcl_ring_full[1],
5637 soc->stats.tx.tcl_ring_full[2]);
5638
Ishank Jain1e7401c2017-02-17 15:38:39 +05305639}
/**
 * dp_print_soc_rx_stats: Print SOC level Rx stats
 * @soc: DP_SOC Handle
 *
 * Prints the SOC-wide RX error counters, then formats the per-code
 * RXDMA and REO error arrays into space-separated strings and prints
 * each on a single line.
 *
 * Return:void
 */
static inline void
dp_print_soc_rx_stats(struct dp_soc *soc)
{
	uint32_t i;
	char reo_error[DP_REO_ERR_LENGTH];
	char rxdma_error[DP_RXDMA_ERR_LENGTH];
	uint8_t index = 0;

	DP_PRINT_STATS("SOC Rx Stats:\n");
	DP_PRINT_STATS("Errors:\n");
	/* Decrypt failures = generic decrypt errors + TKIP MIC failures */
	DP_PRINT_STATS("Rx Decrypt Errors = %d",
			(soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
			soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
	DP_PRINT_STATS("Invalid RBM = %d",
			soc->stats.rx.err.invalid_rbm);
	DP_PRINT_STATS("Invalid Vdev = %d",
			soc->stats.rx.err.invalid_vdev);
	DP_PRINT_STATS("Invalid Pdev = %d",
			soc->stats.rx.err.invalid_pdev);
	DP_PRINT_STATS("Invalid Peer = %d",
			soc->stats.rx.err.rx_invalid_peer.num);
	DP_PRINT_STATS("HAL Ring Access Fail = %d",
			soc->stats.rx.err.hal_ring_access_fail);

	/* Build one " n0 n1 n2 ..." string; qdf_snprint returns the number
	 * of characters written so index tracks the running offset.
	 * NOTE(review): assumes DP_RXDMA_ERR_LENGTH is sized for
	 * HAL_RXDMA_ERR_MAX entries — confirm against the definitions. */
	for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
		index += qdf_snprint(&rxdma_error[index],
				DP_RXDMA_ERR_LENGTH - index,
				" %d", soc->stats.rx.err.rxdma_error[i]);
	}
	DP_PRINT_STATS("RXDMA Error (0-31):%s",
			rxdma_error);

	/* Same formatting scheme for the REO error codes */
	index = 0;
	for (i = 0; i < HAL_REO_ERR_MAX; i++) {
		index += qdf_snprint(&reo_error[index],
				DP_REO_ERR_LENGTH - index,
				" %d", soc->stats.rx.err.reo_error[i]);
	}
	DP_PRINT_STATS("REO Error(0-14):%s",
			reo_error);
}
5687
sumedh baikady72b1c712017-08-24 12:11:46 -07005688
5689/**
5690 * dp_print_ring_stat_from_hal(): Print hal level ring stats
5691 * @soc: DP_SOC handle
5692 * @srng: DP_SRNG handle
5693 * @ring_name: SRNG name
5694 *
5695 * Return: void
5696 */
5697static inline void
5698dp_print_ring_stat_from_hal(struct dp_soc *soc, struct dp_srng *srng,
5699 char *ring_name)
5700{
5701 uint32_t tailp;
5702 uint32_t headp;
5703
5704 if (srng->hal_srng != NULL) {
5705 hal_api_get_tphp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
5706 DP_PRINT_STATS("%s : Head pointer = %d Tail Pointer = %d\n",
5707 ring_name, headp, tailp);
5708 }
5709}
5710
/**
 * dp_print_ring_stats(): Print tail and head pointer
 * @pdev: DP_PDEV handle
 *
 * Walks every SOC-level and PDEV-level SRNG (REO, TCL, WBM, refill,
 * monitor and error-destination rings) and prints each ring's head and
 * tail pointers via dp_print_ring_stat_from_hal().
 *
 * Return:void
 */
static inline void
dp_print_ring_stats(struct dp_pdev *pdev)
{
	uint32_t i;
	char ring_name[STR_MAXLEN + 1];
	int mac_id;

	/* SOC-level singleton rings */
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->soc->reo_exception_ring,
			"Reo Exception Ring");
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->soc->reo_reinject_ring,
			"Reo Inject Ring");
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->soc->reo_cmd_ring,
			"Reo Command Ring");
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->soc->reo_status_ring,
			"Reo Status Ring");
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->soc->rx_rel_ring,
			"Rx Release ring");
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->soc->tcl_cmd_ring,
			"Tcl command Ring");
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->soc->tcl_status_ring,
			"Tcl Status Ring");
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->soc->wbm_desc_rel_ring,
			"Wbm Desc Rel Ring");
	/* Per-index SOC ring arrays; names carry the index */
	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
		snprintf(ring_name, STR_MAXLEN, "Reo Dest Ring %d", i);
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->soc->reo_dest_ring[i],
				ring_name);
	}
	/* NOTE(review): TCL data rings are bounded by num_tcl_data_rings
	 * but TX comp rings use the MAX_TCL_DATA_RINGS constant — the
	 * asymmetry looks deliberate (uninitialized rings are skipped by
	 * the NULL check in the helper) but confirm. */
	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
		snprintf(ring_name, STR_MAXLEN, "Tcl Data Ring %d", i);
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->soc->tcl_data_ring[i],
				ring_name);
	}
	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
		snprintf(ring_name, STR_MAXLEN, "Tx Comp Ring %d", i);
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->soc->tx_comp_ring[i],
				ring_name);
	}
	/* PDEV-level rings */
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->rx_refill_buf_ring,
			"Rx Refill Buf Ring");

	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->rx_refill_buf_ring2,
			"Second Rx Refill Buf Ring");

	/* Monitor-mode rings, one set per LMAC */
	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->rxdma_mon_buf_ring[mac_id],
				"Rxdma Mon Buf Ring");
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->rxdma_mon_dst_ring[mac_id],
				"Rxdma Mon Dst Ring");
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->rxdma_mon_status_ring[mac_id],
				"Rxdma Mon Status Ring");
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->rxdma_mon_desc_ring[mac_id],
				"Rxdma mon desc Ring");
	}

	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
		snprintf(ring_name, STR_MAXLEN, "Rxdma err dst ring %d", i);
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->rxdma_err_dst_ring[i],
				ring_name);
	}

	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
		snprintf(ring_name, STR_MAXLEN, "Rx mac buf ring %d", i);
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->rx_mac_buf_ring[i],
				ring_name);
	}
}
5803
Ishank Jain1e7401c2017-02-17 15:38:39 +05305804/**
5805 * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
5806 * @vdev: DP_VDEV handle
5807 *
5808 * Return:void
5809 */
5810static inline void
5811dp_txrx_host_stats_clr(struct dp_vdev *vdev)
5812{
5813 struct dp_peer *peer = NULL;
Anish Nataraj28490c42018-01-19 19:34:54 +05305814 struct dp_soc *soc = (struct dp_soc *)vdev->pdev->soc;
5815
Ishank Jain1e7401c2017-02-17 15:38:39 +05305816 DP_STATS_CLR(vdev->pdev);
5817 DP_STATS_CLR(vdev->pdev->soc);
5818 DP_STATS_CLR(vdev);
5819 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5820 if (!peer)
5821 return;
5822 DP_STATS_CLR(peer);
Anish Nataraj28490c42018-01-19 19:34:54 +05305823
5824 if (soc->cdp_soc.ol_ops->update_dp_stats) {
5825 soc->cdp_soc.ol_ops->update_dp_stats(
Akshay Kosigi1a9c6d12018-04-26 00:54:23 +05305826 vdev->pdev->ctrl_pdev,
Anish Nataraj28490c42018-01-19 19:34:54 +05305827 &peer->stats,
5828 peer->peer_ids[0],
5829 UPDATE_PEER_STATS);
5830 }
5831
Ishank Jain1e7401c2017-02-17 15:38:39 +05305832 }
5833
Anish Nataraj28490c42018-01-19 19:34:54 +05305834 if (soc->cdp_soc.ol_ops->update_dp_stats)
Akshay Kosigi1a9c6d12018-04-26 00:54:23 +05305835 soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->ctrl_pdev,
Anish Nataraj28490c42018-01-19 19:34:54 +05305836 &vdev->stats, (uint16_t)vdev->vdev_id,
5837 UPDATE_VDEV_STATS);
Ishank Jain1e7401c2017-02-17 15:38:39 +05305838}
5839
/**
 * dp_print_rx_rates(): Print Rx rate stats
 * @vdev: DP_VDEV handle
 *
 * Prints the parent pdev's RX rate information: per-preamble MCS
 * counts, NSS distribution, SGI counts, bandwidth counts, reception
 * types and aggregation counters. Note the stats printed are pdev
 * level, taken from vdev->pdev.
 *
 * Return:void
 */
static inline void
dp_print_rx_rates(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
	uint8_t i, mcs, pkt_type;
	uint8_t index = 0;
	char nss[DP_NSS_LENGTH];

	DP_PRINT_STATS("Rx Rate Info:\n");

	/* Per preamble (pkt_type) x MCS matrix; entries not valid for a
	 * given preamble are skipped */
	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
		index = 0;
		for (mcs = 0; mcs < MAX_MCS; mcs++) {
			if (!dp_rate_string[pkt_type][mcs].valid)
				continue;

			DP_PRINT_STATS("	%s = %d",
					dp_rate_string[pkt_type][mcs].mcs_type,
					pdev->stats.rx.pkt_type[pkt_type].
					mcs_count[mcs]);
		}

		DP_PRINT_STATS("\n");
	}

	/* Build the " n0 n1 ..." NSS distribution string.
	 * NOTE(review): assumes DP_NSS_LENGTH is sized for SS_COUNT
	 * entries — confirm against the macro definitions. */
	index = 0;
	for (i = 0; i < SS_COUNT; i++) {
		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
				" %d", pdev->stats.rx.nss[i]);
	}
	DP_PRINT_STATS("NSS(1-8) = %s",
			nss);

	DP_PRINT_STATS("SGI ="
			" 0.8us %d,"
			" 0.4us %d,"
			" 1.6us %d,"
			" 3.2us %d,",
			pdev->stats.rx.sgi_count[0],
			pdev->stats.rx.sgi_count[1],
			pdev->stats.rx.sgi_count[2],
			pdev->stats.rx.sgi_count[3]);
	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
			pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
			pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
	DP_PRINT_STATS("Reception Type ="
			" SU: %d,"
			" MU_MIMO:%d,"
			" MU_OFDMA:%d,"
			" MU_OFDMA_MIMO:%d\n",
			pdev->stats.rx.reception_type[0],
			pdev->stats.rx.reception_type[1],
			pdev->stats.rx.reception_type[2],
			pdev->stats.rx.reception_type[3]);
	DP_PRINT_STATS("Aggregation:\n");
	DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
			pdev->stats.rx.ampdu_cnt);
	DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
			pdev->stats.rx.non_ampdu_cnt);
	DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
			pdev->stats.rx.amsdu_cnt);
	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
			pdev->stats.rx.non_amsdu_cnt);
}
5910
5911/**
5912 * dp_print_tx_rates(): Print tx rates
5913 * @vdev: DP_VDEV handle
5914 *
5915 * Return:void
5916 */
5917static inline void
5918dp_print_tx_rates(struct dp_vdev *vdev)
5919{
5920 struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305921 uint8_t mcs, pkt_type;
Venkata Sharath Chandra Manchalafaa0d8b2018-04-09 14:39:43 -07005922 uint8_t index;
5923 char nss[DP_NSS_LENGTH];
5924 int nss_index;
Ishank Jain1e7401c2017-02-17 15:38:39 +05305925
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305926 DP_PRINT_STATS("Tx Rate Info:\n");
Ishank Jain1e7401c2017-02-17 15:38:39 +05305927
5928 for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5929 index = 0;
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305930 for (mcs = 0; mcs < MAX_MCS; mcs++) {
5931 if (!dp_rate_string[pkt_type][mcs].valid)
5932 continue;
5933
5934 DP_PRINT_STATS(" %s = %d",
5935 dp_rate_string[pkt_type][mcs].mcs_type,
Ishank Jain1e7401c2017-02-17 15:38:39 +05305936 pdev->stats.tx.pkt_type[pkt_type].
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305937 mcs_count[mcs]);
Ishank Jain1e7401c2017-02-17 15:38:39 +05305938 }
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305939
5940 DP_PRINT_STATS("\n");
Ishank Jain1e7401c2017-02-17 15:38:39 +05305941 }
5942
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305943 DP_PRINT_STATS("SGI ="
Ishank Jain57c42a12017-04-12 10:42:22 +05305944 " 0.8us %d"
5945 " 0.4us %d"
5946 " 1.6us %d"
5947 " 3.2us %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05305948 pdev->stats.tx.sgi_count[0],
5949 pdev->stats.tx.sgi_count[1],
5950 pdev->stats.tx.sgi_count[2],
5951 pdev->stats.tx.sgi_count[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305952
5953 DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
chenguoec849832018-04-11 19:14:06 +08005954 pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
5955 pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305956
Venkata Sharath Chandra Manchalafaa0d8b2018-04-09 14:39:43 -07005957 index = 0;
5958 for (nss_index = 0; nss_index < SS_COUNT; nss_index++) {
5959 index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
5960 " %d", pdev->stats.tx.nss[nss_index]);
5961 }
5962
5963 DP_PRINT_STATS("NSS(1-8) = %s", nss);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305964 DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
5965 DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
5966 DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
5967 DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
5968 DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
5969
5970 DP_PRINT_STATS("Aggregation:\n");
5971 DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05305972 pdev->stats.tx.amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305973 DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05305974 pdev->stats.tx.non_amsdu_cnt);
Ishank Jain1e7401c2017-02-17 15:38:39 +05305975}
5976
5977/**
5978 * dp_print_peer_stats():print peer stats
5979 * @peer: DP_PEER handle
5980 *
5981 * return void
5982 */
5983static inline void dp_print_peer_stats(struct dp_peer *peer)
5984{
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305985 uint8_t i, mcs, pkt_type;
Ishank Jain1e7401c2017-02-17 15:38:39 +05305986 uint32_t index;
5987 char nss[DP_NSS_LENGTH];
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305988 DP_PRINT_STATS("Node Tx Stats:\n");
5989 DP_PRINT_STATS("Total Packet Completions = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305990 peer->stats.tx.comp_pkt.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305991 DP_PRINT_STATS("Total Bytes Completions = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305992 peer->stats.tx.comp_pkt.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305993 DP_PRINT_STATS("Success Packets = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305994 peer->stats.tx.tx_success.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305995 DP_PRINT_STATS("Success Bytes = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305996 peer->stats.tx.tx_success.bytes);
Pranita Solankefc2ff392017-12-15 19:25:13 +05305997 DP_PRINT_STATS("Unicast Success Packets = %d",
5998 peer->stats.tx.ucast.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305999 DP_PRINT_STATS("Unicast Success Bytes = %llu",
Pranita Solankefc2ff392017-12-15 19:25:13 +05306000 peer->stats.tx.ucast.bytes);
6001 DP_PRINT_STATS("Multicast Success Packets = %d",
6002 peer->stats.tx.mcast.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306003 DP_PRINT_STATS("Multicast Success Bytes = %llu",
Pranita Solankefc2ff392017-12-15 19:25:13 +05306004 peer->stats.tx.mcast.bytes);
Pranita Solankea5a3ae72018-01-18 21:45:27 +05306005 DP_PRINT_STATS("Broadcast Success Packets = %d",
6006 peer->stats.tx.bcast.num);
6007 DP_PRINT_STATS("Broadcast Success Bytes = %llu",
6008 peer->stats.tx.bcast.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306009 DP_PRINT_STATS("Packets Failed = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306010 peer->stats.tx.tx_failed);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306011 DP_PRINT_STATS("Packets In OFDMA = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306012 peer->stats.tx.ofdma);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306013 DP_PRINT_STATS("Packets In STBC = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306014 peer->stats.tx.stbc);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306015 DP_PRINT_STATS("Packets In LDPC = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306016 peer->stats.tx.ldpc);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306017 DP_PRINT_STATS("Packet Retries = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306018 peer->stats.tx.retries);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306019 DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306020 peer->stats.tx.amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306021 DP_PRINT_STATS("Last Packet RSSI = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306022 peer->stats.tx.last_ack_rssi);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306023 DP_PRINT_STATS("Dropped At FW: Removed = %d",
6024 peer->stats.tx.dropped.fw_rem);
6025 DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
6026 peer->stats.tx.dropped.fw_rem_tx);
6027 DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
6028 peer->stats.tx.dropped.fw_rem_notx);
6029 DP_PRINT_STATS("Dropped : Age Out = %d",
6030 peer->stats.tx.dropped.age_out);
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05306031 DP_PRINT_STATS("NAWDS : ");
6032 DP_PRINT_STATS(" Nawds multicast Drop Tx Packet = %d",
6033 peer->stats.tx.nawds_mcast_drop);
6034 DP_PRINT_STATS(" Nawds multicast Tx Packet Count = %d",
6035 peer->stats.tx.nawds_mcast.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306036 DP_PRINT_STATS(" Nawds multicast Tx Packet Bytes = %llu",
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05306037 peer->stats.tx.nawds_mcast.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306038
6039 DP_PRINT_STATS("Rate Info:");
Ishank Jain1e7401c2017-02-17 15:38:39 +05306040
6041 for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
6042 index = 0;
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306043 for (mcs = 0; mcs < MAX_MCS; mcs++) {
6044 if (!dp_rate_string[pkt_type][mcs].valid)
6045 continue;
6046
6047 DP_PRINT_STATS(" %s = %d",
6048 dp_rate_string[pkt_type][mcs].mcs_type,
Ishank Jain1e7401c2017-02-17 15:38:39 +05306049 peer->stats.tx.pkt_type[pkt_type].
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306050 mcs_count[mcs]);
Ishank Jain1e7401c2017-02-17 15:38:39 +05306051 }
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306052
6053 DP_PRINT_STATS("\n");
Ishank Jain1e7401c2017-02-17 15:38:39 +05306054 }
6055
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306056 DP_PRINT_STATS("SGI = "
Ishank Jain57c42a12017-04-12 10:42:22 +05306057 " 0.8us %d"
6058 " 0.4us %d"
6059 " 1.6us %d"
6060 " 3.2us %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306061 peer->stats.tx.sgi_count[0],
6062 peer->stats.tx.sgi_count[1],
6063 peer->stats.tx.sgi_count[2],
6064 peer->stats.tx.sgi_count[3]);
Pranita Solankea5a3ae72018-01-18 21:45:27 +05306065 DP_PRINT_STATS("Excess Retries per AC ");
6066 DP_PRINT_STATS(" Best effort = %d",
6067 peer->stats.tx.excess_retries_per_ac[0]);
6068 DP_PRINT_STATS(" Background= %d",
6069 peer->stats.tx.excess_retries_per_ac[1]);
6070 DP_PRINT_STATS(" Video = %d",
6071 peer->stats.tx.excess_retries_per_ac[2]);
6072 DP_PRINT_STATS(" Voice = %d",
6073 peer->stats.tx.excess_retries_per_ac[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306074 DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
Pranita Solanked7e10ba2017-12-13 15:40:38 +05306075 peer->stats.tx.bw[2], peer->stats.tx.bw[3],
6076 peer->stats.tx.bw[4], peer->stats.tx.bw[5]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306077
Pranita Solankeed0aba62018-01-12 19:14:31 +05306078 index = 0;
6079 for (i = 0; i < SS_COUNT; i++) {
6080 index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6081 " %d", peer->stats.tx.nss[i]);
6082 }
6083 DP_PRINT_STATS("NSS(1-8) = %s",
6084 nss);
6085
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306086 DP_PRINT_STATS("Aggregation:");
6087 DP_PRINT_STATS(" Number of Msdu's Part of Amsdu = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05306088 peer->stats.tx.amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306089 DP_PRINT_STATS(" Number of Msdu's With No Msdu Level Aggregation = %d\n",
Ishank Jaine73c4032017-03-16 11:48:15 +05306090 peer->stats.tx.non_amsdu_cnt);
Ishank Jain1e7401c2017-02-17 15:38:39 +05306091
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306092 DP_PRINT_STATS("Node Rx Stats:");
6093 DP_PRINT_STATS("Packets Sent To Stack = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306094 peer->stats.rx.to_stack.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306095 DP_PRINT_STATS("Bytes Sent To Stack = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306096 peer->stats.rx.to_stack.bytes);
Ishank Jain57c42a12017-04-12 10:42:22 +05306097 for (i = 0; i < CDP_MAX_RX_RINGS; i++) {
Pranita Solankefc2ff392017-12-15 19:25:13 +05306098 DP_PRINT_STATS("Ring Id = %d", i);
6099 DP_PRINT_STATS(" Packets Received = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05306100 peer->stats.rx.rcvd_reo[i].num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306101 DP_PRINT_STATS(" Bytes Received = %llu",
Ishank Jain57c42a12017-04-12 10:42:22 +05306102 peer->stats.rx.rcvd_reo[i].bytes);
6103 }
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306104 DP_PRINT_STATS("Multicast Packets Received = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306105 peer->stats.rx.multicast.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306106 DP_PRINT_STATS("Multicast Bytes Received = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306107 peer->stats.rx.multicast.bytes);
Pranita Solankea5a3ae72018-01-18 21:45:27 +05306108 DP_PRINT_STATS("Broadcast Packets Received = %d",
6109 peer->stats.rx.bcast.num);
6110 DP_PRINT_STATS("Broadcast Bytes Received = %llu",
6111 peer->stats.rx.bcast.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306112 DP_PRINT_STATS("Intra BSS Packets Received = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05306113 peer->stats.rx.intra_bss.pkts.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306114 DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
Ishank Jain57c42a12017-04-12 10:42:22 +05306115 peer->stats.rx.intra_bss.pkts.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306116 DP_PRINT_STATS("Raw Packets Received = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306117 peer->stats.rx.raw.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306118 DP_PRINT_STATS("Raw Bytes Received = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306119 peer->stats.rx.raw.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306120 DP_PRINT_STATS("Errors: MIC Errors = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306121 peer->stats.rx.err.mic_err);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306122 DP_PRINT_STATS("Erros: Decryption Errors = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306123 peer->stats.rx.err.decrypt_err);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306124 DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306125 peer->stats.rx.non_ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306126 DP_PRINT_STATS("Msdu's Recived As Ampdu = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05306127 peer->stats.rx.ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306128 DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306129 peer->stats.rx.non_amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306130 DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306131 peer->stats.rx.amsdu_cnt);
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05306132 DP_PRINT_STATS("NAWDS : ");
6133 DP_PRINT_STATS(" Nawds multicast Drop Rx Packet = %d",
Ruchi, Agrawal27550482018-02-20 19:43:41 +05306134 peer->stats.rx.nawds_mcast_drop);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306135 DP_PRINT_STATS("SGI ="
Ishank Jain57c42a12017-04-12 10:42:22 +05306136 " 0.8us %d"
6137 " 0.4us %d"
6138 " 1.6us %d"
6139 " 3.2us %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306140 peer->stats.rx.sgi_count[0],
6141 peer->stats.rx.sgi_count[1],
6142 peer->stats.rx.sgi_count[2],
6143 peer->stats.rx.sgi_count[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306144 DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306145 peer->stats.rx.bw[0], peer->stats.rx.bw[1],
6146 peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306147 DP_PRINT_STATS("Reception Type ="
Ishank Jain1e7401c2017-02-17 15:38:39 +05306148 " SU %d,"
6149 " MU_MIMO %d,"
6150 " MU_OFDMA %d,"
6151 " MU_OFDMA_MIMO %d",
6152 peer->stats.rx.reception_type[0],
6153 peer->stats.rx.reception_type[1],
6154 peer->stats.rx.reception_type[2],
6155 peer->stats.rx.reception_type[3]);
6156
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306157
Ishank Jain57c42a12017-04-12 10:42:22 +05306158 for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
6159 index = 0;
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306160 for (mcs = 0; mcs < MAX_MCS; mcs++) {
6161 if (!dp_rate_string[pkt_type][mcs].valid)
6162 continue;
Ishank Jain57c42a12017-04-12 10:42:22 +05306163
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306164 DP_PRINT_STATS(" %s = %d",
6165 dp_rate_string[pkt_type][mcs].mcs_type,
6166 peer->stats.rx.pkt_type[pkt_type].
6167 mcs_count[mcs]);
6168 }
6169
6170 DP_PRINT_STATS("\n");
6171 }
Ishank Jain1e7401c2017-02-17 15:38:39 +05306172
6173 index = 0;
6174 for (i = 0; i < SS_COUNT; i++) {
6175 index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
Ishank Jain57c42a12017-04-12 10:42:22 +05306176 " %d", peer->stats.rx.nss[i]);
Ishank Jain1e7401c2017-02-17 15:38:39 +05306177 }
Anish Nataraj072d8972018-01-09 18:23:33 +05306178 DP_PRINT_STATS("NSS(1-8) = %s",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306179 nss);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306180
6181 DP_PRINT_STATS("Aggregation:");
6182 DP_PRINT_STATS(" Msdu's Part of Ampdu = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05306183 peer->stats.rx.ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306184 DP_PRINT_STATS(" Msdu's With No Mpdu Level Aggregation = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05306185 peer->stats.rx.non_ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306186 DP_PRINT_STATS(" Msdu's Part of Amsdu = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05306187 peer->stats.rx.amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306188 DP_PRINT_STATS(" Msdu's With No Msdu Level Aggregation = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05306189 peer->stats.rx.non_amsdu_cnt);
Ishank Jain1e7401c2017-02-17 15:38:39 +05306190}
6191
Venkata Sharath Chandra Manchala0cb31982018-03-30 15:55:26 -07006192/*
6193 * dp_get_host_peer_stats()- function to print peer stats
6194 * @pdev_handle: DP_PDEV handle
6195 * @mac_addr: mac address of the peer
6196 *
6197 * Return: void
6198 */
6199static void
6200dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
6201{
6202 struct dp_peer *peer;
6203 uint8_t local_id;
6204
6205 peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
6206 &local_id);
6207
6208 if (!peer) {
6209 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6210 "%s: Invalid peer\n", __func__);
6211 return;
6212 }
6213
6214 dp_print_peer_stats(peer);
6215 dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
6216}
6217
Ishank Jain1e7401c2017-02-17 15:38:39 +05306218/**
6219 * dp_print_host_stats()- Function to print the stats aggregated at host
6220 * @vdev_handle: DP_VDEV handle
Ishank Jain1e7401c2017-02-17 15:38:39 +05306221 * @type: host stats type
6222 *
6223 * Available Stat types
Ishank Jain6290a3c2017-03-21 10:49:39 +05306224 * TXRX_CLEAR_STATS : Clear the stats
Ishank Jain1e7401c2017-02-17 15:38:39 +05306225 * TXRX_RX_RATE_STATS: Print Rx Rate Info
6226 * TXRX_TX_RATE_STATS: Print Tx Rate Info
6227 * TXRX_TX_HOST_STATS: Print Tx Stats
6228 * TXRX_RX_HOST_STATS: Print Rx Stats
Pamidipati, Vijay899e7752017-07-25 22:09:28 +05306229 * TXRX_AST_STATS: Print AST Stats
sumedh baikady72b1c712017-08-24 12:11:46 -07006230 * TXRX_SRNG_PTR_STATS: Print SRNG ring pointer stats
Ishank Jain1e7401c2017-02-17 15:38:39 +05306231 *
6232 * Return: 0 on success, print error message in case of failure
6233 */
6234static int
Venkata Sharath Chandra Manchala0cb31982018-03-30 15:55:26 -07006235dp_print_host_stats(struct cdp_vdev *vdev_handle,
6236 struct cdp_txrx_stats_req *req)
Ishank Jain1e7401c2017-02-17 15:38:39 +05306237{
6238 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6239 struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
Venkata Sharath Chandra Manchala0cb31982018-03-30 15:55:26 -07006240 enum cdp_host_txrx_stats type =
6241 dp_stats_mapping_table[req->stats][STATS_HOST];
Ishank Jain1e7401c2017-02-17 15:38:39 +05306242
6243 dp_aggregate_pdev_stats(pdev);
Pamidipati, Vijay899e7752017-07-25 22:09:28 +05306244
Ishank Jain1e7401c2017-02-17 15:38:39 +05306245 switch (type) {
Ishank Jain6290a3c2017-03-21 10:49:39 +05306246 case TXRX_CLEAR_STATS:
6247 dp_txrx_host_stats_clr(vdev);
6248 break;
Ishank Jain1e7401c2017-02-17 15:38:39 +05306249 case TXRX_RX_RATE_STATS:
6250 dp_print_rx_rates(vdev);
6251 break;
6252 case TXRX_TX_RATE_STATS:
6253 dp_print_tx_rates(vdev);
6254 break;
6255 case TXRX_TX_HOST_STATS:
6256 dp_print_pdev_tx_stats(pdev);
6257 dp_print_soc_tx_stats(pdev->soc);
6258 break;
6259 case TXRX_RX_HOST_STATS:
6260 dp_print_pdev_rx_stats(pdev);
6261 dp_print_soc_rx_stats(pdev->soc);
6262 break;
Pamidipati, Vijay899e7752017-07-25 22:09:28 +05306263 case TXRX_AST_STATS:
6264 dp_print_ast_stats(pdev->soc);
Ruchi, Agrawal89219d92018-02-26 16:43:06 +05306265 dp_print_peer_table(vdev);
Pamidipati, Vijay899e7752017-07-25 22:09:28 +05306266 break;
sumedh baikady72b1c712017-08-24 12:11:46 -07006267 case TXRX_SRNG_PTR_STATS:
Kai Chen783e0382018-01-25 16:29:08 -08006268 dp_print_ring_stats(pdev);
6269 break;
6270 case TXRX_RX_MON_STATS:
6271 dp_print_pdev_rx_mon_stats(pdev);
6272 break;
Venkata Sharath Chandra Manchala0cb31982018-03-30 15:55:26 -07006273 case TXRX_REO_QUEUE_STATS:
6274 dp_get_host_peer_stats((struct cdp_pdev *)pdev, req->peer_addr);
6275 break;
Ishank Jain1e7401c2017-02-17 15:38:39 +05306276 default:
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07006277 DP_TRACE(FATAL, "Wrong Input For TxRx Host Stats");
Ishank Jain1e7401c2017-02-17 15:38:39 +05306278 break;
6279 }
6280 return 0;
6281}
6282
6283/*
Soumya Bhat7422db82017-12-15 13:48:53 +05306284 * dp_ppdu_ring_reset()- Reset PPDU Stats ring
6285 * @pdev: DP_PDEV handle
6286 *
6287 * Return: void
6288 */
6289static void
6290dp_ppdu_ring_reset(struct dp_pdev *pdev)
6291{
6292 struct htt_rx_ring_tlv_filter htt_tlv_filter;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08006293 int mac_id;
Soumya Bhat7422db82017-12-15 13:48:53 +05306294
6295 qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
6296
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08006297 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6298 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6299 pdev->pdev_id);
Soumya Bhat7422db82017-12-15 13:48:53 +05306300
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08006301 htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
6302 pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6303 RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6304 }
Soumya Bhat7422db82017-12-15 13:48:53 +05306305}
6306
6307/*
Anish Nataraj38a29562017-08-18 19:41:17 +05306308 * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
6309 * @pdev: DP_PDEV handle
6310 *
6311 * Return: void
6312 */
6313static void
6314dp_ppdu_ring_cfg(struct dp_pdev *pdev)
6315{
6316 struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08006317 int mac_id;
Anish Nataraj38a29562017-08-18 19:41:17 +05306318
Soumya Bhat35fc6992018-03-09 18:39:03 +05306319 htt_tlv_filter.mpdu_start = 1;
Anish Nataraj38a29562017-08-18 19:41:17 +05306320 htt_tlv_filter.msdu_start = 0;
6321 htt_tlv_filter.packet = 0;
6322 htt_tlv_filter.msdu_end = 0;
6323 htt_tlv_filter.mpdu_end = 0;
nobelj1c31fee2018-03-21 11:47:05 -07006324 htt_tlv_filter.attention = 0;
Anish Nataraj38a29562017-08-18 19:41:17 +05306325 htt_tlv_filter.ppdu_start = 1;
6326 htt_tlv_filter.ppdu_end = 1;
6327 htt_tlv_filter.ppdu_end_user_stats = 1;
6328 htt_tlv_filter.ppdu_end_user_stats_ext = 1;
6329 htt_tlv_filter.ppdu_end_status_done = 1;
6330 htt_tlv_filter.enable_fp = 1;
6331 htt_tlv_filter.enable_md = 0;
nobelj1c31fee2018-03-21 11:47:05 -07006332 if (pdev->mcopy_mode) {
6333 htt_tlv_filter.packet_header = 1;
Soumya Bhat2f54de22018-02-21 09:54:28 +05306334 htt_tlv_filter.enable_mo = 1;
nobelj1c31fee2018-03-21 11:47:05 -07006335 }
nobeljd124b742017-10-16 11:59:12 -07006336 htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
6337 htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
6338 htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
6339 htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
6340 htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
6341 htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
Anish Nataraj38a29562017-08-18 19:41:17 +05306342
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08006343 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6344 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6345 pdev->pdev_id);
6346
6347 htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
6348 pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6349 RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6350 }
Anish Nataraj38a29562017-08-18 19:41:17 +05306351}
6352
6353/*
Alok Singh40a622b2018-06-28 10:47:26 +05306354 * is_ppdu_txrx_capture_enabled() - API to check both pktlog and debug_sniffer
6355 * modes are enabled or not.
6356 * @dp_pdev: dp pdev handle.
6357 *
6358 * Return: bool
6359 */
6360static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
6361{
6362 if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
6363 !pdev->mcopy_mode)
6364 return true;
6365 else
6366 return false;
6367}
6368
6369/*
Vinay Adella873dc402018-05-28 12:06:34 +05306370 *dp_set_bpr_enable() - API to enable/disable bpr feature
6371 *@pdev_handle: DP_PDEV handle.
6372 *@val: Provided value.
6373 *
6374 *Return: void
6375 */
6376static void
6377dp_set_bpr_enable(struct cdp_pdev *pdev_handle, int val)
6378{
6379 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6380
6381 switch (val) {
6382 case CDP_BPR_DISABLE:
6383 pdev->bpr_enable = CDP_BPR_DISABLE;
6384 if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
6385 !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
6386 dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6387 } else if (pdev->enhanced_stats_en &&
6388 !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
6389 !pdev->pktlog_ppdu_stats) {
6390 dp_h2t_cfg_stats_msg_send(pdev,
6391 DP_PPDU_STATS_CFG_ENH_STATS,
6392 pdev->pdev_id);
6393 }
6394 break;
6395 case CDP_BPR_ENABLE:
6396 pdev->bpr_enable = CDP_BPR_ENABLE;
6397 if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
6398 !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
6399 dp_h2t_cfg_stats_msg_send(pdev,
6400 DP_PPDU_STATS_CFG_BPR,
6401 pdev->pdev_id);
6402 } else if (pdev->enhanced_stats_en &&
6403 !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
6404 !pdev->pktlog_ppdu_stats) {
6405 dp_h2t_cfg_stats_msg_send(pdev,
6406 DP_PPDU_STATS_CFG_BPR_ENH,
6407 pdev->pdev_id);
6408 } else if (pdev->pktlog_ppdu_stats) {
6409 dp_h2t_cfg_stats_msg_send(pdev,
6410 DP_PPDU_STATS_CFG_BPR_PKTLOG,
6411 pdev->pdev_id);
6412 }
6413 break;
6414 default:
6415 break;
6416 }
6417}
6418
6419/*
Soumya Bhat6fee59c2017-10-31 13:12:37 +05306420 * dp_config_debug_sniffer()- API to enable/disable debug sniffer
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306421 * @pdev_handle: DP_PDEV handle
6422 * @val: user provided value
6423 *
6424 * Return: void
6425 */
6426static void
Soumya Bhat6fee59c2017-10-31 13:12:37 +05306427dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306428{
6429 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6430
Soumya Bhat89647ef2017-11-16 17:23:48 +05306431 switch (val) {
6432 case 0:
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306433 pdev->tx_sniffer_enable = 0;
Soumya Bhat7422db82017-12-15 13:48:53 +05306434 pdev->mcopy_mode = 0;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306435
Alok Singh40a622b2018-06-28 10:47:26 +05306436 if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
6437 !pdev->bpr_enable) {
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07006438 dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
Soumya Bhat7422db82017-12-15 13:48:53 +05306439 dp_ppdu_ring_reset(pdev);
Alok Singh40a622b2018-06-28 10:47:26 +05306440 } else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
Soumya Bhat0d6245c2018-02-08 21:02:57 +05306441 dp_h2t_cfg_stats_msg_send(pdev,
6442 DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
Alok Singh40a622b2018-06-28 10:47:26 +05306443 } else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
6444 dp_h2t_cfg_stats_msg_send(pdev,
6445 DP_PPDU_STATS_CFG_BPR_ENH,
6446 pdev->pdev_id);
6447 } else {
6448 dp_h2t_cfg_stats_msg_send(pdev,
6449 DP_PPDU_STATS_CFG_BPR,
6450 pdev->pdev_id);
Soumya Bhat7422db82017-12-15 13:48:53 +05306451 }
Soumya Bhat89647ef2017-11-16 17:23:48 +05306452 break;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306453
Soumya Bhat89647ef2017-11-16 17:23:48 +05306454 case 1:
6455 pdev->tx_sniffer_enable = 1;
Soumya Bhat7422db82017-12-15 13:48:53 +05306456 pdev->mcopy_mode = 0;
6457
Soumya Bhat0d6245c2018-02-08 21:02:57 +05306458 if (!pdev->pktlog_ppdu_stats)
Soumya Bhat7422db82017-12-15 13:48:53 +05306459 dp_h2t_cfg_stats_msg_send(pdev,
Soumya Bhat0d6245c2018-02-08 21:02:57 +05306460 DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
Soumya Bhat89647ef2017-11-16 17:23:48 +05306461 break;
6462 case 2:
Soumya Bhat7422db82017-12-15 13:48:53 +05306463 pdev->mcopy_mode = 1;
Soumya Bhat89647ef2017-11-16 17:23:48 +05306464 pdev->tx_sniffer_enable = 0;
Soumya Bhat14b6f262018-06-20 16:33:49 +05306465 dp_ppdu_ring_cfg(pdev);
Soumya Bhat0d6245c2018-02-08 21:02:57 +05306466
6467 if (!pdev->pktlog_ppdu_stats)
Soumya Bhat7422db82017-12-15 13:48:53 +05306468 dp_h2t_cfg_stats_msg_send(pdev,
Soumya Bhat0d6245c2018-02-08 21:02:57 +05306469 DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
Soumya Bhat89647ef2017-11-16 17:23:48 +05306470 break;
6471 default:
6472 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6473 "Invalid value\n");
6474 break;
6475 }
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306476}
6477
6478/*
Pamidipati, Vijaybe379452017-06-21 00:31:06 +05306479 * dp_enable_enhanced_stats()- API to enable enhanced statistcs
6480 * @pdev_handle: DP_PDEV handle
6481 *
6482 * Return: void
6483 */
6484static void
6485dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
6486{
6487 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6488 pdev->enhanced_stats_en = 1;
Anish Nataraj38a29562017-08-18 19:41:17 +05306489
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05306490 if (!pdev->mcopy_mode && !pdev->neighbour_peers_added)
Soumya Bhat7422db82017-12-15 13:48:53 +05306491 dp_ppdu_ring_cfg(pdev);
6492
Alok Singh40a622b2018-06-28 10:47:26 +05306493 if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
Soumya Bhat0d6245c2018-02-08 21:02:57 +05306494 dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
Alok Singh40a622b2018-06-28 10:47:26 +05306495 } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
6496 dp_h2t_cfg_stats_msg_send(pdev,
6497 DP_PPDU_STATS_CFG_BPR_ENH,
6498 pdev->pdev_id);
6499 }
Pamidipati, Vijaybe379452017-06-21 00:31:06 +05306500}
6501
6502/*
6503 * dp_disable_enhanced_stats()- API to disable enhanced statistcs
6504 * @pdev_handle: DP_PDEV handle
6505 *
6506 * Return: void
6507 */
6508static void
6509dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
6510{
6511 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306512
Pamidipati, Vijaybe379452017-06-21 00:31:06 +05306513 pdev->enhanced_stats_en = 0;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306514
Alok Singh40a622b2018-06-28 10:47:26 +05306515 if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07006516 dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
Alok Singh40a622b2018-06-28 10:47:26 +05306517 } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
6518 dp_h2t_cfg_stats_msg_send(pdev,
6519 DP_PPDU_STATS_CFG_BPR,
6520 pdev->pdev_id);
6521 }
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05306522
6523 if (!pdev->mcopy_mode && !pdev->neighbour_peers_added)
Soumya Bhat7422db82017-12-15 13:48:53 +05306524 dp_ppdu_ring_reset(pdev);
Pamidipati, Vijaybe379452017-06-21 00:31:06 +05306525}
6526
6527/*
Ishank Jain6290a3c2017-03-21 10:49:39 +05306528 * dp_get_fw_peer_stats()- function to print peer stats
6529 * @pdev_handle: DP_PDEV handle
6530 * @mac_addr: mac address of the peer
6531 * @cap: Type of htt stats requested
6532 *
6533 * Currently Supporting only MAC ID based requests Only
6534 * 1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
6535 * 2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
6536 * 3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
6537 *
6538 * Return: void
6539 */
6540static void
6541dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
6542 uint32_t cap)
6543{
6544 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Pamidipati, Vijayc2cf6692017-11-22 10:17:34 +05306545 int i;
Ishank Jain6290a3c2017-03-21 10:49:39 +05306546 uint32_t config_param0 = 0;
6547 uint32_t config_param1 = 0;
6548 uint32_t config_param2 = 0;
6549 uint32_t config_param3 = 0;
6550
6551 HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
6552 config_param0 |= (1 << (cap + 1));
6553
Pamidipati, Vijayc2cf6692017-11-22 10:17:34 +05306554 for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
6555 config_param1 |= (1 << i);
6556 }
Ishank Jain6290a3c2017-03-21 10:49:39 +05306557
6558 config_param2 |= (mac_addr[0] & 0x000000ff);
6559 config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
6560 config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
6561 config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
6562
6563 config_param3 |= (mac_addr[4] & 0x000000ff);
6564 config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
6565
6566 dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
6567 config_param0, config_param1, config_param2,
Venkata Sharath Chandra Manchalaac863c42018-02-19 22:47:07 -08006568 config_param3, 0, 0, 0);
Karunakar Dasineni93f633c2017-06-02 19:04:46 -07006569
Ishank Jain6290a3c2017-03-21 10:49:39 +05306570}
6571
/* This struct definition will be removed from here
 * once it get added in FW headers*/
struct httstats_cmd_req {
	uint32_t config_param0;	/* request-type specific config word 0 */
	uint32_t config_param1;	/* request-type specific config word 1 */
	uint32_t config_param2;	/* request-type specific config word 2 */
	uint32_t config_param3;	/* request-type specific config word 3 */
	int cookie;		/* caller token forwarded to the target;
				 * presumably echoed back in the response -
				 * confirm against FW interface */
	u_int8_t stats_id;	/* HTT stats request type (passed as the
				 * stats id to dp_h2t_ext_stats_msg_send) */
};
6582
6583/*
6584 * dp_get_htt_stats: function to process the httstas request
6585 * @pdev_handle: DP pdev handle
6586 * @data: pointer to request data
6587 * @data_len: length for request data
6588 *
6589 * return: void
6590 */
6591static void
6592dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
6593{
6594 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6595 struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
6596
6597 QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
6598 dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
6599 req->config_param0, req->config_param1,
6600 req->config_param2, req->config_param3,
Venkata Sharath Chandra Manchalaac863c42018-02-19 22:47:07 -08006601 req->cookie, 0, 0);
Chaithanya Garrepalli30927c52017-11-22 14:31:47 +05306602}
Vinay Adella873dc402018-05-28 12:06:34 +05306603
Ishank Jain9f174c62017-03-30 18:37:42 +05306604/*
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306605 * dp_set_pdev_param: function to set parameters in pdev
6606 * @pdev_handle: DP pdev handle
6607 * @param: parameter type to be set
6608 * @val: value of parameter to be set
6609 *
6610 * return: void
6611 */
6612static void dp_set_pdev_param(struct cdp_pdev *pdev_handle,
6613 enum cdp_pdev_param_type param, uint8_t val)
6614{
6615 switch (param) {
Soumya Bhat6fee59c2017-10-31 13:12:37 +05306616 case CDP_CONFIG_DEBUG_SNIFFER:
6617 dp_config_debug_sniffer(pdev_handle, val);
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306618 break;
Vinay Adella873dc402018-05-28 12:06:34 +05306619 case CDP_CONFIG_BPR_ENABLE:
6620 dp_set_bpr_enable(pdev_handle, val);
6621 break;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306622 default:
6623 break;
6624 }
6625}
6626
/*
 * dp_set_vdev_param: function to set parameters in vdev
 * @vdev_handle: DP vdev handle
 * @param: parameter type to be set
 * @val: value of parameter to be set
 *
 * return: void
 */
static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
		enum cdp_vdev_param_type param, uint32_t val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	switch (param) {
	case CDP_ENABLE_WDS:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"wds_enable %d for vdev(%p) id(%d)\n",
				val, vdev, vdev->vdev_id);
		vdev->wds_enabled = val;
		break;
	case CDP_ENABLE_NAWDS:
		vdev->nawds_enabled = val;
		break;
	case CDP_ENABLE_MCAST_EN:
		vdev->mcast_enhancement_en = val;
		break;
	case CDP_ENABLE_PROXYSTA:
		vdev->proxysta_vdev = val;
		break;
	case CDP_UPDATE_TDLS_FLAGS:
		vdev->tdls_link_connected = val;
		break;
	case CDP_CFG_WDS_AGING_TIMER:
		/* val == 0 disables WDS ageing; a changed non-zero value
		 * re-arms the soc-wide ageing timer with the new period.
		 */
		if (val == 0)
			qdf_timer_stop(&vdev->pdev->soc->wds_aging_timer);
		else if (val != vdev->wds_aging_timer_val)
			qdf_timer_mod(&vdev->pdev->soc->wds_aging_timer, val);

		vdev->wds_aging_timer_val = val;
		break;
	case CDP_ENABLE_AP_BRIDGE:
		/* AP bridging is forced off for STA vdevs regardless of val */
		if (wlan_op_mode_sta != vdev->opmode)
			vdev->ap_bridge_enabled = val;
		else
			vdev->ap_bridge_enabled = false;
		break;
	case CDP_ENABLE_CIPHER:
		vdev->sec_type = val;
		break;
	case CDP_ENABLE_QWRAP_ISOLATION:
		vdev->isolation_vdev = val;
		break;
	default:
		/* unknown parameters are ignored */
		break;
	}

	/* Re-derive the Tx address-search flags from the updated vdev
	 * state (runs even for unhandled params; harmless re-computation).
	 */
	dp_tx_vdev_update_search_flags(vdev);
}
6683
6684/**
6685 * dp_peer_set_nawds: set nawds bit in peer
6686 * @peer_handle: pointer to peer
6687 * @value: enable/disable nawds
6688 *
6689 * return: void
6690 */
c_cgodavbd5b3c22017-06-07 12:31:40 +05306691static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
Ishank Jain9f174c62017-03-30 18:37:42 +05306692{
6693 struct dp_peer *peer = (struct dp_peer *)peer_handle;
6694 peer->nawds_enabled = value;
6695}
Ishank Jain1e7401c2017-02-17 15:38:39 +05306696
Ishank Jain949674c2017-02-27 17:09:29 +05306697/*
6698 * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
6699 * @vdev_handle: DP_VDEV handle
6700 * @map_id:ID of map that needs to be updated
6701 *
6702 * Return: void
6703 */
6704static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
6705 uint8_t map_id)
6706{
6707 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6708 vdev->dscp_tid_map_id = map_id;
6709 return;
6710}
6711
Prathyusha Guduri184b6402018-02-04 23:01:49 +05306712/*
6713 * dp_txrx_stats_publish(): publish pdev stats into a buffer
6714 * @pdev_handle: DP_PDEV handle
6715 * @buf: to hold pdev_stats
6716 *
6717 * Return: int
6718 */
6719static int
6720dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
6721{
6722 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6723 struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
Prathyusha Guduri43bb0562018-02-12 18:30:54 +05306724 struct cdp_txrx_stats_req req = {0,};
Prathyusha Guduri184b6402018-02-04 23:01:49 +05306725
6726 dp_aggregate_pdev_stats(pdev);
Prathyusha Guduri43bb0562018-02-12 18:30:54 +05306727 req.stats = HTT_DBG_EXT_STATS_PDEV_TX;
6728 req.cookie_val = 1;
6729 dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
Venkata Sharath Chandra Manchalaac863c42018-02-19 22:47:07 -08006730 req.param1, req.param2, req.param3, 0,
6731 req.cookie_val, 0);
6732
Prathyusha Guduri43bb0562018-02-12 18:30:54 +05306733 msleep(DP_MAX_SLEEP_TIME);
Prathyusha Guduri184b6402018-02-04 23:01:49 +05306734
Prathyusha Guduri43bb0562018-02-12 18:30:54 +05306735 req.stats = HTT_DBG_EXT_STATS_PDEV_RX;
6736 req.cookie_val = 1;
6737 dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
Venkata Sharath Chandra Manchalaac863c42018-02-19 22:47:07 -08006738 req.param1, req.param2, req.param3, 0,
6739 req.cookie_val, 0);
6740
Prathyusha Guduri43bb0562018-02-12 18:30:54 +05306741 msleep(DP_MAX_SLEEP_TIME);
Prathyusha Guduri184b6402018-02-04 23:01:49 +05306742 qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
6743
6744 return TXRX_STATS_LEVEL;
6745}
6746
Ishank Jain949674c2017-02-27 17:09:29 +05306747/**
6748 * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
6749 * @pdev: DP_PDEV handle
6750 * @map_id: ID of map that needs to be updated
6751 * @tos: index value in map
6752 * @tid: tid value passed by the user
6753 *
6754 * Return: void
6755 */
6756static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
6757 uint8_t map_id, uint8_t tos, uint8_t tid)
6758{
6759 uint8_t dscp;
6760 struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
6761 dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
6762 pdev->dscp_tid_map[map_id][dscp] = tid;
Om Prakash Tripathi5425c522017-08-18 11:11:34 +05306763 if (map_id < HAL_MAX_HW_DSCP_TID_MAPS)
6764 hal_tx_update_dscp_tid(pdev->soc->hal_soc, tid,
Ishank Jain949674c2017-02-27 17:09:29 +05306765 map_id, dscp);
6766 return;
6767}
6768
Ishank Jain6290a3c2017-03-21 10:49:39 +05306769/**
6770 * dp_fw_stats_process(): Process TxRX FW stats request
6771 * @vdev_handle: DP VDEV handle
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306772 * @req: stats request
Ishank Jain6290a3c2017-03-21 10:49:39 +05306773 *
6774 * return: int
6775 */
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306776static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
6777 struct cdp_txrx_stats_req *req)
Ishank Jain6290a3c2017-03-21 10:49:39 +05306778{
6779 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6780 struct dp_pdev *pdev = NULL;
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306781 uint32_t stats = req->stats;
Venkata Sharath Chandra Manchala4face242018-04-23 11:48:28 -07006782 uint8_t mac_id = req->mac_id;
Ishank Jain6290a3c2017-03-21 10:49:39 +05306783
6784 if (!vdev) {
6785 DP_TRACE(NONE, "VDEV not found");
6786 return 1;
6787 }
Ishank Jain6290a3c2017-03-21 10:49:39 +05306788 pdev = vdev->pdev;
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306789
chenguocda25122018-01-24 17:39:38 +08006790 /*
6791 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
6792 * from param0 to param3 according to below rule:
6793 *
6794 * PARAM:
6795 * - config_param0 : start_offset (stats type)
6796 * - config_param1 : stats bmask from start offset
6797 * - config_param2 : stats bmask from start offset + 32
6798 * - config_param3 : stats bmask from start offset + 64
6799 */
6800 if (req->stats == CDP_TXRX_STATS_0) {
6801 req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
6802 req->param1 = 0xFFFFFFFF;
6803 req->param2 = 0xFFFFFFFF;
6804 req->param3 = 0xFFFFFFFF;
6805 }
6806
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306807 return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
Venkata Sharath Chandra Manchalaac863c42018-02-19 22:47:07 -08006808 req->param1, req->param2, req->param3,
Venkata Sharath Chandra Manchala4face242018-04-23 11:48:28 -07006809 0, 0, mac_id);
Ishank Jain6290a3c2017-03-21 10:49:39 +05306810}
6811
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306812/**
6813 * dp_txrx_stats_request - function to map to firmware and host stats
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08006814 * @vdev: virtual handle
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306815 * @req: stats request
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08006816 *
6817 * Return: integer
6818 */
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306819static int dp_txrx_stats_request(struct cdp_vdev *vdev,
6820 struct cdp_txrx_stats_req *req)
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08006821{
6822 int host_stats;
6823 int fw_stats;
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306824 enum cdp_stats stats;
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08006825
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306826 if (!vdev || !req) {
6827 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6828 "Invalid vdev/req instance");
6829 return 0;
6830 }
Venkata Sharath Chandra Manchalaac863c42018-02-19 22:47:07 -08006831
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306832 stats = req->stats;
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08006833 if (stats >= CDP_TXRX_MAX_STATS)
6834 return 0;
6835
Ishank Jain6290a3c2017-03-21 10:49:39 +05306836 /*
6837 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
6838 * has to be updated if new FW HTT stats added
6839 */
6840 if (stats > CDP_TXRX_STATS_HTT_MAX)
6841 stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08006842 fw_stats = dp_stats_mapping_table[stats][STATS_FW];
6843 host_stats = dp_stats_mapping_table[stats][STATS_HOST];
6844
6845 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6846 "stats: %u fw_stats_type: %d host_stats_type: %d",
6847 stats, fw_stats, host_stats);
6848
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306849 if (fw_stats != TXRX_FW_STATS_INVALID) {
6850 /* update request with FW stats type */
6851 req->stats = fw_stats;
6852 return dp_fw_stats_process(vdev, req);
6853 }
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08006854
Ishank Jain57c42a12017-04-12 10:42:22 +05306855 if ((host_stats != TXRX_HOST_STATS_INVALID) &&
6856 (host_stats <= TXRX_HOST_STATS_MAX))
Venkata Sharath Chandra Manchala0cb31982018-03-30 15:55:26 -07006857 return dp_print_host_stats(vdev, req);
Ishank Jain57c42a12017-04-12 10:42:22 +05306858 else
6859 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6860 "Wrong Input for TxRx Stats");
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08006861
6862 return 0;
6863}
6864
/*
 * dp_print_napi_stats(): NAPI stats
 * @soc - soc handle
 */
static void dp_print_napi_stats(struct dp_soc *soc)
{
	/* HIF owns the NAPI contexts; delegate the dump to it */
	hif_print_napi_stats(soc->hif_handle);
}
6873
6874/*
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07006875 * dp_print_per_ring_stats(): Packet count per ring
6876 * @soc - soc handle
6877 */
6878static void dp_print_per_ring_stats(struct dp_soc *soc)
6879{
chenguo8107b662017-12-13 16:31:13 +08006880 uint8_t ring;
6881 uint16_t core;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07006882 uint64_t total_packets;
6883
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07006884 DP_TRACE(FATAL, "Reo packets per ring:");
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07006885 for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
6886 total_packets = 0;
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07006887 DP_TRACE(FATAL, "Packets on ring %u:", ring);
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07006888 for (core = 0; core < NR_CPUS; core++) {
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07006889 DP_TRACE(FATAL, "Packets arriving on core %u: %llu",
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07006890 core, soc->stats.rx.ring_packets[core][ring]);
6891 total_packets += soc->stats.rx.ring_packets[core][ring];
6892 }
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07006893 DP_TRACE(FATAL, "Total packets on ring %u: %llu",
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07006894 ring, total_packets);
6895 }
6896}
6897
/*
 * dp_txrx_path_stats() - Function to display dump stats
 * @soc - soc handle
 *
 * Dumps aggregated Tx/Rx path counters for every pdev via DP_TRACE.
 * Kept byte-for-byte: external tooling may parse these exact strings.
 *
 * return: none
 */
static void dp_txrx_path_stats(struct dp_soc *soc)
{
	uint8_t error_code;
	uint8_t loop_pdev;
	struct dp_pdev *pdev;
	uint8_t i;

	for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {

		pdev = soc->pdev_list[loop_pdev];
		/* Fold vdev/peer counters into the pdev totals first */
		dp_aggregate_pdev_stats(pdev);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Tx path Statistics:");

		DP_TRACE(FATAL, "from stack: %u msdus (%llu bytes)",
			pdev->stats.tx_i.rcvd.num,
			pdev->stats.tx_i.rcvd.bytes);
		DP_TRACE(FATAL, "processed from host: %u msdus (%llu bytes)",
			pdev->stats.tx_i.processed.num,
			pdev->stats.tx_i.processed.bytes);
		DP_TRACE(FATAL, "successfully transmitted: %u msdus (%llu bytes)",
			pdev->stats.tx.tx_success.num,
			pdev->stats.tx.tx_success.bytes);

		/* Host-side Tx drop reasons */
		DP_TRACE(FATAL, "Dropped in host:");
		DP_TRACE(FATAL, "Total packets dropped: %u,",
			pdev->stats.tx_i.dropped.dropped_pkt.num);
		DP_TRACE(FATAL, "Descriptor not available: %u",
			pdev->stats.tx_i.dropped.desc_na.num);
		DP_TRACE(FATAL, "Ring full: %u",
			pdev->stats.tx_i.dropped.ring_full);
		DP_TRACE(FATAL, "Enqueue fail: %u",
			pdev->stats.tx_i.dropped.enqueue_fail);
		DP_TRACE(FATAL, "DMA Error: %u",
			pdev->stats.tx_i.dropped.dma_error);

		/* Target/hardware-side Tx drop reasons */
		DP_TRACE(FATAL, "Dropped in hardware:");
		DP_TRACE(FATAL, "total packets dropped: %u",
			pdev->stats.tx.tx_failed);
		DP_TRACE(FATAL, "mpdu age out: %u",
			pdev->stats.tx.dropped.age_out);
		DP_TRACE(FATAL, "firmware removed: %u",
			pdev->stats.tx.dropped.fw_rem);
		DP_TRACE(FATAL, "firmware removed tx: %u",
			pdev->stats.tx.dropped.fw_rem_tx);
		DP_TRACE(FATAL, "firmware removed notx %u",
			pdev->stats.tx.dropped.fw_rem_notx);
		DP_TRACE(FATAL, "peer_invalid: %u",
			pdev->soc->stats.tx.tx_invalid_peer.num);


		/* Tx completion batching histogram */
		DP_TRACE(FATAL, "Tx packets sent per interrupt:");
		DP_TRACE(FATAL, "Single Packet: %u",
			pdev->stats.tx_comp_histogram.pkts_1);
		DP_TRACE(FATAL, "2-20 Packets:  %u",
			pdev->stats.tx_comp_histogram.pkts_2_20);
		DP_TRACE(FATAL, "21-40 Packets: %u",
			pdev->stats.tx_comp_histogram.pkts_21_40);
		DP_TRACE(FATAL, "41-60 Packets: %u",
			pdev->stats.tx_comp_histogram.pkts_41_60);
		DP_TRACE(FATAL, "61-80 Packets: %u",
			pdev->stats.tx_comp_histogram.pkts_61_80);
		DP_TRACE(FATAL, "81-100 Packets: %u",
			pdev->stats.tx_comp_histogram.pkts_81_100);
		DP_TRACE(FATAL, "101-200 Packets: %u",
			pdev->stats.tx_comp_histogram.pkts_101_200);
		DP_TRACE(FATAL, "   201+ Packets: %u",
			pdev->stats.tx_comp_histogram.pkts_201_plus);

		DP_TRACE(FATAL, "Rx path statistics");

		DP_TRACE(FATAL, "delivered %u msdus ( %llu bytes),",
			pdev->stats.rx.to_stack.num,
			pdev->stats.rx.to_stack.bytes);
		for (i = 0; i < CDP_MAX_RX_RINGS; i++)
			DP_TRACE(FATAL, "received on reo[%d] %u msdus ( %llu bytes),",
					i, pdev->stats.rx.rcvd_reo[i].num,
					pdev->stats.rx.rcvd_reo[i].bytes);
		DP_TRACE(FATAL, "intra-bss packets %u msdus ( %llu bytes),",
			pdev->stats.rx.intra_bss.pkts.num,
			pdev->stats.rx.intra_bss.pkts.bytes);
		DP_TRACE(FATAL, "intra-bss fails %u msdus ( %llu bytes),",
			pdev->stats.rx.intra_bss.fail.num,
			pdev->stats.rx.intra_bss.fail.bytes);
		DP_TRACE(FATAL, "raw packets %u msdus ( %llu bytes),",
			pdev->stats.rx.raw.num,
			pdev->stats.rx.raw.bytes);
		DP_TRACE(FATAL, "dropped: error %u msdus",
			pdev->stats.rx.err.mic_err);
		DP_TRACE(FATAL, "peer invalid %u",
			pdev->soc->stats.rx.err.rx_invalid_peer.num);

		DP_TRACE(FATAL, "Reo Statistics");
		DP_TRACE(FATAL, "rbm error: %u msdus",
			pdev->soc->stats.rx.err.invalid_rbm);
		DP_TRACE(FATAL, "hal ring access fail: %u msdus",
			pdev->soc->stats.rx.err.hal_ring_access_fail);

		/* Only non-zero REO error buckets are printed */
		for (error_code = 0; error_code < HAL_REO_ERR_MAX;
				error_code++) {
			if (!pdev->soc->stats.rx.err.reo_error[error_code])
				continue;
			DP_TRACE(FATAL, "Reo error number (%u): %u msdus",
				error_code,
				pdev->soc->stats.rx.err.reo_error[error_code]);
		}

		/* Only non-zero RXDMA error buckets are printed */
		for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
				error_code++) {
			if (!pdev->soc->stats.rx.err.rxdma_error[error_code])
				continue;
			DP_TRACE(FATAL, "Rxdma error number (%u): %u msdus",
				error_code,
				pdev->soc->stats.rx.err
				.rxdma_error[error_code]);
		}

		/* Rx reap batching histogram */
		DP_TRACE(FATAL, "Rx packets reaped per interrupt:");
		DP_TRACE(FATAL, "Single Packet: %u",
			 pdev->stats.rx_ind_histogram.pkts_1);
		DP_TRACE(FATAL, "2-20 Packets:  %u",
			 pdev->stats.rx_ind_histogram.pkts_2_20);
		DP_TRACE(FATAL, "21-40 Packets: %u",
			 pdev->stats.rx_ind_histogram.pkts_21_40);
		DP_TRACE(FATAL, "41-60 Packets: %u",
			 pdev->stats.rx_ind_histogram.pkts_41_60);
		DP_TRACE(FATAL, "61-80 Packets: %u",
			 pdev->stats.rx_ind_histogram.pkts_61_80);
		DP_TRACE(FATAL, "81-100 Packets: %u",
			 pdev->stats.rx_ind_histogram.pkts_81_100);
		DP_TRACE(FATAL, "101-200 Packets: %u",
			 pdev->stats.rx_ind_histogram.pkts_101_200);
		DP_TRACE(FATAL, "   201+ Packets: %u",
			 pdev->stats.rx_ind_histogram.pkts_201_plus);

		/* Compile-time/ini feature configuration snapshot */
		DP_TRACE_STATS(ERROR, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
			__func__,
			pdev->soc->wlan_cfg_ctx->tso_enabled,
			pdev->soc->wlan_cfg_ctx->lro_enabled,
			pdev->soc->wlan_cfg_ctx->rx_hash,
			pdev->soc->wlan_cfg_ctx->napi_enabled);
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
		DP_TRACE_STATS(ERROR, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
			__func__,
			pdev->soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold,
			pdev->soc->wlan_cfg_ctx->tx_flow_start_queue_offset);
#endif
	}
}
7053
7054/*
7055 * dp_txrx_dump_stats() - Dump statistics
7056 * @value - Statistics option
7057 */
Mohit Khanna90d7ebd2017-09-12 21:54:21 -07007058static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
7059 enum qdf_stats_verbosity_level level)
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007060{
7061 struct dp_soc *soc =
7062 (struct dp_soc *)psoc;
7063 QDF_STATUS status = QDF_STATUS_SUCCESS;
7064
7065 if (!soc) {
7066 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7067 "%s: soc is NULL", __func__);
7068 return QDF_STATUS_E_INVAL;
7069 }
7070
7071 switch (value) {
7072 case CDP_TXRX_PATH_STATS:
7073 dp_txrx_path_stats(soc);
7074 break;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07007075
7076 case CDP_RX_RING_STATS:
7077 dp_print_per_ring_stats(soc);
7078 break;
7079
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007080 case CDP_TXRX_TSO_STATS:
7081 /* TODO: NOT IMPLEMENTED */
7082 break;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07007083
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007084 case CDP_DUMP_TX_FLOW_POOL_INFO:
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -07007085 cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007086 break;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07007087
psimha61b1a362017-07-27 15:45:49 -07007088 case CDP_DP_NAPI_STATS:
7089 dp_print_napi_stats(soc);
7090 break;
7091
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007092 case CDP_TXRX_DESC_STATS:
7093 /* TODO: NOT IMPLEMENTED */
7094 break;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07007095
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007096 default:
7097 status = QDF_STATUS_E_INVAL;
7098 break;
7099 }
7100
7101 return status;
7102
7103}
7104
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_update_flow_control_parameters() - API to store datapath
 *                            config parameters
 * @soc: soc handle
 * @params: ini parameter handle
 *
 * Copies the Tx flow-control thresholds from the ini params into the
 * soc wlan_cfg context.
 *
 * Return: void
 */
static inline
void dp_update_flow_control_parameters(struct dp_soc *soc,
				struct cdp_config_params *params)
{
	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
				params->tx_flow_stop_queue_threshold;
	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
				params->tx_flow_start_queue_offset;
}
#else
/* Flow control compiled out: parameters are intentionally ignored */
static inline
void dp_update_flow_control_parameters(struct dp_soc *soc,
				struct cdp_config_params *params)
{
}
#endif
7130
7131/**
7132 * dp_update_config_parameters() - API to store datapath
7133 * config parameters
7134 * @soc: soc handle
7135 * @cfg: ini parameter handle
7136 *
7137 * Return: status
7138 */
7139static
7140QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
7141 struct cdp_config_params *params)
7142{
7143 struct dp_soc *soc = (struct dp_soc *)psoc;
7144
7145 if (!(soc)) {
7146 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7147 "%s: Invalid handle", __func__);
7148 return QDF_STATUS_E_INVAL;
7149 }
7150
7151 soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
7152 soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
7153 soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
7154 soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
7155 params->tcp_udp_checksumoffload;
7156 soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
Venkata Sharath Chandra Manchala3e8add82017-07-10 11:59:54 -07007157 dp_update_flow_control_parameters(soc, params);
7158
7159 return QDF_STATUS_SUCCESS;
7160}
7161
Tallapragada Kalyan2a5fc622017-12-08 21:07:43 +05307162/**
7163 * dp_txrx_set_wds_rx_policy() - API to store datapath
7164 * config parameters
7165 * @vdev_handle - datapath vdev handle
7166 * @cfg: ini parameter handle
7167 *
7168 * Return: status
7169 */
7170#ifdef WDS_VENDOR_EXTENSION
7171void
7172dp_txrx_set_wds_rx_policy(
7173 struct cdp_vdev *vdev_handle,
7174 u_int32_t val)
7175{
7176 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7177 struct dp_peer *peer;
7178 if (vdev->opmode == wlan_op_mode_ap) {
7179 /* for ap, set it on bss_peer */
7180 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
7181 if (peer->bss_peer) {
7182 peer->wds_ecm.wds_rx_filter = 1;
7183 peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
7184 peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
7185 break;
7186 }
7187 }
7188 } else if (vdev->opmode == wlan_op_mode_sta) {
7189 peer = TAILQ_FIRST(&vdev->peer_list);
7190 peer->wds_ecm.wds_rx_filter = 1;
7191 peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
7192 peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
7193 }
7194}
7195
7196/**
7197 * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
7198 *
7199 * @peer_handle - datapath peer handle
7200 * @wds_tx_ucast: policy for unicast transmission
7201 * @wds_tx_mcast: policy for multicast transmission
7202 *
7203 * Return: void
7204 */
7205void
7206dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
7207 int wds_tx_ucast, int wds_tx_mcast)
7208{
7209 struct dp_peer *peer = (struct dp_peer *)peer_handle;
7210 if (wds_tx_ucast || wds_tx_mcast) {
7211 peer->wds_enabled = 1;
7212 peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
7213 peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
7214 } else {
7215 peer->wds_enabled = 0;
7216 peer->wds_ecm.wds_tx_ucast_4addr = 0;
7217 peer->wds_ecm.wds_tx_mcast_4addr = 0;
7218 }
7219
7220 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7221 FL("Policy Update set to :\
7222 peer->wds_enabled %d\
7223 peer->wds_ecm.wds_tx_ucast_4addr %d\
7224 peer->wds_ecm.wds_tx_mcast_4addr %d\n"),
7225 peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
7226 peer->wds_ecm.wds_tx_mcast_4addr);
7227 return;
7228}
7229#endif
7230
Karunakar Dasinenica792542017-01-16 10:08:58 -08007231static struct cdp_wds_ops dp_ops_wds = {
7232 .vdev_set_wds = dp_vdev_set_wds,
Tallapragada Kalyan2a5fc622017-12-08 21:07:43 +05307233#ifdef WDS_VENDOR_EXTENSION
7234 .txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
7235 .txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
7236#endif
Karunakar Dasinenica792542017-01-16 10:08:58 -08007237};
7238
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05307239/*
Kabilan Kannan60e3b302017-09-07 20:06:17 -07007240 * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
7241 * @vdev_handle - datapath vdev handle
7242 * @callback - callback function
7243 * @ctxt: callback context
7244 *
7245 */
7246static void
7247dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
7248 ol_txrx_data_tx_cb callback, void *ctxt)
7249{
7250 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7251
7252 vdev->tx_non_std_data_callback.func = callback;
7253 vdev->tx_non_std_data_callback.ctxt = ctxt;
7254}
7255
Santosh Anbu2280e862018-01-03 22:25:53 +05307256/**
7257 * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
7258 * @pdev_hdl: datapath pdev handle
7259 *
7260 * Return: opaque pointer to dp txrx handle
7261 */
7262static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
7263{
7264 struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
7265
7266 return pdev->dp_txrx_handle;
7267}
7268
7269/**
7270 * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
7271 * @pdev_hdl: datapath pdev handle
7272 * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
7273 *
7274 * Return: void
7275 */
7276static void
7277dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
7278{
7279 struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
7280
7281 pdev->dp_txrx_handle = dp_txrx_hdl;
7282}
7283
Pamidipati, Vijayd3478ef2018-02-06 23:52:29 +05307284/**
7285 * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
7286 * @soc_handle: datapath soc handle
7287 *
7288 * Return: opaque pointer to external dp (non-core DP)
7289 */
7290static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
7291{
7292 struct dp_soc *soc = (struct dp_soc *)soc_handle;
7293
7294 return soc->external_txrx_handle;
7295}
7296
7297/**
7298 * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
7299 * @soc_handle: datapath soc handle
7300 * @txrx_handle: opaque pointer to external dp (non-core DP)
7301 *
7302 * Return: void
7303 */
7304static void
7305dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
7306{
7307 struct dp_soc *soc = (struct dp_soc *)soc_handle;
7308
7309 soc->external_txrx_handle = txrx_handle;
7310}
7311
Tallapragada Kalyan71c46b92018-03-01 13:17:10 +05307312#ifdef FEATURE_AST
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05307313static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
7314{
7315 struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
7316 struct dp_peer *peer = (struct dp_peer *) peer_hdl;
7317 struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
7318
Aditya Sathish6add3db2018-04-10 19:43:34 +05307319 /*
7320 * For BSS peer, new peer is not created on alloc_node if the
7321 * peer with same address already exists , instead refcnt is
7322 * increased for existing peer. Correspondingly in delete path,
7323 * only refcnt is decreased; and peer is only deleted , when all
7324 * references are deleted. So delete_in_progress should not be set
7325 * for bss_peer, unless only 2 reference remains (peer map reference
7326 * and peer hash table reference).
7327 */
7328 if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2)) {
7329 return;
7330 }
7331
Karunakar Dasineni372647d2018-01-15 22:27:39 -08007332 peer->delete_in_progress = true;
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05307333 dp_peer_delete_ast_entries(soc, peer);
7334}
7335#endif
7336
Soumya Bhatbc719e62018-02-18 18:21:25 +05307337#ifdef ATH_SUPPORT_NAC_RSSI
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05307338/**
7339 * dp_vdev_get_neighbour_rssi(): Store RSSI for configured NAC
7340 * @vdev_hdl: DP vdev handle
7341 * @rssi: rssi value
7342 *
7343 * Return: 0 for success. nonzero for failure.
7344 */
7345QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
7346 char *mac_addr,
7347 uint8_t *rssi)
7348{
7349 struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
7350 struct dp_pdev *pdev = vdev->pdev;
7351 struct dp_neighbour_peer *peer = NULL;
7352 QDF_STATUS status = QDF_STATUS_E_FAILURE;
7353
7354 *rssi = 0;
7355 qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
7356 TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
7357 neighbour_peer_list_elem) {
7358 if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
7359 mac_addr, DP_MAC_ADDR_LEN) == 0) {
7360 *rssi = peer->rssi;
7361 status = QDF_STATUS_SUCCESS;
7362 break;
7363 }
7364 }
7365 qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
7366 return status;
7367}
7368
Soumya Bhatbc719e62018-02-18 18:21:25 +05307369static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
7370 enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
7371 uint8_t chan_num)
7372{
7373
7374 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7375 struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
7376 struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
7377
7378 pdev->nac_rssi_filtering = 1;
7379 /* Store address of NAC (neighbour peer) which will be checked
7380 * against TA of received packets.
7381 */
7382
7383 if (cmd == CDP_NAC_PARAM_ADD) {
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05307384 dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
7385 client_macaddr);
Soumya Bhatbc719e62018-02-18 18:21:25 +05307386 } else if (cmd == CDP_NAC_PARAM_DEL) {
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05307387 dp_update_filter_neighbour_peers(vdev_handle,
7388 DP_NAC_PARAM_DEL,
7389 client_macaddr);
Soumya Bhatbc719e62018-02-18 18:21:25 +05307390 }
7391
7392 if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
7393 soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
Akshay Kosigi1a9c6d12018-04-26 00:54:23 +05307394 ((void *)vdev->pdev->ctrl_pdev,
7395 vdev->vdev_id, cmd, bssid);
Soumya Bhatbc719e62018-02-18 18:21:25 +05307396
7397 return QDF_STATUS_SUCCESS;
7398}
7399#endif
7400
Chaithanya Garrepalli2f572792018-04-11 17:49:28 +05307401static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t *soc_hdl,
7402 uint32_t max_peers)
7403{
7404 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7405
7406 soc->max_peers = max_peers;
7407
7408 qdf_print ("%s max_peers %u\n", __func__, max_peers);
7409
7410 if (dp_peer_find_attach(soc))
7411 return QDF_STATUS_E_FAILURE;
7412
7413 return QDF_STATUS_SUCCESS;
7414}
7415
Sravan Kumar Kairam5a6f5902018-07-04 17:32:24 +05307416/**
7417 * dp_pdev_set_ctrl_pdev() - set ctrl pdev handle in dp pdev
7418 * @dp_pdev: dp pdev handle
7419 * @ctrl_pdev: UMAC ctrl pdev handle
7420 *
7421 * Return: void
7422 */
7423static void dp_pdev_set_ctrl_pdev(struct cdp_pdev *dp_pdev,
7424 struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
7425{
7426 struct dp_pdev *pdev = (struct dp_pdev *)dp_pdev;
7427
7428 pdev->ctrl_pdev = ctrl_pdev;
7429}
7430
Leo Chang5ea93a42016-11-03 12:39:49 -07007431static struct cdp_cmn_ops dp_ops_cmn = {
7432 .txrx_soc_attach_target = dp_soc_attach_target_wifi3,
7433 .txrx_vdev_attach = dp_vdev_attach_wifi3,
7434 .txrx_vdev_detach = dp_vdev_detach_wifi3,
7435 .txrx_pdev_attach = dp_pdev_attach_wifi3,
7436 .txrx_pdev_detach = dp_pdev_detach_wifi3,
Dhanashri Atre6d90ef32016-11-10 16:27:38 -08007437 .txrx_peer_create = dp_peer_create_wifi3,
7438 .txrx_peer_setup = dp_peer_setup_wifi3,
Tallapragada Kalyan71c46b92018-03-01 13:17:10 +05307439#ifdef FEATURE_AST
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05307440 .txrx_peer_teardown = dp_peer_teardown_wifi3,
7441#else
Dhanashri Atre6d90ef32016-11-10 16:27:38 -08007442 .txrx_peer_teardown = NULL,
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05307443#endif
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +05307444 .txrx_peer_add_ast = dp_peer_add_ast_wifi3,
7445 .txrx_peer_del_ast = dp_peer_del_ast_wifi3,
7446 .txrx_peer_update_ast = dp_peer_update_ast_wifi3,
7447 .txrx_peer_ast_hash_find = dp_peer_ast_hash_find_wifi3,
7448 .txrx_peer_ast_get_pdev_id = dp_peer_ast_get_pdev_id_wifi3,
7449 .txrx_peer_ast_get_next_hop = dp_peer_ast_get_next_hop_wifi3,
7450 .txrx_peer_ast_set_type = dp_peer_ast_set_type_wifi3,
Pamidipati, Vijay3756b762018-05-12 11:10:37 +05307451 .txrx_peer_ast_get_type = dp_peer_ast_get_type_wifi3,
Dhanashri Atre6d90ef32016-11-10 16:27:38 -08007452 .txrx_peer_delete = dp_peer_delete_wifi3,
Leo Chang5ea93a42016-11-03 12:39:49 -07007453 .txrx_vdev_register = dp_vdev_register_wifi3,
7454 .txrx_soc_detach = dp_soc_detach_wifi3,
7455 .txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
7456 .txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
7457 .txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
Chaithanya Garrepalli2faa46f2018-04-09 12:34:20 +05307458 .txrx_ath_getstats = dp_get_device_stats,
Karunakar Dasinenied1de122016-08-02 11:57:59 -07007459 .addba_requestprocess = dp_addba_requestprocess_wifi3,
7460 .addba_responsesetup = dp_addba_responsesetup_wifi3,
Sumedh Baikady1c61e062018-02-12 22:25:47 -08007461 .addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
Karunakar Dasinenied1de122016-08-02 11:57:59 -07007462 .delba_process = dp_delba_process_wifi3,
Gyanranjan Hazarika99a58d32017-12-22 21:56:17 -08007463 .set_addba_response = dp_set_addba_response,
Ishank Jain1e7401c2017-02-17 15:38:39 +05307464 .get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
Manikandan Mohane2fa8b72017-03-22 11:18:26 -07007465 .flush_cache_rx_queue = NULL,
Ishank Jain949674c2017-02-27 17:09:29 +05307466 /* TODO: get API's for dscp-tid need to be added*/
7467 .set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
7468 .set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05307469 .txrx_stats_request = dp_txrx_stats_request,
Kai Chen6eca1a62017-01-12 10:17:53 -08007470 .txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
phadiman7821bf82018-02-06 16:03:54 +05307471 .txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
7472 .txrx_set_nac = dp_set_nac,
7473 .txrx_get_tx_pending = dp_get_tx_pending,
7474 .txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
7475 .txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007476 .display_stats = dp_txrx_dump_stats,
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05307477 .txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
7478 .txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
psimhac983d7e2017-07-26 15:20:07 -07007479 .txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
Venkateswara Swamy Bandarua95b3242017-05-19 20:20:30 +05307480 .txrx_intr_detach = dp_soc_interrupt_detach,
Gurumoorthi Gnanasambandhaned4bcf82017-05-24 00:10:59 +05307481 .set_pn_check = dp_set_pn_check_wifi3,
Venkata Sharath Chandra Manchala3e8add82017-07-10 11:59:54 -07007482 .update_config_parameters = dp_update_config_parameters,
Leo Chang5ea93a42016-11-03 12:39:49 -07007483 /* TODO: Add other functions */
Santosh Anbu2280e862018-01-03 22:25:53 +05307484 .txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
7485 .get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
7486 .set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
Pamidipati, Vijayd3478ef2018-02-06 23:52:29 +05307487 .get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
7488 .set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
7489 .tx_send = dp_tx_send,
Ruchi, Agrawal89219d92018-02-26 16:43:06 +05307490 .txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
7491 .txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
7492 .txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
Chaithanya Garrepalli2f572792018-04-11 17:49:28 +05307493 .txrx_peer_map_attach = dp_peer_map_attach_wifi3,
Sravan Kumar Kairam5a6f5902018-07-04 17:32:24 +05307494 .txrx_pdev_set_ctrl_pdev = dp_pdev_set_ctrl_pdev,
Leo Chang5ea93a42016-11-03 12:39:49 -07007495};
7496
/* Control-path ops: peer authorization, vdev/pdev parameter setters,
 * neighbour-peer filtering, WDI event subscription and key install.
 */
static struct cdp_ctrl_ops dp_ops_ctrl = {
	.txrx_peer_authorize = dp_peer_authorize,
#ifdef QCA_SUPPORT_SON
	.txrx_set_inact_params = dp_set_inact_params,
	.txrx_start_inact_timer = dp_start_inact_timer,
	.txrx_set_overload = dp_set_overload,
	.txrx_peer_is_inact = dp_peer_is_inact,
	.txrx_mark_peer_inact = dp_mark_peer_inact,
#endif
	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
#ifdef MESH_MODE_SUPPORT
	.txrx_set_mesh_mode = dp_peer_set_mesh_mode,
	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
#endif
	.txrx_set_vdev_param = dp_set_vdev_param,
	.txrx_peer_set_nawds = dp_peer_set_nawds,
	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
	.txrx_update_filter_neighbour_peers =
		dp_update_filter_neighbour_peers,
	.txrx_get_sec_type = dp_get_sec_type,
	/* TODO: Add other functions */
	.txrx_wdi_event_sub = dp_wdi_event_sub,
	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
#ifdef WDI_EVENT_ENABLE
	.txrx_get_pldev = dp_get_pldev,
#endif
	.txrx_set_pdev_param = dp_set_pdev_param,
#ifdef ATH_SUPPORT_NAC_RSSI
	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
#endif
	.set_key = dp_set_michael_key,
};
7533
/* Multicast-enhancement ops (IQUE): mcast-to-ucast conversion descriptors */
static struct cdp_me_ops dp_ops_me = {
#ifdef ATH_SUPPORT_IQUE
	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
#endif
};
7541
/* Monitor-mode ops; set_filter_* hooks are intentionally unimplemented */
static struct cdp_mon_ops dp_ops_mon = {
	.txrx_monitor_set_filter_ucast_data = NULL,
	.txrx_monitor_set_filter_mcast_data = NULL,
	.txrx_monitor_set_filter_non_data = NULL,
	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
	/* Added support for HK advance filter */
	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
};
7553
/* Host statistics ops: per-peer/FW/HTT stats and enhanced-stats control */
static struct cdp_host_stats_ops dp_ops_host_stats = {
	.txrx_per_peer_stats = dp_get_host_peer_stats,
	.get_fw_peer_stats = dp_get_fw_peer_stats,
	.get_htt_stats = dp_get_htt_stats,
	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
	.txrx_stats_publish = dp_txrx_stats_publish,
	/* TODO */
};
7563
Leo Chang5ea93a42016-11-03 12:39:49 -07007564static struct cdp_raw_ops dp_ops_raw = {
7565 /* TODO */
7566};
7567
#ifdef CONFIG_WIN
/* Peer-flow ops (WIN builds only): placeholder, no handlers yet */
static struct cdp_pflow_ops dp_ops_pflow = {
	/* TODO */
};
#endif /* CONFIG_WIN */
7573
Yue Ma245b47b2017-02-21 16:35:31 -08007574#ifdef FEATURE_RUNTIME_PM
7575/**
7576 * dp_runtime_suspend() - ensure DP is ready to runtime suspend
7577 * @opaque_pdev: DP pdev context
7578 *
7579 * DP is ready to runtime suspend if there are no pending TX packets.
7580 *
7581 * Return: QDF_STATUS
7582 */
7583static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
7584{
7585 struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7586 struct dp_soc *soc = pdev->soc;
7587
7588 /* Call DP TX flow control API to check if there is any
7589 pending packets */
7590
7591 if (soc->intr_mode == DP_INTR_POLL)
7592 qdf_timer_stop(&soc->int_timer);
7593
7594 return QDF_STATUS_SUCCESS;
7595}
7596
7597/**
7598 * dp_runtime_resume() - ensure DP is ready to runtime resume
7599 * @opaque_pdev: DP pdev context
7600 *
7601 * Resume DP for runtime PM.
7602 *
7603 * Return: QDF_STATUS
7604 */
7605static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
7606{
7607 struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7608 struct dp_soc *soc = pdev->soc;
7609 void *hal_srng;
7610 int i;
7611
7612 if (soc->intr_mode == DP_INTR_POLL)
7613 qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7614
7615 for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
7616 hal_srng = soc->tcl_data_ring[i].hal_srng;
7617 if (hal_srng) {
7618 /* We actually only need to acquire the lock */
7619 hal_srng_access_start(soc->hal_soc, hal_srng);
7620 /* Update SRC ring head pointer for HW to send
7621 all pending packets */
7622 hal_srng_access_end(soc->hal_soc, hal_srng);
7623 }
7624 }
7625
7626 return QDF_STATUS_SUCCESS;
7627}
7628#endif /* FEATURE_RUNTIME_PM */
7629
Dustin Brown4a3b96b2017-05-10 15:49:38 -07007630static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
7631{
7632 struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7633 struct dp_soc *soc = pdev->soc;
7634
psimhac983d7e2017-07-26 15:20:07 -07007635 if (soc->intr_mode == DP_INTR_POLL)
7636 qdf_timer_stop(&soc->int_timer);
Dustin Brown4a3b96b2017-05-10 15:49:38 -07007637
7638 return QDF_STATUS_SUCCESS;
7639}
7640
7641static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
7642{
7643 struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7644 struct dp_soc *soc = pdev->soc;
7645
psimhac983d7e2017-07-26 15:20:07 -07007646 if (soc->intr_mode == DP_INTR_POLL)
7647 qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
Dustin Brown4a3b96b2017-05-10 15:49:38 -07007648
7649 return QDF_STATUS_SUCCESS;
7650}
Dustin Brown4a3b96b2017-05-10 15:49:38 -07007651
Nandha Kishore Easwaranfd7832e2016-11-20 18:22:48 +05307652#ifndef CONFIG_WIN
Leo Chang5ea93a42016-11-03 12:39:49 -07007653static struct cdp_misc_ops dp_ops_misc = {
Kabilan Kannan60e3b302017-09-07 20:06:17 -07007654 .tx_non_std = dp_tx_non_std,
Leo Chang5ea93a42016-11-03 12:39:49 -07007655 .get_opmode = dp_get_opmode,
Dustin Brown4a3b96b2017-05-10 15:49:38 -07007656#ifdef FEATURE_RUNTIME_PM
Yue Ma245b47b2017-02-21 16:35:31 -08007657 .runtime_suspend = dp_runtime_suspend,
7658 .runtime_resume = dp_runtime_resume,
7659#endif /* FEATURE_RUNTIME_PM */
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07007660 .pkt_log_init = dp_pkt_log_init,
7661 .pkt_log_con_service = dp_pkt_log_con_service,
Leo Chang5ea93a42016-11-03 12:39:49 -07007662};
7663
/* TX flow-control ops (flow pool map/unmap and pause callback) */
static struct cdp_flowctl_ops dp_ops_flowctl = {
	/* WIFI 3.0 DP implement as required. */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	.flow_pool_map_handler = dp_tx_flow_pool_map,
	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
	.register_pause_cb = dp_txrx_register_pause_cb,
	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
};
7673
/* Legacy flow-control ops: intentionally empty for WIFI 3.0 DP */
static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
7677
Yun Parkfde6b9e2017-06-26 17:13:11 -07007678#ifdef IPA_OFFLOAD
Leo Chang5ea93a42016-11-03 12:39:49 -07007679static struct cdp_ipa_ops dp_ops_ipa = {
Yun Parkfde6b9e2017-06-26 17:13:11 -07007680 .ipa_get_resource = dp_ipa_get_resource,
7681 .ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
7682 .ipa_op_response = dp_ipa_op_response,
7683 .ipa_register_op_cb = dp_ipa_register_op_cb,
7684 .ipa_get_stat = dp_ipa_get_stat,
7685 .ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
7686 .ipa_enable_autonomy = dp_ipa_enable_autonomy,
7687 .ipa_disable_autonomy = dp_ipa_disable_autonomy,
7688 .ipa_setup = dp_ipa_setup,
7689 .ipa_cleanup = dp_ipa_cleanup,
7690 .ipa_setup_iface = dp_ipa_setup_iface,
7691 .ipa_cleanup_iface = dp_ipa_cleanup_iface,
7692 .ipa_enable_pipes = dp_ipa_enable_pipes,
7693 .ipa_disable_pipes = dp_ipa_disable_pipes,
7694 .ipa_set_perf_level = dp_ipa_set_perf_level
Leo Chang5ea93a42016-11-03 12:39:49 -07007695};
Yun Parkfde6b9e2017-06-26 17:13:11 -07007696#endif
Leo Chang5ea93a42016-11-03 12:39:49 -07007697
Leo Chang5ea93a42016-11-03 12:39:49 -07007698static struct cdp_bus_ops dp_ops_bus = {
Dustin Brown4a3b96b2017-05-10 15:49:38 -07007699 .bus_suspend = dp_bus_suspend,
7700 .bus_resume = dp_bus_resume
Leo Chang5ea93a42016-11-03 12:39:49 -07007701};
7702
/* OCB ops: intentionally empty for WIFI 3.0 DP */
static struct cdp_ocb_ops dp_ops_ocb = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
7706
7707
/* Thermal throttle ops: intentionally empty for WIFI 3.0 DP */
static struct cdp_throttle_ops dp_ops_throttle = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
7711
/* Mobile stats ops: intentionally empty for WIFI 3.0 DP */
static struct cdp_mob_stats_ops dp_ops_mob_stats = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
7715
/* Config ops: intentionally empty for WIFI 3.0 DP */
static struct cdp_cfg_ops dp_ops_cfg = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
7719
Mohit Khannaadfe9082017-11-17 13:11:17 -08007720/*
7721 * dp_wrapper_peer_get_ref_by_addr - wrapper function to get to peer
7722 * @dev: physical device instance
7723 * @peer_mac_addr: peer mac address
7724 * @local_id: local id for the peer
7725 * @debug_id: to track enum peer access
7726
7727 * Return: peer instance pointer
7728 */
7729static inline void *
7730dp_wrapper_peer_get_ref_by_addr(struct cdp_pdev *dev, u8 *peer_mac_addr,
7731 u8 *local_id,
7732 enum peer_debug_id_type debug_id)
7733{
7734 /*
7735 * Currently this function does not implement the "get ref"
7736 * functionality and is mapped to dp_find_peer_by_addr which does not
7737 * increment the peer ref count. So the peer state is uncertain after
7738 * calling this API. The functionality needs to be implemented.
7739 * Accordingly the corresponding release_ref function is NULL.
7740 */
7741 return dp_find_peer_by_addr(dev, peer_mac_addr, local_id);
7742}
7743
Leo Chang5ea93a42016-11-03 12:39:49 -07007744static struct cdp_peer_ops dp_ops_peer = {
7745 .register_peer = dp_register_peer,
7746 .clear_peer = dp_clear_peer,
7747 .find_peer_by_addr = dp_find_peer_by_addr,
7748 .find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
Mohit Khannaadfe9082017-11-17 13:11:17 -08007749 .peer_get_ref_by_addr = dp_wrapper_peer_get_ref_by_addr,
7750 .peer_release_ref = NULL,
Leo Chang5ea93a42016-11-03 12:39:49 -07007751 .local_peer_id = dp_local_peer_id,
7752 .peer_find_by_local_id = dp_peer_find_by_local_id,
7753 .peer_state_update = dp_peer_state_update,
7754 .get_vdevid = dp_get_vdevid,
Yun Parkfde6b9e2017-06-26 17:13:11 -07007755 .get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
Leo Chang5ea93a42016-11-03 12:39:49 -07007756 .peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
7757 .get_vdev_for_peer = dp_get_vdev_for_peer,
7758 .get_peer_state = dp_get_peer_state,
Alok Kumarfcdb1852018-07-05 18:55:48 +05307759 .get_last_mgmt_timestamp = dp_get_last_mgmt_timestamp,
7760 .update_last_mgmt_timestamp = dp_update_last_mgmt_timestamp,
Leo Chang5ea93a42016-11-03 12:39:49 -07007761};
Nandha Kishore Easwaranfd7832e2016-11-20 18:22:48 +05307762#endif
Leo Chang5ea93a42016-11-03 12:39:49 -07007763
/* Top-level cdp ops table handed to the control plane in dp_soc_attach */
static struct cdp_ops dp_txrx_ops = {
	.cmn_drv_ops = &dp_ops_cmn,
	.ctrl_ops = &dp_ops_ctrl,
	.me_ops = &dp_ops_me,
	.mon_ops = &dp_ops_mon,
	.host_stats_ops = &dp_ops_host_stats,
	.wds_ops = &dp_ops_wds,
	.raw_ops = &dp_ops_raw,
#ifdef CONFIG_WIN
	.pflow_ops = &dp_ops_pflow,
#endif /* CONFIG_WIN */
#ifndef CONFIG_WIN
	.misc_ops = &dp_ops_misc,
	.cfg_ops = &dp_ops_cfg,
	.flowctl_ops = &dp_ops_flowctl,
	.l_flowctl_ops = &dp_ops_l_flowctl,
#ifdef IPA_OFFLOAD
	.ipa_ops = &dp_ops_ipa,
#endif
	.bus_ops = &dp_ops_bus,
	.ocb_ops = &dp_ops_ocb,
	.peer_ops = &dp_ops_peer,
	.throttle_ops = &dp_ops_throttle,
	.mob_stats_ops = &dp_ops_mob_stats,
#endif
};
7790
7791/*
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05307792 * dp_soc_set_txrx_ring_map()
7793 * @dp_soc: DP handler for soc
7794 *
7795 * Return: Void
7796 */
7797static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
7798{
7799 uint32_t i;
7800 for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
7801 soc->tx_ring_map[i] = dp_cpu_ring_map[DP_DEFAULT_MAP][i];
7802 }
7803}
7804
7805/*
Leo Chang5ea93a42016-11-03 12:39:49 -07007806 * dp_soc_attach_wifi3() - Attach txrx SOC
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05307807 * @ctrl_psoc: Opaque SOC handle from control plane
Leo Chang5ea93a42016-11-03 12:39:49 -07007808 * @htc_handle: Opaque HTC handle
7809 * @hif_handle: Opaque HIF handle
7810 * @qdf_osdev: QDF device
7811 *
7812 * Return: DP SOC handle on success, NULL on failure
7813 */
Jeff Johnson07718572017-01-10 13:57:15 -08007814/*
7815 * Local prototype added to temporarily address warning caused by
7816 * -Wmissing-prototypes. A more correct solution, namely to expose
7817 * a prototype in an appropriate header file, will come later.
7818 */
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05307819void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
Jeff Johnson07718572017-01-10 13:57:15 -08007820 HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05307821 struct ol_if_ops *ol_ops);
7822void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
Leo Chang5ea93a42016-11-03 12:39:49 -07007823 HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05307824 struct ol_if_ops *ol_ops)
Leo Chang5ea93a42016-11-03 12:39:49 -07007825{
7826 struct dp_soc *soc = qdf_mem_malloc(sizeof(*soc));
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05307827 int target_type;
Leo Chang5ea93a42016-11-03 12:39:49 -07007828
7829 if (!soc) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05307830 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7831 FL("DP SOC memory allocation failed"));
Leo Chang5ea93a42016-11-03 12:39:49 -07007832 goto fail0;
7833 }
7834
7835 soc->cdp_soc.ops = &dp_txrx_ops;
7836 soc->cdp_soc.ol_ops = ol_ops;
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05307837 soc->ctrl_psoc = ctrl_psoc;
Leo Chang5ea93a42016-11-03 12:39:49 -07007838 soc->osdev = qdf_osdev;
7839 soc->hif_handle = hif_handle;
7840
7841 soc->hal_soc = hif_get_hal_handle(hif_handle);
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05307842 soc->htt_handle = htt_soc_attach(soc, ctrl_psoc, htc_handle,
Leo Chang5ea93a42016-11-03 12:39:49 -07007843 soc->hal_soc, qdf_osdev);
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05307844 if (!soc->htt_handle) {
7845 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7846 FL("HTT attach failed"));
Leo Chang5ea93a42016-11-03 12:39:49 -07007847 goto fail1;
7848 }
7849
Vivek126db5d2018-07-25 22:05:04 +05307850 soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
Leo Chang5ea93a42016-11-03 12:39:49 -07007851 if (!soc->wlan_cfg_ctx) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05307852 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Vivek126db5d2018-07-25 22:05:04 +05307853 FL("wlan_cfg_soc_attach failed"));
Leo Chang5ea93a42016-11-03 12:39:49 -07007854 goto fail2;
7855 }
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05307856 target_type = hal_get_target_type(soc->hal_soc);
7857 switch (target_type) {
7858 case TARGET_TYPE_QCA6290:
Balamurugan Mahalingam96d2d412018-07-10 10:11:58 +05307859#ifdef QCA_WIFI_QCA6390
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05307860 case TARGET_TYPE_QCA6390:
Balamurugan Mahalingam96d2d412018-07-10 10:11:58 +05307861#endif
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05307862 wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
7863 REO_DST_RING_SIZE_QCA6290);
7864 break;
7865 case TARGET_TYPE_QCA8074:
7866 wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
7867 REO_DST_RING_SIZE_QCA8074);
7868 break;
7869 default:
7870 qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
7871 qdf_assert_always(0);
7872 break;
7873 }
Pamidipati, Vijay6b0d2a82017-06-09 04:46:32 +05307874
Vivek126db5d2018-07-25 22:05:04 +05307875 wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
7876 cfg_get(ctrl_psoc, CFG_DP_RX_HASH));
Ruchi, Agrawal34721392017-11-13 18:02:09 +05307877 soc->cce_disable = false;
Tallapragada Kalyan4c183b82017-09-13 23:48:14 +05307878
Pamidipati, Vijay6b0d2a82017-06-09 04:46:32 +05307879 if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05307880 int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
Pamidipati, Vijay6b0d2a82017-06-09 04:46:32 +05307881 CDP_CFG_MAX_PEER_ID);
7882
7883 if (ret != -EINVAL) {
7884 wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
7885 }
Ruchi, Agrawal34721392017-11-13 18:02:09 +05307886
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05307887 ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
Ruchi, Agrawal34721392017-11-13 18:02:09 +05307888 CDP_CFG_CCE_DISABLE);
Ruchi, Agrawalf279a4a2018-02-26 18:12:44 +05307889 if (ret == 1)
Ruchi, Agrawal34721392017-11-13 18:02:09 +05307890 soc->cce_disable = true;
Pamidipati, Vijay6b0d2a82017-06-09 04:46:32 +05307891 }
7892
Leo Chang5ea93a42016-11-03 12:39:49 -07007893 qdf_spinlock_create(&soc->peer_ref_mutex);
7894
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08007895 qdf_spinlock_create(&soc->reo_desc_freelist_lock);
7896 qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
7897
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05307898 /* fill the tx/rx cpu ring map*/
7899 dp_soc_set_txrx_ring_map(soc);
Om Prakash Tripathi12126822017-08-03 10:21:24 +05307900
7901 qdf_spinlock_create(&soc->htt_stats.lock);
7902 /* initialize work queue for stats processing */
7903 qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
7904
Stephan Raj Ignatious Durairaje5dd51c2018-02-09 16:39:12 +05307905 /*Initialize inactivity timer for wifison */
7906 dp_init_inact_timer(soc);
7907
Leo Chang5ea93a42016-11-03 12:39:49 -07007908 return (void *)soc;
7909
7910fail2:
7911 htt_soc_detach(soc->htt_handle);
7912fail1:
7913 qdf_mem_free(soc);
7914fail0:
7915 return NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07007916}
Keyur Parekhfad6d082017-05-07 08:54:47 -07007917
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08007918/*
7919 * dp_get_pdev_for_mac_id() - Return pdev for mac_id
7920 *
7921 * @soc: handle to DP soc
7922 * @mac_id: MAC id
7923 *
7924 * Return: Return pdev corresponding to MAC
7925 */
7926void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
7927{
7928 if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
7929 return soc->pdev_list[mac_id];
7930
7931 /* Typically for MCL as there only 1 PDEV*/
7932 return soc->pdev_list[0];
7933}
7934
7935/*
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07007936 * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
7937 * @soc: DP SoC context
7938 * @max_mac_rings: No of MAC rings
7939 *
7940 * Return: None
7941 */
7942static
7943void dp_is_hw_dbs_enable(struct dp_soc *soc,
7944 int *max_mac_rings)
7945{
7946 bool dbs_enable = false;
7947 if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
7948 dbs_enable = soc->cdp_soc.ol_ops->
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05307949 is_hw_dbs_2x2_capable(soc->ctrl_psoc);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07007950
7951 *max_mac_rings = (dbs_enable)?(*max_mac_rings):1;
7952}
7953
Keyur Parekhfad6d082017-05-07 08:54:47 -07007954/*
7955* dp_set_pktlog_wifi3() - attach txrx vdev
7956* @pdev: Datapath PDEV handle
7957* @event: which event's notifications are being subscribed to
7958* @enable: WDI event subscribe or not. (True or False)
7959*
7960* Return: Success, NULL on failure
7961*/
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07007962#ifdef WDI_EVENT_ENABLE
Keyur Parekhfad6d082017-05-07 08:54:47 -07007963int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
7964 bool enable)
7965{
7966 struct dp_soc *soc = pdev->soc;
7967 struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07007968 int max_mac_rings = wlan_cfg_get_num_mac_rings
7969 (pdev->wlan_cfg_ctx);
7970 uint8_t mac_id = 0;
7971
7972 dp_is_hw_dbs_enable(soc, &max_mac_rings);
7973
7974 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
7975 FL("Max_mac_rings %d \n"),
7976 max_mac_rings);
Keyur Parekhfad6d082017-05-07 08:54:47 -07007977
7978 if (enable) {
7979 switch (event) {
7980 case WDI_EVENT_RX_DESC:
7981 if (pdev->monitor_vdev) {
7982 /* Nothing needs to be done if monitor mode is
7983 * enabled
7984 */
7985 return 0;
7986 }
7987 if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
7988 pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
7989 htt_tlv_filter.mpdu_start = 1;
7990 htt_tlv_filter.msdu_start = 1;
7991 htt_tlv_filter.msdu_end = 1;
7992 htt_tlv_filter.mpdu_end = 1;
7993 htt_tlv_filter.packet_header = 1;
7994 htt_tlv_filter.attention = 1;
7995 htt_tlv_filter.ppdu_start = 1;
7996 htt_tlv_filter.ppdu_end = 1;
7997 htt_tlv_filter.ppdu_end_user_stats = 1;
7998 htt_tlv_filter.ppdu_end_user_stats_ext = 1;
7999 htt_tlv_filter.ppdu_end_status_done = 1;
8000 htt_tlv_filter.enable_fp = 1;
nobeljd124b742017-10-16 11:59:12 -07008001 htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
8002 htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
8003 htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
8004 htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
8005 htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
8006 htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
Keyur Parekhfad6d082017-05-07 08:54:47 -07008007
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008008 for (mac_id = 0; mac_id < max_mac_rings;
8009 mac_id++) {
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008010 int mac_for_pdev =
8011 dp_get_mac_id_for_pdev(mac_id,
8012 pdev->pdev_id);
8013
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008014 htt_h2t_rx_ring_cfg(soc->htt_handle,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008015 mac_for_pdev,
8016 pdev->rxdma_mon_status_ring[mac_id]
8017 .hal_srng,
8018 RXDMA_MONITOR_STATUS,
8019 RX_BUFFER_SIZE,
8020 &htt_tlv_filter);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008021
8022 }
8023
8024 if (soc->reap_timer_init)
8025 qdf_timer_mod(&soc->mon_reap_timer,
8026 DP_INTR_POLL_TIMER_MS);
Keyur Parekhfad6d082017-05-07 08:54:47 -07008027 }
8028 break;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008029
Keyur Parekhfad6d082017-05-07 08:54:47 -07008030 case WDI_EVENT_LITE_RX:
8031 if (pdev->monitor_vdev) {
8032 /* Nothing needs to be done if monitor mode is
8033 * enabled
8034 */
8035 return 0;
8036 }
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008037
Keyur Parekhfad6d082017-05-07 08:54:47 -07008038 if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
8039 pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008040
Keyur Parekhfad6d082017-05-07 08:54:47 -07008041 htt_tlv_filter.ppdu_start = 1;
8042 htt_tlv_filter.ppdu_end = 1;
8043 htt_tlv_filter.ppdu_end_user_stats = 1;
8044 htt_tlv_filter.ppdu_end_user_stats_ext = 1;
8045 htt_tlv_filter.ppdu_end_status_done = 1;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008046 htt_tlv_filter.mpdu_start = 1;
Keyur Parekhfad6d082017-05-07 08:54:47 -07008047 htt_tlv_filter.enable_fp = 1;
nobeljd124b742017-10-16 11:59:12 -07008048 htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
8049 htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
8050 htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
8051 htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
8052 htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
8053 htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
Keyur Parekhfad6d082017-05-07 08:54:47 -07008054
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008055 for (mac_id = 0; mac_id < max_mac_rings;
8056 mac_id++) {
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008057 int mac_for_pdev =
8058 dp_get_mac_id_for_pdev(mac_id,
8059 pdev->pdev_id);
8060
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008061 htt_h2t_rx_ring_cfg(soc->htt_handle,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008062 mac_for_pdev,
8063 pdev->rxdma_mon_status_ring[mac_id]
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008064 .hal_srng,
Keyur Parekhfad6d082017-05-07 08:54:47 -07008065 RXDMA_MONITOR_STATUS,
8066 RX_BUFFER_SIZE_PKTLOG_LITE,
8067 &htt_tlv_filter);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008068 }
8069
8070 if (soc->reap_timer_init)
8071 qdf_timer_mod(&soc->mon_reap_timer,
8072 DP_INTR_POLL_TIMER_MS);
Keyur Parekhfad6d082017-05-07 08:54:47 -07008073 }
8074 break;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008075
Keyur Parekhdb0fa142017-07-13 19:40:22 -07008076 case WDI_EVENT_LITE_T2H:
8077 if (pdev->monitor_vdev) {
8078 /* Nothing needs to be done if monitor mode is
8079 * enabled
8080 */
8081 return 0;
8082 }
Venkata Sharath Chandra Manchala0ad4fda2018-03-01 13:45:46 -08008083
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008084 for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008085 int mac_for_pdev = dp_get_mac_id_for_pdev(
8086 mac_id, pdev->pdev_id);
8087
Soumya Bhat0d6245c2018-02-08 21:02:57 +05308088 pdev->pktlog_ppdu_stats = true;
Venkata Sharath Chandra Manchala0ad4fda2018-03-01 13:45:46 -08008089 dp_h2t_cfg_stats_msg_send(pdev,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008090 DP_PPDU_TXLITE_STATS_BITMASK_CFG,
8091 mac_for_pdev);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008092 }
Keyur Parekhdb0fa142017-07-13 19:40:22 -07008093 break;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008094
Keyur Parekhfad6d082017-05-07 08:54:47 -07008095 default:
8096 /* Nothing needs to be done for other pktlog types */
8097 break;
8098 }
8099 } else {
8100 switch (event) {
8101 case WDI_EVENT_RX_DESC:
8102 case WDI_EVENT_LITE_RX:
8103 if (pdev->monitor_vdev) {
8104 /* Nothing needs to be done if monitor mode is
8105 * enabled
8106 */
8107 return 0;
8108 }
8109 if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
8110 pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008111
8112 for (mac_id = 0; mac_id < max_mac_rings;
8113 mac_id++) {
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008114 int mac_for_pdev =
8115 dp_get_mac_id_for_pdev(mac_id,
8116 pdev->pdev_id);
8117
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008118 htt_h2t_rx_ring_cfg(soc->htt_handle,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008119 mac_for_pdev,
8120 pdev->rxdma_mon_status_ring[mac_id]
8121 .hal_srng,
8122 RXDMA_MONITOR_STATUS,
8123 RX_BUFFER_SIZE,
8124 &htt_tlv_filter);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008125 }
8126
8127 if (soc->reap_timer_init)
8128 qdf_timer_stop(&soc->mon_reap_timer);
Keyur Parekhfad6d082017-05-07 08:54:47 -07008129 }
8130 break;
Keyur Parekhdb0fa142017-07-13 19:40:22 -07008131 case WDI_EVENT_LITE_T2H:
8132 if (pdev->monitor_vdev) {
8133 /* Nothing needs to be done if monitor mode is
8134 * enabled
8135 */
8136 return 0;
8137 }
8138 /* To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW
8139 * passing value 0. Once these macros will define in htt
8140 * header file will use proper macros
8141 */
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008142 for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008143 int mac_for_pdev =
8144 dp_get_mac_id_for_pdev(mac_id,
8145 pdev->pdev_id);
8146
Soumya Bhat0d6245c2018-02-08 21:02:57 +05308147 pdev->pktlog_ppdu_stats = false;
8148 if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
8149 dp_h2t_cfg_stats_msg_send(pdev, 0,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008150 mac_for_pdev);
Soumya Bhat0d6245c2018-02-08 21:02:57 +05308151 } else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
8152 dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008153 mac_for_pdev);
Soumya Bhat0d6245c2018-02-08 21:02:57 +05308154 } else if (pdev->enhanced_stats_en) {
8155 dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008156 mac_for_pdev);
Soumya Bhat0d6245c2018-02-08 21:02:57 +05308157 }
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008158 }
8159
Keyur Parekhdb0fa142017-07-13 19:40:22 -07008160 break;
Keyur Parekhfad6d082017-05-07 08:54:47 -07008161 default:
8162 /* Nothing needs to be done for other pktlog types */
8163 break;
8164 }
8165 }
8166 return 0;
8167}
8168#endif