blob: 1dbb70c145650f6a33277da712581adbcb06261a [file] [log] [blame]
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001/*
Stephan Raj Ignatious Durairajba291292018-01-04 13:36:34 +05302 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003 *
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16 * PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include <qdf_types.h>
20#include <qdf_lock.h>
Dhanashri Atre14049172016-11-11 18:32:36 -080021#include <qdf_net_types.h>
Dhanashri Atre0da31222017-03-23 12:30:58 -070022#include <qdf_lro.h>
Tallapragada Kalyan4c183b82017-09-13 23:48:14 +053023#include <qdf_module.h>
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +053024#include <hal_hw_headers.h>
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070025#include <hal_api.h>
26#include <hif.h>
27#include <htt.h>
28#include <wdi_event.h>
29#include <queue.h>
30#include "dp_htt.h"
31#include "dp_types.h"
32#include "dp_internal.h"
Vijay Pamidipatid41d6d62016-10-19 21:19:00 +053033#include "dp_tx.h"
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -070034#include "dp_tx_desc.h"
Leo Chang5ea93a42016-11-03 12:39:49 -070035#include "dp_rx.h"
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080036#include <cdp_txrx_handle.h>
Ravi Joshiaf9ace82017-02-17 12:41:48 -080037#include <wlan_cfg.h>
Ishank Jainbc2d91f2017-01-03 18:14:54 +053038#include "cdp_txrx_cmn_struct.h"
Prathyusha Guduri184b6402018-02-04 23:01:49 +053039#include "cdp_txrx_stats_struct.h"
Dhanashri Atre14049172016-11-11 18:32:36 -080040#include <qdf_util.h>
Ishank Jain1e7401c2017-02-17 15:38:39 +053041#include "dp_peer.h"
Kai Chen6eca1a62017-01-12 10:17:53 -080042#include "dp_rx_mon.h"
Ishank Jain6290a3c2017-03-21 10:49:39 +053043#include "htt_stats.h"
Keyur Parekhfad6d082017-05-07 08:54:47 -070044#include "qdf_mem.h" /* qdf_mem_malloc,free */
Vivek126db5d2018-07-25 22:05:04 +053045#include "cfg_ucfg_api.h"
Manjunathappa Prakash5f050a82017-07-18 22:00:05 -070046#ifdef QCA_LL_TX_FLOW_CONTROL_V2
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -070047#include "cdp_txrx_flow_ctrl_v2.h"
Manjunathappa Prakash5f050a82017-07-18 22:00:05 -070048#else
/* Flow-control v2 is compiled out: dumping flow-pool info is a no-op stub */
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
}
54#endif
Yun Parkfde6b9e2017-06-26 17:13:11 -070055#include "dp_ipa.h"
Ravi Joshiaf9ace82017-02-17 12:41:48 -080056
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070057#ifdef CONFIG_MCL
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070058#ifndef REMOVE_PKT_LOG
59#include <pktlog_ac_api.h>
60#include <pktlog_ac.h>
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070061#endif
62#endif
63static void dp_pktlogmod_exit(struct dp_pdev *handle);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +053064static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
Akshay Kosigi78eced82018-05-14 14:53:48 +053065 uint8_t *peer_mac_addr,
66 struct cdp_ctrl_objmgr_peer *ctrl_peer);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +053067static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +053068static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
69static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070070
Karunakar Dasineni1d891ed2017-03-29 15:42:02 -070071#define DP_INTR_POLL_TIMER_MS 10
Tallapragada Kalyanfb72b632017-07-07 12:51:58 +053072#define DP_WDS_AGING_TIMER_DEFAULT_MS 120000
Ishank Jainbc2d91f2017-01-03 18:14:54 +053073#define DP_MCS_LENGTH (6*MAX_MCS)
74#define DP_NSS_LENGTH (6*SS_COUNT)
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +053075#define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
76#define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
77#define DP_MAX_MCS_STRING_LEN 30
Ishank Jain6290a3c2017-03-21 10:49:39 +053078#define DP_CURR_FW_STATS_AVAIL 19
79#define DP_HTT_DBG_EXT_STATS_MAX 256
Prathyusha Guduri43bb0562018-02-12 18:30:54 +053080#define DP_MAX_SLEEP_TIME 100
Ishank Jain949674c2017-02-27 17:09:29 +053081
Yun Parkfde6b9e2017-06-26 17:13:11 -070082#ifdef IPA_OFFLOAD
83/* Exclude IPA rings from the interrupt context */
Yun Park601d0d82017-08-28 21:49:31 -070084#define TX_RING_MASK_VAL 0xb
Yun Parkfde6b9e2017-06-26 17:13:11 -070085#define RX_RING_MASK_VAL 0x7
86#else
87#define TX_RING_MASK_VAL 0xF
88#define RX_RING_MASK_VAL 0xF
89#endif
Venkateswara Swamy Bandarued15e74a2017-08-18 19:13:10 +053090
sumedh baikady72b1c712017-08-24 12:11:46 -070091#define STR_MAXLEN 64
Soumya Bhat89647ef2017-11-16 17:23:48 +053092
Soumya Bhat0d6245c2018-02-08 21:02:57 +053093#define DP_PPDU_STATS_CFG_ALL 0xFFFF
94
95/* PPDU stats mask sent to FW to enable enhanced stats */
96#define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
97/* PPDU stats mask sent to FW to support debug sniffer feature */
98#define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
Vinay Adella873dc402018-05-28 12:06:34 +053099/* PPDU stats mask sent to FW to support BPR feature*/
100#define DP_PPDU_STATS_CFG_BPR 0x2000
101/* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
102#define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
103 DP_PPDU_STATS_CFG_ENH_STATS)
104/* PPDU stats mask sent to FW to support BPR and pcktlog stats feature */
105#define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
106 DP_PPDU_TXLITE_STATS_BITMASK_CFG)
107
Vivek126db5d2018-07-25 22:05:04 +0530108#define RNG_ERR "SRNG setup failed for"
Ishank Jain949674c2017-02-27 17:09:29 +0530109/**
110 * default_dscp_tid_map - Default DSCP-TID mapping
111 *
Pamidipati, Vijayef2cbc62017-09-27 23:09:06 +0530112 * DSCP TID
113 * 000000 0
114 * 001000 1
115 * 010000 2
116 * 011000 3
117 * 100000 4
118 * 101000 5
119 * 110000 6
120 * 111000 7
Ishank Jain949674c2017-02-27 17:09:29 +0530121 */
122static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
123 0, 0, 0, 0, 0, 0, 0, 0,
124 1, 1, 1, 1, 1, 1, 1, 1,
Pamidipati, Vijayef2cbc62017-09-27 23:09:06 +0530125 2, 2, 2, 2, 2, 2, 2, 2,
126 3, 3, 3, 3, 3, 3, 3, 3,
127 4, 4, 4, 4, 4, 4, 4, 4,
Ishank Jain949674c2017-02-27 17:09:29 +0530128 5, 5, 5, 5, 5, 5, 5, 5,
129 6, 6, 6, 6, 6, 6, 6, 6,
Pamidipati, Vijayef2cbc62017-09-27 23:09:06 +0530130 7, 7, 7, 7, 7, 7, 7, 7,
Ishank Jain949674c2017-02-27 17:09:29 +0530131};
132
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +0530133/*
134 * struct dp_rate_debug
135 *
136 * @mcs_type: print string for a given mcs
137 * @valid: valid mcs rate?
138 */
139struct dp_rate_debug {
140 char mcs_type[DP_MAX_MCS_STRING_LEN];
141 uint8_t valid;
142};
143
144#define MCS_VALID 1
145#define MCS_INVALID 0
146
147static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {
Anish Nataraj072d8972018-01-09 18:23:33 +0530148
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +0530149 {
150 {"OFDM 48 Mbps", MCS_VALID},
151 {"OFDM 24 Mbps", MCS_VALID},
152 {"OFDM 12 Mbps", MCS_VALID},
153 {"OFDM 6 Mbps ", MCS_VALID},
154 {"OFDM 54 Mbps", MCS_VALID},
155 {"OFDM 36 Mbps", MCS_VALID},
156 {"OFDM 18 Mbps", MCS_VALID},
157 {"OFDM 9 Mbps ", MCS_VALID},
158 {"INVALID ", MCS_INVALID},
159 {"INVALID ", MCS_INVALID},
160 {"INVALID ", MCS_INVALID},
161 {"INVALID ", MCS_INVALID},
162 {"INVALID ", MCS_VALID},
163 },
164 {
Anish Nataraj072d8972018-01-09 18:23:33 +0530165 {"CCK 11 Mbps Long ", MCS_VALID},
166 {"CCK 5.5 Mbps Long ", MCS_VALID},
167 {"CCK 2 Mbps Long ", MCS_VALID},
168 {"CCK 1 Mbps Long ", MCS_VALID},
169 {"CCK 11 Mbps Short ", MCS_VALID},
170 {"CCK 5.5 Mbps Short", MCS_VALID},
171 {"CCK 2 Mbps Short ", MCS_VALID},
172 {"INVALID ", MCS_INVALID},
173 {"INVALID ", MCS_INVALID},
174 {"INVALID ", MCS_INVALID},
175 {"INVALID ", MCS_INVALID},
176 {"INVALID ", MCS_INVALID},
177 {"INVALID ", MCS_VALID},
178 },
179 {
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +0530180 {"HT MCS 0 (BPSK 1/2) ", MCS_VALID},
181 {"HT MCS 1 (QPSK 1/2) ", MCS_VALID},
182 {"HT MCS 2 (QPSK 3/4) ", MCS_VALID},
183 {"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
184 {"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
185 {"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
186 {"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
187 {"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
188 {"INVALID ", MCS_INVALID},
189 {"INVALID ", MCS_INVALID},
190 {"INVALID ", MCS_INVALID},
191 {"INVALID ", MCS_INVALID},
192 {"INVALID ", MCS_VALID},
193 },
194 {
195 {"VHT MCS 0 (BPSK 1/2) ", MCS_VALID},
196 {"VHT MCS 1 (QPSK 1/2) ", MCS_VALID},
197 {"VHT MCS 2 (QPSK 3/4) ", MCS_VALID},
198 {"VHT MCS 3 (16-QAM 1/2) ", MCS_VALID},
199 {"VHT MCS 4 (16-QAM 3/4) ", MCS_VALID},
200 {"VHT MCS 5 (64-QAM 2/3) ", MCS_VALID},
201 {"VHT MCS 6 (64-QAM 3/4) ", MCS_VALID},
202 {"VHT MCS 7 (64-QAM 5/6) ", MCS_VALID},
203 {"VHT MCS 8 (256-QAM 3/4) ", MCS_VALID},
204 {"VHT MCS 9 (256-QAM 5/6) ", MCS_VALID},
205 {"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
Pamidipati, Vijay57a435a2017-10-17 11:03:39 +0530206 {"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +0530207 {"INVALID ", MCS_VALID},
208 },
209 {
210 {"HE MCS 0 (BPSK 1/2) ", MCS_VALID},
211 {"HE MCS 1 (QPSK 1/2) ", MCS_VALID},
212 {"HE MCS 2 (QPSK 3/4) ", MCS_VALID},
213 {"HE MCS 3 (16-QAM 1/2) ", MCS_VALID},
214 {"HE MCS 4 (16-QAM 3/4) ", MCS_VALID},
215 {"HE MCS 5 (64-QAM 2/3) ", MCS_VALID},
216 {"HE MCS 6 (64-QAM 3/4) ", MCS_VALID},
217 {"HE MCS 7 (64-QAM 5/6) ", MCS_VALID},
218 {"HE MCS 8 (256-QAM 3/4) ", MCS_VALID},
219 {"HE MCS 9 (256-QAM 5/6) ", MCS_VALID},
220 {"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
Pamidipati, Vijay57a435a2017-10-17 11:03:39 +0530221 {"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +0530222 {"INVALID ", MCS_VALID},
223 }
224};
225
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700226/**
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +0530227 * @brief Cpu ring map types
228 */
229enum dp_cpu_ring_map_types {
230 DP_DEFAULT_MAP,
231 DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
232 DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
233 DP_NSS_ALL_RADIO_OFFLOADED_MAP,
234 DP_CPU_RING_MAP_MAX
235};
236
237/**
238 * @brief Cpu to tx ring map
239 */
240static uint8_t dp_cpu_ring_map[DP_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
241 {0x0, 0x1, 0x2, 0x0},
242 {0x1, 0x2, 0x1, 0x2},
243 {0x0, 0x2, 0x0, 0x2},
244 {0x2, 0x2, 0x2, 0x2}
245};
246
/**
 * enum dp_stats_type - Select the type of statistics
 * @STATS_FW: firmware (HTT) statistics
 * @STATS_HOST: host statistics
 * @STATS_TYPE_MAX: number of columns in dp_stats_mapping_table
 */
enum dp_stats_type {
	STATS_FW = 0,
	STATS_HOST = 1,
	STATS_TYPE_MAX = 2,
};

/**
 * enum dp_fw_stats - General Firmware statistics options
 * @TXRX_FW_STATS_INVALID: sentinel for rows with no FW stat
 */
enum dp_fw_stats {
	TXRX_FW_STATS_INVALID = -1,
};
263
264/**
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +0530265 * dp_stats_mapping_table - Firmware and Host statistics
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -0800266 * currently supported
267 */
268const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
Ishank Jain6290a3c2017-03-21 10:49:39 +0530269 {HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
270 {HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
271 {HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
272 {HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
273 {HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
274 {HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
275 {HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
276 {HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
277 {HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
278 {HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
279 {HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -0800280 {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
Ishank Jain6290a3c2017-03-21 10:49:39 +0530281 {HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
282 {HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
283 {HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
284 {HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
285 {HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
286 {HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
287 {HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
288 /* Last ENUM for HTT FW STATS */
289 {DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -0800290 {TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
Ishank Jain6290a3c2017-03-21 10:49:39 +0530291 {TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
292 {TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
293 {TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -0800294 {TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
Pamidipati, Vijay899e7752017-07-25 22:09:28 +0530295 {TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
sumedh baikady72b1c712017-08-24 12:11:46 -0700296 {TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
Kai Chen783e0382018-01-25 16:29:08 -0800297 {TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
Venkata Sharath Chandra Manchala0cb31982018-03-30 15:55:26 -0700298 {TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -0800299};
300
Mohit Khannadba82f22018-07-12 10:59:17 -0700301/* MCL specific functions */
302#ifdef CONFIG_MCL
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * For MCL, monitor mode rings are processed from timer contexts (polled),
 * not from the softirq interrupt path, so every interrupt context gets a
 * zero monitor-ring mask.  When packet log is enabled for SAP/STA/P2P
 * modes, the monitor rings are serviced only by that separate timer.
 *
 * Return: 0
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return 0;
}
323
324/*
325 * dp_service_mon_rings()- timer to reap monitor rings
326 * reqd as we are not getting ppdu end interrupts
327 * @arg: SoC Handle
328 *
329 * Return:
330 *
331 */
332static void dp_service_mon_rings(void *arg)
333{
334 struct dp_soc *soc = (struct dp_soc *)arg;
335 int ring = 0, work_done, mac_id;
336 struct dp_pdev *pdev = NULL;
337
338 for (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
339 pdev = soc->pdev_list[ring];
340 if (!pdev)
341 continue;
342 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
343 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
344 pdev->pdev_id);
345 work_done = dp_mon_process(soc, mac_for_pdev,
346 QCA_NAPI_BUDGET);
347
348 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
349 FL("Reaped %d descs from Monitor rings"),
350 work_done);
351 }
352 }
353
354 qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
355}
356
357#ifndef REMOVE_PKT_LOG
358/**
359 * dp_pkt_log_init() - API to initialize packet log
360 * @ppdev: physical device handle
361 * @scn: HIF context
362 *
363 * Return: none
364 */
365void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
366{
367 struct dp_pdev *handle = (struct dp_pdev *)ppdev;
368
369 if (handle->pkt_log_init) {
370 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
371 "%s: Packet log not initialized", __func__);
372 return;
373 }
374
375 pktlog_sethandle(&handle->pl_dev, scn);
376 pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION);
377
378 if (pktlogmod_init(scn)) {
379 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
380 "%s: pktlogmod_init failed", __func__);
381 handle->pkt_log_init = false;
382 } else {
383 handle->pkt_log_init = true;
384 }
385}
386
/**
 * dp_pkt_log_con_service() - connect packet log service
 * @ppdev: physical device handle
 * @scn: device context
 *
 * Initializes packet log for the pdev and attaches it to HTC.
 *
 * Return: none
 */
static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
{
	dp_pkt_log_init(ppdev, scn);
	pktlog_htc_attach();
}
401
402/**
403 * dp_pktlogmod_exit() - API to cleanup pktlog info
404 * @handle: Pdev handle
405 *
406 * Return: none
407 */
408static void dp_pktlogmod_exit(struct dp_pdev *handle)
409{
410 void *scn = (void *)handle->soc->hif_handle;
411
412 if (!scn) {
413 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
414 "%s: Invalid hif(scn) handle", __func__);
415 return;
416 }
417
418 pktlogmod_exit(scn);
419 handle->pkt_log_init = false;
420}
421#endif
422#else
/* Packet log is unsupported without CONFIG_MCL: nothing to tear down */
static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
424
425/**
426 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
427 * @soc: pointer to dp_soc handle
428 * @intr_ctx_num: interrupt context number for which mon mask is needed
429 *
430 * Return: mon mask value
431 */
432static inline
433uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
434{
435 return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
436}
437#endif
438
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530439static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
440 struct cdp_peer *peer_hdl,
441 uint8_t *mac_addr,
442 enum cdp_txrx_ast_entry_type type,
443 uint32_t flags)
444{
445
446 return dp_peer_add_ast((struct dp_soc *)soc_hdl,
447 (struct dp_peer *)peer_hdl,
448 mac_addr,
449 type,
450 flags);
451}
452
453static void dp_peer_del_ast_wifi3(struct cdp_soc_t *soc_hdl,
454 void *ast_entry_hdl)
455{
456 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
457 qdf_spin_lock_bh(&soc->ast_lock);
458 dp_peer_del_ast((struct dp_soc *)soc_hdl,
459 (struct dp_ast_entry *)ast_entry_hdl);
460 qdf_spin_unlock_bh(&soc->ast_lock);
461}
462
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530463
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530464static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
465 struct cdp_peer *peer_hdl,
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530466 uint8_t *wds_macaddr,
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530467 uint32_t flags)
468{
phadiman0381f562018-06-29 15:40:52 +0530469 int status = -1;
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530470 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530471 struct dp_ast_entry *ast_entry = NULL;
472
473 qdf_spin_lock_bh(&soc->ast_lock);
474 ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr);
475
phadiman0381f562018-06-29 15:40:52 +0530476 if (ast_entry) {
477 status = dp_peer_update_ast(soc,
478 (struct dp_peer *)peer_hdl,
479 ast_entry, flags);
480 }
481
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530482 qdf_spin_unlock_bh(&soc->ast_lock);
483
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530484 return status;
485}
486
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530487/*
488 * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530489 * @soc_handle: Datapath SOC handle
490 * @wds_macaddr: WDS entry MAC Address
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530491 * Return: None
492 */
493static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530494 uint8_t *wds_macaddr, void *vdev_handle)
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530495{
496 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
497 struct dp_ast_entry *ast_entry = NULL;
498
499 qdf_spin_lock_bh(&soc->ast_lock);
500 ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr);
501
phadiman0381f562018-06-29 15:40:52 +0530502 if (ast_entry) {
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530503 if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
504 (ast_entry->type != CDP_TXRX_AST_TYPE_SELF)) {
phadiman0381f562018-06-29 15:40:52 +0530505 ast_entry->is_active = TRUE;
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530506 }
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530507 }
phadiman0381f562018-06-29 15:40:52 +0530508
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530509 qdf_spin_unlock_bh(&soc->ast_lock);
510}
511
512/*
513 * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entry
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530514 * @soc: Datapath SOC handle
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530515 *
516 * Return: None
517 */
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530518static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t *soc_hdl,
Santosh Anbu76693bc2018-04-23 16:38:54 +0530519 void *vdev_hdl)
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530520{
521 struct dp_soc *soc = (struct dp_soc *) soc_hdl;
522 struct dp_pdev *pdev;
523 struct dp_vdev *vdev;
524 struct dp_peer *peer;
525 struct dp_ast_entry *ase, *temp_ase;
526 int i;
527
528 qdf_spin_lock_bh(&soc->ast_lock);
529
530 for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
531 pdev = soc->pdev_list[i];
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +0530532 qdf_spin_lock_bh(&pdev->vdev_list_lock);
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530533 DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
534 DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
535 DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
Chaithanya Garrepallia822b192018-08-07 20:41:41 +0530536 if ((ase->type ==
537 CDP_TXRX_AST_TYPE_STATIC) ||
538 (ase->type ==
539 CDP_TXRX_AST_TYPE_SELF))
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530540 continue;
541 ase->is_active = TRUE;
542 }
543 }
544 }
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +0530545 qdf_spin_unlock_bh(&pdev->vdev_list_lock);
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530546 }
547
548 qdf_spin_unlock_bh(&soc->ast_lock);
549}
550
551/*
552 * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry
553 * @soc: Datapath SOC handle
554 *
555 * Return: None
556 */
557static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t *soc_hdl)
558{
559 struct dp_soc *soc = (struct dp_soc *) soc_hdl;
560 struct dp_pdev *pdev;
561 struct dp_vdev *vdev;
562 struct dp_peer *peer;
563 struct dp_ast_entry *ase, *temp_ase;
564 int i;
565
566 qdf_spin_lock_bh(&soc->ast_lock);
567
568 for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
569 pdev = soc->pdev_list[i];
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +0530570 qdf_spin_lock_bh(&pdev->vdev_list_lock);
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530571 DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
572 DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
573 DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
Chaithanya Garrepallia822b192018-08-07 20:41:41 +0530574 if ((ase->type ==
575 CDP_TXRX_AST_TYPE_STATIC) ||
576 (ase->type ==
577 CDP_TXRX_AST_TYPE_SELF))
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530578 continue;
579 dp_peer_del_ast(soc, ase);
580 }
581 }
582 }
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +0530583 qdf_spin_unlock_bh(&pdev->vdev_list_lock);
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530584 }
585
586 qdf_spin_unlock_bh(&soc->ast_lock);
587}
588
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530589static void *dp_peer_ast_hash_find_wifi3(struct cdp_soc_t *soc_hdl,
590 uint8_t *ast_mac_addr)
591{
592 struct dp_ast_entry *ast_entry;
593 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
594 qdf_spin_lock_bh(&soc->ast_lock);
595 ast_entry = dp_peer_ast_hash_find(soc, ast_mac_addr);
596 qdf_spin_unlock_bh(&soc->ast_lock);
597 return (void *)ast_entry;
598}
599
/* cdp wrapper: pdev id recorded in an AST entry */
static uint8_t dp_peer_ast_get_pdev_id_wifi3(struct cdp_soc_t *soc_hdl,
					     void *ast_entry_hdl)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry *ast_entry = (struct dp_ast_entry *)ast_entry_hdl;

	return dp_peer_ast_get_pdev_id(soc, ast_entry);
}
606
/* cdp wrapper: next_hop flag of an AST entry */
static uint8_t dp_peer_ast_get_next_hop_wifi3(struct cdp_soc_t *soc_hdl,
					      void *ast_entry_hdl)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry *ast_entry = (struct dp_ast_entry *)ast_entry_hdl;

	return dp_peer_ast_get_next_hop(soc, ast_entry);
}
613
614static void dp_peer_ast_set_type_wifi3(
615 struct cdp_soc_t *soc_hdl,
616 void *ast_entry_hdl,
617 enum cdp_txrx_ast_entry_type type)
618{
619 dp_peer_ast_set_type((struct dp_soc *)soc_hdl,
620 (struct dp_ast_entry *)ast_entry_hdl,
621 type);
622}
623
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530624static enum cdp_txrx_ast_entry_type dp_peer_ast_get_type_wifi3(
625 struct cdp_soc_t *soc_hdl,
626 void *ast_entry_hdl)
627{
628 return ((struct dp_ast_entry *)ast_entry_hdl)->type;
629}
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530630
Houston Hoffman648a9182017-05-21 23:27:50 -0700631/**
632 * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
633 * @ring_num: ring num of the ring being queried
634 * @grp_mask: the grp_mask array for the ring type in question.
635 *
636 * The grp_mask array is indexed by group number and the bit fields correspond
637 * to ring numbers. We are finding which interrupt group a ring belongs to.
638 *
639 * Return: the index in the grp_mask array with the ring number.
640 * -QDF_STATUS_E_NOENT if no entry is found
641 */
642static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
643{
644 int ext_group_num;
645 int mask = 1 << ring_num;
646
647 for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
648 ext_group_num++) {
649 if (mask & grp_mask[ext_group_num])
650 return ext_group_num;
651 }
652
653 return -QDF_STATUS_E_NOENT;
654}
655
656static int dp_srng_calculate_msi_group(struct dp_soc *soc,
657 enum hal_ring_type ring_type,
658 int ring_num)
659{
660 int *grp_mask;
661
662 switch (ring_type) {
663 case WBM2SW_RELEASE:
664 /* dp_tx_comp_handler - soc->tx_comp_ring */
665 if (ring_num < 3)
666 grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
667
668 /* dp_rx_wbm_err_process - soc->rx_rel_ring */
669 else if (ring_num == 3) {
670 /* sw treats this as a separate ring type */
671 grp_mask = &soc->wlan_cfg_ctx->
672 int_rx_wbm_rel_ring_mask[0];
673 ring_num = 0;
674 } else {
675 qdf_assert(0);
676 return -QDF_STATUS_E_NOENT;
677 }
678 break;
679
680 case REO_EXCEPTION:
681 /* dp_rx_err_process - &soc->reo_exception_ring */
682 grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
683 break;
684
685 case REO_DST:
686 /* dp_rx_process - soc->reo_dest_ring */
687 grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
688 break;
689
690 case REO_STATUS:
691 /* dp_reo_status_ring_handler - soc->reo_status_ring */
692 grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
693 break;
694
695 /* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
696 case RXDMA_MONITOR_STATUS:
697 /* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
698 case RXDMA_MONITOR_DST:
699 /* dp_mon_process */
700 grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
701 break;
Karunakar Dasineniea027c52017-09-20 16:27:46 -0700702 case RXDMA_DST:
703 /* dp_rxdma_err_process */
704 grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
705 break;
Houston Hoffman648a9182017-05-21 23:27:50 -0700706
Houston Hoffman648a9182017-05-21 23:27:50 -0700707 case RXDMA_BUF:
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -0700708 grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
709 break;
710
711 case RXDMA_MONITOR_BUF:
Houston Hoffman648a9182017-05-21 23:27:50 -0700712 /* TODO: support low_thresh interrupt */
713 return -QDF_STATUS_E_NOENT;
714 break;
715
716 case TCL_DATA:
717 case TCL_CMD:
718 case REO_CMD:
719 case SW2WBM_RELEASE:
720 case WBM_IDLE_LINK:
721 /* normally empty SW_TO_HW rings */
722 return -QDF_STATUS_E_NOENT;
723 break;
724
725 case TCL_STATUS:
726 case REO_REINJECT:
Houston Hoffman648a9182017-05-21 23:27:50 -0700727 /* misc unused rings */
728 return -QDF_STATUS_E_NOENT;
729 break;
730
731 case CE_SRC:
732 case CE_DST:
733 case CE_DST_STATUS:
734 /* CE_rings - currently handled by hif */
735 default:
736 return -QDF_STATUS_E_NOENT;
737 break;
738 }
739
740 return dp_srng_find_ring_in_mask(ring_num, grp_mask);
741}
742
743static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
744 *ring_params, int ring_type, int ring_num)
745{
746 int msi_group_number;
747 int msi_data_count;
748 int ret;
749 uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
750
751 ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
752 &msi_data_count, &msi_data_start,
753 &msi_irq_start);
754
755 if (ret)
756 return;
757
758 msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
759 ring_num);
760 if (msi_group_number < 0) {
Houston Hoffman41b912c2017-08-30 14:27:51 -0700761 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
Houston Hoffman648a9182017-05-21 23:27:50 -0700762 FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
763 ring_type, ring_num);
764 ring_params->msi_addr = 0;
765 ring_params->msi_data = 0;
766 return;
767 }
768
769 if (msi_group_number > msi_data_count) {
770 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
771 FL("2 msi_groups will share an msi; msi_group_num %d"),
772 msi_group_number);
773
774 QDF_ASSERT(0);
775 }
776
777 pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
778
779 ring_params->msi_addr = addr_low;
780 ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
781 ring_params->msi_data = (msi_group_number % msi_data_count)
782 + msi_data_start;
783 ring_params->flags |= HAL_SRNG_MSI_INTR;
784}
785
/**
 * dp_print_ast_stats() - Dump AST table contents
 * @soc: Datapath soc handle
 *
 * Prints the global add/delete/ageout counters and then one line per AST
 * entry, walking every pdev/vdev/peer under the appropriate locks.
 *
 * return void
 */
#ifdef FEATURE_AST
static void dp_print_ast_stats(struct dp_soc *soc)
{
	uint8_t i;
	uint8_t num_entries = 0;
	struct dp_vdev *vdev;
	struct dp_pdev *pdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *tmp_ase;
	/* printable names indexed by cdp_txrx_ast_entry_type */
	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
		"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS"};

	DP_PRINT_STATS("AST Stats:");
	DP_PRINT_STATS(" Entries Added = %d", soc->stats.ast.added);
	DP_PRINT_STATS(" Entries Deleted = %d", soc->stats.ast.deleted);
	DP_PRINT_STATS(" Entries Agedout = %d", soc->stats.ast.aged_out);
	DP_PRINT_STATS("AST Table:");

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
					DP_PRINT_STATS("%6d mac_addr = %pM"
						       " peer_mac_addr = %pM"
						       " type = %s"
						       " next_hop = %d"
						       " is_active = %d"
						       " is_bss = %d"
						       " ast_idx = %d"
						       " pdev_id = %d"
						       " vdev_id = %d",
						       ++num_entries,
						       ase->mac_addr.raw,
						       ase->peer->mac_addr.raw,
						       type[ase->type],
						       ase->next_hop,
						       ase->is_active,
						       ase->is_bss,
						       ase->ast_idx,
						       ase->pdev_id,
						       ase->vdev_id);
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}
}
#else
static void dp_print_ast_stats(struct dp_soc *soc)
{
	DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
}
#endif
847
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530848static void dp_print_peer_table(struct dp_vdev *vdev)
849{
850 struct dp_peer *peer = NULL;
851
852 DP_PRINT_STATS("Dumping Peer Table Stats:");
853 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
854 if (!peer) {
855 DP_PRINT_STATS("Invalid Peer");
856 return;
857 }
858 DP_PRINT_STATS(" peer_mac_addr = %pM"
859 " nawds_enabled = %d"
860 " bss_peer = %d"
861 " wapi = %d"
862 " wds_enabled = %d"
863 " delete in progress = %d",
864 peer->mac_addr.raw,
865 peer->nawds_enabled,
866 peer->bss_peer,
867 peer->wapi,
868 peer->wds_enabled,
869 peer->delete_in_progress);
870 }
871}
872
Pamidipati, Vijay899e7752017-07-25 22:09:28 +0530873/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700874 * dp_setup_srng - Internal function to setup SRNG rings used by data path
875 */
876static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
Dhanashri Atred4032ab2017-01-17 15:05:41 -0800877 int ring_type, int ring_num, int mac_id, uint32_t num_entries)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700878{
879 void *hal_soc = soc->hal_soc;
880 uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
881 /* TODO: See if we should get align size from hal */
882 uint32_t ring_base_align = 8;
883 struct hal_srng_params ring_params;
Karunakar Dasinenid0ea21f2017-01-31 22:58:15 -0800884 uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700885
Houston Hoffman648a9182017-05-21 23:27:50 -0700886 /* TODO: Currently hal layer takes care of endianness related settings.
887 * See if these settings need to passed from DP layer
888 */
889 ring_params.flags = 0;
Houston Hoffman41b912c2017-08-30 14:27:51 -0700890 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
Yun Parkfde6b9e2017-06-26 17:13:11 -0700891 FL("Ring type: %d, num:%d"), ring_type, ring_num);
Houston Hoffman648a9182017-05-21 23:27:50 -0700892
Karunakar Dasinenid0ea21f2017-01-31 22:58:15 -0800893 num_entries = (num_entries > max_entries) ? max_entries : num_entries;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700894 srng->hal_srng = NULL;
895 srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -0700896 srng->num_entries = num_entries;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700897 srng->base_vaddr_unaligned = qdf_mem_alloc_consistent(
Dhanashri Atre57e420d2016-10-25 21:13:54 -0700898 soc->osdev, soc->osdev->dev, srng->alloc_size,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700899 &(srng->base_paddr_unaligned));
900
901 if (!srng->base_vaddr_unaligned) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +0530902 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
903 FL("alloc failed - ring_type: %d, ring_num %d"),
904 ring_type, ring_num);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700905 return QDF_STATUS_E_NOMEM;
906 }
907
908 ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
909 ((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
910 ring_params.ring_base_paddr = srng->base_paddr_unaligned +
911 ((unsigned long)(ring_params.ring_base_vaddr) -
912 (unsigned long)srng->base_vaddr_unaligned);
913 ring_params.num_entries = num_entries;
914
psimhac983d7e2017-07-26 15:20:07 -0700915 if (soc->intr_mode == DP_INTR_MSI) {
916 dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
Aditya Sathishded018e2018-07-02 16:25:21 +0530917 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
918 FL("Using MSI for ring_type: %d, ring_num %d"),
919 ring_type, ring_num);
psimhac983d7e2017-07-26 15:20:07 -0700920
921 } else {
922 ring_params.msi_data = 0;
923 ring_params.msi_addr = 0;
Aditya Sathishded018e2018-07-02 16:25:21 +0530924 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
925 FL("Skipping MSI for ring_type: %d, ring_num %d"),
926 ring_type, ring_num);
psimhac983d7e2017-07-26 15:20:07 -0700927 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700928
Pamidipati, Vijay45b1df22017-06-21 03:20:25 +0530929 /*
930 * Setup interrupt timer and batch counter thresholds for
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700931 * interrupt mitigation based on ring type
932 */
Pamidipati, Vijay45b1df22017-06-21 03:20:25 +0530933 if (ring_type == REO_DST) {
934 ring_params.intr_timer_thres_us =
935 wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
936 ring_params.intr_batch_cntr_thres_entries =
937 wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
938 } else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
939 ring_params.intr_timer_thres_us =
940 wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
941 ring_params.intr_batch_cntr_thres_entries =
942 wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
943 } else {
944 ring_params.intr_timer_thres_us =
945 wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
946 ring_params.intr_batch_cntr_thres_entries =
Karunakar Dasineni25f1b042018-02-15 23:26:17 -0800947 wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
Pamidipati, Vijay45b1df22017-06-21 03:20:25 +0530948 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700949
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700950 /* Enable low threshold interrupts for rx buffer rings (regular and
951 * monitor buffer rings.
952 * TODO: See if this is required for any other ring
953 */
Karunakar Dasineni37995ac2018-02-06 12:37:30 -0800954 if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
955 (ring_type == RXDMA_MONITOR_STATUS)) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700956 /* TODO: Setting low threshold to 1/8th of ring size
957 * see if this needs to be configurable
958 */
959 ring_params.low_threshold = num_entries >> 3;
960 ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
Karunakar Dasinenibef3b1b2018-03-28 22:23:57 -0700961 ring_params.intr_timer_thres_us =
962 wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
963 ring_params.intr_batch_cntr_thres_entries = 0;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700964 }
965
966 srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
Dhanashri Atred4032ab2017-01-17 15:05:41 -0800967 mac_id, &ring_params);
Manoj Ekbote376116e2017-12-19 10:44:41 -0800968
969 if (!srng->hal_srng) {
970 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
971 srng->alloc_size,
972 srng->base_vaddr_unaligned,
973 srng->base_paddr_unaligned, 0);
974 }
975
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700976 return 0;
977}
978
979/**
980 * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
981 * Any buffers allocated and attached to ring entries are expected to be freed
982 * before calling this function.
983 */
984static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
985 int ring_type, int ring_num)
986{
987 if (!srng->hal_srng) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +0530988 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
989 FL("Ring type: %d, num:%d not setup"),
990 ring_type, ring_num);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700991 return;
992 }
993
994 hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
995
Dhanashri Atre57e420d2016-10-25 21:13:54 -0700996 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700997 srng->alloc_size,
998 srng->base_vaddr_unaligned,
999 srng->base_paddr_unaligned, 0);
Manoj Ekbote525bcab2017-09-01 17:23:32 -07001000 srng->hal_srng = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001001}
1002
1003/* TODO: Need this interface from HIF */
1004void *hif_get_hal_handle(void *hif_handle);
1005
/*
 * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
 * @dp_ctx: DP SOC handle (actually a struct dp_intr * for one context)
 * @budget: Number of frames/descriptors that can be processed in one shot
 *
 * Services, in order: Tx completion rings, the REO exception ring, the
 * WBM Rx release ring, REO destination (Rx) rings, the REO status ring,
 * and per-pdev LMAC rings (monitor, rxdma error, host2rxdma refill).
 * Work done on each ring is subtracted from the running budget; once the
 * budget is exhausted the function bails out via budget_done.
 *
 * Return: remaining budget/quota for the soc device
 */
static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_soc *soc = int_ctx->soc;
	int ring = 0;
	uint32_t work_done = 0;
	/* signed so the <= 0 exhaustion checks below work */
	int budget = dp_budget;
	uint8_t tx_mask = int_ctx->tx_ring_mask;
	uint8_t rx_mask = int_ctx->rx_ring_mask;
	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
	uint32_t remaining_quota = dp_budget;
	struct dp_pdev *pdev = NULL;
	int mac_id;

	/* Process Tx completion interrupts first to return back buffers */
	while (tx_mask) {
		if (tx_mask & 0x1) {
			work_done = dp_tx_comp_handler(soc,
					soc->tx_comp_ring[ring].hal_srng,
					remaining_quota);

			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				"tx mask 0x%x ring %d, budget %d, work_done %d",
				tx_mask, ring, budget, work_done);

			budget -= work_done;
			if (budget <= 0)
				goto budget_done;

			remaining_quota = budget;
		}
		/* walk the mask bit by bit; 'ring' tracks the bit index */
		tx_mask = tx_mask >> 1;
		ring++;
	}


	/* Process REO Exception ring interrupt */
	if (rx_err_mask) {
		work_done = dp_rx_err_process(soc,
				soc->reo_exception_ring.hal_srng,
				remaining_quota);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"REO Exception Ring: work_done %d budget %d",
			work_done, budget);

		budget -= work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx WBM release ring interrupt */
	if (rx_wbm_rel_mask) {
		work_done = dp_rx_wbm_err_process(soc,
				soc->rx_rel_ring.hal_srng, remaining_quota);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"WBM Release Ring: work_done %d budget %d",
			work_done, budget);

		budget -= work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx interrupts */
	if (rx_mask) {
		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
			if (rx_mask & (1 << ring)) {
				work_done = dp_rx_process(int_ctx,
					    soc->reo_dest_ring[ring].hal_srng,
					    remaining_quota);

				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
					"rx mask 0x%x ring %d, work_done %d budget %d",
					rx_mask, ring, work_done, budget);

				budget -= work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}
		}
		/* NOTE(review): this loop services rxdma error rings for
		 * ALL MAC rings whenever any rx_mask bit is set, decrements
		 * budget but never checks for exhaustion, and does not
		 * refresh remaining_quota -- confirm this is intentional.
		 */
		for (ring = 0; ring < MAX_RX_MAC_RINGS; ring++) {
			work_done = dp_rxdma_err_process(soc, ring,
						remaining_quota);
			budget -= work_done;
		}
	}

	/* REO status ring is control traffic; not budget-accounted */
	if (reo_status_mask)
		dp_reo_status_ring_handler(soc);

	/* Process LMAC interrupts */
	for (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
		pdev = soc->pdev_list[ring];
		if (pdev == NULL)
			continue;
		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

			/* Monitor-mode destination/status processing */
			if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
				work_done = dp_mon_process(soc, mac_for_pdev,
						remaining_quota);
				budget -= work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}

			/* RXDMA-to-host error ring for this mac */
			if (int_ctx->rxdma2host_ring_mask &
					(1 << mac_for_pdev)) {
				work_done = dp_rxdma_err_process(soc,
							mac_for_pdev,
							remaining_quota);
				budget -= work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}

			/* Low-threshold refill interrupt: replenish the Rx
			 * buffer ring from the free descriptor pool.
			 */
			if (int_ctx->host2rxdma_ring_mask &
					(1 << mac_for_pdev)) {
				union dp_rx_desc_list_elem_t *desc_list = NULL;
				union dp_rx_desc_list_elem_t *tail = NULL;
				struct dp_srng *rx_refill_buf_ring =
					&pdev->rx_refill_buf_ring;

				DP_STATS_INC(pdev, replenish.low_thresh_intrs,
						1);
				dp_rx_buffers_replenish(soc, mac_for_pdev,
					rx_refill_buf_ring,
					&soc->rx_desc_buf[mac_for_pdev], 0,
					&desc_list, &tail);
			}
		}
	}

	/* Flush any LRO-aggregated flows accumulated during Rx processing */
	qdf_lro_flush(int_ctx->lro_ctx);

budget_done:
	return dp_budget - budget;
}
1163
psimhac983d7e2017-07-26 15:20:07 -07001164#ifdef DP_INTR_POLL_BASED
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301165/* dp_interrupt_timer()- timer poll for interrupts
1166 *
1167 * @arg: SoC Handle
1168 *
1169 * Return:
1170 *
1171 */
Jeff Johnsonf1352572017-01-10 14:24:10 -08001172static void dp_interrupt_timer(void *arg)
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301173{
1174 struct dp_soc *soc = (struct dp_soc *) arg;
1175 int i;
1176
Ravi Joshi86e98262017-03-01 13:47:03 -08001177 if (qdf_atomic_read(&soc->cmn_init_done)) {
1178 for (i = 0;
1179 i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
1180 dp_service_srngs(&soc->intr_ctx[i], 0xffff);
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301181
Ravi Joshi86e98262017-03-01 13:47:03 -08001182 qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
1183 }
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301184}
1185
/*
 * dp_soc_interrupt_attach_poll() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
 * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
 * rx_monitor_ring mask to indicate the rings that are processed by the handler.
 *
 * In poll mode no IRQs are hooked up; a timer (dp_interrupt_timer)
 * periodically drives dp_service_srngs() over these contexts instead.
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;

	soc->intr_mode = DP_INTR_POLL;

	/* Populate each interrupt context's ring masks from wlan_cfg.
	 * NOTE(review): host2rxdma_ring_mask is not initialized here,
	 * unlike in dp_soc_interrupt_attach() -- confirm whether poll
	 * mode intentionally skips host2rxdma servicing.
	 */
	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].dp_intr_id = i;
		soc->intr_ctx[i].tx_ring_mask =
			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_ring_mask =
			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_mon_ring_mask =
			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_err_ring_mask =
			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].reo_status_ring_mask =
			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rxdma2host_ring_mask =
			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].soc = soc;
		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
	}

	/* Timer is only initialized here; callers start it separately */
	qdf_timer_init(soc->osdev, &soc->int_timer,
			dp_interrupt_timer, (void *)soc,
			QDF_TIMER_TYPE_WAKE_APPS);

	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
{
	/* NOTE(review): returns the arithmetic negation of the
	 * QDF_STATUS_E_NOSUPPORT enumerator, not the enumerator itself.
	 * It is nonzero, so "!= QDF_STATUS_SUCCESS" checks still work,
	 * but direct comparisons against QDF_STATUS codes would not --
	 * confirm callers only test for success.
	 */
	return -QDF_STATUS_E_NOSUPPORT;
}
#endif
D Harilakshmi5da9ee72017-10-04 16:14:12 +05301235
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05301236static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
#if defined(CONFIG_MCL)
/* set from module/boot parameters outside this file */
extern int con_mode_monitor;
/*
 * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * Call the appropriate attach function based on the mode of operation.
 * This is a WAR for enabling monitor mode.
 *
 * MCL builds fall back to timer-based polling when NAPI is disabled in
 * the cfg context or the driver was loaded in global monitor mode;
 * otherwise real interrupt registration is used.
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
	    con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"%s: Poll mode", __func__);
		return dp_soc_attach_poll(txrx_soc);
	} else {

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"%s: Interrupt mode", __func__);
		return dp_soc_interrupt_attach(txrx_soc);
	}
}
#else
/* Non-MCL builds: let HIF decide whether polling was forced */
static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	if (hif_is_polled_mode_enabled(soc->hif_handle))
		return dp_soc_attach_poll(txrx_soc);
	else
		return dp_soc_interrupt_attach(txrx_soc);
}
#endif
Houston Hoffman648a9182017-05-21 23:27:50 -07001275
/*
 * dp_soc_interrupt_map_calculate_integrated() - Build the IRQ id map for
 * one interrupt context on integrated (non-MSI) targets.
 * @soc: Datapath soc handle
 * @intr_ctx_num: interrupt context index
 * @irq_id_map: output array of IRQ ids (caller provides HIF_MAX_GRP_IRQ slots)
 * @num_irq_r: output count of entries written to @irq_id_map
 *
 * For every bit set in each ring mask of this context, appends the
 * corresponding hardware IRQ id. The "base - j" arithmetic relies on the
 * IRQ id enumerators (wbm2host_tx_completions_ring1 etc., declared
 * elsewhere) being laid out contiguously in descending ring order --
 * assumed from usage here, not visible in this file.
 *
 * Return: void (outputs via @irq_id_map / @num_irq_r)
 */
static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
{
	int j;
	int num_irq = 0;

	int tx_mask =
		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask =
		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask =
		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);

	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {

		if (tx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(wbm2host_tx_completions_ring1 - j);
		}

		if (rx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(reo2host_destination_ring1 - j);
		}

		/* per-MAC rings: translate the logical mac index j to the
		 * hardware mac index before offsetting from the base id
		 */
		if (rxdma2host_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				rxdma2host_destination_ring_mac1 -
				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
		}

		if (host2rxdma_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_host_buf_ring_mac1 -
				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
		}

		/* monitor mode needs both the ppdu-end and the monitor
		 * status ring interrupts for the same mac
		 */
		if (rx_mon_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				ppdu_end_interrupts_mac1 -
				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
			irq_id_map[num_irq++] =
				rxdma2host_monitor_status_ring_mac1 -
				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
		}

		/* single-instance rings: one fixed IRQ id each */
		if (rx_wbm_rel_ring_mask & (1 << j))
			irq_id_map[num_irq++] = wbm2host_rx_release;

		if (rx_err_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_exception;

		if (reo_status_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_status;

	}
	*num_irq_r = num_irq;
}
1344
/*
 * dp_soc_interrupt_map_calculate_msi() - Build the IRQ id map for one
 * interrupt context when MSI vectors are available.
 * @soc: Datapath soc handle
 * @intr_ctx_num: interrupt context index
 * @irq_id_map: output array of IRQ ids
 * @num_irq_r: output count of entries written (0 or 1 here)
 * @msi_vector_count: number of MSI vectors granted to "DP"
 * @msi_vector_start: first MSI vector index granted to "DP"
 *
 * All rings of a context share a single MSI vector; contexts are spread
 * round-robin over the available vectors. Also switches the soc into
 * MSI interrupt mode as a side effect.
 *
 * Return: void (outputs via @irq_id_map / @num_irq_r)
 */
static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
		int msi_vector_count, int msi_vector_start)
{
	int tx_mask = wlan_cfg_get_tx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask = wlan_cfg_get_rx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);

	/* round-robin assignment of contexts onto the granted vectors */
	unsigned int vector =
		(intr_ctx_num % msi_vector_count) + msi_vector_start;
	int num_irq = 0;

	soc->intr_mode = DP_INTR_MSI;

	/* One shared vector if any ring mask is populated.
	 * NOTE(review): host2rxdma_ring_mask is not part of this OR,
	 * unlike in the integrated-map variant -- a context with only
	 * host2rxdma bits set would get no IRQ. Confirm intended.
	 */
	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
		irq_id_map[num_irq++] =
			pld_get_msi_irq(soc->osdev->dev, vector);

	*num_irq_r = num_irq;
}
1377
1378static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1379 int *irq_id_map, int *num_irq)
1380{
1381 int msi_vector_count, ret;
1382 uint32_t msi_base_data, msi_vector_start;
1383
1384 ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1385 &msi_vector_count,
1386 &msi_base_data,
1387 &msi_vector_start);
1388 if (ret)
1389 return dp_soc_interrupt_map_calculate_integrated(soc,
1390 intr_ctx_num, irq_id_map, num_irq);
1391
1392 else
1393 dp_soc_interrupt_map_calculate_msi(soc,
1394 intr_ctx_num, irq_id_map, num_irq,
1395 msi_vector_count, msi_vector_start);
1396}
1397
/*
 * dp_soc_interrupt_attach() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
 * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
 * rx_monitor_ring mask to indicate the rings that are processed by the handler.
 *
 * For every context: the ring masks are read from wlan_cfg, the matching
 * hardware IRQ ids are computed (MSI or integrated), and the context is
 * registered with HIF as an ext group serviced by dp_service_srngs().
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	int i = 0;
	int num_irq = 0;

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		int ret = 0;

		/* Map of IRQ ids registered with one interrupt context */
		int irq_id_map[HIF_MAX_GRP_IRQ];

		int tx_mask =
			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
		int rx_mask =
			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
		/* monitor mask depends on the interrupt mode, hence the
		 * dedicated helper rather than a direct wlan_cfg getter
		 */
		int rx_mon_mask =
			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
		int rx_err_ring_mask =
			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
		int rx_wbm_rel_ring_mask =
			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
		int reo_status_ring_mask =
			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
		int rxdma2host_ring_mask =
			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
		int host2rxdma_ring_mask =
			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);


		soc->intr_ctx[i].dp_intr_id = i;
		soc->intr_ctx[i].tx_ring_mask = tx_mask;
		soc->intr_ctx[i].rx_ring_mask = rx_mask;
		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;

		soc->intr_ctx[i].soc = soc;

		num_irq = 0;

		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
					       &num_irq);

		ret = hif_register_ext_group(soc->hif_handle,
				num_irq, irq_id_map, dp_service_srngs,
				&soc->intr_ctx[i], "dp_intr",
				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);

		/* NOTE(review): on failure, lro_ctx instances created for
		 * earlier contexts are not released here -- presumably the
		 * caller invokes dp_soc_interrupt_detach(); confirm.
		 */
		if (ret) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("failed, ret = %d"), ret);

			return QDF_STATUS_E_FAILURE;
		}
		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
	}

	hif_configure_ext_group_interrupts(soc->hif_handle);

	return QDF_STATUS_SUCCESS;
}
1474
1475/*
1476 * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
1477 * @txrx_soc: DP SOC handle
1478 *
1479 * Return: void
1480 */
Jeff Johnsonf1352572017-01-10 14:24:10 -08001481static void dp_soc_interrupt_detach(void *txrx_soc)
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301482{
1483 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
Leo Chang5ea93a42016-11-03 12:39:49 -07001484 int i;
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301485
psimhac983d7e2017-07-26 15:20:07 -07001486 if (soc->intr_mode == DP_INTR_POLL) {
1487 qdf_timer_stop(&soc->int_timer);
1488 qdf_timer_free(&soc->int_timer);
psimhaa079b8c2017-08-02 17:27:14 -07001489 } else {
1490 hif_deregister_exec_group(soc->hif_handle, "dp_intr");
psimhac983d7e2017-07-26 15:20:07 -07001491 }
1492
Leo Chang5ea93a42016-11-03 12:39:49 -07001493 for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1494 soc->intr_ctx[i].tx_ring_mask = 0;
1495 soc->intr_ctx[i].rx_ring_mask = 0;
1496 soc->intr_ctx[i].rx_mon_ring_mask = 0;
Houston Hoffman648a9182017-05-21 23:27:50 -07001497 soc->intr_ctx[i].rx_err_ring_mask = 0;
1498 soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1499 soc->intr_ctx[i].reo_status_ring_mask = 0;
Karunakar Dasineniea027c52017-09-20 16:27:46 -07001500 soc->intr_ctx[i].rxdma2host_ring_mask = 0;
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001501 soc->intr_ctx[i].host2rxdma_ring_mask = 0;
Houston Hoffman648a9182017-05-21 23:27:50 -07001502
Dhanashri Atre0da31222017-03-23 12:30:58 -07001503 qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
Leo Chang5ea93a42016-11-03 12:39:49 -07001504 }
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301505}
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301506
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001507#define AVG_MAX_MPDUS_PER_TID 128
1508#define AVG_TIDS_PER_CLIENT 2
1509#define AVG_FLOWS_PER_TID 2
1510#define AVG_MSDUS_PER_FLOW 128
1511#define AVG_MSDUS_PER_MPDU 4
1512
1513/*
1514 * Allocate and setup link descriptor pool that will be used by HW for
1515 * various link and queue descriptors and managed by WBM
1516 */
1517static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
1518{
1519 int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
1520 int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
1521 uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
1522 uint32_t num_mpdus_per_link_desc =
1523 hal_num_mpdus_per_link_desc(soc->hal_soc);
1524 uint32_t num_msdus_per_link_desc =
1525 hal_num_msdus_per_link_desc(soc->hal_soc);
1526 uint32_t num_mpdu_links_per_queue_desc =
1527 hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
1528 uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1529 uint32_t total_link_descs, total_mem_size;
1530 uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
1531 uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
1532 uint32_t num_link_desc_banks;
1533 uint32_t last_bank_size = 0;
1534 uint32_t entry_size, num_entries;
1535 int i;
Karunakar Dasinenidbaf4be2017-07-19 18:12:43 -07001536 uint32_t desc_id = 0;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001537
1538 /* Only Tx queue descriptors are allocated from common link descriptor
1539 * pool Rx queue descriptors are not included in this because (REO queue
1540 * extension descriptors) they are expected to be allocated contiguously
1541 * with REO queue descriptors
1542 */
1543 num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1544 AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
1545
1546 num_mpdu_queue_descs = num_mpdu_link_descs /
1547 num_mpdu_links_per_queue_desc;
1548
1549 num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1550 AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
1551 num_msdus_per_link_desc;
1552
1553 num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1554 AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
1555
1556 num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
1557 num_tx_msdu_link_descs + num_rx_msdu_link_descs;
1558
1559 /* Round up to power of 2 */
1560 total_link_descs = 1;
1561 while (total_link_descs < num_entries)
1562 total_link_descs <<= 1;
1563
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301564 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1565 FL("total_link_descs: %u, link_desc_size: %d"),
1566 total_link_descs, link_desc_size);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001567 total_mem_size = total_link_descs * link_desc_size;
1568
1569 total_mem_size += link_desc_align;
1570
1571 if (total_mem_size <= max_alloc_size) {
1572 num_link_desc_banks = 0;
1573 last_bank_size = total_mem_size;
1574 } else {
1575 num_link_desc_banks = (total_mem_size) /
1576 (max_alloc_size - link_desc_align);
1577 last_bank_size = total_mem_size %
1578 (max_alloc_size - link_desc_align);
1579 }
1580
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301581 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1582 FL("total_mem_size: %d, num_link_desc_banks: %u"),
1583 total_mem_size, num_link_desc_banks);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001584
1585 for (i = 0; i < num_link_desc_banks; i++) {
1586 soc->link_desc_banks[i].base_vaddr_unaligned =
Dhanashri Atre57e420d2016-10-25 21:13:54 -07001587 qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001588 max_alloc_size,
1589 &(soc->link_desc_banks[i].base_paddr_unaligned));
1590 soc->link_desc_banks[i].size = max_alloc_size;
1591
1592 soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
1593 soc->link_desc_banks[i].base_vaddr_unaligned) +
1594 ((unsigned long)(
1595 soc->link_desc_banks[i].base_vaddr_unaligned) %
1596 link_desc_align));
1597
1598 soc->link_desc_banks[i].base_paddr = (unsigned long)(
1599 soc->link_desc_banks[i].base_paddr_unaligned) +
1600 ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1601 (unsigned long)(
1602 soc->link_desc_banks[i].base_vaddr_unaligned));
1603
1604 if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301605 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1606 FL("Link descriptor memory alloc failed"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001607 goto fail;
1608 }
1609 }
1610
1611 if (last_bank_size) {
1612 /* Allocate last bank in case total memory required is not exact
1613 * multiple of max_alloc_size
1614 */
1615 soc->link_desc_banks[i].base_vaddr_unaligned =
Dhanashri Atre57e420d2016-10-25 21:13:54 -07001616 qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001617 last_bank_size,
1618 &(soc->link_desc_banks[i].base_paddr_unaligned));
1619 soc->link_desc_banks[i].size = last_bank_size;
1620
1621 soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
1622 (soc->link_desc_banks[i].base_vaddr_unaligned) +
1623 ((unsigned long)(
1624 soc->link_desc_banks[i].base_vaddr_unaligned) %
1625 link_desc_align));
1626
1627 soc->link_desc_banks[i].base_paddr =
1628 (unsigned long)(
1629 soc->link_desc_banks[i].base_paddr_unaligned) +
1630 ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1631 (unsigned long)(
1632 soc->link_desc_banks[i].base_vaddr_unaligned));
1633 }
1634
1635
1636 /* Allocate and setup link descriptor idle list for HW internal use */
1637 entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
1638 total_mem_size = entry_size * total_link_descs;
1639
1640 if (total_mem_size <= max_alloc_size) {
1641 void *desc;
1642
1643 if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
1644 WBM_IDLE_LINK, 0, 0, total_link_descs)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301645 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1646 FL("Link desc idle ring setup failed"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001647 goto fail;
1648 }
1649
1650 hal_srng_access_start_unlocked(soc->hal_soc,
1651 soc->wbm_idle_link_ring.hal_srng);
1652
1653 for (i = 0; i < MAX_LINK_DESC_BANKS &&
1654 soc->link_desc_banks[i].base_paddr; i++) {
1655 uint32_t num_entries = (soc->link_desc_banks[i].size -
Karunakar Dasinenic45b01e2017-06-07 11:38:01 -07001656 ((unsigned long)(
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001657 soc->link_desc_banks[i].base_vaddr) -
1658 (unsigned long)(
Karunakar Dasinenic45b01e2017-06-07 11:38:01 -07001659 soc->link_desc_banks[i].base_vaddr_unaligned)))
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001660 / link_desc_size;
1661 unsigned long paddr = (unsigned long)(
1662 soc->link_desc_banks[i].base_paddr);
1663
1664 while (num_entries && (desc = hal_srng_src_get_next(
1665 soc->hal_soc,
1666 soc->wbm_idle_link_ring.hal_srng))) {
Karunakar Dasinenidbaf4be2017-07-19 18:12:43 -07001667 hal_set_link_desc_addr(desc,
1668 LINK_DESC_COOKIE(desc_id, i), paddr);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001669 num_entries--;
Karunakar Dasinenidbaf4be2017-07-19 18:12:43 -07001670 desc_id++;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001671 paddr += link_desc_size;
1672 }
1673 }
1674 hal_srng_access_end_unlocked(soc->hal_soc,
1675 soc->wbm_idle_link_ring.hal_srng);
1676 } else {
1677 uint32_t num_scatter_bufs;
1678 uint32_t num_entries_per_buf;
1679 uint32_t rem_entries;
1680 uint8_t *scatter_buf_ptr;
1681 uint16_t scatter_buf_num;
1682
1683 soc->wbm_idle_scatter_buf_size =
1684 hal_idle_list_scatter_buf_size(soc->hal_soc);
1685 num_entries_per_buf = hal_idle_scatter_buf_num_entries(
1686 soc->hal_soc, soc->wbm_idle_scatter_buf_size);
Pramod Simhaccb15fb2017-06-19 12:21:13 -07001687 num_scatter_bufs = hal_idle_list_num_scatter_bufs(
1688 soc->hal_soc, total_mem_size,
1689 soc->wbm_idle_scatter_buf_size);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001690
Shaakir Mohamed41323bb2018-03-20 15:57:15 -07001691 if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
1692 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1693 FL("scatter bufs size out of bounds"));
1694 goto fail;
1695 }
1696
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001697 for (i = 0; i < num_scatter_bufs; i++) {
1698 soc->wbm_idle_scatter_buf_base_vaddr[i] =
Shaakir Mohamed41323bb2018-03-20 15:57:15 -07001699 qdf_mem_alloc_consistent(soc->osdev,
1700 soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001701 soc->wbm_idle_scatter_buf_size,
1702 &(soc->wbm_idle_scatter_buf_base_paddr[i]));
1703 if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301704 QDF_TRACE(QDF_MODULE_ID_DP,
Shaakir Mohamed41323bb2018-03-20 15:57:15 -07001705 QDF_TRACE_LEVEL_ERROR,
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301706 FL("Scatter list memory alloc failed"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001707 goto fail;
1708 }
1709 }
1710
1711 /* Populate idle list scatter buffers with link descriptor
1712 * pointers
1713 */
1714 scatter_buf_num = 0;
1715 scatter_buf_ptr = (uint8_t *)(
1716 soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
1717 rem_entries = num_entries_per_buf;
1718
1719 for (i = 0; i < MAX_LINK_DESC_BANKS &&
1720 soc->link_desc_banks[i].base_paddr; i++) {
1721 uint32_t num_link_descs =
1722 (soc->link_desc_banks[i].size -
Karunakar Dasinenic45b01e2017-06-07 11:38:01 -07001723 ((unsigned long)(
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001724 soc->link_desc_banks[i].base_vaddr) -
1725 (unsigned long)(
Karunakar Dasinenic45b01e2017-06-07 11:38:01 -07001726 soc->link_desc_banks[i].base_vaddr_unaligned)))
1727 / link_desc_size;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001728 unsigned long paddr = (unsigned long)(
1729 soc->link_desc_banks[i].base_paddr);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001730
Karunakar Dasinenidbaf4be2017-07-19 18:12:43 -07001731 while (num_link_descs) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001732 hal_set_link_desc_addr((void *)scatter_buf_ptr,
Karunakar Dasinenidbaf4be2017-07-19 18:12:43 -07001733 LINK_DESC_COOKIE(desc_id, i), paddr);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001734 num_link_descs--;
Karunakar Dasinenidbaf4be2017-07-19 18:12:43 -07001735 desc_id++;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001736 paddr += link_desc_size;
Pramod Simhaccb15fb2017-06-19 12:21:13 -07001737 rem_entries--;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001738 if (rem_entries) {
Pramod Simhaccb15fb2017-06-19 12:21:13 -07001739 scatter_buf_ptr += entry_size;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001740 } else {
1741 rem_entries = num_entries_per_buf;
1742 scatter_buf_num++;
Pramod Simhaccb15fb2017-06-19 12:21:13 -07001743
1744 if (scatter_buf_num >= num_scatter_bufs)
1745 break;
1746
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001747 scatter_buf_ptr = (uint8_t *)(
1748 soc->wbm_idle_scatter_buf_base_vaddr[
1749 scatter_buf_num]);
1750 }
1751 }
1752 }
1753 /* Setup link descriptor idle list in HW */
1754 hal_setup_link_idle_list(soc->hal_soc,
1755 soc->wbm_idle_scatter_buf_base_paddr,
1756 soc->wbm_idle_scatter_buf_base_vaddr,
1757 num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
Leo Chang5ea93a42016-11-03 12:39:49 -07001758 (uint32_t)(scatter_buf_ptr -
Pramod Simhaccb15fb2017-06-19 12:21:13 -07001759 (uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
1760 scatter_buf_num-1])), total_link_descs);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001761 }
1762 return 0;
1763
1764fail:
1765 if (soc->wbm_idle_link_ring.hal_srng) {
1766 dp_srng_cleanup(soc->hal_soc, &soc->wbm_idle_link_ring,
1767 WBM_IDLE_LINK, 0);
1768 }
1769
1770 for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1771 if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
Dhanashri Atre57e420d2016-10-25 21:13:54 -07001772 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001773 soc->wbm_idle_scatter_buf_size,
1774 soc->wbm_idle_scatter_buf_base_vaddr[i],
1775 soc->wbm_idle_scatter_buf_base_paddr[i], 0);
Karunakar Dasinenib71ad042018-01-22 16:50:20 -08001776 soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001777 }
1778 }
1779
1780 for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1781 if (soc->link_desc_banks[i].base_vaddr_unaligned) {
Dhanashri Atre57e420d2016-10-25 21:13:54 -07001782 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001783 soc->link_desc_banks[i].size,
1784 soc->link_desc_banks[i].base_vaddr_unaligned,
1785 soc->link_desc_banks[i].base_paddr_unaligned,
1786 0);
Karunakar Dasinenib71ad042018-01-22 16:50:20 -08001787 soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001788 }
1789 }
1790 return QDF_STATUS_E_FAILURE;
1791}
1792
1793/*
1794 * Free link descriptor pool that was setup HW
1795 */
Jeff Johnsonf1352572017-01-10 14:24:10 -08001796static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001797{
1798 int i;
1799
1800 if (soc->wbm_idle_link_ring.hal_srng) {
Manoj Ekbote525bcab2017-09-01 17:23:32 -07001801 dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001802 WBM_IDLE_LINK, 0);
1803 }
1804
1805 for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1806 if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
Dhanashri Atre57e420d2016-10-25 21:13:54 -07001807 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001808 soc->wbm_idle_scatter_buf_size,
1809 soc->wbm_idle_scatter_buf_base_vaddr[i],
1810 soc->wbm_idle_scatter_buf_base_paddr[i], 0);
Karunakar Dasinenib71ad042018-01-22 16:50:20 -08001811 soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001812 }
1813 }
1814
1815 for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1816 if (soc->link_desc_banks[i].base_vaddr_unaligned) {
Dhanashri Atre57e420d2016-10-25 21:13:54 -07001817 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001818 soc->link_desc_banks[i].size,
1819 soc->link_desc_banks[i].base_vaddr_unaligned,
1820 soc->link_desc_banks[i].base_paddr_unaligned,
1821 0);
Karunakar Dasinenib71ad042018-01-22 16:50:20 -08001822 soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001823 }
1824 }
1825}
1826
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05301827#define REO_DST_RING_SIZE_QCA6290 1024
1828#define REO_DST_RING_SIZE_QCA8074 2048
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001829
1830/*
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301831 * dp_wds_aging_timer_fn() - Timer callback function for WDS aging
1832 * @soc: Datapath SOC handle
1833 *
Tallapragada Kalyan71c46b92018-03-01 13:17:10 +05301834 * This is a timer function used to age out stale AST nodes from
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301835 * AST table
1836 */
1837#ifdef FEATURE_WDS
1838static void dp_wds_aging_timer_fn(void *soc_hdl)
1839{
1840 struct dp_soc *soc = (struct dp_soc *) soc_hdl;
1841 struct dp_pdev *pdev;
1842 struct dp_vdev *vdev;
1843 struct dp_peer *peer;
Tallapragada Kalyan1c14d5d2017-07-26 00:31:35 +05301844 struct dp_ast_entry *ase, *temp_ase;
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301845 int i;
1846
1847 qdf_spin_lock_bh(&soc->ast_lock);
1848
1849 for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
1850 pdev = soc->pdev_list[i];
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05301851 qdf_spin_lock_bh(&pdev->vdev_list_lock);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301852 DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1853 DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
Tallapragada Kalyan1c14d5d2017-07-26 00:31:35 +05301854 DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301855 /*
1856 * Do not expire static ast entries
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +05301857 * and HM WDS entries
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301858 */
Tallapragada Kalyan1f49bff2018-04-12 19:21:21 +05301859 if (ase->type != CDP_TXRX_AST_TYPE_WDS)
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301860 continue;
1861
1862 if (ase->is_active) {
1863 ase->is_active = FALSE;
1864 continue;
1865 }
1866
Pamidipati, Vijay899e7752017-07-25 22:09:28 +05301867 DP_STATS_INC(soc, ast.aged_out, 1);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301868 dp_peer_del_ast(soc, ase);
1869 }
1870 }
1871 }
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05301872 qdf_spin_unlock_bh(&pdev->vdev_list_lock);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301873 }
1874
1875 qdf_spin_unlock_bh(&soc->ast_lock);
1876
1877 if (qdf_atomic_read(&soc->cmn_init_done))
1878 qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1879}
1880
Ruchi, Agrawal89219d92018-02-26 16:43:06 +05301881
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301882/*
1883 * dp_soc_wds_attach() - Setup WDS timer and AST table
1884 * @soc: Datapath SOC handle
1885 *
1886 * Return: None
1887 */
1888static void dp_soc_wds_attach(struct dp_soc *soc)
1889{
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301890 qdf_timer_init(soc->osdev, &soc->wds_aging_timer,
1891 dp_wds_aging_timer_fn, (void *)soc,
1892 QDF_TIMER_TYPE_WAKE_APPS);
1893
1894 qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1895}
1896
1897/*
1898 * dp_soc_wds_detach() - Detach WDS data structures and timers
1899 * @txrx_soc: DP SOC handle
1900 *
1901 * Return: None
1902 */
1903static void dp_soc_wds_detach(struct dp_soc *soc)
1904{
1905 qdf_timer_stop(&soc->wds_aging_timer);
1906 qdf_timer_free(&soc->wds_aging_timer);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301907}
#else
/* FEATURE_WDS disabled: no-op stubs so callers need no #ifdef guards */
static void dp_soc_wds_attach(struct dp_soc *soc)
{
}

static void dp_soc_wds_detach(struct dp_soc *soc)
{
}
#endif
1917
1918/*
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05301919 * dp_soc_reset_ring_map() - Reset cpu ring map
1920 * @soc: Datapath soc handler
1921 *
1922 * This api resets the default cpu ring map
1923 */
1924
1925static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
1926{
1927 uint8_t i;
1928 int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1929
1930 for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
1931 if (nss_config == 1) {
1932 /*
1933 * Setting Tx ring map for one nss offloaded radio
1934 */
1935 soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
1936 } else if (nss_config == 2) {
1937 /*
1938 * Setting Tx ring for two nss offloaded radios
1939 */
1940 soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
1941 } else {
1942 /*
1943 * Setting Tx ring map for all nss offloaded radios
1944 */
1945 soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_ALL_RADIO_OFFLOADED_MAP][i];
1946 }
1947 }
1948}
1949
Aniruddha Paule3a03342017-09-19 16:42:10 +05301950/*
1951 * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
1952 * @dp_soc - DP soc handle
1953 * @ring_type - ring type
1954 * @ring_num - ring_num
1955 *
1956 * return 0 or 1
1957 */
1958static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
1959{
1960 uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1961 uint8_t status = 0;
1962
1963 switch (ring_type) {
1964 case WBM2SW_RELEASE:
1965 case REO_DST:
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001966 case RXDMA_BUF:
Aniruddha Paule3a03342017-09-19 16:42:10 +05301967 status = ((nss_config) & (1 << ring_num));
1968 break;
1969 default:
1970 break;
1971 }
1972
1973 return status;
1974}
1975
1976/*
1977 * dp_soc_reset_intr_mask() - reset interrupt mask
1978 * @dp_soc - DP Soc handle
1979 *
1980 * Return: Return void
1981 */
1982static void dp_soc_reset_intr_mask(struct dp_soc *soc)
1983{
1984 uint8_t j;
1985 int *grp_mask = NULL;
1986 int group_number, mask, num_ring;
1987
1988 /* number of tx ring */
1989 num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
1990
1991 /*
1992 * group mask for tx completion ring.
1993 */
1994 grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
1995
1996 /* loop and reset the mask for only offloaded ring */
1997 for (j = 0; j < num_ring; j++) {
1998 if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
1999 continue;
2000 }
2001
2002 /*
2003 * Group number corresponding to tx offloaded ring.
2004 */
2005 group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2006 if (group_number < 0) {
2007 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07002008 FL("ring not part of any group; ring_type: %d,ring_num %d"),
Aniruddha Paule3a03342017-09-19 16:42:10 +05302009 WBM2SW_RELEASE, j);
2010 return;
2011 }
2012
2013 /* reset the tx mask for offloaded ring */
2014 mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2015 mask &= (~(1 << j));
2016
2017 /*
2018 * reset the interrupt mask for offloaded ring.
2019 */
2020 wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2021 }
2022
2023 /* number of rx rings */
2024 num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2025
2026 /*
2027 * group mask for reo destination ring.
2028 */
2029 grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2030
2031 /* loop and reset the mask for only offloaded ring */
2032 for (j = 0; j < num_ring; j++) {
2033 if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
2034 continue;
2035 }
2036
2037 /*
2038 * Group number corresponding to rx offloaded ring.
2039 */
2040 group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2041 if (group_number < 0) {
2042 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07002043 FL("ring not part of any group; ring_type: %d,ring_num %d"),
Aniruddha Paule3a03342017-09-19 16:42:10 +05302044 REO_DST, j);
2045 return;
2046 }
2047
2048 /* set the interrupt mask for offloaded ring */
2049 mask = wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2050 mask &= (~(1 << j));
2051
2052 /*
2053 * set the interrupt mask to zero for rx offloaded radio.
2054 */
2055 wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2056 }
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07002057
2058 /*
2059 * group mask for Rx buffer refill ring
2060 */
2061 grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2062
2063 /* loop and reset the mask for only offloaded ring */
2064 for (j = 0; j < MAX_PDEV_CNT; j++) {
2065 if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
2066 continue;
2067 }
2068
2069 /*
2070 * Group number corresponding to rx offloaded ring.
2071 */
2072 group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2073 if (group_number < 0) {
2074 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2075 FL("ring not part of any group; ring_type: %d,ring_num %d"),
2076 REO_DST, j);
2077 return;
2078 }
2079
2080 /* set the interrupt mask for offloaded ring */
2081 mask = wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2082 group_number);
2083 mask &= (~(1 << j));
2084
2085 /*
2086 * set the interrupt mask to zero for rx offloaded radio.
2087 */
2088 wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2089 group_number, mask);
2090 }
Aniruddha Paule3a03342017-09-19 16:42:10 +05302091}
2092
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302093#ifdef IPA_OFFLOAD
2094/**
2095 * dp_reo_remap_config() - configure reo remap register value based
2096 * nss configuration.
2097 * based on offload_radio value below remap configuration
2098 * get applied.
2099 * 0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
2100 * 1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
2101 * 2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
2102 * 3 - both Radios handled by NSS (remap not required)
2103 * 4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
2104 *
2105 * @remap1: output parameter indicates reo remap 1 register value
2106 * @remap2: output parameter indicates reo remap 2 register value
2107 * Return: bool type, true if remap is configured else false.
2108 */
2109static bool dp_reo_remap_config(struct dp_soc *soc,
2110 uint32_t *remap1,
2111 uint32_t *remap2)
2112{
2113
2114 *remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
2115 (0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
2116
2117 *remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
2118 (0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
2119
2120 return true;
2121}
2122#else
2123static bool dp_reo_remap_config(struct dp_soc *soc,
2124 uint32_t *remap1,
2125 uint32_t *remap2)
2126{
2127 uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2128
2129 switch (offload_radio) {
2130 case 0:
2131 *remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2132 (0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2133 (0x3 << 18) | (0x4 << 21)) << 8;
2134
2135 *remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2136 (0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2137 (0x3 << 18) | (0x4 << 21)) << 8;
2138 break;
2139
2140 case 1:
2141 *remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
2142 (0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
2143 (0x2 << 18) | (0x3 << 21)) << 8;
2144
2145 *remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
2146 (0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
2147 (0x4 << 18) | (0x2 << 21)) << 8;
2148 break;
2149
2150 case 2:
2151 *remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
2152 (0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
2153 (0x1 << 18) | (0x3 << 21)) << 8;
2154
2155 *remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
2156 (0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
2157 (0x4 << 18) | (0x1 << 21)) << 8;
2158 break;
2159
2160 case 3:
2161 /* return false if both radios are offloaded to NSS */
2162 return false;
2163 }
2164 return true;
2165}
2166#endif
2167
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302168/*
Aniruddha Paul91dfd502018-01-08 11:24:34 +05302169 * dp_reo_frag_dst_set() - configure reo register to set the
2170 * fragment destination ring
2171 * @soc : Datapath soc
2172 * @frag_dst_ring : output parameter to set fragment destination ring
2173 *
2174 * Based on offload_radio below fragment destination rings is selected
2175 * 0 - TCL
2176 * 1 - SW1
2177 * 2 - SW2
2178 * 3 - SW3
2179 * 4 - SW4
2180 * 5 - Release
2181 * 6 - FW
2182 * 7 - alternate select
2183 *
2184 * return: void
2185 */
2186static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2187{
2188 uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2189
2190 switch (offload_radio) {
2191 case 0:
2192 *frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
2193 break;
2194 case 3:
2195 *frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2196 break;
2197 default:
2198 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2199 FL("dp_reo_frag_dst_set invalid offload radio config"));
2200 break;
2201 }
2202}
2203
2204/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002205 * dp_soc_cmn_setup() - Common SoC level initializion
2206 * @soc: Datapath SOC handle
2207 *
2208 * This is an internal function used to setup common SOC data structures,
2209 * to be called from PDEV attach after receiving HW mode capabilities from FW
2210 */
2211static int dp_soc_cmn_setup(struct dp_soc *soc)
2212{
2213 int i;
Dhanashri Atre14049172016-11-11 18:32:36 -08002214 struct hal_reo_params reo_params;
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302215 int tx_ring_size;
2216 int tx_comp_ring_size;
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05302217 int reo_dst_ring_size;
Vivek126db5d2018-07-25 22:05:04 +05302218 uint32_t entries;
2219 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002220
Ravi Joshi86e98262017-03-01 13:47:03 -08002221 if (qdf_atomic_read(&soc->cmn_init_done))
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002222 return 0;
2223
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002224 if (dp_hw_link_desc_pool_setup(soc))
2225 goto fail1;
2226
Vivek126db5d2018-07-25 22:05:04 +05302227 soc_cfg_ctx = soc->wlan_cfg_ctx;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002228 /* Setup SRNG rings */
2229 /* Common rings */
2230 if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
Vivek126db5d2018-07-25 22:05:04 +05302231 wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx))) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302232 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2233 FL("dp_srng_setup failed for wbm_desc_rel_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002234 goto fail1;
2235 }
2236
2237
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302238 soc->num_tcl_data_rings = 0;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002239 /* Tx data rings */
Vivek126db5d2018-07-25 22:05:04 +05302240 if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002241 soc->num_tcl_data_rings =
Vivek126db5d2018-07-25 22:05:04 +05302242 wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302243 tx_comp_ring_size =
Vivek126db5d2018-07-25 22:05:04 +05302244 wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302245 tx_ring_size =
Vivek126db5d2018-07-25 22:05:04 +05302246 wlan_cfg_tx_ring_size(soc_cfg_ctx);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002247 for (i = 0; i < soc->num_tcl_data_rings; i++) {
2248 if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302249 TCL_DATA, i, 0, tx_ring_size)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302250 QDF_TRACE(QDF_MODULE_ID_DP,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002251 QDF_TRACE_LEVEL_ERROR,
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302252 FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002253 goto fail1;
2254 }
Yun Parkfde6b9e2017-06-26 17:13:11 -07002255 /*
2256 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
2257 * count
2258 */
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002259 if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302260 WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302261 QDF_TRACE(QDF_MODULE_ID_DP,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002262 QDF_TRACE_LEVEL_ERROR,
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302263 FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002264 goto fail1;
2265 }
2266 }
2267 } else {
2268 /* This will be incremented during per pdev ring setup */
2269 soc->num_tcl_data_rings = 0;
2270 }
2271
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302272 if (dp_tx_soc_attach(soc)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302273 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2274 FL("dp_tx_soc_attach failed"));
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302275 goto fail1;
2276 }
2277
Vivek126db5d2018-07-25 22:05:04 +05302278 entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002279 /* TCL command and status rings */
2280 if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
Vivek126db5d2018-07-25 22:05:04 +05302281 entries)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302282 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2283 FL("dp_srng_setup failed for tcl_cmd_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002284 goto fail1;
2285 }
2286
Vivek126db5d2018-07-25 22:05:04 +05302287 entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002288 if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
Vivek126db5d2018-07-25 22:05:04 +05302289 entries)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302290 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2291 FL("dp_srng_setup failed for tcl_status_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002292 goto fail1;
2293 }
2294
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05302295 reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002296
2297 /* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
2298 * descriptors
2299 */
2300
2301 /* Rx data rings */
Vivek126db5d2018-07-25 22:05:04 +05302302 if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002303 soc->num_reo_dest_rings =
Vivek126db5d2018-07-25 22:05:04 +05302304 wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
Dhanashri Atre14049172016-11-11 18:32:36 -08002305 QDF_TRACE(QDF_MODULE_ID_DP,
Aditya Sathishded018e2018-07-02 16:25:21 +05302306 QDF_TRACE_LEVEL_INFO,
2307 FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002308 for (i = 0; i < soc->num_reo_dest_rings; i++) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002309 if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05302310 i, 0, reo_dst_ring_size)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302311 QDF_TRACE(QDF_MODULE_ID_DP,
Vivek126db5d2018-07-25 22:05:04 +05302312 QDF_TRACE_LEVEL_ERROR,
2313 FL(RNG_ERR "reo_dest_ring [%d]"), i);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002314 goto fail1;
2315 }
2316 }
2317 } else {
2318 /* This will be incremented during per pdev ring setup */
2319 soc->num_reo_dest_rings = 0;
2320 }
2321
Vivek126db5d2018-07-25 22:05:04 +05302322 entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002323 /* LMAC RxDMA to SW Rings configuration */
Vivek126db5d2018-07-25 22:05:04 +05302324 if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002325 /* Only valid for MCL */
2326 struct dp_pdev *pdev = soc->pdev_list[0];
2327
2328 for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2329 if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
Vivek126db5d2018-07-25 22:05:04 +05302330 RXDMA_DST, 0, i,
2331 entries)) {
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002332 QDF_TRACE(QDF_MODULE_ID_DP,
Vivek126db5d2018-07-25 22:05:04 +05302333 QDF_TRACE_LEVEL_ERROR,
2334 FL(RNG_ERR "rxdma_err_dst_ring"));
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002335 goto fail1;
2336 }
2337 }
2338 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002339 /* TBD: call dp_rx_init to setup Rx SW descriptors */
2340
2341 /* REO reinjection ring */
Vivek126db5d2018-07-25 22:05:04 +05302342 entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002343 if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
Vivek126db5d2018-07-25 22:05:04 +05302344 entries)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302345 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Vivek126db5d2018-07-25 22:05:04 +05302346 FL("dp_srng_setup failed for reo_reinject_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002347 goto fail1;
2348 }
2349
2350
2351 /* Rx release ring */
2352 if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
Vivek126db5d2018-07-25 22:05:04 +05302353 wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx))) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302354 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Vivek126db5d2018-07-25 22:05:04 +05302355 FL("dp_srng_setup failed for rx_rel_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002356 goto fail1;
2357 }
2358
2359
2360 /* Rx exception ring */
Vivek126db5d2018-07-25 22:05:04 +05302361 entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
2362 if (dp_srng_setup(soc, &soc->reo_exception_ring,
2363 REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302364 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Vivek126db5d2018-07-25 22:05:04 +05302365 FL("dp_srng_setup failed for reo_exception_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002366 goto fail1;
2367 }
2368
2369
2370 /* REO command and status rings */
2371 if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
Vivek126db5d2018-07-25 22:05:04 +05302372 wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx))) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302373 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2374 FL("dp_srng_setup failed for reo_cmd_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002375 goto fail1;
2376 }
2377
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07002378 hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
2379 TAILQ_INIT(&soc->rx.reo_cmd_list);
2380 qdf_spinlock_create(&soc->rx.reo_cmd_lock);
2381
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002382 if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
Vivek126db5d2018-07-25 22:05:04 +05302383 wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx))) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302384 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2385 FL("dp_srng_setup failed for reo_status_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002386 goto fail1;
2387 }
2388
Yun Park92af7132017-09-13 16:33:35 -07002389 qdf_spinlock_create(&soc->ast_lock);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302390 dp_soc_wds_attach(soc);
2391
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302392 /* Reset the cpu ring map if radio is NSS offloaded */
Vivek126db5d2018-07-25 22:05:04 +05302393 if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302394 dp_soc_reset_cpu_ring_map(soc);
Aniruddha Paule3a03342017-09-19 16:42:10 +05302395 dp_soc_reset_intr_mask(soc);
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302396 }
2397
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002398 /* Setup HW REO */
Dhanashri Atre14049172016-11-11 18:32:36 -08002399 qdf_mem_zero(&reo_params, sizeof(reo_params));
2400
Vivek126db5d2018-07-25 22:05:04 +05302401 if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {
Dhanashri Atre14049172016-11-11 18:32:36 -08002402
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302403 /*
2404 * Reo ring remap is not required if both radios
2405 * are offloaded to NSS
2406 */
2407 if (!dp_reo_remap_config(soc,
2408 &reo_params.remap1,
2409 &reo_params.remap2))
2410 goto out;
2411
2412 reo_params.rx_hash_enabled = true;
2413 }
2414
psimhafc2f91b2018-01-10 15:30:03 -08002415 /* setup the global rx defrag waitlist */
2416 TAILQ_INIT(&soc->rx.defrag.waitlist);
2417 soc->rx.defrag.timeout_ms =
Vivek126db5d2018-07-25 22:05:04 +05302418 wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
psimhafc2f91b2018-01-10 15:30:03 -08002419 soc->rx.flags.defrag_timeout_check =
Vivek126db5d2018-07-25 22:05:04 +05302420 wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
Lin Baif1c577e2018-05-22 20:45:42 +08002421 qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
psimhafc2f91b2018-01-10 15:30:03 -08002422
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302423out:
Aniruddha Paul91dfd502018-01-08 11:24:34 +05302424 /*
2425 * set the fragment destination ring
2426 */
2427 dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
2428
Dhanashri Atre14049172016-11-11 18:32:36 -08002429 hal_reo_setup(soc->hal_soc, &reo_params);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002430
Ravi Joshi86e98262017-03-01 13:47:03 -08002431 qdf_atomic_set(&soc->cmn_init_done, 1);
Om Prakash Tripathi12126822017-08-03 10:21:24 +05302432 qdf_nbuf_queue_init(&soc->htt_stats.msg);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002433 return 0;
2434fail1:
2435 /*
2436 * Cleanup will be done as part of soc_detach, which will
2437 * be called on pdev attach failure
2438 */
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002439 return QDF_STATUS_E_FAILURE;
2440}
2441
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002442static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002443
Dhanashri Atre14049172016-11-11 18:32:36 -08002444static void dp_lro_hash_setup(struct dp_soc *soc)
2445{
2446 struct cdp_lro_hash_config lro_hash;
2447
2448 if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
2449 !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2450 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2451 FL("LRO disabled RX hash disabled"));
2452 return;
2453 }
2454
2455 qdf_mem_zero(&lro_hash, sizeof(lro_hash));
2456
2457 if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx)) {
2458 lro_hash.lro_enable = 1;
2459 lro_hash.tcp_flag = QDF_TCPHDR_ACK;
2460 lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
Houston Hoffman41b912c2017-08-30 14:27:51 -07002461 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
2462 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
Dhanashri Atre14049172016-11-11 18:32:36 -08002463 }
2464
Houston Hoffman41b912c2017-08-30 14:27:51 -07002465 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, FL("enabled"));
2466 qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
Dhanashri Atre14049172016-11-11 18:32:36 -08002467 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2468 LRO_IPV4_SEED_ARR_SZ));
Dhanashri Atre14049172016-11-11 18:32:36 -08002469 qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
2470 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2471 LRO_IPV6_SEED_ARR_SZ));
2472
Houston Hoffman41b912c2017-08-30 14:27:51 -07002473 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
2474 "lro_hash: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
Dhanashri Atre14049172016-11-11 18:32:36 -08002475 lro_hash.lro_enable, lro_hash.tcp_flag,
2476 lro_hash.tcp_flag_mask);
2477
Dhanashri Atre14049172016-11-11 18:32:36 -08002478 qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2479 QDF_TRACE_LEVEL_ERROR,
2480 (void *)lro_hash.toeplitz_hash_ipv4,
2481 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2482 LRO_IPV4_SEED_ARR_SZ));
2483
Dhanashri Atre14049172016-11-11 18:32:36 -08002484 qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2485 QDF_TRACE_LEVEL_ERROR,
2486 (void *)lro_hash.toeplitz_hash_ipv6,
2487 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2488 LRO_IPV6_SEED_ARR_SZ));
2489
2490 qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
2491
2492 if (soc->cdp_soc.ol_ops->lro_hash_config)
2493 (void)soc->cdp_soc.ol_ops->lro_hash_config
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05302494 (soc->ctrl_psoc, &lro_hash);
Dhanashri Atre14049172016-11-11 18:32:36 -08002495}
2496
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002497/*
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002498* dp_rxdma_ring_setup() - configure the RX DMA rings
2499* @soc: data path SoC handle
2500* @pdev: Physical device handle
2501*
2502* Return: 0 - success, > 0 - failure
2503*/
2504#ifdef QCA_HOST2FW_RXBUF_RING
2505static int dp_rxdma_ring_setup(struct dp_soc *soc,
2506 struct dp_pdev *pdev)
2507{
Vivek126db5d2018-07-25 22:05:04 +05302508 struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2509 int max_mac_rings;
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002510 int i;
2511
Vivek126db5d2018-07-25 22:05:04 +05302512 pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2513 max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
2514
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002515 for (i = 0; i < max_mac_rings; i++) {
2516 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +05302517 "%s: pdev_id %d mac_id %d",
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002518 __func__, pdev->pdev_id, i);
2519 if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
Vivek126db5d2018-07-25 22:05:04 +05302520 RXDMA_BUF, 1, i,
2521 wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx))) {
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002522 QDF_TRACE(QDF_MODULE_ID_DP,
2523 QDF_TRACE_LEVEL_ERROR,
2524 FL("failed rx mac ring setup"));
2525 return QDF_STATUS_E_FAILURE;
2526 }
2527 }
2528 return QDF_STATUS_SUCCESS;
2529}
2530#else
2531static int dp_rxdma_ring_setup(struct dp_soc *soc,
2532 struct dp_pdev *pdev)
2533{
2534 return QDF_STATUS_SUCCESS;
2535}
2536#endif
Ishank Jain949674c2017-02-27 17:09:29 +05302537
2538/**
2539 * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
2540 * @pdev - DP_PDEV handle
2541 *
2542 * Return: void
2543 */
2544static inline void
2545dp_dscp_tid_map_setup(struct dp_pdev *pdev)
2546{
2547 uint8_t map_id;
2548 for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
2549 qdf_mem_copy(pdev->dscp_tid_map[map_id], default_dscp_tid_map,
2550 sizeof(default_dscp_tid_map));
2551 }
2552 for (map_id = 0; map_id < HAL_MAX_HW_DSCP_TID_MAPS; map_id++) {
2553 hal_tx_set_dscp_tid_map(pdev->soc->hal_soc,
2554 pdev->dscp_tid_map[map_id],
2555 map_id);
2556 }
2557}
2558
#ifdef QCA_SUPPORT_SON
/**
 * dp_mark_peer_inact(): Update peer inactivity status
 * @peer_handle: datapath peer handle
 * @inactive: new inactivity state to record for the peer
 *
 * Records the new state in the peer, reloads the peer's inactivity
 * countdown when the peer becomes active again, and notifies the
 * control plane (record_act_change) only on an actual state change.
 *
 * Return: void
 */
void dp_mark_peer_inact(void *peer_handle, bool inactive)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	bool inactive_old;

	if (!peer)
		return;

	pdev = peer->vdev->pdev;
	soc = pdev->soc;

	/* Remember previous state so we only report transitions */
	inactive_old = peer->peer_bs_inact_flag == 1;
	if (!inactive)
		peer->peer_bs_inact = soc->pdev_bs_inact_reload;
	peer->peer_bs_inact_flag = inactive ? 1 : 0;

	if (inactive_old != inactive) {
		/**
		 * Note: a node lookup can happen in RX datapath context
		 * when a node changes from inactive to active (at most once
		 * per inactivity timeout threshold)
		 */
		if (soc->cdp_soc.ol_ops->record_act_change) {
			soc->cdp_soc.ol_ops->record_act_change(
				(void *)pdev->ctrl_pdev,
				peer->mac_addr.raw, !inactive);
		}
	}
}

/**
 * dp_txrx_peer_find_inact_timeout_handler(): Inactivity timeout function
 *
 * Periodically checks the inactivity status: walks every AP vdev's peer
 * list under soc->peer_ref_mutex (and the per-pdev vdev_list_lock),
 * decrements each authorized peer's countdown, marks peers inactive
 * when it reaches zero, then re-arms the timer.
 */
static os_timer_func(dp_txrx_peer_find_inact_timeout_handler)
{
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_peer *peer;
	struct dp_soc *soc;
	int i;

	OS_GET_TIMER_ARG(soc, struct dp_soc *);

	qdf_spin_lock(&soc->peer_ref_mutex);

	for (i = 0; i < soc->pdev_count; i++) {
	pdev = soc->pdev_list[i];
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		/* Inactivity tracking only applies to AP-mode vdevs */
		if (vdev->opmode != wlan_op_mode_ap)
			continue;

		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
			if (!peer->authorize) {
				/**
				 * Inactivity check only interested in
				 * connected node
				 */
				continue;
			}
			if (peer->peer_bs_inact > soc->pdev_bs_inact_reload) {
				/**
				 * This check ensures we do not wait extra long
				 * due to the potential race condition
				 */
				peer->peer_bs_inact = soc->pdev_bs_inact_reload;
			}
			if (peer->peer_bs_inact > 0) {
				/* Do not let it wrap around */
				peer->peer_bs_inact--;
			}
			if (peer->peer_bs_inact == 0)
				dp_mark_peer_inact(peer, true);
		}
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	qdf_spin_unlock(&soc->peer_ref_mutex);
	/* Re-arm: interval is kept in seconds, timer API takes ms */
	qdf_timer_mod(&soc->pdev_bs_inact_timer,
		soc->pdev_bs_inact_interval * 1000);
}


/**
 * dp_free_inact_timer(): free inact timer
 * @soc: soc handle owning the inactivity timer
 *
 * Return: void
 */
void dp_free_inact_timer(struct dp_soc *soc)
{
	qdf_timer_free(&soc->pdev_bs_inact_timer);
}
#else

/* SON support compiled out: inactivity tracking is a no-op */
void dp_mark_peer_inact(void *peer, bool inactive)
{
	return;
}

/* SON support compiled out: no timer was ever created */
void dp_free_inact_timer(struct dp_soc *soc)
{
	return;
}

#endif
2677
#ifdef IPA_OFFLOAD
/**
 * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Allocates the extra RXDMA_BUF ring (ring index
 * IPA_RX_REFILL_BUF_RING_IDX) used for Rx buffer refill when IPA
 * offload is active; depth comes from the soc rxdma refill ring size
 * config.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_FAILURE: Error return
 *         (NOTE(review): header previously claimed E_RESOURCES, but the
 *         code below returns E_FAILURE)
 */
static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
	 struct dp_pdev *pdev)
{
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	int entries;

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);

	/* Setup second Rx refill buffer ring */
	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
			  IPA_RX_REFILL_BUF_RING_IDX,
			  pdev->pdev_id,
			  entries)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed second rx refill ring"));
		return QDF_STATUS_E_FAILURE;
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Releases the ring created by dp_setup_ipa_rx_refill_buf_ring().
 *
 * Return: void
 */
static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
	 struct dp_pdev *pdev)
{
	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
			IPA_RX_REFILL_BUF_RING_IDX);
}

#else
/* IPA offload compiled out: no second refill ring to set up */
static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
	 struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

/* IPA offload compiled out: nothing to clean up */
static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
	 struct dp_pdev *pdev)
{
}
#endif
Yun Park47e6af82018-01-17 12:15:01 -08002734
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002735#ifndef QCA_WIFI_QCA6390
2736static
2737QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2738{
2739 int mac_id = 0;
2740 int pdev_id = pdev->pdev_id;
Vivek126db5d2018-07-25 22:05:04 +05302741 int entries;
2742 struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2743
2744 pdev_cfg_ctx = pdev->wlan_cfg_ctx;
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002745
2746 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
2747 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
2748
Vivek126db5d2018-07-25 22:05:04 +05302749 entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002750 if (dp_srng_setup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
2751 RXDMA_MONITOR_BUF, 0, mac_for_pdev,
Vivek126db5d2018-07-25 22:05:04 +05302752 entries)) {
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002753 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Vivek126db5d2018-07-25 22:05:04 +05302754 FL(RNG_ERR "rxdma_mon_buf_ring "));
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002755 return QDF_STATUS_E_NOMEM;
2756 }
2757
Vivek126db5d2018-07-25 22:05:04 +05302758 entries = wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002759 if (dp_srng_setup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
2760 RXDMA_MONITOR_DST, 0, mac_for_pdev,
Vivek126db5d2018-07-25 22:05:04 +05302761 entries)) {
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002762 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Vivek126db5d2018-07-25 22:05:04 +05302763 FL(RNG_ERR "rxdma_mon_dst_ring"));
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002764 return QDF_STATUS_E_NOMEM;
2765 }
2766
Vivek126db5d2018-07-25 22:05:04 +05302767 entries = wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002768 if (dp_srng_setup(soc, &pdev->rxdma_mon_status_ring[mac_id],
2769 RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
Vivek126db5d2018-07-25 22:05:04 +05302770 entries)) {
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002771 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Vivek126db5d2018-07-25 22:05:04 +05302772 FL(RNG_ERR "rxdma_mon_status_ring"));
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002773 return QDF_STATUS_E_NOMEM;
2774 }
2775
Vivek126db5d2018-07-25 22:05:04 +05302776 entries = wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002777 if (dp_srng_setup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
2778 RXDMA_MONITOR_DESC, 0, mac_for_pdev,
Vivek126db5d2018-07-25 22:05:04 +05302779 entries)) {
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002780 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Vivek126db5d2018-07-25 22:05:04 +05302781 FL(RNG_ERR "rxdma_mon_desc_ring"));
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002782 return QDF_STATUS_E_NOMEM;
2783 }
2784 }
2785 return QDF_STATUS_SUCCESS;
2786}
2787#else
2788static QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2789{
2790 return QDF_STATUS_SUCCESS;
2791}
Yun Park47e6af82018-01-17 12:15:01 -08002792#endif
2793
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002794/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002795* dp_pdev_attach_wifi3() - attach txrx pdev
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05302796* @ctrl_pdev: Opaque PDEV object
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002797* @txrx_soc: Datapath SOC handle
2798* @htc_handle: HTC handle for host-target interface
2799* @qdf_osdev: QDF OS device
2800* @pdev_id: PDEV ID
2801*
2802* Return: DP PDEV handle on success, NULL on failure
2803*/
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002804static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
Akshay Kosigi1a9c6d12018-04-26 00:54:23 +05302805 struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
Leo Chang5ea93a42016-11-03 12:39:49 -07002806 HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002807{
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302808 int tx_ring_size;
2809 int tx_comp_ring_size;
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05302810 int reo_dst_ring_size;
Vivek126db5d2018-07-25 22:05:04 +05302811 int entries;
2812 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2813 int nss_cfg;
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302814
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002815 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2816 struct dp_pdev *pdev = qdf_mem_malloc(sizeof(*pdev));
2817
2818 if (!pdev) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302819 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2820 FL("DP PDEV memory allocation failed"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002821 goto fail0;
2822 }
2823
Vivek126db5d2018-07-25 22:05:04 +05302824 soc_cfg_ctx = soc->wlan_cfg_ctx;
2825 pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
Vijay Pamidipatid41d6d62016-10-19 21:19:00 +05302826
2827 if (!pdev->wlan_cfg_ctx) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302828 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2829 FL("pdev cfg_attach failed"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302830
2831 qdf_mem_free(pdev);
Vijay Pamidipatid41d6d62016-10-19 21:19:00 +05302832 goto fail0;
2833 }
2834
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05302835 /*
2836 * set nss pdev config based on soc config
2837 */
Vivek126db5d2018-07-25 22:05:04 +05302838 nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05302839 wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
Vivek126db5d2018-07-25 22:05:04 +05302840 (nss_cfg & (1 << pdev_id)));
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05302841
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002842 pdev->soc = soc;
Akshay Kosigi1a9c6d12018-04-26 00:54:23 +05302843 pdev->ctrl_pdev = ctrl_pdev;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002844 pdev->pdev_id = pdev_id;
2845 soc->pdev_list[pdev_id] = pdev;
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08002846 soc->pdev_count++;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002847
2848 TAILQ_INIT(&pdev->vdev_list);
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05302849 qdf_spinlock_create(&pdev->vdev_list_lock);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002850 pdev->vdev_count = 0;
2851
Pamidipati, Vijay9c9a2872017-05-31 10:06:34 +05302852 qdf_spinlock_create(&pdev->tx_mutex);
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05302853 qdf_spinlock_create(&pdev->neighbour_peer_mutex);
2854 TAILQ_INIT(&pdev->neighbour_peers_list);
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05302855 pdev->neighbour_peers_added = false;
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05302856
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002857 if (dp_soc_cmn_setup(soc)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302858 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2859 FL("dp_soc_cmn_setup failed"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302860 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002861 }
2862
2863 /* Setup per PDEV TCL rings if configured */
2864 if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302865 tx_ring_size =
Vivek126db5d2018-07-25 22:05:04 +05302866 wlan_cfg_tx_ring_size(soc_cfg_ctx);
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302867 tx_comp_ring_size =
Vivek126db5d2018-07-25 22:05:04 +05302868 wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302869
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002870 if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302871 pdev_id, pdev_id, tx_ring_size)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302872 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2873 FL("dp_srng_setup failed for tcl_data_ring"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302874 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002875 }
2876 if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302877 WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302878 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2879 FL("dp_srng_setup failed for tx_comp_ring"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302880 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002881 }
2882 soc->num_tcl_data_rings++;
2883 }
2884
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302885 /* Tx specific init */
2886 if (dp_tx_pdev_attach(pdev)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302887 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2888 FL("dp_tx_pdev_attach failed"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302889 goto fail1;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302890 }
2891
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05302892 reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002893 /* Setup per PDEV REO rings if configured */
Vivek126db5d2018-07-25 22:05:04 +05302894 if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002895 if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05302896 pdev_id, pdev_id, reo_dst_ring_size)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302897 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2898 FL("dp_srng_setup failed for reo_dest_ringn"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302899 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002900 }
2901 soc->num_reo_dest_rings++;
2902
2903 }
Dhanashri Atre7351d172016-10-12 13:08:09 -07002904 if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
Vivek126db5d2018-07-25 22:05:04 +05302905 wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx))) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302906 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2907 FL("dp_srng_setup failed rx refill ring"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302908 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002909 }
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002910
2911 if (dp_rxdma_ring_setup(soc, pdev)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302912 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002913 FL("RXDMA ring config failed"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302914 goto fail1;
Dhanashri Atre7351d172016-10-12 13:08:09 -07002915 }
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002916
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07002917 if (dp_mon_rings_setup(soc, pdev)) {
2918 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2919 FL("MONITOR rings setup failed"));
2920 goto fail1;
Kai Chen6eca1a62017-01-12 10:17:53 -08002921 }
2922
Vivek126db5d2018-07-25 22:05:04 +05302923 entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002924 if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2925 if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
Vivek126db5d2018-07-25 22:05:04 +05302926 0, pdev_id,
2927 entries)) {
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002928 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Vivek126db5d2018-07-25 22:05:04 +05302929 FL(RNG_ERR "rxdma_err_dst_ring"));
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002930 goto fail1;
2931 }
Pramod Simhae382ff82017-06-05 18:09:26 -07002932 }
2933
Yun Park47e6af82018-01-17 12:15:01 -08002934 if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
Yun Park601d0d82017-08-28 21:49:31 -07002935 goto fail1;
Yun Park601d0d82017-08-28 21:49:31 -07002936
Yun Parkfde6b9e2017-06-26 17:13:11 -07002937 if (dp_ipa_ring_resource_setup(soc, pdev))
2938 goto fail1;
2939
2940 if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
Yun Park601d0d82017-08-28 21:49:31 -07002941 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2942 FL("dp_ipa_uc_attach failed"));
Yun Parkfde6b9e2017-06-26 17:13:11 -07002943 goto fail1;
2944 }
2945
Leo Chang5ea93a42016-11-03 12:39:49 -07002946 /* Rx specific init */
2947 if (dp_rx_pdev_attach(pdev)) {
Yun Parkfde6b9e2017-06-26 17:13:11 -07002948 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Yun Park601d0d82017-08-28 21:49:31 -07002949 FL("dp_rx_pdev_attach failed"));
Yun Parkfde6b9e2017-06-26 17:13:11 -07002950 goto fail0;
Leo Chang5ea93a42016-11-03 12:39:49 -07002951 }
Ishank Jainbc2d91f2017-01-03 18:14:54 +05302952 DP_STATS_INIT(pdev);
Leo Chang5ea93a42016-11-03 12:39:49 -07002953
nobeljd124b742017-10-16 11:59:12 -07002954 /* Monitor filter init */
2955 pdev->mon_filter_mode = MON_FILTER_ALL;
2956 pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
2957 pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
2958 pdev->fp_data_filter = FILTER_DATA_ALL;
2959 pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
2960 pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
2961 pdev->mo_data_filter = FILTER_DATA_ALL;
2962
Leo Chang5ea93a42016-11-03 12:39:49 -07002963 dp_local_peer_id_pool_init(pdev);
Sravan Kumar Kairamf1e07662018-06-18 21:36:14 +05302964
Ishank Jain949674c2017-02-27 17:09:29 +05302965 dp_dscp_tid_map_setup(pdev);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002966
Kai Chen6eca1a62017-01-12 10:17:53 -08002967 /* Rx monitor mode specific init */
2968 if (dp_rx_pdev_mon_attach(pdev)) {
2969 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +05302970 "dp_rx_pdev_attach failed");
Keyur Parekhfad6d082017-05-07 08:54:47 -07002971 goto fail1;
2972 }
2973
2974 if (dp_wdi_event_attach(pdev)) {
2975 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +05302976 "dp_wdi_evet_attach failed");
Keyur Parekhfad6d082017-05-07 08:54:47 -07002977 goto fail1;
Kai Chen6eca1a62017-01-12 10:17:53 -08002978 }
2979
Om Prakash Tripathia7fb93f2017-06-27 18:41:41 +05302980 /* set the reo destination during initialization */
2981 pdev->reo_dest = pdev->pdev_id + 1;
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302982
Anish Natarajb9e7d012018-02-16 00:38:10 +05302983 /*
2984 * initialize ppdu tlv list
2985 */
2986 TAILQ_INIT(&pdev->ppdu_info_list);
2987 pdev->tlv_count = 0;
2988 pdev->list_depth = 0;
2989
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002990 return (struct cdp_pdev *)pdev;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002991
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302992fail1:
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002993 dp_pdev_detach_wifi3((struct cdp_pdev *)pdev, 0);
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302994
2995fail0:
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002996 return NULL;
2997}
2998
/*
* dp_rxdma_ring_cleanup() - clean up the host2FW RX DMA buffer rings
* @soc: data path SoC handle
* @pdev: Physical device handle
*
* Releases every per-MAC RX buffer ring of the pdev and frees the
* monitor reap timer that dp_rxdma_ring_config() initialized under the
* same QCA_HOST2FW_RXBUF_RING configuration.  Compiled to an empty stub
* when the target does not use host2FW RX buffer rings.
*
* Return: void
*/
#ifdef QCA_HOST2FW_RXBUF_RING
static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
	 struct dp_pdev *pdev)
{
	int i;

	/*
	 * Clean up all MAX_RX_MAC_RINGS entries, matching the original
	 * loop bound.  The previously computed/clamped ring count from
	 * wlan_cfg_get_num_mac_rings() was never used by this loop, so
	 * that dead computation has been removed.
	 */
	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
			 RXDMA_BUF, 1);

	/* Counterpart of qdf_timer_init() in dp_rxdma_ring_config() */
	qdf_timer_free(&soc->mon_reap_timer);
}
#else
static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
	 struct dp_pdev *pdev)
{
}
#endif
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05303028
3029/*
3030 * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
3031 * @pdev: device object
3032 *
3033 * Return: void
3034 */
3035static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
3036{
3037 struct dp_neighbour_peer *peer = NULL;
3038 struct dp_neighbour_peer *temp_peer = NULL;
3039
3040 TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
3041 neighbour_peer_list_elem, temp_peer) {
3042 /* delete this peer from the list */
3043 TAILQ_REMOVE(&pdev->neighbour_peers_list,
3044 peer, neighbour_peer_list_elem);
3045 qdf_mem_free(peer);
3046 }
3047
3048 qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
3049}
3050
Anish Natarajcf526b72018-03-26 15:55:30 +05303051/**
3052* dp_htt_ppdu_stats_detach() - detach stats resources
3053* @pdev: Datapath PDEV handle
3054*
3055* Return: void
3056*/
3057static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
3058{
3059 struct ppdu_info *ppdu_info, *ppdu_info_next;
3060
3061 TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3062 ppdu_info_list_elem, ppdu_info_next) {
3063 if (!ppdu_info)
3064 break;
3065 qdf_assert_always(ppdu_info->nbuf);
3066 qdf_nbuf_free(ppdu_info->nbuf);
3067 qdf_mem_free(ppdu_info);
3068 }
3069}
3070
#ifndef QCA_WIFI_QCA6390
/*
 * dp_mon_ring_deinit() - free the monitor mode rings of one MAC
 * @soc: data path SoC handle
 * @pdev: Physical device handle
 * @mac_id: index into the per-pdev monitor ring arrays
 *
 * Releases the monitor buffer, destination, status and descriptor
 * SRNGs plus the RXDMA error destination ring for the given MAC.
 * Compiled to an empty stub for QCA6390 (see the #else branch).
 *
 * Return: void
 */
static
void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
			int mac_id)
{
	dp_srng_cleanup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
		RXDMA_MONITOR_BUF, 0);
	dp_srng_cleanup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
		RXDMA_MONITOR_DST, 0);

	dp_srng_cleanup(soc, &pdev->rxdma_mon_status_ring[mac_id],
		RXDMA_MONITOR_STATUS, 0);

	dp_srng_cleanup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
		RXDMA_MONITOR_DESC, 0);
	dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
		RXDMA_DST, 0);
}
#else
/* QCA6390: monitor ring cleanup is a no-op for this target */
static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
			       int mac_id)
{
}
#endif
3095
/*
* dp_pdev_detach_wifi3() - detach txrx pdev
* @txrx_pdev: Datapath PDEV handle
* @force: Force detach -- NOTE(review): not referenced in this body;
*         confirm whether it is still needed by the cdp interface
*
* Tears down pdev state in order: WDI events, Tx, per-pdev TCL/WBM
* rings, pktlog, Rx and Rx-monitor state, neighbour peers, locks,
* IPA, per-pdev REO ring, refill/RXDMA/monitor rings, queued
* invalid-peer MSDUs and PPDU stats, then unhooks the pdev from the
* soc and frees it.  The ordering is deliberate; reorder with care.
*
* Return: void
*/
static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	struct dp_soc *soc = pdev->soc;
	qdf_nbuf_t curr_nbuf, next_nbuf;
	int mac_id;

	dp_wdi_event_detach(pdev);

	dp_tx_pdev_detach(pdev);

	/* Per-pdev Tx rings exist only in this configuration */
	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
			TCL_DATA, pdev->pdev_id);
		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
			WBM2SW_RELEASE, pdev->pdev_id);
	}

	dp_pktlogmod_exit(pdev);

	dp_rx_pdev_detach(pdev);
	dp_rx_pdev_mon_detach(pdev);
	dp_neighbour_peers_detach(pdev);
	qdf_spinlock_destroy(&pdev->tx_mutex);
	qdf_spinlock_destroy(&pdev->vdev_list_lock);

	dp_ipa_uc_detach(soc, pdev);

	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);

	/* Cleanup per PDEV REO rings if configured */
	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
			REO_DST, pdev->pdev_id);
	}

	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);

	dp_rxdma_ring_cleanup(soc, pdev);

	/* Monitor rings and error-destination ring, one set per MAC */
	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		dp_mon_ring_deinit(soc, pdev, mac_id);
		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
			RXDMA_DST, 0);
	}

	/* Free MSDUs still queued from invalid/unknown peers */
	curr_nbuf = pdev->invalid_peer_head_msdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	dp_htt_ppdu_stats_detach(pdev);

	/* Unhook from the soc before freeing the pdev itself */
	soc->pdev_list[pdev->pdev_id] = NULL;
	soc->pdev_count--;
	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
	qdf_mem_free(pdev->dp_txrx_handle);
	qdf_mem_free(pdev);
}
3163
3164/*
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08003165 * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
3166 * @soc: DP SOC handle
3167 */
3168static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
3169{
3170 struct reo_desc_list_node *desc;
3171 struct dp_rx_tid *rx_tid;
3172
3173 qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
3174 while (qdf_list_remove_front(&soc->reo_desc_freelist,
3175 (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
3176 rx_tid = &desc->rx_tid;
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08003177 qdf_mem_unmap_nbytes_single(soc->osdev,
Pramod Simha6b23f752017-03-30 11:54:18 -07003178 rx_tid->hw_qdesc_paddr,
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08003179 QDF_DMA_BIDIRECTIONAL,
3180 rx_tid->hw_qdesc_alloc_size);
3181 qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08003182 qdf_mem_free(desc);
3183 }
3184 qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
3185 qdf_list_destroy(&soc->reo_desc_freelist);
3186 qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
3187}
3188
/*
 * dp_soc_detach_wifi3() - Detach txrx SOC
 * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
 *
 * Stops stats work and timers, force-detaches every remaining pdev,
 * then frees all SOC-level rings, locks and contexts.  Teardown order
 * is deliberate (pdevs before the common rings they reference);
 * reorder with care.
 */
static void dp_soc_detach_wifi3(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;

	/* Mark common init invalid before tearing anything down */
	qdf_atomic_set(&soc->cmn_init_done, 0);

	/* Ensure no stats work is running or can be rescheduled */
	qdf_flush_work(&soc->htt_stats.work);
	qdf_disable_work(&soc->htt_stats.work);

	/* Free pending htt stats messages */
	qdf_nbuf_queue_free(&soc->htt_stats.msg);

	dp_free_inact_timer(soc);

	/* Force-detach (force=1) any pdevs still registered */
	for (i = 0; i < MAX_PDEV_CNT; i++) {
		if (soc->pdev_list[i])
			dp_pdev_detach_wifi3(
				(struct cdp_pdev *)soc->pdev_list[i], 1);
	}

	dp_peer_find_detach(soc);

	/* TBD: Call Tx and Rx cleanup functions to free buffers and
	 * SW descriptors
	 */

	/* Free the ring memories */
	/* Common rings */
	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);

	dp_tx_soc_detach(soc);
	/* Tx data rings (only when not per-pdev; per-pdev ones were
	 * already cleaned in dp_pdev_detach_wifi3)
	 */
	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		for (i = 0; i < soc->num_tcl_data_rings; i++) {
			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
				TCL_DATA, i);
			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
				WBM2SW_RELEASE, i);
		}
	}

	/* TCL command and status rings */
	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);

	/* Rx data rings */
	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
		soc->num_reo_dest_rings =
			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
		for (i = 0; i < soc->num_reo_dest_rings; i++) {
			/* TODO: Get number of rings and ring sizes
			 * from wlan_cfg
			 */
			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
				REO_DST, i);
		}
	}
	/* REO reinjection ring */
	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);

	/* Rx release ring */
	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);

	/* Rx exception ring */
	/* TODO: Better to store ring_type and ring_num in
	 * dp_srng during setup
	 */
	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);

	/* REO command and status rings */
	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
	dp_hw_link_desc_pool_cleanup(soc);

	qdf_spinlock_destroy(&soc->peer_ref_mutex);
	qdf_spinlock_destroy(&soc->htt_stats.lock);

	htt_soc_detach(soc->htt_handle);

	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);

	/* Pending REO commands, then the deferred descriptor freelist */
	dp_reo_cmdlist_destroy(soc);
	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
	dp_reo_desc_freelist_destroy(soc);

	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);

	dp_soc_wds_detach(soc);
	qdf_spinlock_destroy(&soc->ast_lock);

	qdf_mem_free(soc);
}
3286
#ifndef QCA_WIFI_QCA6390
/*
 * dp_mon_htt_srng_setup() - register monitor rings with the target
 * @soc: data path SoC handle
 * @pdev: Physical device handle
 * @mac_id: index into the per-pdev monitor ring arrays
 * @mac_for_pdev: MAC id to use in the HTT SRNG setup messages
 *
 * Sends HTT SRNG setup for the monitor buffer, destination, status
 * and descriptor rings of one MAC.  Empty stub for QCA6390 (see the
 * #else branch).
 *
 * Return: void
 */
static void dp_mon_htt_srng_setup(struct dp_soc *soc,
				  struct dp_pdev *pdev,
				  int mac_id,
				  int mac_for_pdev)
{
	htt_srng_setup(soc->htt_handle, mac_for_pdev,
		       pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
		       RXDMA_MONITOR_BUF);

	htt_srng_setup(soc->htt_handle, mac_for_pdev,
		       pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
		       RXDMA_MONITOR_DST);

	htt_srng_setup(soc->htt_handle, mac_for_pdev,
		       pdev->rxdma_mon_status_ring[mac_id].hal_srng,
		       RXDMA_MONITOR_STATUS);

	htt_srng_setup(soc->htt_handle, mac_for_pdev,
		       pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
		       RXDMA_MONITOR_DESC);
}
#else
/* QCA6390: monitor ring HTT setup is a no-op for this target */
static void dp_mon_htt_srng_setup(struct dp_soc *soc,
				  struct dp_pdev *pdev,
				  int mac_id,
				  int mac_for_pdev)
{
}
#endif
/*
 * dp_rxdma_ring_config() - configure the RX DMA rings
 *
 * This function is used to configure the MAC rings.
 * On MCL host provides buffers in Host2FW ring
 * FW refills (copies) buffers to the ring and updates
 * ring_idx in register
 *
 * @soc: data path SoC handle
 *
 * Return: void
 */
#ifdef QCA_HOST2FW_RXBUF_RING
static void dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (pdev) {
			int mac_id;
			bool dbs_enable = 0;
			int max_mac_rings =
				wlan_cfg_get_num_mac_rings
				(pdev->wlan_cfg_ctx);

			/* Refill ring is common to all MACs: mac_id 0 */
			htt_srng_setup(soc->htt_handle, 0,
				pdev->rx_refill_buf_ring.hal_srng,
				RXDMA_BUF);

			/* Second refill ring is optional (e.g. IPA) */
			if (pdev->rx_refill_buf_ring2.hal_srng)
				htt_srng_setup(soc->htt_handle, 0,
					pdev->rx_refill_buf_ring2.hal_srng,
					RXDMA_BUF);

			/*
			 * Query the control plane (when the callback is
			 * registered) for 2x2 DBS capability; without
			 * DBS only one MAC ring is programmed.
			 */
			if (soc->cdp_soc.ol_ops->
				is_hw_dbs_2x2_capable) {
				dbs_enable = soc->cdp_soc.ol_ops->
					is_hw_dbs_2x2_capable(soc->ctrl_psoc);
			}

			if (dbs_enable) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
				QDF_TRACE_LEVEL_ERROR,
					FL("DBS enabled max_mac_rings %d"),
					 max_mac_rings);
			} else {
				max_mac_rings = 1;
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					 QDF_TRACE_LEVEL_ERROR,
					 FL("DBS disabled, max_mac_rings %d"),
					 max_mac_rings);
			}

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
					 FL("pdev_id %d max_mac_rings %d"),
					 pdev->pdev_id, max_mac_rings);

			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev = dp_get_mac_id_for_pdev(
							mac_id, pdev->pdev_id);

				QDF_TRACE(QDF_MODULE_ID_TXRX,
					 QDF_TRACE_LEVEL_ERROR,
					 FL("mac_id %d"), mac_for_pdev);
				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					 pdev->rx_mac_buf_ring[mac_id]
						.hal_srng,
					 RXDMA_BUF);
				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					pdev->rxdma_err_dst_ring[mac_id]
						.hal_srng,
					RXDMA_DST);

				/* Configure monitor mode rings */
				dp_mon_htt_srng_setup(soc, pdev, mac_id,
						      mac_for_pdev);

			}
		}
	}

	/*
	 * Timer to reap rxdma status rings.
	 * Needed until we enable ppdu end interrupts
	 */
	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
			dp_service_mon_rings, (void *)soc,
			QDF_TIMER_TYPE_WAKE_APPS);
	soc->reap_timer_init = 1;
}
#else
/* This is only for WIN */
static void dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	int mac_id;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (pdev == NULL)
			continue;

		/* Program refill, monitor and error rings for each MAC */
		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);

			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);

			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
				RXDMA_MONITOR_BUF);
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
				RXDMA_MONITOR_DST);
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rxdma_mon_status_ring[mac_id].hal_srng,
				RXDMA_MONITOR_STATUS);
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
				RXDMA_MONITOR_DESC);
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rxdma_err_dst_ring[mac_id].hal_srng,
				RXDMA_DST);
		}
	}
}
#endif
3447
3448/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003449 * dp_soc_attach_target_wifi3() - SOC initialization in the target
3450 * @txrx_soc: Datapath SOC handle
3451 */
Jeff Johnsonf1352572017-01-10 14:24:10 -08003452static int dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003453{
Leo Chang5ea93a42016-11-03 12:39:49 -07003454 struct dp_soc *soc = (struct dp_soc *)cdp_soc;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003455
3456 htt_soc_attach_target(soc->htt_handle);
3457
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08003458 dp_rxdma_ring_config(soc);
Dhanashri Atred4032ab2017-01-17 15:05:41 -08003459
Ishank Jainbc2d91f2017-01-03 18:14:54 +05303460 DP_STATS_INIT(soc);
Om Prakash Tripathi2cd7fab2017-07-07 20:27:25 +05303461
3462 /* initialize work queue for stats processing */
Om Prakash Tripathi12126822017-08-03 10:21:24 +05303463 qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
Om Prakash Tripathi2cd7fab2017-07-07 20:27:25 +05303464
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003465 return 0;
3466}
3467
3468/*
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05303469 * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
3470 * @txrx_soc: Datapath SOC handle
3471 */
3472static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
3473{
3474 struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3475 return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
3476}
3477/*
3478 * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
3479 * @txrx_soc: Datapath SOC handle
3480 * @nss_cfg: nss config
3481 */
3482static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
3483{
3484 struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
Aniruddha Paul9d7dc272018-02-11 19:40:41 +05303485 struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
3486
3487 wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
3488
3489 /*
3490 * TODO: masked out based on the per offloaded radio
3491 */
3492 if (config == dp_nss_cfg_dbdc) {
3493 wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
3494 wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
3495 wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
3496 wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
3497 }
3498
Aditya Sathishded018e2018-07-02 16:25:21 +05303499 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3500 FL("nss-wifi<0> nss config is enabled"));
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05303501}
/*
* dp_vdev_attach_wifi3() - attach txrx vdev
* @txrx_pdev: Datapath PDEV handle
* @vdev_mac_addr: MAC address of the virtual interface
* @vdev_id: VDEV Id
* @op_mode: VDEV operating mode
*
* Allocates and initializes a DP vdev, links it into the pdev's vdev
* list, attaches Tx state, optionally starts the poll-mode interrupt
* timer, sets up LRO, and for STA mode creates the self peer.
*
* Return: DP VDEV handle on success, NULL on failure
*/
static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("DP VDEV memory allocation failed"));
		goto fail0;
	}

	vdev->pdev = pdev;
	vdev->vdev_id = vdev_id;
	vdev->opmode = op_mode;
	vdev->osdev = soc->osdev;

	/* OSIF callbacks are filled in later via dp_vdev_register_wifi3 */
	vdev->osif_rx = NULL;
	vdev->osif_rsim_rx_decap = NULL;
	vdev->osif_get_key = NULL;
	vdev->osif_rx_mon = NULL;
	vdev->osif_tx_free_ext = NULL;
	vdev->osif_vdev = NULL;

	vdev->delete.pending = 0;
	vdev->safemode = 0;
	vdev->drop_unenc = 1;	/* drop unencrypted frames by default */
	vdev->sec_type = cdp_sec_type_none;
#ifdef notyet
	vdev->filters_num = 0;
#endif

	qdf_mem_copy(
		&vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);

	/* Encap/decap types come from the SOC-level packet type config */
	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->dscp_tid_map_id = 0;
	vdev->mcast_enhancement_en = 0;

	/* TODO: Initialize default HTT meta data that will be used in
	 * TCL descriptors for packets transmitted from this VDEV
	 */

	TAILQ_INIT(&vdev->peer_list);

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	/* add this vdev into the pdev's list */
	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	pdev->vdev_count++;

	dp_tx_vdev_attach(vdev);


	/* In poll mode, kick the interrupt timer when the first vdev of
	 * the pdev appears
	 */
	if ((soc->intr_mode == DP_INTR_POLL) &&
			wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
		if (pdev->vdev_count == 1)
			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
	}

	dp_lro_hash_setup(soc);

	/* LRO: enabled only for STA mode when the SOC config allows it */
	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
		wlan_op_mode_sta == vdev->opmode)
		vdev->lro_enable = true;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		 "LRO: vdev_id %d lro_enable %d", vdev_id, vdev->lro_enable);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
	DP_STATS_INIT(vdev);

	/* STA mode: create the self (bss) peer on the vdev's own MAC */
	if (wlan_op_mode_sta == vdev->opmode)
		dp_peer_create_wifi3((struct cdp_vdev *)vdev,
							vdev->mac_addr.raw,
							NULL);

	return (struct cdp_vdev *)vdev;

fail0:
	return NULL;
}
3597
/**
 * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
 * @vdev_handle: Datapath VDEV handle
 * @osif_vdev: OSIF vdev handle
 * @ctrl_vdev: UMAC vdev handle
 * @txrx_ops: Tx and Rx operations; on return the Tx entry points are
 *            filled in by the DP layer for the caller to use
 *
 * Return: void
 */
static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
	void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
	struct ol_txrx_ops *txrx_ops)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	/* Cache the OSIF handles and Rx-side callbacks on the vdev */
	vdev->osif_vdev = osif_vdev;
	vdev->ctrl_vdev = ctrl_vdev;
	vdev->osif_rx = txrx_ops->rx.rx;
	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
	vdev->osif_get_key = txrx_ops->get_key;
	vdev->osif_rx_mon = txrx_ops->rx.mon;
	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
#ifdef notyet
#if ATH_SUPPORT_WAPI
	vdev->osif_check_wai = txrx_ops->rx.wai_check;
#endif
#endif
#ifdef UMAC_SUPPORT_PROXY_ARP
	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
#endif
	vdev->me_convert = txrx_ops->me_convert;

	/* Export the DP Tx entry points back to the caller; mesh vdevs
	 * use the mesh-aware send path
	 */
	if (vdev->mesh_vdev)
		txrx_ops->tx.tx = dp_tx_send_mesh;
	else
		txrx_ops->tx.tx = dp_tx_send;

	txrx_ops->tx.tx_exception = dp_tx_send_exception;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
		"DP Vdev Register success");
}
3640
Vinay Adella4ca1bf62018-02-26 11:03:05 +05303641/**
3642 * dp_vdev_flush_peers() - Forcibily Flush peers of vdev
3643 * @vdev: Datapath VDEV handle
3644 *
3645 * Return: void
3646 */
3647static void dp_vdev_flush_peers(struct dp_vdev *vdev)
3648{
3649 struct dp_pdev *pdev = vdev->pdev;
3650 struct dp_soc *soc = pdev->soc;
3651 struct dp_peer *peer;
3652 uint16_t *peer_ids;
3653 uint8_t i = 0, j = 0;
3654
3655 peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
3656 if (!peer_ids) {
3657 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3658 "DP alloc failure - unable to flush peers");
3659 return;
3660 }
3661
3662 qdf_spin_lock_bh(&soc->peer_ref_mutex);
3663 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3664 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3665 if (peer->peer_ids[i] != HTT_INVALID_PEER)
3666 if (j < soc->max_peers)
3667 peer_ids[j++] = peer->peer_ids[i];
3668 }
3669 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3670
3671 for (i = 0; i < j ; i++)
3672 dp_rx_peer_unmap_handler(soc, peer_ids[i]);
3673
3674 qdf_mem_free(peer_ids);
3675
3676 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3677 FL("Flushed peers for vdev object %pK "), vdev);
3678}
3679
/*
 * dp_vdev_detach_wifi3() - Detach txrx vdev
 * @txrx_vdev: Datapath VDEV handle
 * @callback: Callback OL_IF on completion of detach
 * @cb_context: Callback context
 *
 * Unlinks the vdev from its pdev and frees it once all of its peers are
 * gone. If peers still exist, the free is deferred: the callback is
 * stashed in vdev->delete and invoked later when the last peer is
 * released (presumably from the peer-unref path — confirm against
 * dp_peer_unref_delete()).
 */
static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
	ol_txrx_vdev_delete_cb callback, void *cb_context)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_neighbour_peer *peer = NULL;

	/* preconditions */
	qdf_assert(vdev);

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	/* remove the vdev from its parent pdev's list */
	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	/* an STA vdev owns a self/BSS peer; start its deletion first */
	if (wlan_op_mode_sta == vdev->opmode)
		dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);

	/*
	 * If Target is hung, flush all peers before detaching vdev
	 * this will free all references held due to missing
	 * unmap commands from Target
	 */
	if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
		dp_vdev_flush_peers(vdev);

	/*
	 * Use peer_ref_mutex while accessing peer_list, in case
	 * a peer is in the process of being removed from the list.
	 */
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	/* check that the vdev has no peers allocated */
	if (!TAILQ_EMPTY(&vdev->peer_list)) {
		/* debug print - will be removed later */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
			FL("not deleting vdev object %pK (%pM)"
			"until deletion finishes for all its peers"),
			vdev, vdev->mac_addr.raw);
		/* indicate that the vdev needs to be deleted */
		vdev->delete.pending = 1;
		vdev->delete.callback = callback;
		vdev->delete.context = cb_context;
		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
		return;
	}
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);

	/* sanity: no neighbour (NAC) entry may still reference this vdev */
	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
		      neighbour_peer_list_elem) {
		QDF_ASSERT(peer->vdev != vdev);
	}
	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

	/* release tx-side resources before freeing the vdev itself */
	dp_tx_vdev_detach(vdev);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);

	qdf_mem_free(vdev);

	if (callback)
		callback(cb_context);
}
3751
/*
 * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
 * @soc - datapath soc handle
 * @peer - datapath peer handle
 *
 * Delete the AST entries belonging to a peer; the peer's AST list and
 * its cached self entry are reset so the peer can be reused cleanly.
 * Compiled to a no-op when FEATURE_AST is disabled.
 */
#ifdef FEATURE_AST
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
		struct dp_peer *peer)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	/* whole walk is done under ast_lock; the iterate macro takes a
	 * temp cursor, presumably to allow deletion mid-walk — see its
	 * definition in dp_peer.h to confirm
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
		dp_peer_del_ast(soc, ast_entry);

	/* the self entry was on the list just deleted; drop the stale ref */
	peer->self_ast_entry = NULL;
	TAILQ_INIT(&peer->ast_entry_list);
	qdf_spin_unlock_bh(&soc->ast_lock);
}
#else
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
		struct dp_peer *peer)
{
}
#endif
3779
/*
 * dp_peer_can_reuse() - look up an existing peer eligible for reuse
 * @vdev: Datapath VDEV handle
 * @peer_mac_addr: MAC address of the peer being (re)created
 *
 * dp_peer_find_hash_find() returns the peer with a reference held; if
 * the peer does not qualify for reuse that reference is dropped here
 * and NULL is returned. Only bss peers qualify; with ATH_SUPPORT_WRAP
 * (QWRAP) the vdev_id match is relaxed, since the same MAC can appear
 * on multiple proxy vdevs.
 *
 * Return: referenced dp_peer on success, NULL otherwise
 */
#if ATH_SUPPORT_WRAP
static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
		uint8_t *peer_mac_addr)
{
	struct dp_peer *peer;

	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
			0, vdev->vdev_id);
	if (!peer)
		return NULL;

	if (peer->bss_peer)
		return peer;

	/* not reusable: release the reference taken by hash_find */
	qdf_atomic_dec(&peer->ref_cnt);
	return NULL;
}
#else
static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
		uint8_t *peer_mac_addr)
{
	struct dp_peer *peer;

	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
			0, vdev->vdev_id);
	if (!peer)
		return NULL;

	/* reuse only a bss peer that belongs to this very vdev */
	if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
		return peer;

	/* not reusable: release the reference taken by hash_find */
	qdf_atomic_dec(&peer->ref_cnt);
	return NULL;
}
#endif
3815
/*
 * dp_peer_create_wifi3() - attach txrx peer
 * @txrx_vdev: Datapath VDEV handle
 * @peer_mac_addr: Peer MAC address
 * @ctrl_peer: control-path (objmgr) peer handle stored on the DP peer
 *
 * Either reuses an existing bss peer with the same MAC (resetting its
 * state) or allocates and initializes a fresh dp_peer: AST entry, peer-id
 * slots, refcount, vdev peer list membership, hash table entry, per-TID
 * locks and local peer id.
 *
 * Return: DP peer handle on success, NULL on failure
 */
static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
	uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
{
	struct dp_peer *peer;
	int i;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	struct dp_ast_entry *ast_entry;
	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;

	/* preconditions */
	qdf_assert(vdev);
	qdf_assert(peer_mac_addr);

	pdev = vdev->pdev;
	soc = pdev->soc;

	/*
	 * If a peer entry with given MAC address already exists,
	 * reuse the peer and reset the state of peer.
	 */
	peer = dp_peer_can_reuse(vdev, peer_mac_addr);

	if (peer) {
		peer->delete_in_progress = false;

		/* wipe stale AST state before re-adding below */
		dp_peer_delete_ast_entries(soc, peer);

		/* STA self peer (MAC == vdev MAC) gets a SELF AST entry */
		if ((vdev->opmode == wlan_op_mode_sta) &&
		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
		     DP_MAC_ADDR_LEN)) {
			ast_type = CDP_TXRX_AST_TYPE_SELF;
		}

		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);

		/*
		 * Control path maintains a node count which is incremented
		 * for every new peer create command. Since new peer is not being
		 * created and earlier reference is reused here,
		 * peer_unref_delete event is sent to control path to
		 * increment the count back.
		 */
		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
				vdev->vdev_id, peer->mac_addr.raw);
		}
		peer->ctrl_peer = ctrl_peer;

		dp_local_peer_id_alloc(pdev, peer);
		DP_STATS_INIT(peer);

		/* reference from dp_peer_can_reuse() is handed to the caller */
		return (void *)peer;
	} else {
		/*
		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
		 * need to remove the AST entry which was earlier added as a WDS
		 * entry.
		 * If an AST entry exists, but no peer entry exists with a given
		 * MAC addresses, we could deduce it as a WDS entry
		 */
		ast_entry = dp_peer_ast_hash_find(soc, peer_mac_addr);
		if (ast_entry)
			dp_peer_del_ast(soc, ast_entry);
	}

#ifdef notyet
	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
		soc->mempool_ol_ath_peer);
#else
	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
#endif

	if (!peer)
		return NULL; /* failure */

	qdf_mem_zero(peer, sizeof(struct dp_peer));

	TAILQ_INIT(&peer->ast_entry_list);

	/* store provided params */
	peer->vdev = vdev;
	peer->ctrl_peer = ctrl_peer;

	/* same SELF-AST special case as in the reuse path above */
	if ((vdev->opmode == wlan_op_mode_sta) &&
	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
	     DP_MAC_ADDR_LEN)) {
		ast_type = CDP_TXRX_AST_TYPE_SELF;
	}

	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);

	qdf_spinlock_create(&peer->peer_info_lock);

	/* NOTE(review): OL_TXRX_MAC_ADDR_LEN is presumably 6, same as
	 * DP_MAC_ADDR_LEN used elsewhere in this function — confirm
	 */
	qdf_mem_copy(
		&peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);

	/* TODO: See of rx_opt_proc is really required */
	peer->rx_opt_proc = soc->rx_opt_proc;

	/* initialize the peer_id */
	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
		peer->peer_ids[i] = HTT_INVALID_PEER;

	qdf_spin_lock_bh(&soc->peer_ref_mutex);

	qdf_atomic_init(&peer->ref_cnt);

	/* keep one reference for attach */
	qdf_atomic_inc(&peer->ref_cnt);

	/* add this peer into the vdev's list */
	if (wlan_op_mode_sta == vdev->opmode)
		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
	else
		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);

	qdf_spin_unlock_bh(&soc->peer_ref_mutex);

	/* TODO: See if hash based search is required */
	dp_peer_find_hash_add(soc, peer);

	/* Initialize the peer state */
	peer->state = OL_TXRX_PEER_STATE_DISC;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		"vdev %pK created peer %pK (%pM) ref_cnt: %d",
		vdev, peer, peer->mac_addr.raw,
		qdf_atomic_read(&peer->ref_cnt));
	/*
	 * For every peer MAp message search and set if bss_peer
	 * (magic 6 is the MAC address length — candidate for
	 * DP_MAC_ADDR_LEN)
	 */
	if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			"vdev bss_peer!!!!");
		peer->bss_peer = 1;
		vdev->vap_bss_peer = peer;
	}
	for (i = 0; i < DP_MAX_TIDS; i++)
		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);

	dp_local_peer_id_alloc(pdev, peer);
	DP_STATS_INIT(peer);
	return (void *)peer;
}
3969
/*
 * dp_peer_setup_wifi3() - initialize the peer
 * @vdev_hdl: virtual device object
 * @peer_hdl: Peer object
 *
 * Programs the default rx routing for the peer (hash-based steering and
 * REO destination ring) via the control-path ops, then initializes the
 * peer's rx side.
 *
 * Return: void
 */
static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
{
	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	bool hash_based = 0;
	enum cdp_host_reo_dest_ring reo_dest;

	/* preconditions */
	qdf_assert(vdev);
	qdf_assert(peer);

	pdev = vdev->pdev;
	soc = pdev->soc;

	/* reset management-frame bookkeeping for a (re)attached peer */
	peer->last_assoc_rcvd = 0;
	peer->last_disassoc_rcvd = 0;
	peer->last_deauth_rcvd = 0;

	/*
	 * hash based steering is disabled for Radios which are offloaded
	 * to NSS
	 */
	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
		hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		FL("hash based steering for pdev: %d is %d"),
		pdev->pdev_id, hash_based);

	/*
	 * Below line of code will ensure the proper reo_dest ring is chosen
	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
	 */
	reo_dest = pdev->reo_dest;

	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
		/* TODO: Check the destination ring number to be passed to FW */
		soc->cdp_soc.ol_ops->peer_set_default_routing(
			pdev->ctrl_pdev, peer->mac_addr.raw,
			peer->vdev->vdev_id, hash_based, reo_dest);
	}

	dp_peer_rx_init(pdev, peer);
	return;
}
4024
4025/*
Kalyan Tallapragada277f45e2017-01-30 14:25:27 +05304026 * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
4027 * @vdev_handle: virtual device object
4028 * @htt_pkt_type: type of pkt
4029 *
4030 * Return: void
4031 */
4032static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
4033 enum htt_cmn_pkt_type val)
4034{
4035 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4036 vdev->tx_encap_type = val;
4037}
4038
4039/*
4040 * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
4041 * @vdev_handle: virtual device object
4042 * @htt_pkt_type: type of pkt
4043 *
4044 * Return: void
4045 */
4046static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
4047 enum htt_cmn_pkt_type val)
4048{
4049 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4050 vdev->rx_decap_type = val;
4051}
4052
4053/*
Tallapragada Kalyanfd1edcc2017-03-07 19:34:29 +05304054 * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
4055 * @pdev_handle: physical device object
4056 * @val: reo destination ring index (1 - 4)
4057 *
4058 * Return: void
4059 */
4060static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
4061 enum cdp_host_reo_dest_ring val)
4062{
4063 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4064
4065 if (pdev)
4066 pdev->reo_dest = val;
4067}
4068
4069/*
4070 * dp_get_pdev_reo_dest() - get the reo destination for this pdev
4071 * @pdev_handle: physical device object
4072 *
4073 * Return: reo destination ring index
4074 */
4075static enum cdp_host_reo_dest_ring
4076dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
4077{
4078 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4079
4080 if (pdev)
4081 return pdev->reo_dest;
4082 else
4083 return cdp_host_reo_dest_ring_unknown;
4084}
4085
#ifdef QCA_SUPPORT_SON
/* Reset the peer's inactivity tracking when it is (re)authorized */
static void dp_son_peer_authorize(struct dp_peer *peer)
{
	struct dp_soc *soc = peer->vdev->pdev->soc;

	peer->peer_bs_inact_flag = 0;
	peer->peer_bs_inact = soc->pdev_bs_inact_reload;
}
#else
/* Inactivity tracking is compiled out without QCA_SUPPORT_SON */
static void dp_son_peer_authorize(struct dp_peer *peer)
{
}
#endif
Tallapragada Kalyanfd1edcc2017-03-07 19:34:29 +05304101/*
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05304102 * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
4103 * @pdev_handle: device object
4104 * @val: value to be set
4105 *
4106 * Return: void
4107 */
4108static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
4109 uint32_t val)
4110{
4111 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4112
4113 /* Enable/Disable smart mesh filtering. This flag will be checked
4114 * during rx processing to check if packets are from NAC clients.
4115 */
4116 pdev->filter_neighbour_peers = val;
4117 return 0;
4118}
4119
/*
 * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
 * address for smart mesh filtering
 * @vdev_handle: virtual device object
 * @cmd: Add/Del command
 * @macaddr: nac client mac address
 *
 * Maintains the pdev-wide list of neighbour (NAC) MAC addresses that the
 * rx path matches against the TA of received frames. Also configures the
 * monitor/ppdu ring when the first entry is added and resets it when the
 * last one is removed (unless mcopy/enhanced-stats already need it).
 *
 * Return: 1 on success, 0 on failure (bad macaddr, alloc failure,
 *         unknown cmd)
 */
static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
		uint32_t cmd, uint8_t *macaddr)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_neighbour_peer *peer = NULL;

	if (!macaddr)
		goto fail0;

	/* Store address of NAC (neighbour peer) which will be checked
	 * against TA of received packets.
	 */
	if (cmd == DP_NAC_PARAM_ADD) {
		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
			sizeof(*peer));

		if (!peer) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("DP neighbour peer node memory allocation failed"));
			goto fail0;
		}

		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
			macaddr, DP_MAC_ADDR_LEN);
		peer->vdev = vdev;

		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);

		/* add this neighbour peer into the list */
		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
				neighbour_peer_list_elem);
		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

		/* first neighbour */
		if (!pdev->neighbour_peers_added) {
			/* ring already configured for mcopy/enhanced stats */
			if (!pdev->mcopy_mode && !pdev->enhanced_stats_en)
				dp_ppdu_ring_cfg(pdev);
			pdev->neighbour_peers_added = true;
		}
		return 1;

	} else if (cmd == DP_NAC_PARAM_DEL) {
		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
				neighbour_peer_list_elem) {
			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				macaddr, DP_MAC_ADDR_LEN)) {
				/* delete this peer from the list */
				TAILQ_REMOVE(&pdev->neighbour_peers_list,
					peer, neighbour_peer_list_elem);
				qdf_mem_free(peer);
				break;
			}
		}
		/* last neighbour deleted */
		if (TAILQ_EMPTY(&pdev->neighbour_peers_list))
			pdev->neighbour_peers_added = false;

		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

		/* keep ring if mcopy/enhanced stats still use it */
		if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
		    !pdev->enhanced_stats_en)
			dp_ppdu_ring_reset(pdev);
		return 1;

	}

fail0:
	return 0;
}
4200
4201/*
Chaitanya Kiran Godavarthi6228e3b2017-06-15 14:28:19 +05304202 * dp_get_sec_type() - Get the security type
4203 * @peer: Datapath peer handle
4204 * @sec_idx: Security id (mcast, ucast)
4205 *
4206 * return sec_type: Security type
4207 */
4208static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
4209{
4210 struct dp_peer *dpeer = (struct dp_peer *)peer;
4211
4212 return dpeer->security[sec_idx].sec_type;
4213}
4214
4215/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004216 * dp_peer_authorize() - authorize txrx peer
4217 * @peer_handle: Datapath peer handle
4218 * @authorize
4219 *
4220 */
c_cgodavbd5b3c22017-06-07 12:31:40 +05304221static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004222{
4223 struct dp_peer *peer = (struct dp_peer *)peer_handle;
4224 struct dp_soc *soc;
4225
4226 if (peer != NULL) {
4227 soc = peer->vdev->pdev->soc;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004228 qdf_spin_lock_bh(&soc->peer_ref_mutex);
Bharat Bhushan Chakravarty145d3932017-03-20 12:52:16 -07004229 dp_son_peer_authorize(peer);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004230 peer->authorize = authorize ? 1 : 0;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004231 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4232 }
4233}
4234
#ifdef QCA_SUPPORT_SON
/*
 * dp_txrx_update_inact_threshold() - Update inact timer threshold
 * @pdev_handle: Device handle
 * @new_threshold : updated threshold value
 *
 * Rebases every authorized AP-vdev peer's remaining inactivity budget
 * onto the new threshold: peers whose elapsed inactivity already meets
 * the new threshold are marked inactive immediately, the rest keep the
 * same elapsed time under the new scale.
 */
static void
dp_txrx_update_inact_threshold(struct cdp_pdev *pdev_handle,
		u_int16_t new_threshold)
{
	struct dp_vdev *vdev;
	struct dp_peer *peer;
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_soc *soc = pdev->soc;
	u_int16_t old_threshold = soc->pdev_bs_inact_reload;

	if (old_threshold == new_threshold)
		return;

	soc->pdev_bs_inact_reload = new_threshold;

	/* hold both locks: peer lists and the pdev vdev list are walked */
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		if (vdev->opmode != wlan_op_mode_ap)
			continue;

		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
			if (!peer->authorize)
				continue;

			/* elapsed = old_threshold - peer_bs_inact; the
			 * counter counts down from the reload value
			 */
			if (old_threshold - peer->peer_bs_inact >=
					new_threshold) {
				dp_mark_peer_inact((void *)peer, true);
				peer->peer_bs_inact = 0;
			} else {
				/* preserve elapsed time under new scale */
				peer->peer_bs_inact = new_threshold -
					(old_threshold - peer->peer_bs_inact);
			}
		}
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
}
4280
/**
 * dp_txrx_reset_inact_count(): Reset inact count
 * @pdev_handle - device handle
 *
 * Reloads the inactivity countdown of every authorized peer on every
 * AP vdev of the pdev back to the configured reload value; called when
 * the inactivity timer is (re)started.
 *
 * Return: void
 */
static void
dp_txrx_reset_inact_count(struct cdp_pdev *pdev_handle)
{
	struct dp_vdev *vdev = NULL;
	struct dp_peer *peer = NULL;
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_soc *soc = pdev->soc;

	/* hold both locks: peer lists and the pdev vdev list are walked */
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		if (vdev->opmode != wlan_op_mode_ap)
			continue;

		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
			if (!peer->authorize)
				continue;

			peer->peer_bs_inact = soc->pdev_bs_inact_reload;
		}
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
}
4311
4312/**
4313 * dp_set_inact_params(): set inactivity params
4314 * @pdev_handle - device handle
4315 * @inact_check_interval - inactivity interval
4316 * @inact_normal - Inactivity normal
4317 * @inact_overload - Inactivity overload
4318 *
4319 * Return: bool
4320 */
4321bool dp_set_inact_params(struct cdp_pdev *pdev_handle,
4322 u_int16_t inact_check_interval,
4323 u_int16_t inact_normal, u_int16_t inact_overload)
4324{
4325 struct dp_soc *soc;
4326 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4327
4328 if (!pdev)
4329 return false;
4330
4331 soc = pdev->soc;
4332 if (!soc)
4333 return false;
4334
4335 soc->pdev_bs_inact_interval = inact_check_interval;
4336 soc->pdev_bs_inact_normal = inact_normal;
4337 soc->pdev_bs_inact_overload = inact_overload;
4338
4339 dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
4340 soc->pdev_bs_inact_normal);
4341
4342 return true;
4343}
4344
4345/**
4346 * dp_start_inact_timer(): Inactivity timer start
4347 * @pdev_handle - device handle
4348 * @enable - Inactivity timer start/stop
4349 *
4350 * Return: bool
4351 */
4352bool dp_start_inact_timer(struct cdp_pdev *pdev_handle, bool enable)
4353{
4354 struct dp_soc *soc;
4355 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4356
4357 if (!pdev)
4358 return false;
4359
4360 soc = pdev->soc;
4361 if (!soc)
4362 return false;
4363
4364 if (enable) {
4365 dp_txrx_reset_inact_count((struct cdp_pdev *)pdev);
4366 qdf_timer_mod(&soc->pdev_bs_inact_timer,
4367 soc->pdev_bs_inact_interval * 1000);
4368 } else {
4369 qdf_timer_stop(&soc->pdev_bs_inact_timer);
4370 }
4371
4372 return true;
4373}
4374
4375/**
4376 * dp_set_overload(): Set inactivity overload
4377 * @pdev_handle - device handle
4378 * @overload - overload status
4379 *
4380 * Return: void
4381 */
4382void dp_set_overload(struct cdp_pdev *pdev_handle, bool overload)
4383{
4384 struct dp_soc *soc;
4385 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4386
4387 if (!pdev)
4388 return;
4389
4390 soc = pdev->soc;
4391 if (!soc)
4392 return;
4393
4394 dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
4395 overload ? soc->pdev_bs_inact_overload :
4396 soc->pdev_bs_inact_normal);
4397}
4398
4399/**
4400 * dp_peer_is_inact(): check whether peer is inactive
4401 * @peer_handle - datapath peer handle
4402 *
4403 * Return: bool
4404 */
4405bool dp_peer_is_inact(void *peer_handle)
4406{
4407 struct dp_peer *peer = (struct dp_peer *)peer_handle;
4408
4409 if (!peer)
4410 return false;
4411
4412 return peer->peer_bs_inact_flag == 1;
4413}
4414
/**
 * dp_init_inact_timer: initialize the inact timer
 * @soc - SOC handle
 *
 * Creates (but does not arm) the per-soc inactivity timer; it is armed
 * and disarmed from dp_start_inact_timer() and fires
 * dp_txrx_peer_find_inact_timeout_handler with the soc as context.
 *
 * Return: void
 */
void dp_init_inact_timer(struct dp_soc *soc)
{
	qdf_timer_init(soc->osdev, &soc->pdev_bs_inact_timer,
		dp_txrx_peer_find_inact_timeout_handler,
		(void *)soc, QDF_TIMER_TYPE_WAKE_APPS);
}
4427
Stephan Raj Ignatious Durairajba291292018-01-04 13:36:34 +05304428#else
4429
4430bool dp_set_inact_params(struct cdp_pdev *pdev, u_int16_t inact_check_interval,
4431 u_int16_t inact_normal, u_int16_t inact_overload)
4432{
4433 return false;
4434}
4435
4436bool dp_start_inact_timer(struct cdp_pdev *pdev, bool enable)
4437{
4438 return false;
4439}
4440
4441void dp_set_overload(struct cdp_pdev *pdev, bool overload)
4442{
4443 return;
4444}
4445
Stephan Raj Ignatious Durairaje5dd51c2018-02-09 16:39:12 +05304446void dp_init_inact_timer(struct dp_soc *soc)
4447{
4448 return;
4449}
4450
Stephan Raj Ignatious Durairajba291292018-01-04 13:36:34 +05304451bool dp_peer_is_inact(void *peer)
4452{
4453 return false;
4454}
Stephan Raj Ignatious Durairajba291292018-01-04 13:36:34 +05304455#endif
4456
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004457/*
4458 * dp_peer_unref_delete() - unref and delete peer
4459 * @peer_handle: Datapath peer handle
4460 *
4461 */
void dp_peer_unref_delete(void *peer_handle)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_peer *bss_peer = NULL;
	struct dp_vdev *vdev = peer->vdev;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_peer *tmppeer;
	int found = 0;
	uint16_t peer_id;
	uint16_t vdev_id;

	/*
	 * Hold the lock all the way from checking if the peer ref count
	 * is zero until the peer references are removed from the hash
	 * table and vdev list (if the peer ref count is zero).
	 * This protects against a new HL tx operation starting to use the
	 * peer object just after this function concludes it's done being used.
	 * Furthermore, the lock needs to be held while checking whether the
	 * vdev's list of peers is empty, to make sure that list is not modified
	 * concurrently with the empty check.
	 */
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		"%s: peer %pK ref_cnt(before decrement): %d", __func__,
		peer, qdf_atomic_read(&peer->ref_cnt));
	/* last reference dropped: tear the peer down */
	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
		peer_id = peer->peer_ids[0];
		/* cache vdev_id now; vdev may be freed further below */
		vdev_id = vdev->vdev_id;

		/*
		 * Make sure that the reference to the peer in
		 * peer object map is removed
		 */
		if (peer_id != HTT_INVALID_PEER)
			soc->peer_id_to_obj_map[peer_id] = NULL;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			"Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);

		/* remove the reference to the peer from the hash table */
		dp_peer_find_hash_remove(soc, peer);

		/* delete the peer's own (self) AST entry under the AST lock */
		qdf_spin_lock_bh(&soc->ast_lock);
		if (peer->self_ast_entry) {
			dp_peer_del_ast(soc, peer->self_ast_entry);
			peer->self_ast_entry = NULL;
		}
		qdf_spin_unlock_bh(&soc->ast_lock);

		/* unlink the peer from its vdev's peer list, verifying
		 * membership first
		 */
		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
			if (tmppeer == peer) {
				found = 1;
				break;
			}
		}
		if (found) {
			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
				peer_list_elem);
		} else {
			/*Ignoring the remove operation as peer not found*/
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
				"peer %pK not found in vdev (%pK)->peer_list:%pK",
				peer, vdev, &peer->vdev->peer_list);
		}

		/* cleanup the peer data */
		dp_peer_cleanup(vdev, peer);

		/* check whether the parent vdev has no peers left */
		if (TAILQ_EMPTY(&vdev->peer_list)) {
			/*
			 * Now that there are no references to the peer, we can
			 * release the peer reference lock.
			 */
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
			/*
			 * Check if the parent vdev was waiting for its peers
			 * to be deleted, in order for it to be deleted too.
			 */
			if (vdev->delete.pending) {
				/* capture the callback before freeing vdev */
				ol_txrx_vdev_delete_cb vdev_delete_cb =
					vdev->delete.callback;
				void *vdev_delete_context =
					vdev->delete.context;

				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_INFO_HIGH,
					FL("deleting vdev object %pK (%pM)"
					" - its last peer is done"),
					vdev, vdev->mac_addr.raw);
				/* all peers are gone, go ahead and delete it */
				dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
						FLOW_TYPE_VDEV,
						vdev_id);
				dp_tx_vdev_detach(vdev);
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_INFO_HIGH,
					FL("deleting vdev object %pK (%pM)"),
					vdev, vdev->mac_addr.raw);

				qdf_mem_free(vdev);
				/* vdev memory is gone - NULL the local so the
				 * checks below skip any dereference of it
				 */
				vdev = NULL;
				if (vdev_delete_cb)
					vdev_delete_cb(vdev_delete_context);
			}
		} else {
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
		}

		/* clear the vdev's bss-peer pointer if it was this peer */
		if (vdev) {
			if (vdev->vap_bss_peer == peer) {
				vdev->vap_bss_peer = NULL;
			}
		}

		/* invoke the control-path unref callback, if registered */
		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
					vdev_id, peer->mac_addr.raw);
		}

		/* no bss peer to fold the stats into - free directly */
		if (!vdev || !vdev->vap_bss_peer) {
			goto free_peer;
		}

#ifdef notyet
		/* NOTE(review): the free_peer label lives in the #else arm,
		 * so the goto above would not compile if 'notyet' were ever
		 * defined - confirm before enabling this arm
		 */
		qdf_mempool_free(soc->osdev, soc->mempool_ol_ath_peer, peer);
#else
		/* accumulate the departing peer's stats into the bss peer
		 * before the peer object is freed
		 */
		bss_peer = vdev->vap_bss_peer;
		DP_UPDATE_STATS(bss_peer, peer);

free_peer:
		qdf_mem_free(peer);

#endif
	} else {
		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	}
}
4601
4602/*
4603 * dp_peer_detach_wifi3() – Detach txrx peer
Naveen Rawat761329b2017-09-19 10:30:11 -07004604 * @peer_handle: Datapath peer handle
4605 * @bitmap: bitmap indicating special handling of request.
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004606 *
4607 */
Naveen Rawat761329b2017-09-19 10:30:11 -07004608static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004609{
4610 struct dp_peer *peer = (struct dp_peer *)peer_handle;
4611
4612 /* redirect the peer's rx delivery function to point to a
4613 * discard func
4614 */
Chaithanya Garrepalli0323f802018-03-14 17:45:21 +05304615
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004616 peer->rx_opt_proc = dp_rx_discard;
Akshay Kosigi78eced82018-05-14 14:53:48 +05304617 peer->ctrl_peer = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004618
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05304619 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
Jeff Johnson3f217e22017-09-18 10:13:35 -07004620 FL("peer %pK (%pM)"), peer, peer->mac_addr.raw);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004621
Krishna Kumaar Natarajan604fe162017-01-28 18:37:01 -08004622 dp_local_peer_id_free(peer->vdev->pdev, peer);
Krishna Kumaar Natarajan604fe162017-01-28 18:37:01 -08004623 qdf_spinlock_destroy(&peer->peer_info_lock);
4624
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004625 /*
4626 * Remove the reference added during peer_attach.
4627 * The peer will still be left allocated until the
4628 * PEER_UNMAP message arrives to remove the other
4629 * reference, added by the PEER_MAP message.
4630 */
4631 dp_peer_unref_delete(peer_handle);
Leo Chang5ea93a42016-11-03 12:39:49 -07004632}
4633
4634/*
4635 * dp_get_vdev_mac_addr_wifi3() – Detach txrx peer
4636 * @peer_handle: Datapath peer handle
4637 *
4638 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004639static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
Leo Chang5ea93a42016-11-03 12:39:49 -07004640{
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004641 struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
Leo Chang5ea93a42016-11-03 12:39:49 -07004642 return vdev->mac_addr.raw;
4643}
4644
4645/*
Karunakar Dasinenica792542017-01-16 10:08:58 -08004646 * dp_vdev_set_wds() - Enable per packet stats
4647 * @vdev_handle: DP VDEV handle
4648 * @val: value
4649 *
4650 * Return: none
4651 */
4652static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
4653{
4654 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4655
4656 vdev->wds_enabled = val;
4657 return 0;
4658}
4659
4660/*
Leo Chang5ea93a42016-11-03 12:39:49 -07004661 * dp_get_vdev_from_vdev_id_wifi3() – Detach txrx peer
4662 * @peer_handle: Datapath peer handle
4663 *
4664 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004665static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
4666 uint8_t vdev_id)
Leo Chang5ea93a42016-11-03 12:39:49 -07004667{
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004668 struct dp_pdev *pdev = (struct dp_pdev *)dev;
Leo Chang5ea93a42016-11-03 12:39:49 -07004669 struct dp_vdev *vdev = NULL;
4670
4671 if (qdf_unlikely(!pdev))
4672 return NULL;
4673
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05304674 qdf_spin_lock_bh(&pdev->vdev_list_lock);
Leo Chang5ea93a42016-11-03 12:39:49 -07004675 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4676 if (vdev->vdev_id == vdev_id)
4677 break;
4678 }
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05304679 qdf_spin_unlock_bh(&pdev->vdev_list_lock);
Leo Chang5ea93a42016-11-03 12:39:49 -07004680
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004681 return (struct cdp_vdev *)vdev;
Leo Chang5ea93a42016-11-03 12:39:49 -07004682}
4683
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004684static int dp_get_opmode(struct cdp_vdev *vdev_handle)
Leo Chang5ea93a42016-11-03 12:39:49 -07004685{
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004686 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
Leo Chang5ea93a42016-11-03 12:39:49 -07004687
4688 return vdev->opmode;
4689}
4690
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004691static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
Leo Chang5ea93a42016-11-03 12:39:49 -07004692{
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004693 struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
Leo Chang5ea93a42016-11-03 12:39:49 -07004694 struct dp_pdev *pdev = vdev->pdev;
4695
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004696 return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
Leo Chang5ea93a42016-11-03 12:39:49 -07004697}
phadiman7821bf82018-02-06 16:03:54 +05304698
Kai Chen6eca1a62017-01-12 10:17:53 -08004699/**
sumedh baikady84613b02017-09-19 16:36:14 -07004700 * dp_reset_monitor_mode() - Disable monitor mode
4701 * @pdev_handle: Datapath PDEV handle
4702 *
4703 * Return: 0 on success, not 0 on failure
4704 */
4705static int dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
4706{
4707 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4708 struct htt_rx_ring_tlv_filter htt_tlv_filter;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004709 struct dp_soc *soc = pdev->soc;
sumedh baikady84613b02017-09-19 16:36:14 -07004710 uint8_t pdev_id;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004711 int mac_id;
sumedh baikady84613b02017-09-19 16:36:14 -07004712
4713 pdev_id = pdev->pdev_id;
4714 soc = pdev->soc;
4715
Sumedh Baikady12b2b2c2018-03-05 16:50:58 -08004716 qdf_spin_lock_bh(&pdev->mon_lock);
4717
sumedh baikady84613b02017-09-19 16:36:14 -07004718 qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4719
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004720 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4721 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
sumedh baikady84613b02017-09-19 16:36:14 -07004722
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004723 htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4724 pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4725 RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4726
4727 htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4728 pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4729 RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4730 }
sumedh baikady84613b02017-09-19 16:36:14 -07004731
Sumedh Baikady12b2b2c2018-03-05 16:50:58 -08004732 pdev->monitor_vdev = NULL;
4733
4734 qdf_spin_unlock_bh(&pdev->mon_lock);
4735
sumedh baikady84613b02017-09-19 16:36:14 -07004736 return 0;
4737}
phadiman7821bf82018-02-06 16:03:54 +05304738
4739/**
4740 * dp_set_nac() - set peer_nac
4741 * @peer_handle: Datapath PEER handle
4742 *
4743 * Return: void
4744 */
4745static void dp_set_nac(struct cdp_peer *peer_handle)
4746{
4747 struct dp_peer *peer = (struct dp_peer *)peer_handle;
4748
4749 peer->nac = 1;
4750}
4751
4752/**
4753 * dp_get_tx_pending() - read pending tx
4754 * @pdev_handle: Datapath PDEV handle
4755 *
4756 * Return: outstanding tx
4757 */
4758static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
4759{
4760 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4761
4762 return qdf_atomic_read(&pdev->num_tx_outstanding);
4763}
4764
4765/**
4766 * dp_get_peer_mac_from_peer_id() - get peer mac
4767 * @pdev_handle: Datapath PDEV handle
4768 * @peer_id: Peer ID
4769 * @peer_mac: MAC addr of PEER
4770 *
4771 * Return: void
4772 */
4773static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
4774 uint32_t peer_id, uint8_t *peer_mac)
4775{
4776 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4777 struct dp_peer *peer;
4778
4779 if (pdev && peer_mac) {
4780 peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
4781 if (peer && peer->mac_addr.raw) {
4782 qdf_mem_copy(peer_mac, peer->mac_addr.raw,
4783 DP_MAC_ADDR_LEN);
4784 }
4785 }
4786}
4787
sumedh baikady84613b02017-09-19 16:36:14 -07004788/**
Kai Chen6eca1a62017-01-12 10:17:53 -08004789 * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
4790 * @vdev_handle: Datapath VDEV handle
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05304791 * @smart_monitor: Flag to denote if its smart monitor mode
Kai Chen6eca1a62017-01-12 10:17:53 -08004792 *
4793 * Return: 0 on success, not 0 on failure
4794 */
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05304795static int dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
4796 uint8_t smart_monitor)
Kai Chen6eca1a62017-01-12 10:17:53 -08004797{
4798 /* Many monitor VAPs can exists in a system but only one can be up at
4799 * anytime
4800 */
4801 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4802 struct dp_pdev *pdev;
4803 struct htt_rx_ring_tlv_filter htt_tlv_filter;
4804 struct dp_soc *soc;
4805 uint8_t pdev_id;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004806 int mac_id;
Kai Chen6eca1a62017-01-12 10:17:53 -08004807
4808 qdf_assert(vdev);
4809
4810 pdev = vdev->pdev;
4811 pdev_id = pdev->pdev_id;
4812 soc = pdev->soc;
Kai Chen6eca1a62017-01-12 10:17:53 -08004813 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
Aditya Sathishded018e2018-07-02 16:25:21 +05304814 "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
Kai Chen6eca1a62017-01-12 10:17:53 -08004815 pdev, pdev_id, soc, vdev);
4816
4817 /*Check if current pdev's monitor_vdev exists */
4818 if (pdev->monitor_vdev) {
4819 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +05304820 "vdev=%pK", vdev);
Kai Chen6eca1a62017-01-12 10:17:53 -08004821 qdf_assert(vdev);
4822 }
4823
4824 pdev->monitor_vdev = vdev;
4825
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05304826 /* If smart monitor mode, do not configure monitor ring */
4827 if (smart_monitor)
4828 return QDF_STATUS_SUCCESS;
4829
nobeljd124b742017-10-16 11:59:12 -07004830 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Aditya Sathishded018e2018-07-02 16:25:21 +05304831 "MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
nobeljd124b742017-10-16 11:59:12 -07004832 pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4833 pdev->fp_ctrl_filter, pdev->fp_data_filter,
4834 pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4835 pdev->mo_data_filter);
4836
nobelj1c31fee2018-03-21 11:47:05 -07004837 qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4838
Kai Chen6eca1a62017-01-12 10:17:53 -08004839 htt_tlv_filter.mpdu_start = 1;
4840 htt_tlv_filter.msdu_start = 1;
4841 htt_tlv_filter.packet = 1;
4842 htt_tlv_filter.msdu_end = 1;
4843 htt_tlv_filter.mpdu_end = 1;
4844 htt_tlv_filter.packet_header = 1;
4845 htt_tlv_filter.attention = 1;
4846 htt_tlv_filter.ppdu_start = 0;
4847 htt_tlv_filter.ppdu_end = 0;
4848 htt_tlv_filter.ppdu_end_user_stats = 0;
4849 htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4850 htt_tlv_filter.ppdu_end_status_done = 0;
sumedh baikady308ff002017-09-18 16:24:36 -07004851 htt_tlv_filter.header_per_msdu = 1;
nobeljd124b742017-10-16 11:59:12 -07004852 htt_tlv_filter.enable_fp =
4853 (pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
Kai Chen6eca1a62017-01-12 10:17:53 -08004854 htt_tlv_filter.enable_md = 0;
nobeljd124b742017-10-16 11:59:12 -07004855 htt_tlv_filter.enable_mo =
4856 (pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4857 htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4858 htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4859 htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4860 htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4861 htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4862 htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
Kai Chen6eca1a62017-01-12 10:17:53 -08004863
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004864 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4865 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4866
4867 htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4868 pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4869 RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4870 }
Kai Chen6eca1a62017-01-12 10:17:53 -08004871
nobelj1c31fee2018-03-21 11:47:05 -07004872 qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4873
Kai Chen6eca1a62017-01-12 10:17:53 -08004874 htt_tlv_filter.mpdu_start = 1;
nobelj1c31fee2018-03-21 11:47:05 -07004875 htt_tlv_filter.msdu_start = 0;
Kai Chen6eca1a62017-01-12 10:17:53 -08004876 htt_tlv_filter.packet = 0;
nobelj1c31fee2018-03-21 11:47:05 -07004877 htt_tlv_filter.msdu_end = 0;
4878 htt_tlv_filter.mpdu_end = 0;
4879 htt_tlv_filter.attention = 0;
Kai Chen6eca1a62017-01-12 10:17:53 -08004880 htt_tlv_filter.ppdu_start = 1;
4881 htt_tlv_filter.ppdu_end = 1;
4882 htt_tlv_filter.ppdu_end_user_stats = 1;
4883 htt_tlv_filter.ppdu_end_user_stats_ext = 1;
4884 htt_tlv_filter.ppdu_end_status_done = 1;
nobelj1c31fee2018-03-21 11:47:05 -07004885 htt_tlv_filter.enable_fp = 1;
Karunakar Dasineni40555682017-03-26 22:44:39 -07004886 htt_tlv_filter.enable_md = 0;
nobelj1c31fee2018-03-21 11:47:05 -07004887 htt_tlv_filter.enable_mo = 1;
4888 if (pdev->mcopy_mode) {
4889 htt_tlv_filter.packet_header = 1;
4890 }
4891 htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
4892 htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
4893 htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
4894 htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
4895 htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
4896 htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
nobeljd124b742017-10-16 11:59:12 -07004897
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004898 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
nobelj1c31fee2018-03-21 11:47:05 -07004899 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
4900 pdev->pdev_id);
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004901
4902 htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4903 pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4904 RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4905 }
nobeljd124b742017-10-16 11:59:12 -07004906
4907 return QDF_STATUS_SUCCESS;
4908}
4909
4910/**
4911 * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
4912 * @pdev_handle: Datapath PDEV handle
4913 * @filter_val: Flag to select Filter for monitor mode
4914 * Return: 0 on success, not 0 on failure
4915 */
4916static int dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
4917 struct cdp_monitor_filter *filter_val)
4918{
4919 /* Many monitor VAPs can exists in a system but only one can be up at
4920 * anytime
4921 */
4922 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4923 struct dp_vdev *vdev = pdev->monitor_vdev;
4924 struct htt_rx_ring_tlv_filter htt_tlv_filter;
4925 struct dp_soc *soc;
4926 uint8_t pdev_id;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004927 int mac_id;
nobeljd124b742017-10-16 11:59:12 -07004928
4929 pdev_id = pdev->pdev_id;
4930 soc = pdev->soc;
4931
4932 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
Aditya Sathishded018e2018-07-02 16:25:21 +05304933 "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
nobeljd124b742017-10-16 11:59:12 -07004934 pdev, pdev_id, soc, vdev);
4935
4936 /*Check if current pdev's monitor_vdev exists */
4937 if (!pdev->monitor_vdev) {
4938 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +05304939 "vdev=%pK", vdev);
nobeljd124b742017-10-16 11:59:12 -07004940 qdf_assert(vdev);
4941 }
4942
4943 /* update filter mode, type in pdev structure */
4944 pdev->mon_filter_mode = filter_val->mode;
4945 pdev->fp_mgmt_filter = filter_val->fp_mgmt;
4946 pdev->fp_ctrl_filter = filter_val->fp_ctrl;
4947 pdev->fp_data_filter = filter_val->fp_data;
4948 pdev->mo_mgmt_filter = filter_val->mo_mgmt;
4949 pdev->mo_ctrl_filter = filter_val->mo_ctrl;
4950 pdev->mo_data_filter = filter_val->mo_data;
4951
4952 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Aditya Sathishded018e2018-07-02 16:25:21 +05304953 "MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
nobeljd124b742017-10-16 11:59:12 -07004954 pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4955 pdev->fp_ctrl_filter, pdev->fp_data_filter,
4956 pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4957 pdev->mo_data_filter);
4958
4959 qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4960
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004961 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4962 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
nobeljd124b742017-10-16 11:59:12 -07004963
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004964 htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4965 pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4966 RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4967
4968 htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4969 pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4970 RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4971 }
nobeljd124b742017-10-16 11:59:12 -07004972
4973 htt_tlv_filter.mpdu_start = 1;
4974 htt_tlv_filter.msdu_start = 1;
4975 htt_tlv_filter.packet = 1;
4976 htt_tlv_filter.msdu_end = 1;
4977 htt_tlv_filter.mpdu_end = 1;
4978 htt_tlv_filter.packet_header = 1;
4979 htt_tlv_filter.attention = 1;
4980 htt_tlv_filter.ppdu_start = 0;
4981 htt_tlv_filter.ppdu_end = 0;
4982 htt_tlv_filter.ppdu_end_user_stats = 0;
4983 htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4984 htt_tlv_filter.ppdu_end_status_done = 0;
4985 htt_tlv_filter.header_per_msdu = 1;
4986 htt_tlv_filter.enable_fp =
4987 (pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4988 htt_tlv_filter.enable_md = 0;
4989 htt_tlv_filter.enable_mo =
4990 (pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4991 htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4992 htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4993 htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4994 htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4995 htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4996 htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4997
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004998 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4999 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5000
5001 htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5002 pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
5003 RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
5004 }
nobeljd124b742017-10-16 11:59:12 -07005005
nobelj1c31fee2018-03-21 11:47:05 -07005006 qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5007
nobeljd124b742017-10-16 11:59:12 -07005008 htt_tlv_filter.mpdu_start = 1;
nobelj1c31fee2018-03-21 11:47:05 -07005009 htt_tlv_filter.msdu_start = 0;
nobeljd124b742017-10-16 11:59:12 -07005010 htt_tlv_filter.packet = 0;
nobelj1c31fee2018-03-21 11:47:05 -07005011 htt_tlv_filter.msdu_end = 0;
5012 htt_tlv_filter.mpdu_end = 0;
5013 htt_tlv_filter.attention = 0;
nobeljd124b742017-10-16 11:59:12 -07005014 htt_tlv_filter.ppdu_start = 1;
5015 htt_tlv_filter.ppdu_end = 1;
5016 htt_tlv_filter.ppdu_end_user_stats = 1;
5017 htt_tlv_filter.ppdu_end_user_stats_ext = 1;
5018 htt_tlv_filter.ppdu_end_status_done = 1;
nobelj1c31fee2018-03-21 11:47:05 -07005019 htt_tlv_filter.enable_fp = 1;
nobeljd124b742017-10-16 11:59:12 -07005020 htt_tlv_filter.enable_md = 0;
nobelj1c31fee2018-03-21 11:47:05 -07005021 htt_tlv_filter.enable_mo = 1;
5022 if (pdev->mcopy_mode) {
5023 htt_tlv_filter.packet_header = 1;
5024 }
5025 htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
5026 htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
5027 htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
5028 htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
5029 htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
5030 htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
Karunakar Dasineni40555682017-03-26 22:44:39 -07005031
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08005032 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
nobelj1c31fee2018-03-21 11:47:05 -07005033 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
5034 pdev->pdev_id);
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08005035
5036 htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5037 pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5038 RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5039 }
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05305040
Kai Chen6eca1a62017-01-12 10:17:53 -08005041 return QDF_STATUS_SUCCESS;
5042}
Leo Chang5ea93a42016-11-03 12:39:49 -07005043
nobeljc8eb4d62018-01-04 14:29:32 -08005044/**
phadiman7821bf82018-02-06 16:03:54 +05305045 * dp_get_pdev_id_frm_pdev() - get pdev_id
5046 * @pdev_handle: Datapath PDEV handle
5047 *
5048 * Return: pdev_id
5049 */
5050static
5051uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
5052{
5053 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5054
5055 return pdev->pdev_id;
5056}
5057
5058/**
nobeljc8eb4d62018-01-04 14:29:32 -08005059 * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
5060 * @vdev_handle: Datapath VDEV handle
5061 * Return: true on ucast filter flag set
5062 */
5063static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
5064{
5065 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5066 struct dp_pdev *pdev;
5067
5068 pdev = vdev->pdev;
5069
5070 if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
5071 (pdev->mo_data_filter & FILTER_DATA_UCAST))
5072 return true;
5073
5074 return false;
5075}
5076
5077/**
5078 * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
5079 * @vdev_handle: Datapath VDEV handle
5080 * Return: true on mcast filter flag set
5081 */
5082static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
5083{
5084 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5085 struct dp_pdev *pdev;
5086
5087 pdev = vdev->pdev;
5088
5089 if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
5090 (pdev->mo_data_filter & FILTER_DATA_MCAST))
5091 return true;
5092
5093 return false;
5094}
5095
5096/**
5097 * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
5098 * @vdev_handle: Datapath VDEV handle
5099 * Return: true on non data filter flag set
5100 */
5101static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
5102{
5103 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5104 struct dp_pdev *pdev;
5105
5106 pdev = vdev->pdev;
5107
5108 if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
5109 (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
5110 if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
5111 (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
5112 return true;
5113 }
5114 }
5115
5116 return false;
5117}
5118
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +05305119#ifdef MESH_MODE_SUPPORT
Venkateswara Swamy Bandaruec4f8e62017-03-07 11:04:28 +05305120void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +05305121{
5122 struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5123
5124 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
Venkateswara Swamy Bandaru5caa83a2017-03-06 11:33:15 +05305125 FL("val %d"), val);
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +05305126 vdev->mesh_vdev = val;
5127}
Venkateswara Swamy Bandaruec4f8e62017-03-07 11:04:28 +05305128
5129/*
5130 * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
5131 * @vdev_hdl: virtual device object
5132 * @val: value to be set
5133 *
5134 * Return: void
5135 */
5136void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
5137{
5138 struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5139
5140 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5141 FL("val %d"), val);
5142 vdev->mesh_rx_filter = val;
5143}
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +05305144#endif
5145
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05305146/*
5147 * dp_aggregate_pdev_ctrl_frames_stats()- function to agreegate peer stats
Jeff Johnson2d821eb2018-05-06 16:25:49 -07005148 * Current scope is bar received count
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05305149 *
5150 * @pdev_handle: DP_PDEV handle
5151 *
5152 * Return: void
5153 */
Pratik Gandhi81fe0622018-02-23 12:36:10 +05305154#define STATS_PROC_TIMEOUT (HZ/1000)
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05305155
5156static void
5157dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
5158{
5159 struct dp_vdev *vdev;
5160 struct dp_peer *peer;
5161 uint32_t waitcnt;
5162
5163 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5164 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5165 if (!peer) {
5166 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5167 FL("DP Invalid Peer refernce"));
5168 return;
5169 }
Pratik Gandhi81fe0622018-02-23 12:36:10 +05305170
5171 if (peer->delete_in_progress) {
5172 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5173 FL("DP Peer deletion in progress"));
5174 continue;
5175 }
5176
5177 qdf_atomic_inc(&peer->ref_cnt);
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05305178 waitcnt = 0;
5179 dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
Prathyusha Guduri184b6402018-02-04 23:01:49 +05305180 while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05305181 && waitcnt < 10) {
5182 schedule_timeout_interruptible(
5183 STATS_PROC_TIMEOUT);
5184 waitcnt++;
5185 }
Prathyusha Guduri184b6402018-02-04 23:01:49 +05305186 qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
Pratik Gandhi81fe0622018-02-23 12:36:10 +05305187 dp_peer_unref_delete(peer);
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05305188 }
5189 }
5190}
5191
5192/**
5193 * dp_rx_bar_stats_cb(): BAR received stats callback
5194 * @soc: SOC handle
5195 * @cb_ctxt: Call back context
5196 * @reo_status: Reo status
5197 *
5198 * return: void
5199 */
5200void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
5201 union hal_reo_status *reo_status)
5202{
5203 struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
5204 struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
5205
5206 if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
5207 DP_TRACE_STATS(FATAL, "REO stats failure %d \n",
5208 queue_status->header.status);
Prathyusha Guduri184b6402018-02-04 23:01:49 +05305209 qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05305210 return;
5211 }
5212
5213 pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
Prathyusha Guduri184b6402018-02-04 23:01:49 +05305214 qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05305215
5216}
5217
Ishank Jain1e7401c2017-02-17 15:38:39 +05305218/**
5219 * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
5220 * @vdev: DP VDEV handle
5221 *
5222 * return: void
5223 */
5224void dp_aggregate_vdev_stats(struct dp_vdev *vdev)
5225{
5226 struct dp_peer *peer = NULL;
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305227 struct dp_soc *soc = vdev->pdev->soc;
Ishank Jain1e7401c2017-02-17 15:38:39 +05305228
5229 qdf_mem_set(&(vdev->stats.tx), sizeof(vdev->stats.tx), 0x0);
5230 qdf_mem_set(&(vdev->stats.rx), sizeof(vdev->stats.rx), 0x0);
5231
Tallapragada Kalyan4f894922018-01-03 14:26:28 +05305232 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
5233 DP_UPDATE_STATS(vdev, peer);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305234
psimhafb49db32017-08-31 15:33:33 -07005235 if (soc->cdp_soc.ol_ops->update_dp_stats)
Akshay Kosigi1a9c6d12018-04-26 00:54:23 +05305236 soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->ctrl_pdev,
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305237 &vdev->stats, (uint16_t) vdev->vdev_id,
5238 UPDATE_VDEV_STATS);
Venkata Sharath Chandra Manchala3e8add82017-07-10 11:59:54 -07005239
Ishank Jain1e7401c2017-02-17 15:38:39 +05305240}
5241
/**
 * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
 * @pdev: DP PDEV handle
 *
 * Zeroes the pdev tx/rx/tx_i counters, then walks every vdev on the
 * pdev (under vdev_list_lock), refreshing each vdev's stats from its
 * peers and folding them into the pdev totals.  Finally pushes the
 * aggregate to the control path via ol_ops->update_dp_stats when the
 * callback is registered.
 *
 * return: void
 */
static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
{
	struct dp_vdev *vdev = NULL;
	struct dp_soc *soc = pdev->soc;

	/* Reset pdev totals before re-accumulating from the vdev list */
	qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
	qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
	qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);

	/* Hold the list lock so vdevs cannot attach/detach mid-walk */
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

		/* Refresh the vdev's own aggregate from its peers first */
		dp_aggregate_vdev_stats(vdev);
		DP_UPDATE_STATS(pdev, vdev);

		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.nawds_mcast);

		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw.raw_pkt);
		DP_STATS_AGGR(pdev, vdev, tx_i.raw.dma_map_error);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt);
		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host.num);
		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target);
		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_host.num);
		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_target);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.sg.sg_pkt);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.mcast_en.mcast_pkt);
		DP_STATS_AGGR(pdev, vdev,
				tx_i.mcast_en.dropped_map_error);
		DP_STATS_AGGR(pdev, vdev,
				tx_i.mcast_en.dropped_self_mac);
		DP_STATS_AGGR(pdev, vdev,
				tx_i.mcast_en.dropped_send_fail);
		DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast);
		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.dma_error);
		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.ring_full);
		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.enqueue_fail);
		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.desc_na.num);
		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.res_full);
		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified);
		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified_raw);
		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.exception_fw);
		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.completion_fw);

		/* Derived total: sum of the individual host-drop reasons */
		pdev->stats.tx_i.dropped.dropped_pkt.num =
			pdev->stats.tx_i.dropped.dma_error +
			pdev->stats.tx_i.dropped.ring_full +
			pdev->stats.tx_i.dropped.enqueue_fail +
			pdev->stats.tx_i.dropped.desc_na.num +
			pdev->stats.tx_i.dropped.res_full;

		/*
		 * NOTE(review): these two are plain assignments inside the
		 * loop, so they end up holding the value of the LAST vdev
		 * iterated rather than an aggregate — confirm intentional.
		 */
		pdev->stats.tx.last_ack_rssi =
			vdev->stats.tx.last_ack_rssi;
		pdev->stats.tx_i.tso.num_seg =
			vdev->stats.tx_i.tso.num_seg;
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	/* Push the consolidated pdev stats to the control path */
	if (soc->cdp_soc.ol_ops->update_dp_stats)
		soc->cdp_soc.ol_ops->update_dp_stats(pdev->ctrl_pdev,
			&pdev->stats, pdev->pdev_id, UPDATE_PDEV_STATS);

}
5313
5314/**
Chaithanya Garrepalli2faa46f2018-04-09 12:34:20 +05305315 * dp_vdev_getstats() - get vdev packet level stats
5316 * @vdev_handle: Datapath VDEV handle
5317 * @stats: cdp network device stats structure
5318 *
5319 * Return: void
5320 */
5321static void dp_vdev_getstats(void *vdev_handle,
5322 struct cdp_dev_stats *stats)
5323{
5324 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5325
5326 dp_aggregate_vdev_stats(vdev);
5327}
5328
5329
5330/**
Anish Natarajf12b0a32018-03-14 14:27:13 +05305331 * dp_pdev_getstats() - get pdev packet level stats
5332 * @pdev_handle: Datapath PDEV handle
5333 * @stats: cdp network device stats structure
5334 *
5335 * Return: void
5336 */
Chaithanya Garrepalli2faa46f2018-04-09 12:34:20 +05305337static void dp_pdev_getstats(void *pdev_handle,
Anish Natarajf12b0a32018-03-14 14:27:13 +05305338 struct cdp_dev_stats *stats)
5339{
5340 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5341
5342 dp_aggregate_pdev_stats(pdev);
5343
5344 stats->tx_packets = pdev->stats.tx_i.rcvd.num;
5345 stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
5346
5347 stats->tx_errors = pdev->stats.tx.tx_failed +
5348 pdev->stats.tx_i.dropped.dropped_pkt.num;
5349 stats->tx_dropped = stats->tx_errors;
5350
5351 stats->rx_packets = pdev->stats.rx.unicast.num +
Pamidipati, Vijay3b0f9162018-04-16 19:06:20 +05305352 pdev->stats.rx.multicast.num +
5353 pdev->stats.rx.bcast.num;
Anish Natarajf12b0a32018-03-14 14:27:13 +05305354 stats->rx_bytes = pdev->stats.rx.unicast.bytes +
Pamidipati, Vijay3b0f9162018-04-16 19:06:20 +05305355 pdev->stats.rx.multicast.bytes +
5356 pdev->stats.rx.bcast.bytes;
Anish Natarajf12b0a32018-03-14 14:27:13 +05305357}
5358
5359/**
Chaithanya Garrepalli2faa46f2018-04-09 12:34:20 +05305360 * dp_get_device_stats() - get interface level packet stats
5361 * @handle: device handle
5362 * @stats: cdp network device stats structure
5363 * @type: device type pdev/vdev
5364 *
5365 * Return: void
5366 */
5367static void dp_get_device_stats(void *handle,
5368 struct cdp_dev_stats *stats, uint8_t type)
5369{
5370 switch (type) {
5371 case UPDATE_VDEV_STATS:
5372 dp_vdev_getstats(handle, stats);
5373 break;
5374 case UPDATE_PDEV_STATS:
5375 dp_pdev_getstats(handle, stats);
5376 break;
5377 default:
5378 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5379 "apstats cannot be updated for this input "
Aditya Sathishded018e2018-07-02 16:25:21 +05305380 "type %d", type);
Chaithanya Garrepalli2faa46f2018-04-09 12:34:20 +05305381 break;
5382 }
5383
5384}
5385
5386
/**
 * dp_print_pdev_tx_stats(): Print Pdev level TX stats
 * @pdev: DP_PDEV Handle
 *
 * Dumps the pdev TX counters (ingress from stack, completions, drop
 * reasons, SG/TSO/mcast-enhancement/raw paths, mesh and PPDU tag
 * counters) via DP_PRINT_STATS.  Counters are read as-is; callers are
 * expected to have aggregated them beforehand.
 *
 * Return:void
 */
static inline void
dp_print_pdev_tx_stats(struct dp_pdev *pdev)
{
	uint8_t index = 0;
	DP_PRINT_STATS("PDEV Tx Stats:\n");
	/* Ingress from the network stack */
	DP_PRINT_STATS("Received From Stack:");
	DP_PRINT_STATS(" Packets = %d",
			pdev->stats.tx_i.rcvd.num);
	DP_PRINT_STATS(" Bytes = %llu",
			pdev->stats.tx_i.rcvd.bytes);
	DP_PRINT_STATS("Processed:");
	DP_PRINT_STATS(" Packets = %d",
			pdev->stats.tx_i.processed.num);
	DP_PRINT_STATS(" Bytes = %llu",
			pdev->stats.tx_i.processed.bytes);
	/* Completion-path counters */
	DP_PRINT_STATS("Total Completions:");
	DP_PRINT_STATS(" Packets = %u",
			pdev->stats.tx.comp_pkt.num);
	DP_PRINT_STATS(" Bytes = %llu",
			pdev->stats.tx.comp_pkt.bytes);
	DP_PRINT_STATS("Successful Completions:");
	DP_PRINT_STATS(" Packets = %u",
			pdev->stats.tx.tx_success.num);
	DP_PRINT_STATS(" Bytes = %llu",
			pdev->stats.tx.tx_success.bytes);
	/* Drop breakdown: host-side first, then FW-reported reasons */
	DP_PRINT_STATS("Dropped:");
	DP_PRINT_STATS(" Total = %d",
			pdev->stats.tx_i.dropped.dropped_pkt.num);
	DP_PRINT_STATS(" Dma_map_error = %d",
			pdev->stats.tx_i.dropped.dma_error);
	DP_PRINT_STATS(" Ring Full = %d",
			pdev->stats.tx_i.dropped.ring_full);
	DP_PRINT_STATS(" Descriptor Not available = %d",
			pdev->stats.tx_i.dropped.desc_na.num);
	DP_PRINT_STATS(" HW enqueue failed= %d",
			pdev->stats.tx_i.dropped.enqueue_fail);
	DP_PRINT_STATS(" Resources Full = %d",
			pdev->stats.tx_i.dropped.res_full);
	DP_PRINT_STATS(" FW removed = %d",
			pdev->stats.tx.dropped.fw_rem);
	DP_PRINT_STATS(" FW removed transmitted = %d",
			pdev->stats.tx.dropped.fw_rem_tx);
	DP_PRINT_STATS(" FW removed untransmitted = %d",
			pdev->stats.tx.dropped.fw_rem_notx);
	DP_PRINT_STATS(" FW removed untransmitted fw_reason1 = %d",
			pdev->stats.tx.dropped.fw_reason1);
	DP_PRINT_STATS(" FW removed untransmitted fw_reason2 = %d",
			pdev->stats.tx.dropped.fw_reason2);
	DP_PRINT_STATS(" FW removed untransmitted fw_reason3 = %d",
			pdev->stats.tx.dropped.fw_reason3);
	DP_PRINT_STATS(" Aged Out from msdu/mpdu queues = %d",
			pdev->stats.tx.dropped.age_out);
	DP_PRINT_STATS(" Multicast:");
	DP_PRINT_STATS(" Packets: %u",
			pdev->stats.tx.mcast.num);
	DP_PRINT_STATS(" Bytes: %llu",
			pdev->stats.tx.mcast.bytes);
	/* Special TX paths: scatter-gather, TSO, mcast enhancement, raw */
	DP_PRINT_STATS("Scatter Gather:");
	DP_PRINT_STATS(" Packets = %d",
			pdev->stats.tx_i.sg.sg_pkt.num);
	DP_PRINT_STATS(" Bytes = %llu",
			pdev->stats.tx_i.sg.sg_pkt.bytes);
	DP_PRINT_STATS(" Dropped By Host = %d",
			pdev->stats.tx_i.sg.dropped_host.num);
	DP_PRINT_STATS(" Dropped By Target = %d",
			pdev->stats.tx_i.sg.dropped_target);
	DP_PRINT_STATS("TSO:");
	DP_PRINT_STATS(" Number of Segments = %d",
			pdev->stats.tx_i.tso.num_seg);
	DP_PRINT_STATS(" Packets = %d",
			pdev->stats.tx_i.tso.tso_pkt.num);
	DP_PRINT_STATS(" Bytes = %llu",
			pdev->stats.tx_i.tso.tso_pkt.bytes);
	DP_PRINT_STATS(" Dropped By Host = %d",
			pdev->stats.tx_i.tso.dropped_host.num);
	DP_PRINT_STATS("Mcast Enhancement:");
	DP_PRINT_STATS(" Packets = %d",
			pdev->stats.tx_i.mcast_en.mcast_pkt.num);
	DP_PRINT_STATS(" Bytes = %llu",
			pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
	DP_PRINT_STATS(" Dropped: Map Errors = %d",
			pdev->stats.tx_i.mcast_en.dropped_map_error);
	DP_PRINT_STATS(" Dropped: Self Mac = %d",
			pdev->stats.tx_i.mcast_en.dropped_self_mac);
	DP_PRINT_STATS(" Dropped: Send Fail = %d",
			pdev->stats.tx_i.mcast_en.dropped_send_fail);
	DP_PRINT_STATS(" Unicast sent = %d",
			pdev->stats.tx_i.mcast_en.ucast);
	DP_PRINT_STATS("Raw:");
	DP_PRINT_STATS(" Packets = %d",
			pdev->stats.tx_i.raw.raw_pkt.num);
	DP_PRINT_STATS(" Bytes = %llu",
			pdev->stats.tx_i.raw.raw_pkt.bytes);
	DP_PRINT_STATS(" DMA map error = %d",
			pdev->stats.tx_i.raw.dma_map_error);
	DP_PRINT_STATS("Reinjected:");
	DP_PRINT_STATS(" Packets = %d",
			pdev->stats.tx_i.reinject_pkts.num);
	DP_PRINT_STATS(" Bytes = %llu\n",
			pdev->stats.tx_i.reinject_pkts.bytes);
	DP_PRINT_STATS("Inspected:");
	DP_PRINT_STATS(" Packets = %d",
			pdev->stats.tx_i.inspect_pkts.num);
	DP_PRINT_STATS(" Bytes = %llu",
			pdev->stats.tx_i.inspect_pkts.bytes);
	DP_PRINT_STATS("Nawds Multicast:");
	DP_PRINT_STATS(" Packets = %d",
			pdev->stats.tx_i.nawds_mcast.num);
	DP_PRINT_STATS(" Bytes = %llu",
			pdev->stats.tx_i.nawds_mcast.bytes);
	DP_PRINT_STATS("CCE Classified:");
	DP_PRINT_STATS(" CCE Classified Packets: %u",
			pdev->stats.tx_i.cce_classified);
	DP_PRINT_STATS(" RAW CCE Classified Packets: %u",
			pdev->stats.tx_i.cce_classified_raw);
	DP_PRINT_STATS("Mesh stats:");
	DP_PRINT_STATS(" frames to firmware: %u",
			pdev->stats.tx_i.mesh.exception_fw);
	DP_PRINT_STATS(" completions from fw: %u",
			pdev->stats.tx_i.mesh.completion_fw);
	/* One counter per PPDU stats TLV tag received via HTT */
	DP_PRINT_STATS("PPDU stats counter");
	for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
		DP_PRINT_STATS(" Tag[%d] = %llu", index,
				pdev->stats.ppdu_stats_counter[index]);
	}
}
5519
/**
 * dp_print_pdev_rx_stats(): Print Pdev level RX stats
 * @pdev: DP_PDEV Handle
 *
 * Dumps the pdev RX counters: per-REO-ring receive totals, buffer
 * replenish stats, drops, packets sent to the stack, error counters
 * and the BAR-received count (refreshed on demand).
 *
 * Return: void
 */
static inline void
dp_print_pdev_rx_stats(struct dp_pdev *pdev)
{
	DP_PRINT_STATS("PDEV Rx Stats:\n");
	/* Per-REO-destination-ring receive counters (4 rings) */
	DP_PRINT_STATS("Received From HW (Per Rx Ring):");
	DP_PRINT_STATS(" Packets = %d %d %d %d",
			pdev->stats.rx.rcvd_reo[0].num,
			pdev->stats.rx.rcvd_reo[1].num,
			pdev->stats.rx.rcvd_reo[2].num,
			pdev->stats.rx.rcvd_reo[3].num);
	DP_PRINT_STATS(" Bytes = %llu %llu %llu %llu",
			pdev->stats.rx.rcvd_reo[0].bytes,
			pdev->stats.rx.rcvd_reo[1].bytes,
			pdev->stats.rx.rcvd_reo[2].bytes,
			pdev->stats.rx.rcvd_reo[3].bytes);
	/* RX buffer replenish path */
	DP_PRINT_STATS("Replenished:");
	DP_PRINT_STATS(" Packets = %d",
			pdev->stats.replenish.pkts.num);
	DP_PRINT_STATS(" Bytes = %llu",
			pdev->stats.replenish.pkts.bytes);
	DP_PRINT_STATS(" Buffers Added To Freelist = %d",
			pdev->stats.buf_freelist);
	DP_PRINT_STATS(" Low threshold intr = %d",
			pdev->stats.replenish.low_thresh_intrs);
	DP_PRINT_STATS("Dropped:");
	DP_PRINT_STATS(" msdu_not_done = %d",
			pdev->stats.dropped.msdu_not_done);
	DP_PRINT_STATS(" mon_rx_drop = %d",
			pdev->stats.dropped.mon_rx_drop);
	DP_PRINT_STATS("Sent To Stack:");
	DP_PRINT_STATS(" Packets = %d",
			pdev->stats.rx.to_stack.num);
	DP_PRINT_STATS(" Bytes = %llu",
			pdev->stats.rx.to_stack.bytes);
	DP_PRINT_STATS("Multicast/Broadcast:");
	DP_PRINT_STATS(" Packets = %d",
			(pdev->stats.rx.multicast.num +
			pdev->stats.rx.bcast.num));
	DP_PRINT_STATS(" Bytes = %llu",
			(pdev->stats.rx.multicast.bytes +
			pdev->stats.rx.bcast.bytes));
	DP_PRINT_STATS("Errors:");
	DP_PRINT_STATS(" Rxdma Ring Un-inititalized = %d",
			pdev->stats.replenish.rxdma_err);
	DP_PRINT_STATS(" Desc Alloc Failed: = %d",
			pdev->stats.err.desc_alloc_fail);
	DP_PRINT_STATS(" IP checksum error = %d",
			pdev->stats.err.ip_csum_err);
	DP_PRINT_STATS(" TCP/UDP checksum error = %d",
			pdev->stats.err.tcp_udp_csum_err);

	/* Get bar_recv_cnt */
	dp_aggregate_pdev_ctrl_frames_stats(pdev);
	DP_PRINT_STATS("BAR Received Count: = %d",
			pdev->stats.rx.bar_recv_cnt);

}
5583
5584/**
Kai Chen783e0382018-01-25 16:29:08 -08005585 * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats
5586 * @pdev: DP_PDEV Handle
5587 *
5588 * Return: void
5589 */
5590static inline void
5591dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
5592{
5593 struct cdp_pdev_mon_stats *rx_mon_stats;
5594
5595 rx_mon_stats = &pdev->rx_mon_stats;
5596
5597 DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
5598
5599 dp_rx_mon_print_dbg_ppdu_stats(rx_mon_stats);
5600
5601 DP_PRINT_STATS("status_ppdu_done_cnt = %d",
5602 rx_mon_stats->status_ppdu_done);
5603 DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
5604 rx_mon_stats->dest_ppdu_done);
5605 DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
5606 rx_mon_stats->dest_mpdu_done);
Karunakar Dasinenibb7848e2018-05-07 15:09:46 -07005607 DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
5608 rx_mon_stats->dest_mpdu_drop);
Kai Chen783e0382018-01-25 16:29:08 -08005609}
5610
5611/**
Ishank Jain1e7401c2017-02-17 15:38:39 +05305612 * dp_print_soc_tx_stats(): Print SOC level stats
5613 * @soc DP_SOC Handle
5614 *
5615 * Return: void
5616 */
5617static inline void
5618dp_print_soc_tx_stats(struct dp_soc *soc)
5619{
Soumya Bhatdbb85302018-05-18 11:01:34 +05305620 uint8_t desc_pool_id;
5621 soc->stats.tx.desc_in_use = 0;
5622
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305623 DP_PRINT_STATS("SOC Tx Stats:\n");
Soumya Bhatdbb85302018-05-18 11:01:34 +05305624
5625 for (desc_pool_id = 0;
5626 desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5627 desc_pool_id++)
5628 soc->stats.tx.desc_in_use +=
5629 soc->tx_desc[desc_pool_id].num_allocated;
5630
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305631 DP_PRINT_STATS("Tx Descriptors In Use = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305632 soc->stats.tx.desc_in_use);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305633 DP_PRINT_STATS("Invalid peer:");
5634 DP_PRINT_STATS(" Packets = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05305635 soc->stats.tx.tx_invalid_peer.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305636 DP_PRINT_STATS(" Bytes = %llu",
Ishank Jaine73c4032017-03-16 11:48:15 +05305637 soc->stats.tx.tx_invalid_peer.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305638 DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05305639 soc->stats.tx.tcl_ring_full[0],
5640 soc->stats.tx.tcl_ring_full[1],
5641 soc->stats.tx.tcl_ring_full[2]);
5642
Ishank Jain1e7401c2017-02-17 15:38:39 +05305643}
/**
 * dp_print_soc_rx_stats: Print SOC level Rx stats
 * @soc: DP_SOC Handle
 *
 * Dumps the SOC-wide RX error counters, then formats the per-code
 * RXDMA and REO error arrays into one space-separated line each.
 *
 * Return:void
 */
static inline void
dp_print_soc_rx_stats(struct dp_soc *soc)
{
	uint32_t i;
	char reo_error[DP_REO_ERR_LENGTH];
	char rxdma_error[DP_RXDMA_ERR_LENGTH];
	uint8_t index = 0;

	DP_PRINT_STATS("SOC Rx Stats:\n");
	DP_PRINT_STATS("Errors:\n");
	/* Decrypt failures cover both decrypt and TKIP MIC error codes */
	DP_PRINT_STATS("Rx Decrypt Errors = %d",
			(soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
			soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
	DP_PRINT_STATS("Invalid RBM = %d",
			soc->stats.rx.err.invalid_rbm);
	DP_PRINT_STATS("Invalid Vdev = %d",
			soc->stats.rx.err.invalid_vdev);
	DP_PRINT_STATS("Invalid Pdev = %d",
			soc->stats.rx.err.invalid_pdev);
	DP_PRINT_STATS("Invalid Peer = %d",
			soc->stats.rx.err.rx_invalid_peer.num);
	DP_PRINT_STATS("HAL Ring Access Fail = %d",
			soc->stats.rx.err.hal_ring_access_fail);

	/* Build one line listing every RXDMA error-code counter */
	for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
		index += qdf_snprint(&rxdma_error[index],
				DP_RXDMA_ERR_LENGTH - index,
				" %d", soc->stats.rx.err.rxdma_error[i]);
	}
	DP_PRINT_STATS("RXDMA Error (0-31):%s",
			rxdma_error);

	/* Same formatting for the REO error-code counters */
	index = 0;
	for (i = 0; i < HAL_REO_ERR_MAX; i++) {
		index += qdf_snprint(&reo_error[index],
				DP_REO_ERR_LENGTH - index,
				" %d", soc->stats.rx.err.reo_error[i]);
	}
	DP_PRINT_STATS("REO Error(0-14):%s",
			reo_error);
}
5691
sumedh baikady72b1c712017-08-24 12:11:46 -07005692
5693/**
5694 * dp_print_ring_stat_from_hal(): Print hal level ring stats
5695 * @soc: DP_SOC handle
5696 * @srng: DP_SRNG handle
5697 * @ring_name: SRNG name
5698 *
5699 * Return: void
5700 */
5701static inline void
5702dp_print_ring_stat_from_hal(struct dp_soc *soc, struct dp_srng *srng,
5703 char *ring_name)
5704{
5705 uint32_t tailp;
5706 uint32_t headp;
5707
5708 if (srng->hal_srng != NULL) {
5709 hal_api_get_tphp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
5710 DP_PRINT_STATS("%s : Head pointer = %d Tail Pointer = %d\n",
5711 ring_name, headp, tailp);
5712 }
5713}
5714
/**
 * dp_print_ring_stats(): Print tail and head pointer
 * @pdev: DP_PDEV handle
 *
 * Walks every SOC-level and pdev-level SRNG (REO, TCL, WBM, refill,
 * monitor and error rings) and prints each ring's head/tail pointers
 * via dp_print_ring_stat_from_hal(); uninitialized rings are skipped
 * by that helper.
 *
 * Return:void
 */
static inline void
dp_print_ring_stats(struct dp_pdev *pdev)
{
	uint32_t i;
	char ring_name[STR_MAXLEN + 1];
	int mac_id;

	/* SOC-level single-instance rings */
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->soc->reo_exception_ring,
			"Reo Exception Ring");
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->soc->reo_reinject_ring,
			"Reo Inject Ring");
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->soc->reo_cmd_ring,
			"Reo Command Ring");
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->soc->reo_status_ring,
			"Reo Status Ring");
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->soc->rx_rel_ring,
			"Rx Release ring");
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->soc->tcl_cmd_ring,
			"Tcl command Ring");
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->soc->tcl_status_ring,
			"Tcl Status Ring");
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->soc->wbm_desc_rel_ring,
			"Wbm Desc Rel Ring");
	/* Per-instance SOC rings: REO destination, TCL data, TX completion */
	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
		snprintf(ring_name, STR_MAXLEN, "Reo Dest Ring %d", i);
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->soc->reo_dest_ring[i],
				ring_name);
	}
	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
		snprintf(ring_name, STR_MAXLEN, "Tcl Data Ring %d", i);
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->soc->tcl_data_ring[i],
				ring_name);
	}
	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
		snprintf(ring_name, STR_MAXLEN, "Tx Comp Ring %d", i);
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->soc->tx_comp_ring[i],
				ring_name);
	}
	/* Pdev-level refill rings */
	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->rx_refill_buf_ring,
			"Rx Refill Buf Ring");

	dp_print_ring_stat_from_hal(pdev->soc,
			&pdev->rx_refill_buf_ring2,
			"Second Rx Refill Buf Ring");

	/* Per-MAC monitor-mode rings */
	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->rxdma_mon_buf_ring[mac_id],
				"Rxdma Mon Buf Ring");
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->rxdma_mon_dst_ring[mac_id],
				"Rxdma Mon Dst Ring");
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->rxdma_mon_status_ring[mac_id],
				"Rxdma Mon Status Ring");
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->rxdma_mon_desc_ring[mac_id],
				"Rxdma mon desc Ring");
	}

	/* RXDMA error destination rings */
	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
		snprintf(ring_name, STR_MAXLEN, "Rxdma err dst ring %d", i);
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->rxdma_err_dst_ring[i],
				ring_name);
	}

	/* Per-MAC RX buffer rings */
	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
		snprintf(ring_name, STR_MAXLEN, "Rx mac buf ring %d", i);
		dp_print_ring_stat_from_hal(pdev->soc,
				&pdev->rx_mac_buf_ring[i],
				ring_name);
	}
}
5807
Ishank Jain1e7401c2017-02-17 15:38:39 +05305808/**
5809 * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
5810 * @vdev: DP_VDEV handle
5811 *
5812 * Return:void
5813 */
5814static inline void
5815dp_txrx_host_stats_clr(struct dp_vdev *vdev)
5816{
5817 struct dp_peer *peer = NULL;
Anish Nataraj28490c42018-01-19 19:34:54 +05305818 struct dp_soc *soc = (struct dp_soc *)vdev->pdev->soc;
5819
Ishank Jain1e7401c2017-02-17 15:38:39 +05305820 DP_STATS_CLR(vdev->pdev);
5821 DP_STATS_CLR(vdev->pdev->soc);
5822 DP_STATS_CLR(vdev);
5823 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5824 if (!peer)
5825 return;
5826 DP_STATS_CLR(peer);
Anish Nataraj28490c42018-01-19 19:34:54 +05305827
5828 if (soc->cdp_soc.ol_ops->update_dp_stats) {
5829 soc->cdp_soc.ol_ops->update_dp_stats(
Akshay Kosigi1a9c6d12018-04-26 00:54:23 +05305830 vdev->pdev->ctrl_pdev,
Anish Nataraj28490c42018-01-19 19:34:54 +05305831 &peer->stats,
5832 peer->peer_ids[0],
5833 UPDATE_PEER_STATS);
5834 }
5835
Ishank Jain1e7401c2017-02-17 15:38:39 +05305836 }
5837
Anish Nataraj28490c42018-01-19 19:34:54 +05305838 if (soc->cdp_soc.ol_ops->update_dp_stats)
Akshay Kosigi1a9c6d12018-04-26 00:54:23 +05305839 soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->ctrl_pdev,
Anish Nataraj28490c42018-01-19 19:34:54 +05305840 &vdev->stats, (uint16_t)vdev->vdev_id,
5841 UPDATE_VDEV_STATS);
Ishank Jain1e7401c2017-02-17 15:38:39 +05305842}
5843
5844/**
5845 * dp_print_rx_rates(): Print Rx rate stats
5846 * @vdev: DP_VDEV handle
5847 *
5848 * Return:void
5849 */
5850static inline void
5851dp_print_rx_rates(struct dp_vdev *vdev)
5852{
5853 struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305854 uint8_t i, mcs, pkt_type;
Ishank Jain1e7401c2017-02-17 15:38:39 +05305855 uint8_t index = 0;
Ishank Jain1e7401c2017-02-17 15:38:39 +05305856 char nss[DP_NSS_LENGTH];
5857
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305858 DP_PRINT_STATS("Rx Rate Info:\n");
Ishank Jain1e7401c2017-02-17 15:38:39 +05305859
Ishank Jain57c42a12017-04-12 10:42:22 +05305860 for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5861 index = 0;
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305862 for (mcs = 0; mcs < MAX_MCS; mcs++) {
5863 if (!dp_rate_string[pkt_type][mcs].valid)
5864 continue;
5865
5866 DP_PRINT_STATS(" %s = %d",
5867 dp_rate_string[pkt_type][mcs].mcs_type,
Ishank Jain57c42a12017-04-12 10:42:22 +05305868 pdev->stats.rx.pkt_type[pkt_type].
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305869 mcs_count[mcs]);
Ishank Jain57c42a12017-04-12 10:42:22 +05305870 }
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305871
5872 DP_PRINT_STATS("\n");
Ishank Jain57c42a12017-04-12 10:42:22 +05305873 }
5874
Ishank Jain1e7401c2017-02-17 15:38:39 +05305875 index = 0;
5876 for (i = 0; i < SS_COUNT; i++) {
5877 index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
Ishank Jain57c42a12017-04-12 10:42:22 +05305878 " %d", pdev->stats.rx.nss[i]);
Ishank Jain1e7401c2017-02-17 15:38:39 +05305879 }
Anish Nataraj072d8972018-01-09 18:23:33 +05305880 DP_PRINT_STATS("NSS(1-8) = %s",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305881 nss);
5882
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305883 DP_PRINT_STATS("SGI ="
Ishank Jain1e7401c2017-02-17 15:38:39 +05305884 " 0.8us %d,"
5885 " 0.4us %d,"
5886 " 1.6us %d,"
5887 " 3.2us %d,",
5888 pdev->stats.rx.sgi_count[0],
5889 pdev->stats.rx.sgi_count[1],
5890 pdev->stats.rx.sgi_count[2],
5891 pdev->stats.rx.sgi_count[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305892 DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305893 pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
5894 pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305895 DP_PRINT_STATS("Reception Type ="
Ishank Jain1e7401c2017-02-17 15:38:39 +05305896 " SU: %d,"
5897 " MU_MIMO:%d,"
5898 " MU_OFDMA:%d,"
Ishank Jain57c42a12017-04-12 10:42:22 +05305899 " MU_OFDMA_MIMO:%d\n",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305900 pdev->stats.rx.reception_type[0],
5901 pdev->stats.rx.reception_type[1],
5902 pdev->stats.rx.reception_type[2],
5903 pdev->stats.rx.reception_type[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305904 DP_PRINT_STATS("Aggregation:\n");
5905 DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305906 pdev->stats.rx.ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305907 DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305908 pdev->stats.rx.non_ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305909 DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305910 pdev->stats.rx.amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305911 DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305912 pdev->stats.rx.non_amsdu_cnt);
5913}
5914
5915/**
5916 * dp_print_tx_rates(): Print tx rates
5917 * @vdev: DP_VDEV handle
5918 *
5919 * Return:void
5920 */
5921static inline void
5922dp_print_tx_rates(struct dp_vdev *vdev)
5923{
5924 struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305925 uint8_t mcs, pkt_type;
Venkata Sharath Chandra Manchalafaa0d8b2018-04-09 14:39:43 -07005926 uint8_t index;
5927 char nss[DP_NSS_LENGTH];
5928 int nss_index;
Ishank Jain1e7401c2017-02-17 15:38:39 +05305929
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305930 DP_PRINT_STATS("Tx Rate Info:\n");
Ishank Jain1e7401c2017-02-17 15:38:39 +05305931
5932 for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5933 index = 0;
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305934 for (mcs = 0; mcs < MAX_MCS; mcs++) {
5935 if (!dp_rate_string[pkt_type][mcs].valid)
5936 continue;
5937
5938 DP_PRINT_STATS(" %s = %d",
5939 dp_rate_string[pkt_type][mcs].mcs_type,
Ishank Jain1e7401c2017-02-17 15:38:39 +05305940 pdev->stats.tx.pkt_type[pkt_type].
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305941 mcs_count[mcs]);
Ishank Jain1e7401c2017-02-17 15:38:39 +05305942 }
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305943
5944 DP_PRINT_STATS("\n");
Ishank Jain1e7401c2017-02-17 15:38:39 +05305945 }
5946
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305947 DP_PRINT_STATS("SGI ="
Ishank Jain57c42a12017-04-12 10:42:22 +05305948 " 0.8us %d"
5949 " 0.4us %d"
5950 " 1.6us %d"
5951 " 3.2us %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05305952 pdev->stats.tx.sgi_count[0],
5953 pdev->stats.tx.sgi_count[1],
5954 pdev->stats.tx.sgi_count[2],
5955 pdev->stats.tx.sgi_count[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305956
5957 DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
chenguoec849832018-04-11 19:14:06 +08005958 pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
5959 pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305960
Venkata Sharath Chandra Manchalafaa0d8b2018-04-09 14:39:43 -07005961 index = 0;
5962 for (nss_index = 0; nss_index < SS_COUNT; nss_index++) {
5963 index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
5964 " %d", pdev->stats.tx.nss[nss_index]);
5965 }
5966
5967 DP_PRINT_STATS("NSS(1-8) = %s", nss);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305968 DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
5969 DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
5970 DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
5971 DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
5972 DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
5973
5974 DP_PRINT_STATS("Aggregation:\n");
5975 DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05305976 pdev->stats.tx.amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305977 DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05305978 pdev->stats.tx.non_amsdu_cnt);
Ishank Jain1e7401c2017-02-17 15:38:39 +05305979}
5980
5981/**
5982 * dp_print_peer_stats():print peer stats
5983 * @peer: DP_PEER handle
5984 *
5985 * return void
5986 */
5987static inline void dp_print_peer_stats(struct dp_peer *peer)
5988{
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305989 uint8_t i, mcs, pkt_type;
Ishank Jain1e7401c2017-02-17 15:38:39 +05305990 uint32_t index;
5991 char nss[DP_NSS_LENGTH];
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305992 DP_PRINT_STATS("Node Tx Stats:\n");
5993 DP_PRINT_STATS("Total Packet Completions = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305994 peer->stats.tx.comp_pkt.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305995 DP_PRINT_STATS("Total Bytes Completions = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305996 peer->stats.tx.comp_pkt.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305997 DP_PRINT_STATS("Success Packets = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305998 peer->stats.tx.tx_success.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305999 DP_PRINT_STATS("Success Bytes = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306000 peer->stats.tx.tx_success.bytes);
Pranita Solankefc2ff392017-12-15 19:25:13 +05306001 DP_PRINT_STATS("Unicast Success Packets = %d",
6002 peer->stats.tx.ucast.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306003 DP_PRINT_STATS("Unicast Success Bytes = %llu",
Pranita Solankefc2ff392017-12-15 19:25:13 +05306004 peer->stats.tx.ucast.bytes);
6005 DP_PRINT_STATS("Multicast Success Packets = %d",
6006 peer->stats.tx.mcast.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306007 DP_PRINT_STATS("Multicast Success Bytes = %llu",
Pranita Solankefc2ff392017-12-15 19:25:13 +05306008 peer->stats.tx.mcast.bytes);
Pranita Solankea5a3ae72018-01-18 21:45:27 +05306009 DP_PRINT_STATS("Broadcast Success Packets = %d",
6010 peer->stats.tx.bcast.num);
6011 DP_PRINT_STATS("Broadcast Success Bytes = %llu",
6012 peer->stats.tx.bcast.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306013 DP_PRINT_STATS("Packets Failed = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306014 peer->stats.tx.tx_failed);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306015 DP_PRINT_STATS("Packets In OFDMA = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306016 peer->stats.tx.ofdma);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306017 DP_PRINT_STATS("Packets In STBC = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306018 peer->stats.tx.stbc);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306019 DP_PRINT_STATS("Packets In LDPC = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306020 peer->stats.tx.ldpc);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306021 DP_PRINT_STATS("Packet Retries = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306022 peer->stats.tx.retries);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306023 DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306024 peer->stats.tx.amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306025 DP_PRINT_STATS("Last Packet RSSI = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306026 peer->stats.tx.last_ack_rssi);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306027 DP_PRINT_STATS("Dropped At FW: Removed = %d",
6028 peer->stats.tx.dropped.fw_rem);
6029 DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
6030 peer->stats.tx.dropped.fw_rem_tx);
6031 DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
6032 peer->stats.tx.dropped.fw_rem_notx);
6033 DP_PRINT_STATS("Dropped : Age Out = %d",
6034 peer->stats.tx.dropped.age_out);
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05306035 DP_PRINT_STATS("NAWDS : ");
6036 DP_PRINT_STATS(" Nawds multicast Drop Tx Packet = %d",
6037 peer->stats.tx.nawds_mcast_drop);
6038 DP_PRINT_STATS(" Nawds multicast Tx Packet Count = %d",
6039 peer->stats.tx.nawds_mcast.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306040 DP_PRINT_STATS(" Nawds multicast Tx Packet Bytes = %llu",
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05306041 peer->stats.tx.nawds_mcast.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306042
6043 DP_PRINT_STATS("Rate Info:");
Ishank Jain1e7401c2017-02-17 15:38:39 +05306044
6045 for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
6046 index = 0;
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306047 for (mcs = 0; mcs < MAX_MCS; mcs++) {
6048 if (!dp_rate_string[pkt_type][mcs].valid)
6049 continue;
6050
6051 DP_PRINT_STATS(" %s = %d",
6052 dp_rate_string[pkt_type][mcs].mcs_type,
Ishank Jain1e7401c2017-02-17 15:38:39 +05306053 peer->stats.tx.pkt_type[pkt_type].
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306054 mcs_count[mcs]);
Ishank Jain1e7401c2017-02-17 15:38:39 +05306055 }
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306056
6057 DP_PRINT_STATS("\n");
Ishank Jain1e7401c2017-02-17 15:38:39 +05306058 }
6059
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306060 DP_PRINT_STATS("SGI = "
Ishank Jain57c42a12017-04-12 10:42:22 +05306061 " 0.8us %d"
6062 " 0.4us %d"
6063 " 1.6us %d"
6064 " 3.2us %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306065 peer->stats.tx.sgi_count[0],
6066 peer->stats.tx.sgi_count[1],
6067 peer->stats.tx.sgi_count[2],
6068 peer->stats.tx.sgi_count[3]);
Pranita Solankea5a3ae72018-01-18 21:45:27 +05306069 DP_PRINT_STATS("Excess Retries per AC ");
6070 DP_PRINT_STATS(" Best effort = %d",
6071 peer->stats.tx.excess_retries_per_ac[0]);
6072 DP_PRINT_STATS(" Background= %d",
6073 peer->stats.tx.excess_retries_per_ac[1]);
6074 DP_PRINT_STATS(" Video = %d",
6075 peer->stats.tx.excess_retries_per_ac[2]);
6076 DP_PRINT_STATS(" Voice = %d",
6077 peer->stats.tx.excess_retries_per_ac[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306078 DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
Pranita Solanked7e10ba2017-12-13 15:40:38 +05306079 peer->stats.tx.bw[2], peer->stats.tx.bw[3],
6080 peer->stats.tx.bw[4], peer->stats.tx.bw[5]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306081
Pranita Solankeed0aba62018-01-12 19:14:31 +05306082 index = 0;
6083 for (i = 0; i < SS_COUNT; i++) {
6084 index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
6085 " %d", peer->stats.tx.nss[i]);
6086 }
6087 DP_PRINT_STATS("NSS(1-8) = %s",
6088 nss);
6089
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306090 DP_PRINT_STATS("Aggregation:");
6091 DP_PRINT_STATS(" Number of Msdu's Part of Amsdu = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05306092 peer->stats.tx.amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306093 DP_PRINT_STATS(" Number of Msdu's With No Msdu Level Aggregation = %d\n",
Ishank Jaine73c4032017-03-16 11:48:15 +05306094 peer->stats.tx.non_amsdu_cnt);
Ishank Jain1e7401c2017-02-17 15:38:39 +05306095
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306096 DP_PRINT_STATS("Node Rx Stats:");
6097 DP_PRINT_STATS("Packets Sent To Stack = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306098 peer->stats.rx.to_stack.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306099 DP_PRINT_STATS("Bytes Sent To Stack = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306100 peer->stats.rx.to_stack.bytes);
Ishank Jain57c42a12017-04-12 10:42:22 +05306101 for (i = 0; i < CDP_MAX_RX_RINGS; i++) {
Pranita Solankefc2ff392017-12-15 19:25:13 +05306102 DP_PRINT_STATS("Ring Id = %d", i);
6103 DP_PRINT_STATS(" Packets Received = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05306104 peer->stats.rx.rcvd_reo[i].num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306105 DP_PRINT_STATS(" Bytes Received = %llu",
Ishank Jain57c42a12017-04-12 10:42:22 +05306106 peer->stats.rx.rcvd_reo[i].bytes);
6107 }
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306108 DP_PRINT_STATS("Multicast Packets Received = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306109 peer->stats.rx.multicast.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306110 DP_PRINT_STATS("Multicast Bytes Received = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306111 peer->stats.rx.multicast.bytes);
Pranita Solankea5a3ae72018-01-18 21:45:27 +05306112 DP_PRINT_STATS("Broadcast Packets Received = %d",
6113 peer->stats.rx.bcast.num);
6114 DP_PRINT_STATS("Broadcast Bytes Received = %llu",
6115 peer->stats.rx.bcast.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306116 DP_PRINT_STATS("Intra BSS Packets Received = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05306117 peer->stats.rx.intra_bss.pkts.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306118 DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
Ishank Jain57c42a12017-04-12 10:42:22 +05306119 peer->stats.rx.intra_bss.pkts.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306120 DP_PRINT_STATS("Raw Packets Received = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306121 peer->stats.rx.raw.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306122 DP_PRINT_STATS("Raw Bytes Received = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306123 peer->stats.rx.raw.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306124 DP_PRINT_STATS("Errors: MIC Errors = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306125 peer->stats.rx.err.mic_err);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306126 DP_PRINT_STATS("Erros: Decryption Errors = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306127 peer->stats.rx.err.decrypt_err);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306128 DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306129 peer->stats.rx.non_ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306130 DP_PRINT_STATS("Msdu's Recived As Ampdu = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05306131 peer->stats.rx.ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306132 DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306133 peer->stats.rx.non_amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306134 DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306135 peer->stats.rx.amsdu_cnt);
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05306136 DP_PRINT_STATS("NAWDS : ");
6137 DP_PRINT_STATS(" Nawds multicast Drop Rx Packet = %d",
Ruchi, Agrawal27550482018-02-20 19:43:41 +05306138 peer->stats.rx.nawds_mcast_drop);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306139 DP_PRINT_STATS("SGI ="
Ishank Jain57c42a12017-04-12 10:42:22 +05306140 " 0.8us %d"
6141 " 0.4us %d"
6142 " 1.6us %d"
6143 " 3.2us %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306144 peer->stats.rx.sgi_count[0],
6145 peer->stats.rx.sgi_count[1],
6146 peer->stats.rx.sgi_count[2],
6147 peer->stats.rx.sgi_count[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306148 DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306149 peer->stats.rx.bw[0], peer->stats.rx.bw[1],
6150 peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306151 DP_PRINT_STATS("Reception Type ="
Ishank Jain1e7401c2017-02-17 15:38:39 +05306152 " SU %d,"
6153 " MU_MIMO %d,"
6154 " MU_OFDMA %d,"
6155 " MU_OFDMA_MIMO %d",
6156 peer->stats.rx.reception_type[0],
6157 peer->stats.rx.reception_type[1],
6158 peer->stats.rx.reception_type[2],
6159 peer->stats.rx.reception_type[3]);
6160
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306161
Ishank Jain57c42a12017-04-12 10:42:22 +05306162 for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
6163 index = 0;
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306164 for (mcs = 0; mcs < MAX_MCS; mcs++) {
6165 if (!dp_rate_string[pkt_type][mcs].valid)
6166 continue;
Ishank Jain57c42a12017-04-12 10:42:22 +05306167
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306168 DP_PRINT_STATS(" %s = %d",
6169 dp_rate_string[pkt_type][mcs].mcs_type,
6170 peer->stats.rx.pkt_type[pkt_type].
6171 mcs_count[mcs]);
6172 }
6173
6174 DP_PRINT_STATS("\n");
6175 }
Ishank Jain1e7401c2017-02-17 15:38:39 +05306176
6177 index = 0;
6178 for (i = 0; i < SS_COUNT; i++) {
6179 index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
Ishank Jain57c42a12017-04-12 10:42:22 +05306180 " %d", peer->stats.rx.nss[i]);
Ishank Jain1e7401c2017-02-17 15:38:39 +05306181 }
Anish Nataraj072d8972018-01-09 18:23:33 +05306182 DP_PRINT_STATS("NSS(1-8) = %s",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306183 nss);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306184
6185 DP_PRINT_STATS("Aggregation:");
6186 DP_PRINT_STATS(" Msdu's Part of Ampdu = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05306187 peer->stats.rx.ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306188 DP_PRINT_STATS(" Msdu's With No Mpdu Level Aggregation = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05306189 peer->stats.rx.non_ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306190 DP_PRINT_STATS(" Msdu's Part of Amsdu = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05306191 peer->stats.rx.amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306192 DP_PRINT_STATS(" Msdu's With No Msdu Level Aggregation = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05306193 peer->stats.rx.non_amsdu_cnt);
Ishank Jain1e7401c2017-02-17 15:38:39 +05306194}
6195
Venkata Sharath Chandra Manchala0cb31982018-03-30 15:55:26 -07006196/*
6197 * dp_get_host_peer_stats()- function to print peer stats
6198 * @pdev_handle: DP_PDEV handle
6199 * @mac_addr: mac address of the peer
6200 *
6201 * Return: void
6202 */
6203static void
6204dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
6205{
6206 struct dp_peer *peer;
6207 uint8_t local_id;
6208
6209 peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
6210 &local_id);
6211
6212 if (!peer) {
6213 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6214 "%s: Invalid peer\n", __func__);
6215 return;
6216 }
6217
6218 dp_print_peer_stats(peer);
6219 dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
6220}
6221
Ishank Jain1e7401c2017-02-17 15:38:39 +05306222/**
6223 * dp_print_host_stats()- Function to print the stats aggregated at host
6224 * @vdev_handle: DP_VDEV handle
Ishank Jain1e7401c2017-02-17 15:38:39 +05306225 * @type: host stats type
6226 *
6227 * Available Stat types
Ishank Jain6290a3c2017-03-21 10:49:39 +05306228 * TXRX_CLEAR_STATS : Clear the stats
Ishank Jain1e7401c2017-02-17 15:38:39 +05306229 * TXRX_RX_RATE_STATS: Print Rx Rate Info
6230 * TXRX_TX_RATE_STATS: Print Tx Rate Info
6231 * TXRX_TX_HOST_STATS: Print Tx Stats
6232 * TXRX_RX_HOST_STATS: Print Rx Stats
Pamidipati, Vijay899e7752017-07-25 22:09:28 +05306233 * TXRX_AST_STATS: Print AST Stats
sumedh baikady72b1c712017-08-24 12:11:46 -07006234 * TXRX_SRNG_PTR_STATS: Print SRNG ring pointer stats
Ishank Jain1e7401c2017-02-17 15:38:39 +05306235 *
6236 * Return: 0 on success, print error message in case of failure
6237 */
6238static int
Venkata Sharath Chandra Manchala0cb31982018-03-30 15:55:26 -07006239dp_print_host_stats(struct cdp_vdev *vdev_handle,
6240 struct cdp_txrx_stats_req *req)
Ishank Jain1e7401c2017-02-17 15:38:39 +05306241{
6242 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6243 struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
Venkata Sharath Chandra Manchala0cb31982018-03-30 15:55:26 -07006244 enum cdp_host_txrx_stats type =
6245 dp_stats_mapping_table[req->stats][STATS_HOST];
Ishank Jain1e7401c2017-02-17 15:38:39 +05306246
6247 dp_aggregate_pdev_stats(pdev);
Pamidipati, Vijay899e7752017-07-25 22:09:28 +05306248
Ishank Jain1e7401c2017-02-17 15:38:39 +05306249 switch (type) {
Ishank Jain6290a3c2017-03-21 10:49:39 +05306250 case TXRX_CLEAR_STATS:
6251 dp_txrx_host_stats_clr(vdev);
6252 break;
Ishank Jain1e7401c2017-02-17 15:38:39 +05306253 case TXRX_RX_RATE_STATS:
6254 dp_print_rx_rates(vdev);
6255 break;
6256 case TXRX_TX_RATE_STATS:
6257 dp_print_tx_rates(vdev);
6258 break;
6259 case TXRX_TX_HOST_STATS:
6260 dp_print_pdev_tx_stats(pdev);
6261 dp_print_soc_tx_stats(pdev->soc);
6262 break;
6263 case TXRX_RX_HOST_STATS:
6264 dp_print_pdev_rx_stats(pdev);
6265 dp_print_soc_rx_stats(pdev->soc);
6266 break;
Pamidipati, Vijay899e7752017-07-25 22:09:28 +05306267 case TXRX_AST_STATS:
6268 dp_print_ast_stats(pdev->soc);
Ruchi, Agrawal89219d92018-02-26 16:43:06 +05306269 dp_print_peer_table(vdev);
Pamidipati, Vijay899e7752017-07-25 22:09:28 +05306270 break;
sumedh baikady72b1c712017-08-24 12:11:46 -07006271 case TXRX_SRNG_PTR_STATS:
Kai Chen783e0382018-01-25 16:29:08 -08006272 dp_print_ring_stats(pdev);
6273 break;
6274 case TXRX_RX_MON_STATS:
6275 dp_print_pdev_rx_mon_stats(pdev);
6276 break;
Venkata Sharath Chandra Manchala0cb31982018-03-30 15:55:26 -07006277 case TXRX_REO_QUEUE_STATS:
6278 dp_get_host_peer_stats((struct cdp_pdev *)pdev, req->peer_addr);
6279 break;
Ishank Jain1e7401c2017-02-17 15:38:39 +05306280 default:
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07006281 DP_TRACE(FATAL, "Wrong Input For TxRx Host Stats");
Ishank Jain1e7401c2017-02-17 15:38:39 +05306282 break;
6283 }
6284 return 0;
6285}
6286
6287/*
Soumya Bhat7422db82017-12-15 13:48:53 +05306288 * dp_ppdu_ring_reset()- Reset PPDU Stats ring
6289 * @pdev: DP_PDEV handle
6290 *
6291 * Return: void
6292 */
6293static void
6294dp_ppdu_ring_reset(struct dp_pdev *pdev)
6295{
6296 struct htt_rx_ring_tlv_filter htt_tlv_filter;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08006297 int mac_id;
Soumya Bhat7422db82017-12-15 13:48:53 +05306298
6299 qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
6300
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08006301 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6302 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6303 pdev->pdev_id);
Soumya Bhat7422db82017-12-15 13:48:53 +05306304
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08006305 htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
6306 pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6307 RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6308 }
Soumya Bhat7422db82017-12-15 13:48:53 +05306309}
6310
6311/*
Anish Nataraj38a29562017-08-18 19:41:17 +05306312 * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
6313 * @pdev: DP_PDEV handle
6314 *
6315 * Return: void
6316 */
6317static void
6318dp_ppdu_ring_cfg(struct dp_pdev *pdev)
6319{
6320 struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08006321 int mac_id;
Anish Nataraj38a29562017-08-18 19:41:17 +05306322
Soumya Bhat35fc6992018-03-09 18:39:03 +05306323 htt_tlv_filter.mpdu_start = 1;
Anish Nataraj38a29562017-08-18 19:41:17 +05306324 htt_tlv_filter.msdu_start = 0;
6325 htt_tlv_filter.packet = 0;
6326 htt_tlv_filter.msdu_end = 0;
6327 htt_tlv_filter.mpdu_end = 0;
nobelj1c31fee2018-03-21 11:47:05 -07006328 htt_tlv_filter.attention = 0;
Anish Nataraj38a29562017-08-18 19:41:17 +05306329 htt_tlv_filter.ppdu_start = 1;
6330 htt_tlv_filter.ppdu_end = 1;
6331 htt_tlv_filter.ppdu_end_user_stats = 1;
6332 htt_tlv_filter.ppdu_end_user_stats_ext = 1;
6333 htt_tlv_filter.ppdu_end_status_done = 1;
6334 htt_tlv_filter.enable_fp = 1;
6335 htt_tlv_filter.enable_md = 0;
nobelj1c31fee2018-03-21 11:47:05 -07006336 if (pdev->mcopy_mode) {
6337 htt_tlv_filter.packet_header = 1;
Soumya Bhat2f54de22018-02-21 09:54:28 +05306338 htt_tlv_filter.enable_mo = 1;
nobelj1c31fee2018-03-21 11:47:05 -07006339 }
nobeljd124b742017-10-16 11:59:12 -07006340 htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
6341 htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
6342 htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
6343 htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
6344 htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
6345 htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
Anish Nataraj38a29562017-08-18 19:41:17 +05306346
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08006347 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6348 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6349 pdev->pdev_id);
6350
6351 htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
6352 pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6353 RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6354 }
Anish Nataraj38a29562017-08-18 19:41:17 +05306355}
6356
6357/*
Alok Singh40a622b2018-06-28 10:47:26 +05306358 * is_ppdu_txrx_capture_enabled() - API to check both pktlog and debug_sniffer
6359 * modes are enabled or not.
6360 * @dp_pdev: dp pdev handle.
6361 *
6362 * Return: bool
6363 */
6364static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
6365{
6366 if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
6367 !pdev->mcopy_mode)
6368 return true;
6369 else
6370 return false;
6371}
6372
6373/*
Vinay Adella873dc402018-05-28 12:06:34 +05306374 *dp_set_bpr_enable() - API to enable/disable bpr feature
6375 *@pdev_handle: DP_PDEV handle.
6376 *@val: Provided value.
6377 *
6378 *Return: void
6379 */
6380static void
6381dp_set_bpr_enable(struct cdp_pdev *pdev_handle, int val)
6382{
6383 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6384
6385 switch (val) {
6386 case CDP_BPR_DISABLE:
6387 pdev->bpr_enable = CDP_BPR_DISABLE;
6388 if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
6389 !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
6390 dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6391 } else if (pdev->enhanced_stats_en &&
6392 !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
6393 !pdev->pktlog_ppdu_stats) {
6394 dp_h2t_cfg_stats_msg_send(pdev,
6395 DP_PPDU_STATS_CFG_ENH_STATS,
6396 pdev->pdev_id);
6397 }
6398 break;
6399 case CDP_BPR_ENABLE:
6400 pdev->bpr_enable = CDP_BPR_ENABLE;
6401 if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
6402 !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
6403 dp_h2t_cfg_stats_msg_send(pdev,
6404 DP_PPDU_STATS_CFG_BPR,
6405 pdev->pdev_id);
6406 } else if (pdev->enhanced_stats_en &&
6407 !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
6408 !pdev->pktlog_ppdu_stats) {
6409 dp_h2t_cfg_stats_msg_send(pdev,
6410 DP_PPDU_STATS_CFG_BPR_ENH,
6411 pdev->pdev_id);
6412 } else if (pdev->pktlog_ppdu_stats) {
6413 dp_h2t_cfg_stats_msg_send(pdev,
6414 DP_PPDU_STATS_CFG_BPR_PKTLOG,
6415 pdev->pdev_id);
6416 }
6417 break;
6418 default:
6419 break;
6420 }
6421}
6422
6423/*
Soumya Bhat6fee59c2017-10-31 13:12:37 +05306424 * dp_config_debug_sniffer()- API to enable/disable debug sniffer
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306425 * @pdev_handle: DP_PDEV handle
6426 * @val: user provided value
6427 *
6428 * Return: void
6429 */
6430static void
Soumya Bhat6fee59c2017-10-31 13:12:37 +05306431dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306432{
6433 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6434
Soumya Bhat89647ef2017-11-16 17:23:48 +05306435 switch (val) {
6436 case 0:
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306437 pdev->tx_sniffer_enable = 0;
Soumya Bhat7422db82017-12-15 13:48:53 +05306438 pdev->mcopy_mode = 0;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306439
Alok Singh40a622b2018-06-28 10:47:26 +05306440 if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
6441 !pdev->bpr_enable) {
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07006442 dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
Soumya Bhat7422db82017-12-15 13:48:53 +05306443 dp_ppdu_ring_reset(pdev);
Alok Singh40a622b2018-06-28 10:47:26 +05306444 } else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
Soumya Bhat0d6245c2018-02-08 21:02:57 +05306445 dp_h2t_cfg_stats_msg_send(pdev,
6446 DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
Alok Singh40a622b2018-06-28 10:47:26 +05306447 } else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
6448 dp_h2t_cfg_stats_msg_send(pdev,
6449 DP_PPDU_STATS_CFG_BPR_ENH,
6450 pdev->pdev_id);
6451 } else {
6452 dp_h2t_cfg_stats_msg_send(pdev,
6453 DP_PPDU_STATS_CFG_BPR,
6454 pdev->pdev_id);
Soumya Bhat7422db82017-12-15 13:48:53 +05306455 }
Soumya Bhat89647ef2017-11-16 17:23:48 +05306456 break;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306457
Soumya Bhat89647ef2017-11-16 17:23:48 +05306458 case 1:
6459 pdev->tx_sniffer_enable = 1;
Soumya Bhat7422db82017-12-15 13:48:53 +05306460 pdev->mcopy_mode = 0;
6461
Soumya Bhat0d6245c2018-02-08 21:02:57 +05306462 if (!pdev->pktlog_ppdu_stats)
Soumya Bhat7422db82017-12-15 13:48:53 +05306463 dp_h2t_cfg_stats_msg_send(pdev,
Soumya Bhat0d6245c2018-02-08 21:02:57 +05306464 DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
Soumya Bhat89647ef2017-11-16 17:23:48 +05306465 break;
6466 case 2:
Soumya Bhat7422db82017-12-15 13:48:53 +05306467 pdev->mcopy_mode = 1;
Soumya Bhat89647ef2017-11-16 17:23:48 +05306468 pdev->tx_sniffer_enable = 0;
Soumya Bhat14b6f262018-06-20 16:33:49 +05306469 dp_ppdu_ring_cfg(pdev);
Soumya Bhat0d6245c2018-02-08 21:02:57 +05306470
6471 if (!pdev->pktlog_ppdu_stats)
Soumya Bhat7422db82017-12-15 13:48:53 +05306472 dp_h2t_cfg_stats_msg_send(pdev,
Soumya Bhat0d6245c2018-02-08 21:02:57 +05306473 DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
Soumya Bhat89647ef2017-11-16 17:23:48 +05306474 break;
6475 default:
6476 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +05306477 "Invalid value");
Soumya Bhat89647ef2017-11-16 17:23:48 +05306478 break;
6479 }
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306480}
6481
6482/*
Pamidipati, Vijaybe379452017-06-21 00:31:06 +05306483 * dp_enable_enhanced_stats()- API to enable enhanced statistcs
6484 * @pdev_handle: DP_PDEV handle
6485 *
6486 * Return: void
6487 */
6488static void
6489dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
6490{
6491 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6492 pdev->enhanced_stats_en = 1;
Anish Nataraj38a29562017-08-18 19:41:17 +05306493
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05306494 if (!pdev->mcopy_mode && !pdev->neighbour_peers_added)
Soumya Bhat7422db82017-12-15 13:48:53 +05306495 dp_ppdu_ring_cfg(pdev);
6496
Alok Singh40a622b2018-06-28 10:47:26 +05306497 if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
Soumya Bhat0d6245c2018-02-08 21:02:57 +05306498 dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
Alok Singh40a622b2018-06-28 10:47:26 +05306499 } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
6500 dp_h2t_cfg_stats_msg_send(pdev,
6501 DP_PPDU_STATS_CFG_BPR_ENH,
6502 pdev->pdev_id);
6503 }
Pamidipati, Vijaybe379452017-06-21 00:31:06 +05306504}
6505
6506/*
6507 * dp_disable_enhanced_stats()- API to disable enhanced statistcs
6508 * @pdev_handle: DP_PDEV handle
6509 *
6510 * Return: void
6511 */
6512static void
6513dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
6514{
6515 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306516
Pamidipati, Vijaybe379452017-06-21 00:31:06 +05306517 pdev->enhanced_stats_en = 0;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306518
Alok Singh40a622b2018-06-28 10:47:26 +05306519 if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07006520 dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
Alok Singh40a622b2018-06-28 10:47:26 +05306521 } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
6522 dp_h2t_cfg_stats_msg_send(pdev,
6523 DP_PPDU_STATS_CFG_BPR,
6524 pdev->pdev_id);
6525 }
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05306526
6527 if (!pdev->mcopy_mode && !pdev->neighbour_peers_added)
Soumya Bhat7422db82017-12-15 13:48:53 +05306528 dp_ppdu_ring_reset(pdev);
Pamidipati, Vijaybe379452017-06-21 00:31:06 +05306529}
6530
6531/*
Ishank Jain6290a3c2017-03-21 10:49:39 +05306532 * dp_get_fw_peer_stats()- function to print peer stats
6533 * @pdev_handle: DP_PDEV handle
6534 * @mac_addr: mac address of the peer
6535 * @cap: Type of htt stats requested
6536 *
6537 * Currently Supporting only MAC ID based requests Only
6538 * 1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
6539 * 2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
6540 * 3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
6541 *
6542 * Return: void
6543 */
6544static void
6545dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
6546 uint32_t cap)
6547{
6548 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Pamidipati, Vijayc2cf6692017-11-22 10:17:34 +05306549 int i;
Ishank Jain6290a3c2017-03-21 10:49:39 +05306550 uint32_t config_param0 = 0;
6551 uint32_t config_param1 = 0;
6552 uint32_t config_param2 = 0;
6553 uint32_t config_param3 = 0;
6554
6555 HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
6556 config_param0 |= (1 << (cap + 1));
6557
Pamidipati, Vijayc2cf6692017-11-22 10:17:34 +05306558 for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
6559 config_param1 |= (1 << i);
6560 }
Ishank Jain6290a3c2017-03-21 10:49:39 +05306561
6562 config_param2 |= (mac_addr[0] & 0x000000ff);
6563 config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
6564 config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
6565 config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
6566
6567 config_param3 |= (mac_addr[4] & 0x000000ff);
6568 config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
6569
6570 dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
6571 config_param0, config_param1, config_param2,
Venkata Sharath Chandra Manchalaac863c42018-02-19 22:47:07 -08006572 config_param3, 0, 0, 0);
Karunakar Dasineni93f633c2017-06-02 19:04:46 -07006573
Ishank Jain6290a3c2017-03-21 10:49:39 +05306574}
6575
/* This struct definition will be removed from here
 * once it get added in FW headers*/
struct httstats_cmd_req {
	uint32_t config_param0;	/* forwarded verbatim to dp_h2t_ext_stats_msg_send */
	uint32_t config_param1;	/* forwarded verbatim to dp_h2t_ext_stats_msg_send */
	uint32_t config_param2;	/* forwarded verbatim to dp_h2t_ext_stats_msg_send */
	uint32_t config_param3;	/* forwarded verbatim to dp_h2t_ext_stats_msg_send */
	int cookie;		/* opaque cookie echoed back with the FW response */
	u_int8_t stats_id;	/* HTT ext-stats type to request */
};
6586
6587/*
6588 * dp_get_htt_stats: function to process the httstas request
6589 * @pdev_handle: DP pdev handle
6590 * @data: pointer to request data
6591 * @data_len: length for request data
6592 *
6593 * return: void
6594 */
6595static void
6596dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
6597{
6598 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6599 struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
6600
6601 QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
6602 dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
6603 req->config_param0, req->config_param1,
6604 req->config_param2, req->config_param3,
Venkata Sharath Chandra Manchalaac863c42018-02-19 22:47:07 -08006605 req->cookie, 0, 0);
Chaithanya Garrepalli30927c52017-11-22 14:31:47 +05306606}
Vinay Adella873dc402018-05-28 12:06:34 +05306607
Ishank Jain9f174c62017-03-30 18:37:42 +05306608/*
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306609 * dp_set_pdev_param: function to set parameters in pdev
6610 * @pdev_handle: DP pdev handle
6611 * @param: parameter type to be set
6612 * @val: value of parameter to be set
6613 *
6614 * return: void
6615 */
6616static void dp_set_pdev_param(struct cdp_pdev *pdev_handle,
6617 enum cdp_pdev_param_type param, uint8_t val)
6618{
6619 switch (param) {
Soumya Bhat6fee59c2017-10-31 13:12:37 +05306620 case CDP_CONFIG_DEBUG_SNIFFER:
6621 dp_config_debug_sniffer(pdev_handle, val);
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306622 break;
Vinay Adella873dc402018-05-28 12:06:34 +05306623 case CDP_CONFIG_BPR_ENABLE:
6624 dp_set_bpr_enable(pdev_handle, val);
6625 break;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306626 default:
6627 break;
6628 }
6629}
6630
/*
 * dp_set_vdev_param: function to set parameters in vdev
 * @vdev_handle: DP vdev handle
 * @param: parameter type to be set
 * @val: value of parameter to be set
 *
 * Stores the requested per-vdev control knob, then refreshes the TX
 * address-search flags since several of these knobs affect them.
 *
 * return: void
 */
static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
		enum cdp_vdev_param_type param, uint32_t val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	switch (param) {
	case CDP_ENABLE_WDS:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "wds_enable %d for vdev(%p) id(%d)\n",
			  val, vdev, vdev->vdev_id);
		vdev->wds_enabled = val;
		break;
	case CDP_ENABLE_NAWDS:
		vdev->nawds_enabled = val;
		break;
	case CDP_ENABLE_MCAST_EN:
		/* multicast enhancement (mcast-to-ucast conversion) */
		vdev->mcast_enhancement_en = val;
		break;
	case CDP_ENABLE_PROXYSTA:
		vdev->proxysta_vdev = val;
		break;
	case CDP_UPDATE_TDLS_FLAGS:
		vdev->tdls_link_connected = val;
		break;
	case CDP_CFG_WDS_AGING_TIMER:
		/* val == 0 disables WDS-entry aging; a changed non-zero
		 * value re-arms the shared soc-level aging timer.
		 */
		if (val == 0)
			qdf_timer_stop(&vdev->pdev->soc->wds_aging_timer);
		else if (val != vdev->wds_aging_timer_val)
			qdf_timer_mod(&vdev->pdev->soc->wds_aging_timer, val);

		vdev->wds_aging_timer_val = val;
		break;
	case CDP_ENABLE_AP_BRIDGE:
		/* AP bridging is never allowed on an STA vdev */
		if (wlan_op_mode_sta != vdev->opmode)
			vdev->ap_bridge_enabled = val;
		else
			vdev->ap_bridge_enabled = false;
		break;
	case CDP_ENABLE_CIPHER:
		vdev->sec_type = val;
		break;
	case CDP_ENABLE_QWRAP_ISOLATION:
		vdev->isolation_vdev = val;
		break;
	default:
		break;
	}

	/* several knobs above change how TX descriptors are addressed */
	dp_tx_vdev_update_search_flags(vdev);
}
6687
6688/**
6689 * dp_peer_set_nawds: set nawds bit in peer
6690 * @peer_handle: pointer to peer
6691 * @value: enable/disable nawds
6692 *
6693 * return: void
6694 */
c_cgodavbd5b3c22017-06-07 12:31:40 +05306695static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
Ishank Jain9f174c62017-03-30 18:37:42 +05306696{
6697 struct dp_peer *peer = (struct dp_peer *)peer_handle;
6698 peer->nawds_enabled = value;
6699}
Ishank Jain1e7401c2017-02-17 15:38:39 +05306700
Ishank Jain949674c2017-02-27 17:09:29 +05306701/*
6702 * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
6703 * @vdev_handle: DP_VDEV handle
6704 * @map_id:ID of map that needs to be updated
6705 *
6706 * Return: void
6707 */
6708static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
6709 uint8_t map_id)
6710{
6711 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6712 vdev->dscp_tid_map_id = map_id;
6713 return;
6714}
6715
Prathyusha Guduri184b6402018-02-04 23:01:49 +05306716/*
6717 * dp_txrx_stats_publish(): publish pdev stats into a buffer
6718 * @pdev_handle: DP_PDEV handle
6719 * @buf: to hold pdev_stats
6720 *
6721 * Return: int
6722 */
6723static int
6724dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
6725{
6726 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6727 struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
Prathyusha Guduri43bb0562018-02-12 18:30:54 +05306728 struct cdp_txrx_stats_req req = {0,};
Prathyusha Guduri184b6402018-02-04 23:01:49 +05306729
6730 dp_aggregate_pdev_stats(pdev);
Prathyusha Guduri43bb0562018-02-12 18:30:54 +05306731 req.stats = HTT_DBG_EXT_STATS_PDEV_TX;
6732 req.cookie_val = 1;
6733 dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
Venkata Sharath Chandra Manchalaac863c42018-02-19 22:47:07 -08006734 req.param1, req.param2, req.param3, 0,
6735 req.cookie_val, 0);
6736
Prathyusha Guduri43bb0562018-02-12 18:30:54 +05306737 msleep(DP_MAX_SLEEP_TIME);
Prathyusha Guduri184b6402018-02-04 23:01:49 +05306738
Prathyusha Guduri43bb0562018-02-12 18:30:54 +05306739 req.stats = HTT_DBG_EXT_STATS_PDEV_RX;
6740 req.cookie_val = 1;
6741 dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
Venkata Sharath Chandra Manchalaac863c42018-02-19 22:47:07 -08006742 req.param1, req.param2, req.param3, 0,
6743 req.cookie_val, 0);
6744
Prathyusha Guduri43bb0562018-02-12 18:30:54 +05306745 msleep(DP_MAX_SLEEP_TIME);
Prathyusha Guduri184b6402018-02-04 23:01:49 +05306746 qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
6747
6748 return TXRX_STATS_LEVEL;
6749}
6750
Ishank Jain949674c2017-02-27 17:09:29 +05306751/**
6752 * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
6753 * @pdev: DP_PDEV handle
6754 * @map_id: ID of map that needs to be updated
6755 * @tos: index value in map
6756 * @tid: tid value passed by the user
6757 *
6758 * Return: void
6759 */
6760static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
6761 uint8_t map_id, uint8_t tos, uint8_t tid)
6762{
6763 uint8_t dscp;
6764 struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
6765 dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
6766 pdev->dscp_tid_map[map_id][dscp] = tid;
Om Prakash Tripathi5425c522017-08-18 11:11:34 +05306767 if (map_id < HAL_MAX_HW_DSCP_TID_MAPS)
6768 hal_tx_update_dscp_tid(pdev->soc->hal_soc, tid,
Ishank Jain949674c2017-02-27 17:09:29 +05306769 map_id, dscp);
6770 return;
6771}
6772
Ishank Jain6290a3c2017-03-21 10:49:39 +05306773/**
6774 * dp_fw_stats_process(): Process TxRX FW stats request
6775 * @vdev_handle: DP VDEV handle
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306776 * @req: stats request
Ishank Jain6290a3c2017-03-21 10:49:39 +05306777 *
6778 * return: int
6779 */
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306780static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
6781 struct cdp_txrx_stats_req *req)
Ishank Jain6290a3c2017-03-21 10:49:39 +05306782{
6783 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6784 struct dp_pdev *pdev = NULL;
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306785 uint32_t stats = req->stats;
Venkata Sharath Chandra Manchala4face242018-04-23 11:48:28 -07006786 uint8_t mac_id = req->mac_id;
Ishank Jain6290a3c2017-03-21 10:49:39 +05306787
6788 if (!vdev) {
6789 DP_TRACE(NONE, "VDEV not found");
6790 return 1;
6791 }
Ishank Jain6290a3c2017-03-21 10:49:39 +05306792 pdev = vdev->pdev;
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306793
chenguocda25122018-01-24 17:39:38 +08006794 /*
6795 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
6796 * from param0 to param3 according to below rule:
6797 *
6798 * PARAM:
6799 * - config_param0 : start_offset (stats type)
6800 * - config_param1 : stats bmask from start offset
6801 * - config_param2 : stats bmask from start offset + 32
6802 * - config_param3 : stats bmask from start offset + 64
6803 */
6804 if (req->stats == CDP_TXRX_STATS_0) {
6805 req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
6806 req->param1 = 0xFFFFFFFF;
6807 req->param2 = 0xFFFFFFFF;
6808 req->param3 = 0xFFFFFFFF;
6809 }
6810
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306811 return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
Venkata Sharath Chandra Manchalaac863c42018-02-19 22:47:07 -08006812 req->param1, req->param2, req->param3,
Venkata Sharath Chandra Manchala4face242018-04-23 11:48:28 -07006813 0, 0, mac_id);
Ishank Jain6290a3c2017-03-21 10:49:39 +05306814}
6815
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306816/**
6817 * dp_txrx_stats_request - function to map to firmware and host stats
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08006818 * @vdev: virtual handle
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306819 * @req: stats request
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08006820 *
6821 * Return: integer
6822 */
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306823static int dp_txrx_stats_request(struct cdp_vdev *vdev,
6824 struct cdp_txrx_stats_req *req)
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08006825{
6826 int host_stats;
6827 int fw_stats;
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306828 enum cdp_stats stats;
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08006829
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306830 if (!vdev || !req) {
6831 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6832 "Invalid vdev/req instance");
6833 return 0;
6834 }
Venkata Sharath Chandra Manchalaac863c42018-02-19 22:47:07 -08006835
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306836 stats = req->stats;
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08006837 if (stats >= CDP_TXRX_MAX_STATS)
6838 return 0;
6839
Ishank Jain6290a3c2017-03-21 10:49:39 +05306840 /*
6841 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
6842 * has to be updated if new FW HTT stats added
6843 */
6844 if (stats > CDP_TXRX_STATS_HTT_MAX)
6845 stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08006846 fw_stats = dp_stats_mapping_table[stats][STATS_FW];
6847 host_stats = dp_stats_mapping_table[stats][STATS_HOST];
6848
6849 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6850 "stats: %u fw_stats_type: %d host_stats_type: %d",
6851 stats, fw_stats, host_stats);
6852
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306853 if (fw_stats != TXRX_FW_STATS_INVALID) {
6854 /* update request with FW stats type */
6855 req->stats = fw_stats;
6856 return dp_fw_stats_process(vdev, req);
6857 }
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08006858
Ishank Jain57c42a12017-04-12 10:42:22 +05306859 if ((host_stats != TXRX_HOST_STATS_INVALID) &&
6860 (host_stats <= TXRX_HOST_STATS_MAX))
Venkata Sharath Chandra Manchala0cb31982018-03-30 15:55:26 -07006861 return dp_print_host_stats(vdev, req);
Ishank Jain57c42a12017-04-12 10:42:22 +05306862 else
6863 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6864 "Wrong Input for TxRx Stats");
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08006865
6866 return 0;
6867}
6868
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08006869/*
psimha61b1a362017-07-27 15:45:49 -07006870 * dp_print_napi_stats(): NAPI stats
6871 * @soc - soc handle
6872 */
6873static void dp_print_napi_stats(struct dp_soc *soc)
6874{
6875 hif_print_napi_stats(soc->hif_handle);
6876}
6877
6878/*
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07006879 * dp_print_per_ring_stats(): Packet count per ring
6880 * @soc - soc handle
6881 */
6882static void dp_print_per_ring_stats(struct dp_soc *soc)
6883{
chenguo8107b662017-12-13 16:31:13 +08006884 uint8_t ring;
6885 uint16_t core;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07006886 uint64_t total_packets;
6887
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07006888 DP_TRACE(FATAL, "Reo packets per ring:");
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07006889 for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
6890 total_packets = 0;
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07006891 DP_TRACE(FATAL, "Packets on ring %u:", ring);
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07006892 for (core = 0; core < NR_CPUS; core++) {
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07006893 DP_TRACE(FATAL, "Packets arriving on core %u: %llu",
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07006894 core, soc->stats.rx.ring_packets[core][ring]);
6895 total_packets += soc->stats.rx.ring_packets[core][ring];
6896 }
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07006897 DP_TRACE(FATAL, "Total packets on ring %u: %llu",
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07006898 ring, total_packets);
6899 }
6900}
6901
/*
 * dp_txrx_path_stats() - Function to display dump stats
 * @soc - soc handle
 *
 * Aggregates and prints, for every pdev on the soc: TX ingress/success
 * counters, host-side and hardware-side TX drop reasons, TX/RX
 * per-interrupt histograms, RX delivery counters, REO/RXDMA error
 * counters, and the active wlan_cfg feature flags.
 *
 * return: none
 */
static void dp_txrx_path_stats(struct dp_soc *soc)
{
	uint8_t error_code;
	uint8_t loop_pdev;
	struct dp_pdev *pdev;
	uint8_t i;

	for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {

		pdev = soc->pdev_list[loop_pdev];
		/* fold vdev/peer counters into pdev->stats before printing */
		dp_aggregate_pdev_stats(pdev);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Tx path Statistics:");

		/* --- TX ingress / completion counters --- */
		DP_TRACE(FATAL, "from stack: %u msdus (%llu bytes)",
			pdev->stats.tx_i.rcvd.num,
			pdev->stats.tx_i.rcvd.bytes);
		DP_TRACE(FATAL, "processed from host: %u msdus (%llu bytes)",
			pdev->stats.tx_i.processed.num,
			pdev->stats.tx_i.processed.bytes);
		DP_TRACE(FATAL, "successfully transmitted: %u msdus (%llu bytes)",
			pdev->stats.tx.tx_success.num,
			pdev->stats.tx.tx_success.bytes);

		/* --- TX drops accounted on the host side --- */
		DP_TRACE(FATAL, "Dropped in host:");
		DP_TRACE(FATAL, "Total packets dropped: %u,",
			pdev->stats.tx_i.dropped.dropped_pkt.num);
		DP_TRACE(FATAL, "Descriptor not available: %u",
			pdev->stats.tx_i.dropped.desc_na.num);
		DP_TRACE(FATAL, "Ring full: %u",
			pdev->stats.tx_i.dropped.ring_full);
		DP_TRACE(FATAL, "Enqueue fail: %u",
			pdev->stats.tx_i.dropped.enqueue_fail);
		DP_TRACE(FATAL, "DMA Error: %u",
			pdev->stats.tx_i.dropped.dma_error);

		/* --- TX drops reported by hardware / firmware --- */
		DP_TRACE(FATAL, "Dropped in hardware:");
		DP_TRACE(FATAL, "total packets dropped: %u",
			pdev->stats.tx.tx_failed);
		DP_TRACE(FATAL, "mpdu age out: %u",
			pdev->stats.tx.dropped.age_out);
		DP_TRACE(FATAL, "firmware removed: %u",
			pdev->stats.tx.dropped.fw_rem);
		DP_TRACE(FATAL, "firmware removed tx: %u",
			pdev->stats.tx.dropped.fw_rem_tx);
		DP_TRACE(FATAL, "firmware removed notx %u",
			pdev->stats.tx.dropped.fw_rem_notx);
		DP_TRACE(FATAL, "peer_invalid: %u",
			pdev->soc->stats.tx.tx_invalid_peer.num);


		/* --- TX completion batching histogram --- */
		DP_TRACE(FATAL, "Tx packets sent per interrupt:");
		DP_TRACE(FATAL, "Single Packet: %u",
			pdev->stats.tx_comp_histogram.pkts_1);
		DP_TRACE(FATAL, "2-20 Packets: %u",
			pdev->stats.tx_comp_histogram.pkts_2_20);
		DP_TRACE(FATAL, "21-40 Packets: %u",
			pdev->stats.tx_comp_histogram.pkts_21_40);
		DP_TRACE(FATAL, "41-60 Packets: %u",
			pdev->stats.tx_comp_histogram.pkts_41_60);
		DP_TRACE(FATAL, "61-80 Packets: %u",
			pdev->stats.tx_comp_histogram.pkts_61_80);
		DP_TRACE(FATAL, "81-100 Packets: %u",
			pdev->stats.tx_comp_histogram.pkts_81_100);
		DP_TRACE(FATAL, "101-200 Packets: %u",
			pdev->stats.tx_comp_histogram.pkts_101_200);
		DP_TRACE(FATAL, " 201+ Packets: %u",
			pdev->stats.tx_comp_histogram.pkts_201_plus);

		/* --- RX delivery counters --- */
		DP_TRACE(FATAL, "Rx path statistics");

		DP_TRACE(FATAL, "delivered %u msdus ( %llu bytes),",
			pdev->stats.rx.to_stack.num,
			pdev->stats.rx.to_stack.bytes);
		for (i = 0; i < CDP_MAX_RX_RINGS; i++)
			DP_TRACE(FATAL, "received on reo[%d] %u msdus ( %llu bytes),",
				i, pdev->stats.rx.rcvd_reo[i].num,
				pdev->stats.rx.rcvd_reo[i].bytes);
		DP_TRACE(FATAL, "intra-bss packets %u msdus ( %llu bytes),",
			pdev->stats.rx.intra_bss.pkts.num,
			pdev->stats.rx.intra_bss.pkts.bytes);
		DP_TRACE(FATAL, "intra-bss fails %u msdus ( %llu bytes),",
			pdev->stats.rx.intra_bss.fail.num,
			pdev->stats.rx.intra_bss.fail.bytes);
		DP_TRACE(FATAL, "raw packets %u msdus ( %llu bytes),",
			pdev->stats.rx.raw.num,
			pdev->stats.rx.raw.bytes);
		DP_TRACE(FATAL, "dropped: error %u msdus",
			pdev->stats.rx.err.mic_err);
		DP_TRACE(FATAL, "peer invalid %u",
			pdev->soc->stats.rx.err.rx_invalid_peer.num);

		/* --- REO / RXDMA error counters (zero entries skipped) --- */
		DP_TRACE(FATAL, "Reo Statistics");
		DP_TRACE(FATAL, "rbm error: %u msdus",
			pdev->soc->stats.rx.err.invalid_rbm);
		DP_TRACE(FATAL, "hal ring access fail: %u msdus",
			pdev->soc->stats.rx.err.hal_ring_access_fail);

		for (error_code = 0; error_code < HAL_REO_ERR_MAX;
				error_code++) {
			if (!pdev->soc->stats.rx.err.reo_error[error_code])
				continue;
			DP_TRACE(FATAL, "Reo error number (%u): %u msdus",
				error_code,
				pdev->soc->stats.rx.err.reo_error[error_code]);
		}

		for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
				error_code++) {
			if (!pdev->soc->stats.rx.err.rxdma_error[error_code])
				continue;
			DP_TRACE(FATAL, "Rxdma error number (%u): %u msdus",
				error_code,
				pdev->soc->stats.rx.err
				.rxdma_error[error_code]);
		}

		/* --- RX reap batching histogram --- */
		DP_TRACE(FATAL, "Rx packets reaped per interrupt:");
		DP_TRACE(FATAL, "Single Packet: %u",
			 pdev->stats.rx_ind_histogram.pkts_1);
		DP_TRACE(FATAL, "2-20 Packets: %u",
			 pdev->stats.rx_ind_histogram.pkts_2_20);
		DP_TRACE(FATAL, "21-40 Packets: %u",
			 pdev->stats.rx_ind_histogram.pkts_21_40);
		DP_TRACE(FATAL, "41-60 Packets: %u",
			 pdev->stats.rx_ind_histogram.pkts_41_60);
		DP_TRACE(FATAL, "61-80 Packets: %u",
			 pdev->stats.rx_ind_histogram.pkts_61_80);
		DP_TRACE(FATAL, "81-100 Packets: %u",
			 pdev->stats.rx_ind_histogram.pkts_81_100);
		DP_TRACE(FATAL, "101-200 Packets: %u",
			 pdev->stats.rx_ind_histogram.pkts_101_200);
		DP_TRACE(FATAL, " 201+ Packets: %u",
			 pdev->stats.rx_ind_histogram.pkts_201_plus);

		/* --- compiled-in feature configuration --- */
		DP_TRACE_STATS(ERROR, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
			__func__,
			pdev->soc->wlan_cfg_ctx->tso_enabled,
			pdev->soc->wlan_cfg_ctx->lro_enabled,
			pdev->soc->wlan_cfg_ctx->rx_hash,
			pdev->soc->wlan_cfg_ctx->napi_enabled);
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
		DP_TRACE_STATS(ERROR, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
			__func__,
			pdev->soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold,
			pdev->soc->wlan_cfg_ctx->tx_flow_start_queue_offset);
#endif
	}
}
7057
7058/*
7059 * dp_txrx_dump_stats() - Dump statistics
7060 * @value - Statistics option
7061 */
Mohit Khanna90d7ebd2017-09-12 21:54:21 -07007062static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
7063 enum qdf_stats_verbosity_level level)
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007064{
7065 struct dp_soc *soc =
7066 (struct dp_soc *)psoc;
7067 QDF_STATUS status = QDF_STATUS_SUCCESS;
7068
7069 if (!soc) {
7070 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7071 "%s: soc is NULL", __func__);
7072 return QDF_STATUS_E_INVAL;
7073 }
7074
7075 switch (value) {
7076 case CDP_TXRX_PATH_STATS:
7077 dp_txrx_path_stats(soc);
7078 break;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07007079
7080 case CDP_RX_RING_STATS:
7081 dp_print_per_ring_stats(soc);
7082 break;
7083
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007084 case CDP_TXRX_TSO_STATS:
7085 /* TODO: NOT IMPLEMENTED */
7086 break;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07007087
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007088 case CDP_DUMP_TX_FLOW_POOL_INFO:
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -07007089 cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007090 break;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07007091
psimha61b1a362017-07-27 15:45:49 -07007092 case CDP_DP_NAPI_STATS:
7093 dp_print_napi_stats(soc);
7094 break;
7095
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007096 case CDP_TXRX_DESC_STATS:
7097 /* TODO: NOT IMPLEMENTED */
7098 break;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07007099
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007100 default:
7101 status = QDF_STATUS_E_INVAL;
7102 break;
7103 }
7104
7105 return status;
7106
7107}
7108
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_update_flow_control_parameters() - API to store datapath
 *				flow control config parameters
 * @soc: soc handle
 * @params: ini parameter handle
 *
 * Copies the TX flow-control thresholds into the soc wlan_cfg context.
 *
 * Return: void
 */
static inline
void dp_update_flow_control_parameters(struct dp_soc *soc,
				struct cdp_config_params *params)
{
	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
					params->tx_flow_stop_queue_threshold;
	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
					params->tx_flow_start_queue_offset;
}
#else
/* No-op stub when TX flow control v2 is compiled out */
static inline
void dp_update_flow_control_parameters(struct dp_soc *soc,
				struct cdp_config_params *params)
{
}
#endif
7134
7135/**
7136 * dp_update_config_parameters() - API to store datapath
7137 * config parameters
7138 * @soc: soc handle
7139 * @cfg: ini parameter handle
7140 *
7141 * Return: status
7142 */
7143static
7144QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
7145 struct cdp_config_params *params)
7146{
7147 struct dp_soc *soc = (struct dp_soc *)psoc;
7148
7149 if (!(soc)) {
7150 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7151 "%s: Invalid handle", __func__);
7152 return QDF_STATUS_E_INVAL;
7153 }
7154
7155 soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
7156 soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
7157 soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
7158 soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
7159 params->tcp_udp_checksumoffload;
7160 soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
Venkata Sharath Chandra Manchala3e8add82017-07-10 11:59:54 -07007161 dp_update_flow_control_parameters(soc, params);
7162
7163 return QDF_STATUS_SUCCESS;
7164}
7165
Tallapragada Kalyan2a5fc622017-12-08 21:07:43 +05307166/**
7167 * dp_txrx_set_wds_rx_policy() - API to store datapath
7168 * config parameters
7169 * @vdev_handle - datapath vdev handle
7170 * @cfg: ini parameter handle
7171 *
7172 * Return: status
7173 */
7174#ifdef WDS_VENDOR_EXTENSION
7175void
7176dp_txrx_set_wds_rx_policy(
7177 struct cdp_vdev *vdev_handle,
7178 u_int32_t val)
7179{
7180 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7181 struct dp_peer *peer;
7182 if (vdev->opmode == wlan_op_mode_ap) {
7183 /* for ap, set it on bss_peer */
7184 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
7185 if (peer->bss_peer) {
7186 peer->wds_ecm.wds_rx_filter = 1;
7187 peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
7188 peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
7189 break;
7190 }
7191 }
7192 } else if (vdev->opmode == wlan_op_mode_sta) {
7193 peer = TAILQ_FIRST(&vdev->peer_list);
7194 peer->wds_ecm.wds_rx_filter = 1;
7195 peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
7196 peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
7197 }
7198}
7199
7200/**
7201 * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
7202 *
7203 * @peer_handle - datapath peer handle
7204 * @wds_tx_ucast: policy for unicast transmission
7205 * @wds_tx_mcast: policy for multicast transmission
7206 *
7207 * Return: void
7208 */
7209void
7210dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
7211 int wds_tx_ucast, int wds_tx_mcast)
7212{
7213 struct dp_peer *peer = (struct dp_peer *)peer_handle;
7214 if (wds_tx_ucast || wds_tx_mcast) {
7215 peer->wds_enabled = 1;
7216 peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
7217 peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
7218 } else {
7219 peer->wds_enabled = 0;
7220 peer->wds_ecm.wds_tx_ucast_4addr = 0;
7221 peer->wds_ecm.wds_tx_mcast_4addr = 0;
7222 }
7223
7224 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7225 FL("Policy Update set to :\
7226 peer->wds_enabled %d\
7227 peer->wds_ecm.wds_tx_ucast_4addr %d\
Aditya Sathishded018e2018-07-02 16:25:21 +05307228 peer->wds_ecm.wds_tx_mcast_4addr %d"),
Tallapragada Kalyan2a5fc622017-12-08 21:07:43 +05307229 peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
7230 peer->wds_ecm.wds_tx_mcast_4addr);
7231 return;
7232}
7233#endif
7234
Karunakar Dasinenica792542017-01-16 10:08:58 -08007235static struct cdp_wds_ops dp_ops_wds = {
7236 .vdev_set_wds = dp_vdev_set_wds,
Tallapragada Kalyan2a5fc622017-12-08 21:07:43 +05307237#ifdef WDS_VENDOR_EXTENSION
7238 .txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
7239 .txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
7240#endif
Karunakar Dasinenica792542017-01-16 10:08:58 -08007241};
7242
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05307243/*
Kabilan Kannan60e3b302017-09-07 20:06:17 -07007244 * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
7245 * @vdev_handle - datapath vdev handle
7246 * @callback - callback function
7247 * @ctxt: callback context
7248 *
7249 */
7250static void
7251dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
7252 ol_txrx_data_tx_cb callback, void *ctxt)
7253{
7254 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7255
7256 vdev->tx_non_std_data_callback.func = callback;
7257 vdev->tx_non_std_data_callback.ctxt = ctxt;
7258}
7259
Santosh Anbu2280e862018-01-03 22:25:53 +05307260/**
7261 * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
7262 * @pdev_hdl: datapath pdev handle
7263 *
7264 * Return: opaque pointer to dp txrx handle
7265 */
7266static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
7267{
7268 struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
7269
7270 return pdev->dp_txrx_handle;
7271}
7272
7273/**
7274 * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
7275 * @pdev_hdl: datapath pdev handle
7276 * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
7277 *
7278 * Return: void
7279 */
7280static void
7281dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
7282{
7283 struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
7284
7285 pdev->dp_txrx_handle = dp_txrx_hdl;
7286}
7287
Pamidipati, Vijayd3478ef2018-02-06 23:52:29 +05307288/**
7289 * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
7290 * @soc_handle: datapath soc handle
7291 *
7292 * Return: opaque pointer to external dp (non-core DP)
7293 */
7294static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
7295{
7296 struct dp_soc *soc = (struct dp_soc *)soc_handle;
7297
7298 return soc->external_txrx_handle;
7299}
7300
7301/**
7302 * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
7303 * @soc_handle: datapath soc handle
7304 * @txrx_handle: opaque pointer to external dp (non-core DP)
7305 *
7306 * Return: void
7307 */
7308static void
7309dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
7310{
7311 struct dp_soc *soc = (struct dp_soc *)soc_handle;
7312
7313 soc->external_txrx_handle = txrx_handle;
7314}
7315
Tallapragada Kalyan71c46b92018-03-01 13:17:10 +05307316#ifdef FEATURE_AST
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05307317static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
7318{
7319 struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
7320 struct dp_peer *peer = (struct dp_peer *) peer_hdl;
7321 struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
7322
Aditya Sathish6add3db2018-04-10 19:43:34 +05307323 /*
7324 * For BSS peer, new peer is not created on alloc_node if the
7325 * peer with same address already exists , instead refcnt is
7326 * increased for existing peer. Correspondingly in delete path,
7327 * only refcnt is decreased; and peer is only deleted , when all
7328 * references are deleted. So delete_in_progress should not be set
7329 * for bss_peer, unless only 2 reference remains (peer map reference
7330 * and peer hash table reference).
7331 */
7332 if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2)) {
7333 return;
7334 }
7335
Karunakar Dasineni372647d2018-01-15 22:27:39 -08007336 peer->delete_in_progress = true;
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05307337 dp_peer_delete_ast_entries(soc, peer);
7338}
7339#endif
7340
Soumya Bhatbc719e62018-02-18 18:21:25 +05307341#ifdef ATH_SUPPORT_NAC_RSSI
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05307342/**
7343 * dp_vdev_get_neighbour_rssi(): Store RSSI for configured NAC
7344 * @vdev_hdl: DP vdev handle
7345 * @rssi: rssi value
7346 *
7347 * Return: 0 for success. nonzero for failure.
7348 */
7349QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
7350 char *mac_addr,
7351 uint8_t *rssi)
7352{
7353 struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
7354 struct dp_pdev *pdev = vdev->pdev;
7355 struct dp_neighbour_peer *peer = NULL;
7356 QDF_STATUS status = QDF_STATUS_E_FAILURE;
7357
7358 *rssi = 0;
7359 qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
7360 TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
7361 neighbour_peer_list_elem) {
7362 if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
7363 mac_addr, DP_MAC_ADDR_LEN) == 0) {
7364 *rssi = peer->rssi;
7365 status = QDF_STATUS_SUCCESS;
7366 break;
7367 }
7368 }
7369 qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
7370 return status;
7371}
7372
Soumya Bhatbc719e62018-02-18 18:21:25 +05307373static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
7374 enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
7375 uint8_t chan_num)
7376{
7377
7378 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7379 struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
7380 struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
7381
7382 pdev->nac_rssi_filtering = 1;
7383 /* Store address of NAC (neighbour peer) which will be checked
7384 * against TA of received packets.
7385 */
7386
7387 if (cmd == CDP_NAC_PARAM_ADD) {
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05307388 dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
7389 client_macaddr);
Soumya Bhatbc719e62018-02-18 18:21:25 +05307390 } else if (cmd == CDP_NAC_PARAM_DEL) {
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05307391 dp_update_filter_neighbour_peers(vdev_handle,
7392 DP_NAC_PARAM_DEL,
7393 client_macaddr);
Soumya Bhatbc719e62018-02-18 18:21:25 +05307394 }
7395
7396 if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
7397 soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
Akshay Kosigi1a9c6d12018-04-26 00:54:23 +05307398 ((void *)vdev->pdev->ctrl_pdev,
7399 vdev->vdev_id, cmd, bssid);
Soumya Bhatbc719e62018-02-18 18:21:25 +05307400
7401 return QDF_STATUS_SUCCESS;
7402}
7403#endif
7404
Chaithanya Garrepalli2f572792018-04-11 17:49:28 +05307405static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t *soc_hdl,
7406 uint32_t max_peers)
7407{
7408 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7409
7410 soc->max_peers = max_peers;
7411
7412 qdf_print ("%s max_peers %u\n", __func__, max_peers);
7413
7414 if (dp_peer_find_attach(soc))
7415 return QDF_STATUS_E_FAILURE;
7416
7417 return QDF_STATUS_SUCCESS;
7418}
7419
Sravan Kumar Kairam5a6f5902018-07-04 17:32:24 +05307420/**
7421 * dp_pdev_set_ctrl_pdev() - set ctrl pdev handle in dp pdev
7422 * @dp_pdev: dp pdev handle
7423 * @ctrl_pdev: UMAC ctrl pdev handle
7424 *
7425 * Return: void
7426 */
7427static void dp_pdev_set_ctrl_pdev(struct cdp_pdev *dp_pdev,
7428 struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
7429{
7430 struct dp_pdev *pdev = (struct dp_pdev *)dp_pdev;
7431
7432 pdev->ctrl_pdev = ctrl_pdev;
7433}
7434
Leo Chang5ea93a42016-11-03 12:39:49 -07007435static struct cdp_cmn_ops dp_ops_cmn = {
7436 .txrx_soc_attach_target = dp_soc_attach_target_wifi3,
7437 .txrx_vdev_attach = dp_vdev_attach_wifi3,
7438 .txrx_vdev_detach = dp_vdev_detach_wifi3,
7439 .txrx_pdev_attach = dp_pdev_attach_wifi3,
7440 .txrx_pdev_detach = dp_pdev_detach_wifi3,
Dhanashri Atre6d90ef32016-11-10 16:27:38 -08007441 .txrx_peer_create = dp_peer_create_wifi3,
7442 .txrx_peer_setup = dp_peer_setup_wifi3,
Tallapragada Kalyan71c46b92018-03-01 13:17:10 +05307443#ifdef FEATURE_AST
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05307444 .txrx_peer_teardown = dp_peer_teardown_wifi3,
7445#else
Dhanashri Atre6d90ef32016-11-10 16:27:38 -08007446 .txrx_peer_teardown = NULL,
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05307447#endif
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +05307448 .txrx_peer_add_ast = dp_peer_add_ast_wifi3,
7449 .txrx_peer_del_ast = dp_peer_del_ast_wifi3,
7450 .txrx_peer_update_ast = dp_peer_update_ast_wifi3,
7451 .txrx_peer_ast_hash_find = dp_peer_ast_hash_find_wifi3,
7452 .txrx_peer_ast_get_pdev_id = dp_peer_ast_get_pdev_id_wifi3,
7453 .txrx_peer_ast_get_next_hop = dp_peer_ast_get_next_hop_wifi3,
7454 .txrx_peer_ast_set_type = dp_peer_ast_set_type_wifi3,
Pamidipati, Vijay3756b762018-05-12 11:10:37 +05307455 .txrx_peer_ast_get_type = dp_peer_ast_get_type_wifi3,
Dhanashri Atre6d90ef32016-11-10 16:27:38 -08007456 .txrx_peer_delete = dp_peer_delete_wifi3,
Leo Chang5ea93a42016-11-03 12:39:49 -07007457 .txrx_vdev_register = dp_vdev_register_wifi3,
7458 .txrx_soc_detach = dp_soc_detach_wifi3,
7459 .txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
7460 .txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
7461 .txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
Chaithanya Garrepalli2faa46f2018-04-09 12:34:20 +05307462 .txrx_ath_getstats = dp_get_device_stats,
Karunakar Dasinenied1de122016-08-02 11:57:59 -07007463 .addba_requestprocess = dp_addba_requestprocess_wifi3,
7464 .addba_responsesetup = dp_addba_responsesetup_wifi3,
Sumedh Baikady1c61e062018-02-12 22:25:47 -08007465 .addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
Karunakar Dasinenied1de122016-08-02 11:57:59 -07007466 .delba_process = dp_delba_process_wifi3,
Gyanranjan Hazarika99a58d32017-12-22 21:56:17 -08007467 .set_addba_response = dp_set_addba_response,
Ishank Jain1e7401c2017-02-17 15:38:39 +05307468 .get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
Manikandan Mohane2fa8b72017-03-22 11:18:26 -07007469 .flush_cache_rx_queue = NULL,
Ishank Jain949674c2017-02-27 17:09:29 +05307470 /* TODO: get API's for dscp-tid need to be added*/
7471 .set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
7472 .set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05307473 .txrx_stats_request = dp_txrx_stats_request,
Kai Chen6eca1a62017-01-12 10:17:53 -08007474 .txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
phadiman7821bf82018-02-06 16:03:54 +05307475 .txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
7476 .txrx_set_nac = dp_set_nac,
7477 .txrx_get_tx_pending = dp_get_tx_pending,
7478 .txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
7479 .txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007480 .display_stats = dp_txrx_dump_stats,
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05307481 .txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
7482 .txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
psimhac983d7e2017-07-26 15:20:07 -07007483 .txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
Venkateswara Swamy Bandarua95b3242017-05-19 20:20:30 +05307484 .txrx_intr_detach = dp_soc_interrupt_detach,
Gurumoorthi Gnanasambandhaned4bcf82017-05-24 00:10:59 +05307485 .set_pn_check = dp_set_pn_check_wifi3,
Venkata Sharath Chandra Manchala3e8add82017-07-10 11:59:54 -07007486 .update_config_parameters = dp_update_config_parameters,
Leo Chang5ea93a42016-11-03 12:39:49 -07007487 /* TODO: Add other functions */
Santosh Anbu2280e862018-01-03 22:25:53 +05307488 .txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
7489 .get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
7490 .set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
Pamidipati, Vijayd3478ef2018-02-06 23:52:29 +05307491 .get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
7492 .set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
7493 .tx_send = dp_tx_send,
Ruchi, Agrawal89219d92018-02-26 16:43:06 +05307494 .txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
7495 .txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
7496 .txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
Chaithanya Garrepalli2f572792018-04-11 17:49:28 +05307497 .txrx_peer_map_attach = dp_peer_map_attach_wifi3,
Sravan Kumar Kairam5a6f5902018-07-04 17:32:24 +05307498 .txrx_pdev_set_ctrl_pdev = dp_pdev_set_ctrl_pdev,
Leo Chang5ea93a42016-11-03 12:39:49 -07007499};
7500
/* Control-path operations: per-vdev/pdev parameter setters, WDI event
 * subscription, mesh/NAC/SON hooks (feature-gated by ifdefs below).
 */
static struct cdp_ctrl_ops dp_ops_ctrl = {
	.txrx_peer_authorize = dp_peer_authorize,
#ifdef QCA_SUPPORT_SON
	.txrx_set_inact_params = dp_set_inact_params,
	.txrx_start_inact_timer = dp_start_inact_timer,
	.txrx_set_overload = dp_set_overload,
	.txrx_peer_is_inact = dp_peer_is_inact,
	.txrx_mark_peer_inact = dp_mark_peer_inact,
#endif
	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
#ifdef MESH_MODE_SUPPORT
	.txrx_set_mesh_mode = dp_peer_set_mesh_mode,
	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
#endif
	.txrx_set_vdev_param = dp_set_vdev_param,
	.txrx_peer_set_nawds = dp_peer_set_nawds,
	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
	.txrx_update_filter_neighbour_peers =
		dp_update_filter_neighbour_peers,
	.txrx_get_sec_type = dp_get_sec_type,
	/* TODO: Add other functions */
	.txrx_wdi_event_sub = dp_wdi_event_sub,
	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
#ifdef WDI_EVENT_ENABLE
	.txrx_get_pldev = dp_get_pldev,
#endif
	.txrx_set_pdev_param = dp_set_pdev_param,
#ifdef ATH_SUPPORT_NAC_RSSI
	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
#endif
	.set_key = dp_set_michael_key,
};
7537
/* Multicast-enhancement operations; populated only with ATH_SUPPORT_IQUE */
static struct cdp_me_ops dp_ops_me = {
#ifdef ATH_SUPPORT_IQUE
	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
#endif
};
7545
/* Monitor-mode filter get/set operations; the setters are intentionally
 * NULL (not implemented for this datapath).
 */
static struct cdp_mon_ops dp_ops_mon = {
	.txrx_monitor_set_filter_ucast_data = NULL,
	.txrx_monitor_set_filter_mcast_data = NULL,
	.txrx_monitor_set_filter_non_data = NULL,
	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
	/* Added support for HK advance filter */
	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
};
7557
/* Host statistics operations (per-peer/FW/HTT stats and enhanced stats) */
static struct cdp_host_stats_ops dp_ops_host_stats = {
	.txrx_per_peer_stats = dp_get_host_peer_stats,
	.get_fw_peer_stats = dp_get_fw_peer_stats,
	.get_htt_stats = dp_get_htt_stats,
	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
	.txrx_stats_publish = dp_txrx_stats_publish,
	/* TODO */
};
7567
Leo Chang5ea93a42016-11-03 12:39:49 -07007568static struct cdp_raw_ops dp_ops_raw = {
7569 /* TODO */
7570};
7571
#ifdef CONFIG_WIN
/* Peer-flow operations (WIN builds only); not implemented yet */
static struct cdp_pflow_ops dp_ops_pflow = {
	/* TODO */
};
#endif /* CONFIG_WIN */
7577
Yue Ma245b47b2017-02-21 16:35:31 -08007578#ifdef FEATURE_RUNTIME_PM
7579/**
7580 * dp_runtime_suspend() - ensure DP is ready to runtime suspend
7581 * @opaque_pdev: DP pdev context
7582 *
7583 * DP is ready to runtime suspend if there are no pending TX packets.
7584 *
7585 * Return: QDF_STATUS
7586 */
7587static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
7588{
7589 struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7590 struct dp_soc *soc = pdev->soc;
7591
7592 /* Call DP TX flow control API to check if there is any
7593 pending packets */
7594
7595 if (soc->intr_mode == DP_INTR_POLL)
7596 qdf_timer_stop(&soc->int_timer);
7597
7598 return QDF_STATUS_SUCCESS;
7599}
7600
7601/**
7602 * dp_runtime_resume() - ensure DP is ready to runtime resume
7603 * @opaque_pdev: DP pdev context
7604 *
7605 * Resume DP for runtime PM.
7606 *
7607 * Return: QDF_STATUS
7608 */
7609static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
7610{
7611 struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7612 struct dp_soc *soc = pdev->soc;
7613 void *hal_srng;
7614 int i;
7615
7616 if (soc->intr_mode == DP_INTR_POLL)
7617 qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7618
7619 for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
7620 hal_srng = soc->tcl_data_ring[i].hal_srng;
7621 if (hal_srng) {
7622 /* We actually only need to acquire the lock */
7623 hal_srng_access_start(soc->hal_soc, hal_srng);
7624 /* Update SRC ring head pointer for HW to send
7625 all pending packets */
7626 hal_srng_access_end(soc->hal_soc, hal_srng);
7627 }
7628 }
7629
7630 return QDF_STATUS_SUCCESS;
7631}
7632#endif /* FEATURE_RUNTIME_PM */
7633
Dustin Brown4a3b96b2017-05-10 15:49:38 -07007634static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
7635{
7636 struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7637 struct dp_soc *soc = pdev->soc;
7638
psimhac983d7e2017-07-26 15:20:07 -07007639 if (soc->intr_mode == DP_INTR_POLL)
7640 qdf_timer_stop(&soc->int_timer);
Dustin Brown4a3b96b2017-05-10 15:49:38 -07007641
7642 return QDF_STATUS_SUCCESS;
7643}
7644
7645static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
7646{
7647 struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7648 struct dp_soc *soc = pdev->soc;
7649
psimhac983d7e2017-07-26 15:20:07 -07007650 if (soc->intr_mode == DP_INTR_POLL)
7651 qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
Dustin Brown4a3b96b2017-05-10 15:49:38 -07007652
7653 return QDF_STATUS_SUCCESS;
7654}
Dustin Brown4a3b96b2017-05-10 15:49:38 -07007655
Nandha Kishore Easwaranfd7832e2016-11-20 18:22:48 +05307656#ifndef CONFIG_WIN
Leo Chang5ea93a42016-11-03 12:39:49 -07007657static struct cdp_misc_ops dp_ops_misc = {
Kabilan Kannan60e3b302017-09-07 20:06:17 -07007658 .tx_non_std = dp_tx_non_std,
Leo Chang5ea93a42016-11-03 12:39:49 -07007659 .get_opmode = dp_get_opmode,
Dustin Brown4a3b96b2017-05-10 15:49:38 -07007660#ifdef FEATURE_RUNTIME_PM
Yue Ma245b47b2017-02-21 16:35:31 -08007661 .runtime_suspend = dp_runtime_suspend,
7662 .runtime_resume = dp_runtime_resume,
7663#endif /* FEATURE_RUNTIME_PM */
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07007664 .pkt_log_init = dp_pkt_log_init,
7665 .pkt_log_con_service = dp_pkt_log_con_service,
Leo Chang5ea93a42016-11-03 12:39:49 -07007666};
7667
/* TX flow-control (pool map/unmap, pause cb); only with V2 flow control */
static struct cdp_flowctl_ops dp_ops_flowctl = {
	/* WIFI 3.0 DP implement as required. */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	.flow_pool_map_handler = dp_tx_flow_pool_map,
	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
	.register_pause_cb = dp_txrx_register_pause_cb,
	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
};
7677
/* Legacy flow-control operations; not implemented for WIFI 3.0 DP */
static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
7681
Yun Parkfde6b9e2017-06-26 17:13:11 -07007682#ifdef IPA_OFFLOAD
Leo Chang5ea93a42016-11-03 12:39:49 -07007683static struct cdp_ipa_ops dp_ops_ipa = {
Yun Parkfde6b9e2017-06-26 17:13:11 -07007684 .ipa_get_resource = dp_ipa_get_resource,
7685 .ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
7686 .ipa_op_response = dp_ipa_op_response,
7687 .ipa_register_op_cb = dp_ipa_register_op_cb,
7688 .ipa_get_stat = dp_ipa_get_stat,
7689 .ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
7690 .ipa_enable_autonomy = dp_ipa_enable_autonomy,
7691 .ipa_disable_autonomy = dp_ipa_disable_autonomy,
7692 .ipa_setup = dp_ipa_setup,
7693 .ipa_cleanup = dp_ipa_cleanup,
7694 .ipa_setup_iface = dp_ipa_setup_iface,
7695 .ipa_cleanup_iface = dp_ipa_cleanup_iface,
7696 .ipa_enable_pipes = dp_ipa_enable_pipes,
7697 .ipa_disable_pipes = dp_ipa_disable_pipes,
7698 .ipa_set_perf_level = dp_ipa_set_perf_level
Leo Chang5ea93a42016-11-03 12:39:49 -07007699};
Yun Parkfde6b9e2017-06-26 17:13:11 -07007700#endif
Leo Chang5ea93a42016-11-03 12:39:49 -07007701
Leo Chang5ea93a42016-11-03 12:39:49 -07007702static struct cdp_bus_ops dp_ops_bus = {
Dustin Brown4a3b96b2017-05-10 15:49:38 -07007703 .bus_suspend = dp_bus_suspend,
7704 .bus_resume = dp_bus_resume
Leo Chang5ea93a42016-11-03 12:39:49 -07007705};
7706
/* OCB operations; not implemented for WIFI 3.0 DP */
static struct cdp_ocb_ops dp_ops_ocb = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
7710
7711
/* Thermal-throttle operations; not implemented for WIFI 3.0 DP */
static struct cdp_throttle_ops dp_ops_throttle = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
7715
/* Mobile-stats operations; not implemented for WIFI 3.0 DP */
static struct cdp_mob_stats_ops dp_ops_mob_stats = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
7719
/* Config operations; not implemented for WIFI 3.0 DP */
static struct cdp_cfg_ops dp_ops_cfg = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
7723
Mohit Khannaadfe9082017-11-17 13:11:17 -08007724/*
7725 * dp_wrapper_peer_get_ref_by_addr - wrapper function to get to peer
7726 * @dev: physical device instance
7727 * @peer_mac_addr: peer mac address
7728 * @local_id: local id for the peer
7729 * @debug_id: to track enum peer access
7730
7731 * Return: peer instance pointer
7732 */
7733static inline void *
7734dp_wrapper_peer_get_ref_by_addr(struct cdp_pdev *dev, u8 *peer_mac_addr,
7735 u8 *local_id,
7736 enum peer_debug_id_type debug_id)
7737{
7738 /*
7739 * Currently this function does not implement the "get ref"
7740 * functionality and is mapped to dp_find_peer_by_addr which does not
7741 * increment the peer ref count. So the peer state is uncertain after
7742 * calling this API. The functionality needs to be implemented.
7743 * Accordingly the corresponding release_ref function is NULL.
7744 */
7745 return dp_find_peer_by_addr(dev, peer_mac_addr, local_id);
7746}
7747
Leo Chang5ea93a42016-11-03 12:39:49 -07007748static struct cdp_peer_ops dp_ops_peer = {
7749 .register_peer = dp_register_peer,
7750 .clear_peer = dp_clear_peer,
7751 .find_peer_by_addr = dp_find_peer_by_addr,
7752 .find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
Mohit Khannaadfe9082017-11-17 13:11:17 -08007753 .peer_get_ref_by_addr = dp_wrapper_peer_get_ref_by_addr,
7754 .peer_release_ref = NULL,
Leo Chang5ea93a42016-11-03 12:39:49 -07007755 .local_peer_id = dp_local_peer_id,
7756 .peer_find_by_local_id = dp_peer_find_by_local_id,
7757 .peer_state_update = dp_peer_state_update,
7758 .get_vdevid = dp_get_vdevid,
Yun Parkfde6b9e2017-06-26 17:13:11 -07007759 .get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
Leo Chang5ea93a42016-11-03 12:39:49 -07007760 .peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
7761 .get_vdev_for_peer = dp_get_vdev_for_peer,
7762 .get_peer_state = dp_get_peer_state,
Alok Kumarfcdb1852018-07-05 18:55:48 +05307763 .get_last_mgmt_timestamp = dp_get_last_mgmt_timestamp,
7764 .update_last_mgmt_timestamp = dp_update_last_mgmt_timestamp,
Leo Chang5ea93a42016-11-03 12:39:49 -07007765};
Nandha Kishore Easwaranfd7832e2016-11-20 18:22:48 +05307766#endif
Leo Chang5ea93a42016-11-03 12:39:49 -07007767
/* Top-level CDP operations table, installed into soc->cdp_soc.ops at
 * soc attach; aggregates all the per-category ops tables above.
 */
static struct cdp_ops dp_txrx_ops = {
	.cmn_drv_ops = &dp_ops_cmn,
	.ctrl_ops = &dp_ops_ctrl,
	.me_ops = &dp_ops_me,
	.mon_ops = &dp_ops_mon,
	.host_stats_ops = &dp_ops_host_stats,
	.wds_ops = &dp_ops_wds,
	.raw_ops = &dp_ops_raw,
#ifdef CONFIG_WIN
	.pflow_ops = &dp_ops_pflow,
#endif /* CONFIG_WIN */
#ifndef CONFIG_WIN
	.misc_ops = &dp_ops_misc,
	.cfg_ops = &dp_ops_cfg,
	.flowctl_ops = &dp_ops_flowctl,
	.l_flowctl_ops = &dp_ops_l_flowctl,
#ifdef IPA_OFFLOAD
	.ipa_ops = &dp_ops_ipa,
#endif
	.bus_ops = &dp_ops_bus,
	.ocb_ops = &dp_ops_ocb,
	.peer_ops = &dp_ops_peer,
	.throttle_ops = &dp_ops_throttle,
	.mob_stats_ops = &dp_ops_mob_stats,
#endif
};
7794
7795/*
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05307796 * dp_soc_set_txrx_ring_map()
7797 * @dp_soc: DP handler for soc
7798 *
7799 * Return: Void
7800 */
7801static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
7802{
7803 uint32_t i;
7804 for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
7805 soc->tx_ring_map[i] = dp_cpu_ring_map[DP_DEFAULT_MAP][i];
7806 }
7807}
7808
7809/*
Leo Chang5ea93a42016-11-03 12:39:49 -07007810 * dp_soc_attach_wifi3() - Attach txrx SOC
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05307811 * @ctrl_psoc: Opaque SOC handle from control plane
Leo Chang5ea93a42016-11-03 12:39:49 -07007812 * @htc_handle: Opaque HTC handle
7813 * @hif_handle: Opaque HIF handle
7814 * @qdf_osdev: QDF device
7815 *
7816 * Return: DP SOC handle on success, NULL on failure
7817 */
Jeff Johnson07718572017-01-10 13:57:15 -08007818/*
7819 * Local prototype added to temporarily address warning caused by
7820 * -Wmissing-prototypes. A more correct solution, namely to expose
7821 * a prototype in an appropriate header file, will come later.
7822 */
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05307823void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
Jeff Johnson07718572017-01-10 13:57:15 -08007824 HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05307825 struct ol_if_ops *ol_ops);
7826void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
Leo Chang5ea93a42016-11-03 12:39:49 -07007827 HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05307828 struct ol_if_ops *ol_ops)
Leo Chang5ea93a42016-11-03 12:39:49 -07007829{
7830 struct dp_soc *soc = qdf_mem_malloc(sizeof(*soc));
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05307831 int target_type;
Leo Chang5ea93a42016-11-03 12:39:49 -07007832
7833 if (!soc) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05307834 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7835 FL("DP SOC memory allocation failed"));
Leo Chang5ea93a42016-11-03 12:39:49 -07007836 goto fail0;
7837 }
7838
7839 soc->cdp_soc.ops = &dp_txrx_ops;
7840 soc->cdp_soc.ol_ops = ol_ops;
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05307841 soc->ctrl_psoc = ctrl_psoc;
Leo Chang5ea93a42016-11-03 12:39:49 -07007842 soc->osdev = qdf_osdev;
7843 soc->hif_handle = hif_handle;
7844
7845 soc->hal_soc = hif_get_hal_handle(hif_handle);
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05307846 soc->htt_handle = htt_soc_attach(soc, ctrl_psoc, htc_handle,
Leo Chang5ea93a42016-11-03 12:39:49 -07007847 soc->hal_soc, qdf_osdev);
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05307848 if (!soc->htt_handle) {
7849 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7850 FL("HTT attach failed"));
Leo Chang5ea93a42016-11-03 12:39:49 -07007851 goto fail1;
7852 }
7853
Vivek126db5d2018-07-25 22:05:04 +05307854 soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
Leo Chang5ea93a42016-11-03 12:39:49 -07007855 if (!soc->wlan_cfg_ctx) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05307856 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Vivek126db5d2018-07-25 22:05:04 +05307857 FL("wlan_cfg_soc_attach failed"));
Leo Chang5ea93a42016-11-03 12:39:49 -07007858 goto fail2;
7859 }
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05307860 target_type = hal_get_target_type(soc->hal_soc);
7861 switch (target_type) {
7862 case TARGET_TYPE_QCA6290:
Balamurugan Mahalingam96d2d412018-07-10 10:11:58 +05307863#ifdef QCA_WIFI_QCA6390
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05307864 case TARGET_TYPE_QCA6390:
Balamurugan Mahalingam96d2d412018-07-10 10:11:58 +05307865#endif
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05307866 wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
7867 REO_DST_RING_SIZE_QCA6290);
7868 break;
7869 case TARGET_TYPE_QCA8074:
7870 wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
7871 REO_DST_RING_SIZE_QCA8074);
7872 break;
7873 default:
7874 qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
7875 qdf_assert_always(0);
7876 break;
7877 }
Pamidipati, Vijay6b0d2a82017-06-09 04:46:32 +05307878
Vivek126db5d2018-07-25 22:05:04 +05307879 wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
7880 cfg_get(ctrl_psoc, CFG_DP_RX_HASH));
Ruchi, Agrawal34721392017-11-13 18:02:09 +05307881 soc->cce_disable = false;
Tallapragada Kalyan4c183b82017-09-13 23:48:14 +05307882
Pamidipati, Vijay6b0d2a82017-06-09 04:46:32 +05307883 if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05307884 int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
Pamidipati, Vijay6b0d2a82017-06-09 04:46:32 +05307885 CDP_CFG_MAX_PEER_ID);
7886
7887 if (ret != -EINVAL) {
7888 wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
7889 }
Ruchi, Agrawal34721392017-11-13 18:02:09 +05307890
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05307891 ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
Ruchi, Agrawal34721392017-11-13 18:02:09 +05307892 CDP_CFG_CCE_DISABLE);
Ruchi, Agrawalf279a4a2018-02-26 18:12:44 +05307893 if (ret == 1)
Ruchi, Agrawal34721392017-11-13 18:02:09 +05307894 soc->cce_disable = true;
Pamidipati, Vijay6b0d2a82017-06-09 04:46:32 +05307895 }
7896
Leo Chang5ea93a42016-11-03 12:39:49 -07007897 qdf_spinlock_create(&soc->peer_ref_mutex);
7898
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08007899 qdf_spinlock_create(&soc->reo_desc_freelist_lock);
7900 qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
7901
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05307902 /* fill the tx/rx cpu ring map*/
7903 dp_soc_set_txrx_ring_map(soc);
Om Prakash Tripathi12126822017-08-03 10:21:24 +05307904
7905 qdf_spinlock_create(&soc->htt_stats.lock);
7906 /* initialize work queue for stats processing */
7907 qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
7908
Stephan Raj Ignatious Durairaje5dd51c2018-02-09 16:39:12 +05307909 /*Initialize inactivity timer for wifison */
7910 dp_init_inact_timer(soc);
7911
Leo Chang5ea93a42016-11-03 12:39:49 -07007912 return (void *)soc;
7913
7914fail2:
7915 htt_soc_detach(soc->htt_handle);
7916fail1:
7917 qdf_mem_free(soc);
7918fail0:
7919 return NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07007920}
Keyur Parekhfad6d082017-05-07 08:54:47 -07007921
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08007922/*
7923 * dp_get_pdev_for_mac_id() - Return pdev for mac_id
7924 *
7925 * @soc: handle to DP soc
7926 * @mac_id: MAC id
7927 *
7928 * Return: Return pdev corresponding to MAC
7929 */
7930void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
7931{
7932 if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
7933 return soc->pdev_list[mac_id];
7934
7935 /* Typically for MCL as there only 1 PDEV*/
7936 return soc->pdev_list[0];
7937}
7938
7939/*
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07007940 * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
7941 * @soc: DP SoC context
7942 * @max_mac_rings: No of MAC rings
7943 *
7944 * Return: None
7945 */
7946static
7947void dp_is_hw_dbs_enable(struct dp_soc *soc,
7948 int *max_mac_rings)
7949{
7950 bool dbs_enable = false;
7951 if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
7952 dbs_enable = soc->cdp_soc.ol_ops->
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05307953 is_hw_dbs_2x2_capable(soc->ctrl_psoc);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07007954
7955 *max_mac_rings = (dbs_enable)?(*max_mac_rings):1;
7956}
7957
Keyur Parekhfad6d082017-05-07 08:54:47 -07007958/*
7959* dp_set_pktlog_wifi3() - attach txrx vdev
7960* @pdev: Datapath PDEV handle
7961* @event: which event's notifications are being subscribed to
7962* @enable: WDI event subscribe or not. (True or False)
7963*
7964* Return: Success, NULL on failure
7965*/
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07007966#ifdef WDI_EVENT_ENABLE
Keyur Parekhfad6d082017-05-07 08:54:47 -07007967int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
7968 bool enable)
7969{
7970 struct dp_soc *soc = pdev->soc;
7971 struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07007972 int max_mac_rings = wlan_cfg_get_num_mac_rings
7973 (pdev->wlan_cfg_ctx);
7974 uint8_t mac_id = 0;
7975
7976 dp_is_hw_dbs_enable(soc, &max_mac_rings);
7977
7978 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Aditya Sathishded018e2018-07-02 16:25:21 +05307979 FL("Max_mac_rings %d "),
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07007980 max_mac_rings);
Keyur Parekhfad6d082017-05-07 08:54:47 -07007981
7982 if (enable) {
7983 switch (event) {
7984 case WDI_EVENT_RX_DESC:
7985 if (pdev->monitor_vdev) {
7986 /* Nothing needs to be done if monitor mode is
7987 * enabled
7988 */
7989 return 0;
7990 }
7991 if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
7992 pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
7993 htt_tlv_filter.mpdu_start = 1;
7994 htt_tlv_filter.msdu_start = 1;
7995 htt_tlv_filter.msdu_end = 1;
7996 htt_tlv_filter.mpdu_end = 1;
7997 htt_tlv_filter.packet_header = 1;
7998 htt_tlv_filter.attention = 1;
7999 htt_tlv_filter.ppdu_start = 1;
8000 htt_tlv_filter.ppdu_end = 1;
8001 htt_tlv_filter.ppdu_end_user_stats = 1;
8002 htt_tlv_filter.ppdu_end_user_stats_ext = 1;
8003 htt_tlv_filter.ppdu_end_status_done = 1;
8004 htt_tlv_filter.enable_fp = 1;
nobeljd124b742017-10-16 11:59:12 -07008005 htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
8006 htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
8007 htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
8008 htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
8009 htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
8010 htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
Keyur Parekhfad6d082017-05-07 08:54:47 -07008011
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008012 for (mac_id = 0; mac_id < max_mac_rings;
8013 mac_id++) {
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008014 int mac_for_pdev =
8015 dp_get_mac_id_for_pdev(mac_id,
8016 pdev->pdev_id);
8017
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008018 htt_h2t_rx_ring_cfg(soc->htt_handle,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008019 mac_for_pdev,
8020 pdev->rxdma_mon_status_ring[mac_id]
8021 .hal_srng,
8022 RXDMA_MONITOR_STATUS,
8023 RX_BUFFER_SIZE,
8024 &htt_tlv_filter);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008025
8026 }
8027
8028 if (soc->reap_timer_init)
8029 qdf_timer_mod(&soc->mon_reap_timer,
8030 DP_INTR_POLL_TIMER_MS);
Keyur Parekhfad6d082017-05-07 08:54:47 -07008031 }
8032 break;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008033
Keyur Parekhfad6d082017-05-07 08:54:47 -07008034 case WDI_EVENT_LITE_RX:
8035 if (pdev->monitor_vdev) {
8036 /* Nothing needs to be done if monitor mode is
8037 * enabled
8038 */
8039 return 0;
8040 }
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008041
Keyur Parekhfad6d082017-05-07 08:54:47 -07008042 if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
8043 pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008044
Keyur Parekhfad6d082017-05-07 08:54:47 -07008045 htt_tlv_filter.ppdu_start = 1;
8046 htt_tlv_filter.ppdu_end = 1;
8047 htt_tlv_filter.ppdu_end_user_stats = 1;
8048 htt_tlv_filter.ppdu_end_user_stats_ext = 1;
8049 htt_tlv_filter.ppdu_end_status_done = 1;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008050 htt_tlv_filter.mpdu_start = 1;
Keyur Parekhfad6d082017-05-07 08:54:47 -07008051 htt_tlv_filter.enable_fp = 1;
nobeljd124b742017-10-16 11:59:12 -07008052 htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
8053 htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
8054 htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
8055 htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
8056 htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
8057 htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
Keyur Parekhfad6d082017-05-07 08:54:47 -07008058
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008059 for (mac_id = 0; mac_id < max_mac_rings;
8060 mac_id++) {
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008061 int mac_for_pdev =
8062 dp_get_mac_id_for_pdev(mac_id,
8063 pdev->pdev_id);
8064
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008065 htt_h2t_rx_ring_cfg(soc->htt_handle,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008066 mac_for_pdev,
8067 pdev->rxdma_mon_status_ring[mac_id]
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008068 .hal_srng,
Keyur Parekhfad6d082017-05-07 08:54:47 -07008069 RXDMA_MONITOR_STATUS,
8070 RX_BUFFER_SIZE_PKTLOG_LITE,
8071 &htt_tlv_filter);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008072 }
8073
8074 if (soc->reap_timer_init)
8075 qdf_timer_mod(&soc->mon_reap_timer,
8076 DP_INTR_POLL_TIMER_MS);
Keyur Parekhfad6d082017-05-07 08:54:47 -07008077 }
8078 break;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008079
Keyur Parekhdb0fa142017-07-13 19:40:22 -07008080 case WDI_EVENT_LITE_T2H:
8081 if (pdev->monitor_vdev) {
8082 /* Nothing needs to be done if monitor mode is
8083 * enabled
8084 */
8085 return 0;
8086 }
Venkata Sharath Chandra Manchala0ad4fda2018-03-01 13:45:46 -08008087
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008088 for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008089 int mac_for_pdev = dp_get_mac_id_for_pdev(
8090 mac_id, pdev->pdev_id);
8091
Soumya Bhat0d6245c2018-02-08 21:02:57 +05308092 pdev->pktlog_ppdu_stats = true;
Venkata Sharath Chandra Manchala0ad4fda2018-03-01 13:45:46 -08008093 dp_h2t_cfg_stats_msg_send(pdev,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008094 DP_PPDU_TXLITE_STATS_BITMASK_CFG,
8095 mac_for_pdev);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008096 }
Keyur Parekhdb0fa142017-07-13 19:40:22 -07008097 break;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008098
Keyur Parekhfad6d082017-05-07 08:54:47 -07008099 default:
8100 /* Nothing needs to be done for other pktlog types */
8101 break;
8102 }
8103 } else {
8104 switch (event) {
8105 case WDI_EVENT_RX_DESC:
8106 case WDI_EVENT_LITE_RX:
8107 if (pdev->monitor_vdev) {
8108 /* Nothing needs to be done if monitor mode is
8109 * enabled
8110 */
8111 return 0;
8112 }
8113 if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
8114 pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008115
8116 for (mac_id = 0; mac_id < max_mac_rings;
8117 mac_id++) {
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008118 int mac_for_pdev =
8119 dp_get_mac_id_for_pdev(mac_id,
8120 pdev->pdev_id);
8121
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008122 htt_h2t_rx_ring_cfg(soc->htt_handle,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008123 mac_for_pdev,
8124 pdev->rxdma_mon_status_ring[mac_id]
8125 .hal_srng,
8126 RXDMA_MONITOR_STATUS,
8127 RX_BUFFER_SIZE,
8128 &htt_tlv_filter);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008129 }
8130
8131 if (soc->reap_timer_init)
8132 qdf_timer_stop(&soc->mon_reap_timer);
Keyur Parekhfad6d082017-05-07 08:54:47 -07008133 }
8134 break;
Keyur Parekhdb0fa142017-07-13 19:40:22 -07008135 case WDI_EVENT_LITE_T2H:
8136 if (pdev->monitor_vdev) {
8137 /* Nothing needs to be done if monitor mode is
8138 * enabled
8139 */
8140 return 0;
8141 }
8142 /* To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW
8143 * passing value 0. Once these macros will define in htt
8144 * header file will use proper macros
8145 */
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008146 for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008147 int mac_for_pdev =
8148 dp_get_mac_id_for_pdev(mac_id,
8149 pdev->pdev_id);
8150
Soumya Bhat0d6245c2018-02-08 21:02:57 +05308151 pdev->pktlog_ppdu_stats = false;
8152 if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
8153 dp_h2t_cfg_stats_msg_send(pdev, 0,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008154 mac_for_pdev);
Soumya Bhat0d6245c2018-02-08 21:02:57 +05308155 } else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
8156 dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008157 mac_for_pdev);
Soumya Bhat0d6245c2018-02-08 21:02:57 +05308158 } else if (pdev->enhanced_stats_en) {
8159 dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008160 mac_for_pdev);
Soumya Bhat0d6245c2018-02-08 21:02:57 +05308161 }
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008162 }
8163
Keyur Parekhdb0fa142017-07-13 19:40:22 -07008164 break;
Keyur Parekhfad6d082017-05-07 08:54:47 -07008165 default:
8166 /* Nothing needs to be done for other pktlog types */
8167 break;
8168 }
8169 }
8170 return 0;
8171}
8172#endif