/*
 * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "hal_api_mon.h"
#include "dp_rx_mon.h"
#include "dp_internal.h"
#include "qdf_mem.h"   /* qdf_mem_malloc,free */

#include "htt.h"

#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif

#ifdef WLAN_RX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#else
static QDF_STATUS
dp_rx_handle_enh_capture(struct dp_soc *soc, struct dp_pdev *pdev,
			 struct hal_rx_ppdu_info *ppdu_info)
{
	return QDF_STATUS_SUCCESS;
}

static void
dp_rx_mon_enh_capture_process(struct dp_pdev *pdev, uint32_t tlv_status,
			      qdf_nbuf_t status_nbuf,
			      struct hal_rx_ppdu_info *ppdu_info,
			      bool *nbuf_used)
{
}
#endif

#ifdef FEATURE_PERPKT_INFO
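/*
 * dp_rx_populate_rx_rssi_chain() - Populate rx rssi chain
 * @ppdu_info: ppdu info structure from ppdu ring
 * @cdp_rx_ppdu: rx ppdu indication structure
 *
 * Copy the per-chain, per-bandwidth rssi snapshot from the ppdu info
 * into the cdp rx ppdu indication; invalid entries are reported as 0.
 */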
static inline void
dp_rx_populate_rx_rssi_chain(struct hal_rx_ppdu_info *ppdu_info,
			     struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
	uint8_t chain, bw;
	int8_t rssi;

	for (chain = 0; chain < SS_COUNT; chain++) {
		for (bw = 0; bw < MAX_BW; bw++) {
			rssi = ppdu_info->rx_status.rssi_chain[chain][bw];
			if (rssi != DP_RSSI_INVAL)
				cdp_rx_ppdu->rssi_chain[chain][bw] = rssi;
			else
				cdp_rx_ppdu->rssi_chain[chain][bw] = 0;
		}
	}
}

/*
 * dp_rx_populate_su_evm_details() - Populate su evm info
 * @ppdu_info: ppdu info structure from ppdu ring
 * @cdp_rx_ppdu: rx ppdu indication structure
 */
static inline void
dp_rx_populate_su_evm_details(struct hal_rx_ppdu_info *ppdu_info,
			      struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
	uint8_t pilot_evm;
	uint8_t nss_count;
	uint8_t pilot_count;

	nss_count = ppdu_info->evm_info.nss_count;
	pilot_count = ppdu_info->evm_info.pilot_count;

	if ((nss_count * pilot_count) > DP_RX_MAX_SU_EVM_COUNT) {
		qdf_err("pilot evm count is more than expected");
		return;
	}
	cdp_rx_ppdu->evm_info.pilot_count = pilot_count;
	cdp_rx_ppdu->evm_info.nss_count = nss_count;

	/* Populate evm for pilot_evm = nss_count*pilot_count */
	for (pilot_evm = 0; pilot_evm < nss_count * pilot_count; pilot_evm++) {
		cdp_rx_ppdu->evm_info.pilot_evm[pilot_evm] =
			ppdu_info->evm_info.pilot_evm[pilot_evm];
	}
}

/**
 * dp_rx_inc_rusize_cnt() - increment pdev stats based on RU size
 * @pdev: pdev ctx
 * @rx_user_status: mon rx user status
 *
 * Return: true if the received frame is a data frame, false otherwise
 */
static inline bool
dp_rx_inc_rusize_cnt(struct dp_pdev *pdev,
		     struct mon_rx_user_status *rx_user_status)
{
	uint32_t ru_size;
	bool is_data;

	ru_size = rx_user_status->dl_ofdma_ru_size;

	if (dp_is_subtype_data(rx_user_status->frame_control)) {
		DP_STATS_INC(pdev,
			     ul_ofdma.data_rx_ru_size[ru_size], 1);
		is_data = true;
	} else {
		DP_STATS_INC(pdev,
			     ul_ofdma.nondata_rx_ru_size[ru_size], 1);
		is_data = false;
	}

	return is_data;
}

/**
 * dp_rx_populate_cdp_indication_ppdu_user() - Populate per user cdp indication
 * @pdev: pdev ctx
 * @ppdu_info: ppdu info structure from ppdu ring
 * @ppdu_nbuf: qdf nbuf abstraction for linux skb
 *
 * Return: none
 */
static inline void
dp_rx_populate_cdp_indication_ppdu_user(struct dp_pdev *pdev,
					struct hal_rx_ppdu_info *ppdu_info,
					qdf_nbuf_t ppdu_nbuf)
{
	struct dp_peer *peer;
	struct dp_soc *soc = pdev->soc;
	struct dp_ast_entry *ast_entry;
	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;
	uint32_t ast_index;
	int i;
	struct mon_rx_user_status *rx_user_status;
	struct cdp_rx_stats_ppdu_user *rx_stats_peruser;
	int ru_size;
	bool is_data = false;
	uint32_t num_users;

	cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)ppdu_nbuf->data;

	num_users = ppdu_info->com_info.num_users;
	for (i = 0; i < num_users; i++) {
		if (i > OFDMA_NUM_USERS)
			return;

		rx_user_status = &ppdu_info->rx_user_status[i];
		rx_stats_peruser = &cdp_rx_ppdu->user[i];

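		/* Resolve the peer through the AST index reported
		 * for this user; bail out on any invalid entry
		 */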
		ast_index = rx_user_status->ast_index;
		if (ast_index >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
			rx_stats_peruser->peer_id = HTT_INVALID_PEER;
			return;
		}

		ast_entry = soc->ast_table[ast_index];
		if (!ast_entry) {
			rx_stats_peruser->peer_id = HTT_INVALID_PEER;
			return;
		}

		peer = ast_entry->peer;
		if (!peer || peer->peer_ids[0] == HTT_INVALID_PEER) {
			rx_stats_peruser->peer_id = HTT_INVALID_PEER;
			return;
		}

		rx_stats_peruser->first_data_seq_ctrl =
			rx_user_status->first_data_seq_ctrl;

		rx_stats_peruser->frame_control =
			rx_user_status->frame_control;

		rx_stats_peruser->tcp_msdu_count =
			rx_user_status->tcp_msdu_count;
		rx_stats_peruser->udp_msdu_count =
			rx_user_status->udp_msdu_count;
		rx_stats_peruser->other_msdu_count =
			rx_user_status->other_msdu_count;
		rx_stats_peruser->preamble_type =
			rx_user_status->preamble_type;
		rx_stats_peruser->mpdu_cnt_fcs_ok =
			rx_user_status->mpdu_cnt_fcs_ok;
		rx_stats_peruser->mpdu_cnt_fcs_err =
			rx_user_status->mpdu_cnt_fcs_err;
		qdf_mem_copy(&rx_stats_peruser->mpdu_fcs_ok_bitmap,
			     &rx_user_status->mpdu_fcs_ok_bitmap,
			     HAL_RX_NUM_WORDS_PER_PPDU_BITMAP *
			     sizeof(rx_user_status->mpdu_fcs_ok_bitmap[0]));
		rx_stats_peruser->mpdu_ok_byte_count =
			rx_user_status->mpdu_ok_byte_count;
		rx_stats_peruser->mpdu_err_byte_count =
			rx_user_status->mpdu_err_byte_count;

		cdp_rx_ppdu->num_mpdu += rx_user_status->mpdu_cnt_fcs_ok;
		cdp_rx_ppdu->num_msdu +=
			(rx_stats_peruser->tcp_msdu_count +
			 rx_stats_peruser->udp_msdu_count +
			 rx_stats_peruser->other_msdu_count);
		rx_stats_peruser->retries =
			CDP_FC_IS_RETRY_SET(rx_stats_peruser->frame_control) ?
			rx_stats_peruser->mpdu_cnt_fcs_ok : 0;

		if (rx_stats_peruser->mpdu_cnt_fcs_ok > 1)
			rx_stats_peruser->is_ampdu = 1;
		else
			rx_stats_peruser->is_ampdu = 0;

		rx_stats_peruser->tid = ppdu_info->rx_status.tid;

		qdf_mem_copy(rx_stats_peruser->mac_addr,
			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
		rx_stats_peruser->peer_id = peer->peer_ids[0];
		cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id;
		rx_stats_peruser->vdev_id = peer->vdev->vdev_id;

		if (cdp_rx_ppdu->u.ppdu_type == HAL_RX_TYPE_MU_OFDMA) {
			if (rx_user_status->ofdma_info_valid) {
				rx_stats_peruser->nss = rx_user_status->nss;
				rx_stats_peruser->mcs = rx_user_status->mcs;
				rx_stats_peruser->ofdma_info_valid =
					rx_user_status->ofdma_info_valid;
				rx_stats_peruser->ofdma_ru_start_index =
					rx_user_status->dl_ofdma_ru_start_index;
				rx_stats_peruser->ofdma_ru_width =
					rx_user_status->dl_ofdma_ru_width;
				rx_stats_peruser->user_index = i;
				ru_size = rx_user_status->dl_ofdma_ru_size;
				/*
				 * max RU size will be equal to
				 * HTT_UL_OFDMA_V0_RU_SIZE_RU_996x2
				 */
				if (ru_size >= OFDMA_NUM_RU_SIZE) {
					dp_err("invalid ru_size %d\n",
					       ru_size);
					return;
				}
				is_data = dp_rx_inc_rusize_cnt(pdev,
							       rx_user_status);
			} else {
				rx_stats_peruser->ofdma_info_valid = 0;
			}
			if (is_data) {
				/* counter to get number of MU OFDMA */
				pdev->stats.ul_ofdma.data_rx_ppdu++;
				pdev->stats.ul_ofdma.data_users[num_users]++;
			}
		}
	}
}

/**
* dp_rx_populate_cdp_indication_ppdu() - Populate cdp rx indication structure
* @pdev: pdev ctx
* @ppdu_info: ppdu info structure from ppdu ring
* @ppdu_nbuf: qdf nbuf abstraction for linux skb
*
* Return: none
*/
static inline void
dp_rx_populate_cdp_indication_ppdu(struct dp_pdev *pdev,
				   struct hal_rx_ppdu_info *ppdu_info,
				   qdf_nbuf_t ppdu_nbuf)
{
	struct dp_peer *peer;
	struct dp_soc *soc = pdev->soc;
	struct dp_ast_entry *ast_entry;
	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;
	uint32_t ast_index;
	uint32_t i;

	cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)ppdu_nbuf->data;

	cdp_rx_ppdu->first_data_seq_ctrl =
		ppdu_info->rx_status.first_data_seq_ctrl;
	cdp_rx_ppdu->frame_ctrl =
		ppdu_info->rx_status.frame_control;
	cdp_rx_ppdu->tcp_msdu_count = ppdu_info->rx_status.tcp_msdu_count;
	cdp_rx_ppdu->udp_msdu_count = ppdu_info->rx_status.udp_msdu_count;
	cdp_rx_ppdu->other_msdu_count = ppdu_info->rx_status.other_msdu_count;
	cdp_rx_ppdu->u.preamble = ppdu_info->rx_status.preamble_type;
	/* num mpdu is consolidated and added together in num user loop */
	cdp_rx_ppdu->num_mpdu = ppdu_info->com_info.mpdu_cnt_fcs_ok;
	/* num msdu is consolidated and added together in num user loop */
	cdp_rx_ppdu->num_msdu = (cdp_rx_ppdu->tcp_msdu_count +
				 cdp_rx_ppdu->udp_msdu_count +
				 cdp_rx_ppdu->other_msdu_count);

	cdp_rx_ppdu->retries = CDP_FC_IS_RETRY_SET(cdp_rx_ppdu->frame_ctrl) ?
		ppdu_info->com_info.mpdu_cnt_fcs_ok : 0;

	if (ppdu_info->com_info.mpdu_cnt_fcs_ok > 1)
		cdp_rx_ppdu->is_ampdu = 1;
	else
		cdp_rx_ppdu->is_ampdu = 0;
	cdp_rx_ppdu->tid = ppdu_info->rx_status.tid;

	ast_index = ppdu_info->rx_status.ast_index;
	if (ast_index >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
		cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
		return;
	}

	ast_entry = soc->ast_table[ast_index];
	if (!ast_entry) {
		cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
		return;
	}
	peer = ast_entry->peer;
	if (!peer || peer->peer_ids[0] == HTT_INVALID_PEER) {
		cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
		return;
	}

	qdf_mem_copy(cdp_rx_ppdu->mac_addr,
		     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
	cdp_rx_ppdu->peer_id = peer->peer_ids[0];
	cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id;

	cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id;
	cdp_rx_ppdu->length = ppdu_info->rx_status.ppdu_len;
	cdp_rx_ppdu->duration = ppdu_info->rx_status.duration;
	cdp_rx_ppdu->u.bw = ppdu_info->rx_status.bw;
	cdp_rx_ppdu->u.nss = ppdu_info->rx_status.nss;
	cdp_rx_ppdu->u.mcs = ppdu_info->rx_status.mcs;
	if ((ppdu_info->rx_status.sgi == VHT_SGI_NYSM) &&
	    (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC))
		cdp_rx_ppdu->u.gi = CDP_SGI_0_4_US;
	else
		cdp_rx_ppdu->u.gi = ppdu_info->rx_status.sgi;
	cdp_rx_ppdu->u.ldpc = ppdu_info->rx_status.ldpc;
	cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type;
	cdp_rx_ppdu->u.ltf_size = (ppdu_info->rx_status.he_data5 >>
				   QDF_MON_STATUS_HE_LTF_SIZE_SHIFT) & 0x3;
	cdp_rx_ppdu->rssi = ppdu_info->rx_status.rssi_comb;
	cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft;
	cdp_rx_ppdu->channel = ppdu_info->rx_status.chan_num;
	cdp_rx_ppdu->beamformed = ppdu_info->rx_status.beamformed;
	cdp_rx_ppdu->num_bytes = ppdu_info->rx_status.ppdu_len;
	cdp_rx_ppdu->lsig_a = ppdu_info->rx_status.rate;
	cdp_rx_ppdu->u.ltf_size = ppdu_info->rx_status.ltf_size;

	dp_rx_populate_rx_rssi_chain(ppdu_info, cdp_rx_ppdu);
	dp_rx_populate_su_evm_details(ppdu_info, cdp_rx_ppdu);
	cdp_rx_ppdu->rx_antenna = ppdu_info->rx_status.rx_antenna;

	cdp_rx_ppdu->nf = ppdu_info->rx_status.chan_noise_floor;
	for (i = 0; i < MAX_CHAIN; i++)
		cdp_rx_ppdu->per_chain_rssi[i] = ppdu_info->rx_status.rssi[i];

	cdp_rx_ppdu->is_mcast_bcast = ppdu_info->nac_info.mcast_bcast;

	cdp_rx_ppdu->num_users = ppdu_info->com_info.num_users;

	cdp_rx_ppdu->num_mpdu = 0;
	cdp_rx_ppdu->num_msdu = 0;

	dp_rx_populate_cdp_indication_ppdu_user(pdev, ppdu_info, ppdu_nbuf);
}
#else
static inline void
dp_rx_populate_cdp_indication_ppdu(struct dp_pdev *pdev,
				   struct hal_rx_ppdu_info *ppdu_info,
				   qdf_nbuf_t ppdu_nbuf)
{
}
#endif
/**
 * dp_rx_stats_update() - Update per-peer statistics
 * @soc: Datapath SOC handle
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 *
 * Return: None
 */
#ifdef FEATURE_PERPKT_INFO
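/*
 * dp_rx_rate_stats_update() - Update per-peer rate statistics
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 *
 * Derive the rate in kbps from gi/mcs/nss/preamble/bw and update the
 * last and average rx rate for the peer and its vdev.
 */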
static inline void dp_rx_rate_stats_update(struct dp_peer *peer,
					   struct cdp_rx_indication_ppdu *ppdu)
{
	uint32_t ratekbps = 0;
	uint32_t ppdu_rx_rate = 0;
	uint32_t nss = 0;
	uint32_t rix;
	uint16_t ratecode;

	if (!peer || !ppdu)
		return;

	if (ppdu->u.nss == 0)
		nss = 0;
	else
		nss = ppdu->u.nss - 1;

	ratekbps = dp_getrateindex(ppdu->u.gi,
				   ppdu->u.mcs,
				   nss,
				   ppdu->u.preamble,
				   ppdu->u.bw,
				   &rix,
				   &ratecode);

	if (!ratekbps)
		return;

	ppdu->rix = rix;
	DP_STATS_UPD(peer, rx.last_rx_rate, ratekbps);
	dp_ath_rate_lpf(peer->stats.rx.avg_rx_rate, ratekbps);
	ppdu_rx_rate = dp_ath_rate_out(peer->stats.rx.avg_rx_rate);
	DP_STATS_UPD(peer, rx.rnd_avg_rx_rate, ppdu_rx_rate);
	ppdu->rx_ratekbps = ratekbps;
	ppdu->rx_ratecode = ratecode;

	if (peer->vdev)
		peer->vdev->stats.rx.last_rx_rate = ratekbps;
}

static void dp_rx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
			       struct cdp_rx_indication_ppdu *ppdu)
{
	struct dp_soc *soc = NULL;
	uint8_t mcs, preamble, ac = 0;
	uint16_t num_msdu;
	bool is_invalid_peer = false;

	mcs = ppdu->u.mcs;
	preamble = ppdu->u.preamble;
	num_msdu = ppdu->num_msdu;

	if (pdev)
		soc = pdev->soc;
	else
		return;

	if (!peer) {
		is_invalid_peer = true;
		peer = pdev->invalid_peer;
	}

	if (!soc || soc->process_rx_status)
		return;

	DP_STATS_UPD(peer, rx.rssi, ppdu->rssi);
	if (peer->stats.rx.avg_rssi == INVALID_RSSI)
		peer->stats.rx.avg_rssi = ppdu->rssi;
	else
		peer->stats.rx.avg_rssi =
			DP_GET_AVG_RSSI(peer->stats.rx.avg_rssi, ppdu->rssi);

	if ((preamble == DOT11_A) || (preamble == DOT11_B))
		ppdu->u.nss = 1;

	if (ppdu->u.nss)
		DP_STATS_INC(peer, rx.nss[ppdu->u.nss - 1], num_msdu);

	DP_STATS_INC(peer, rx.sgi_count[ppdu->u.gi], num_msdu);
	DP_STATS_INC(peer, rx.bw[ppdu->u.bw], num_msdu);
	DP_STATS_INC(peer, rx.reception_type[ppdu->u.ppdu_type], num_msdu);
	DP_STATS_INCC(peer, rx.ampdu_cnt, num_msdu, ppdu->is_ampdu);
	DP_STATS_INCC(peer, rx.non_ampdu_cnt, num_msdu, !(ppdu->is_ampdu));
	DP_STATS_UPD(peer, rx.rx_rate, mcs);
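	/*
	 * Per-preamble MCS histogram: an MCS that is out of range for the
	 * given preamble type is accounted in the MAX_MCS - 1 bucket.
	 */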
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	/*
	 * If invalid TID, it could be a non-qos frame, hence do not update
	 * any AC counters
	 */
	ac = TID_TO_WME_AC(ppdu->tid);
	if (ppdu->tid != HAL_TID_INVALID)
		DP_STATS_INC(peer, rx.wme_ac_type[ac], num_msdu);
	dp_peer_stats_notify(pdev, peer);
	DP_STATS_UPD(peer, rx.last_rssi, ppdu->rssi);

	if (is_invalid_peer)
		return;

	if (dp_is_subtype_data(ppdu->frame_ctrl))
		dp_rx_rate_stats_update(peer, ppdu);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
			     &peer->stats, ppdu->peer_id,
			     UPDATE_PEER_STATS, pdev->pdev_id);
#endif
}
#endif

/*
 * dp_rx_get_fcs_ok_msdu() - get ppdu status buffer containing fcs_ok msdu
 * @pdev: pdev object
 * @ppdu_info: ppdu info object
 *
 * Return: nbuf
 */
static inline qdf_nbuf_t
dp_rx_get_fcs_ok_msdu(struct dp_pdev *pdev,
		      struct hal_rx_ppdu_info *ppdu_info)
{
	uint16_t mpdu_fcs_ok;
	qdf_nbuf_t status_nbuf = NULL;
	unsigned long *fcs_ok_bitmap;

	if (qdf_unlikely(qdf_nbuf_is_queue_empty(&pdev->rx_ppdu_buf_q)))
		return NULL;

	/* Obtain the fcs_ok passed index from the bitmap;
	 * this index is used to get the first msdu payload with fcs passed
	 */
	fcs_ok_bitmap =
		(unsigned long *)&ppdu_info->com_info.mpdu_fcs_ok_bitmap[0];
	mpdu_fcs_ok = qdf_find_first_bit(fcs_ok_bitmap,
					 HAL_RX_MAX_MPDU);

	if (qdf_unlikely(mpdu_fcs_ok >= HAL_RX_MAX_MPDU))
		goto end;

	if (qdf_unlikely(!ppdu_info->ppdu_msdu_info[mpdu_fcs_ok].nbuf))
		goto end;

	/* Get status buffer by indexing mpdu_fcs_ok index
	 * containing first msdu payload with fcs passed
	 * and clone the buffer
	 */
	status_nbuf = ppdu_info->ppdu_msdu_info[mpdu_fcs_ok].nbuf;
	ppdu_info->ppdu_msdu_info[mpdu_fcs_ok].nbuf = NULL;

	/* Take a reference on the status nbuf as this nbuf is to be
	 * freed by the upper layer.
	 */
	qdf_nbuf_ref(status_nbuf);
	ppdu_info->fcs_ok_msdu_info.first_msdu_payload =
		ppdu_info->ppdu_msdu_info[mpdu_fcs_ok].first_msdu_payload;
	ppdu_info->fcs_ok_msdu_info.payload_len =
		ppdu_info->ppdu_msdu_info[mpdu_fcs_ok].payload_len;

end:
	/* Free the ppdu status buffer queue */
	qdf_nbuf_queue_free(&pdev->rx_ppdu_buf_q);

	qdf_mem_zero(&ppdu_info->ppdu_msdu_info,
		     (ppdu_info->com_info.mpdu_cnt_fcs_ok +
		      ppdu_info->com_info.mpdu_cnt_fcs_err)
		     * sizeof(struct hal_rx_msdu_payload_info));
	return status_nbuf;
}

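/*
 * dp_rx_handle_ppdu_status_buf() - Queue a ppdu status buffer for this PPDU
 * @pdev: pdev object
 * @ppdu_info: ppdu info object
 * @status_nbuf: rx ppdu status buffer
 *
 * Status buffers are accumulated in rx_ppdu_buf_q until the PPDU
 * completes; the oldest entry is dropped once the queue length
 * exceeds HAL_RX_MAX_MPDU.
 */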
static inline void
dp_rx_handle_ppdu_status_buf(struct dp_pdev *pdev,
			     struct hal_rx_ppdu_info *ppdu_info,
			     qdf_nbuf_t status_nbuf)
{
	qdf_nbuf_t dropnbuf;

	if (qdf_nbuf_queue_len(&pdev->rx_ppdu_buf_q) >
	    HAL_RX_MAX_MPDU) {
		dropnbuf = qdf_nbuf_queue_remove(&pdev->rx_ppdu_buf_q);
		qdf_nbuf_free(dropnbuf);
	}
	qdf_nbuf_queue_add(&pdev->rx_ppdu_buf_q, status_nbuf);
}

/**
 * dp_rx_handle_mcopy_mode() - Allocate and deliver first MSDU payload
 * @soc: core txrx main context
 * @pdev: pdev structure
 * @ppdu_info: structure for rx ppdu ring
 *
 * Return: QDF_STATUS_SUCCESS - If nbuf to be freed by caller
 *         QDF_STATUS_E_ALREADY - If nbuf not to be freed by caller
 */
#ifdef FEATURE_PERPKT_INFO
static inline QDF_STATUS
dp_rx_handle_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info, qdf_nbuf_t nbuf)
{
	uint8_t size = 0;
	struct ieee80211_frame *wh;
	uint32_t *nbuf_data;

	if (!ppdu_info->fcs_ok_msdu_info.first_msdu_payload)
		return QDF_STATUS_SUCCESS;

	if (pdev->m_copy_id.rx_ppdu_id == ppdu_info->com_info.ppdu_id)
		return QDF_STATUS_SUCCESS;

	pdev->m_copy_id.rx_ppdu_id = ppdu_info->com_info.ppdu_id;

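	/* Add 4 bytes to get to the 802.11 frame that follows phy_ppdu_id */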
	wh = (struct ieee80211_frame *)
		(ppdu_info->fcs_ok_msdu_info.first_msdu_payload + 4);

	size = (ppdu_info->fcs_ok_msdu_info.first_msdu_payload -
		qdf_nbuf_data(nbuf));

	if (qdf_nbuf_pull_head(nbuf, size) == NULL)
		return QDF_STATUS_SUCCESS;

	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
	     IEEE80211_FC0_TYPE_MGT) ||
	    ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
	     IEEE80211_FC0_TYPE_CTL)) {
		return QDF_STATUS_SUCCESS;
	}

	ppdu_info->fcs_ok_msdu_info.first_msdu_payload = NULL;
	nbuf_data = (uint32_t *)qdf_nbuf_data(nbuf);
	*nbuf_data = pdev->ppdu_info.com_info.ppdu_id;
	/* only retain RX MSDU payload in the skb */
	qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) -
			   ppdu_info->fcs_ok_msdu_info.payload_len);
	dp_wdi_event_handler(WDI_EVENT_RX_DATA, soc,
			     nbuf, HTT_INVALID_PEER, WDI_NO_VAL, pdev->pdev_id);
	return QDF_STATUS_E_ALREADY;
}
#else
static inline QDF_STATUS
dp_rx_handle_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info, qdf_nbuf_t nbuf)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#ifdef FEATURE_PERPKT_INFO
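/*
 * dp_rx_process_mcopy_mode() - Process rx ppdu status buffers in m_copy mode
 * @soc: core txrx main context
 * @pdev: pdev structure
 * @ppdu_info: structure for rx ppdu ring
 * @tlv_status: TLV status returned by the status TLV parser
 * @status_nbuf: qdf nbuf abstraction for linux skb
 *
 * Return: none
 */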
static inline void
dp_rx_process_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			 struct hal_rx_ppdu_info *ppdu_info,
			 uint32_t tlv_status,
			 qdf_nbuf_t status_nbuf)
{
	QDF_STATUS mcopy_status;

	if (qdf_unlikely(!ppdu_info->com_info.mpdu_cnt)) {
		qdf_nbuf_free(status_nbuf);
		return;
	}
	/* Add buffers to queue until we receive
	 * HAL_TLV_STATUS_PPDU_DONE
	 */
	dp_rx_handle_ppdu_status_buf(pdev, ppdu_info, status_nbuf);

	/* If tlv_status is PPDU_DONE, process rx_ppdu_buf_q
	 * and deliver the fcs_ok msdu buffer
	 */
	if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
		if (qdf_unlikely(ppdu_info->com_info.mpdu_cnt !=
				 (ppdu_info->com_info.mpdu_cnt_fcs_ok +
				  ppdu_info->com_info.mpdu_cnt_fcs_err))) {
			qdf_nbuf_queue_free(&pdev->rx_ppdu_buf_q);
			return;
		}
		/* Get rx ppdu status buffer having fcs ok msdu */
		status_nbuf = dp_rx_get_fcs_ok_msdu(pdev, ppdu_info);
		if (status_nbuf) {
			mcopy_status = dp_rx_handle_mcopy_mode(soc, pdev,
							       ppdu_info,
							       status_nbuf);
			if (mcopy_status == QDF_STATUS_SUCCESS)
				qdf_nbuf_free(status_nbuf);
		}
	}
}
#else
static inline void
dp_rx_process_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			 struct hal_rx_ppdu_info *ppdu_info,
			 uint32_t tlv_status,
			 qdf_nbuf_t status_nbuf)
{
}
#endif

/**
 * dp_rx_handle_smart_mesh_mode() - Deliver header for smart mesh
 * @soc: Datapath SOC handle
 * @pdev: Datapath PDEV handle
 * @ppdu_info: Structure for rx ppdu info
 * @nbuf: Qdf nbuf abstraction for linux skb
 *
 * Return: 0 on success, 1 on failure
 */
static inline int
dp_rx_handle_smart_mesh_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			     struct hal_rx_ppdu_info *ppdu_info,
			     qdf_nbuf_t nbuf)
{
	uint8_t size = 0;

	if (!pdev->monitor_vdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "[%s]:[%d] Monitor vdev is NULL !!",
			  __func__, __LINE__);
		return 1;
	}
	if (!ppdu_info->msdu_info.first_msdu_payload) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "[%s]:[%d] First msdu payload not present",
			  __func__, __LINE__);
		return 1;
	}

	/* Adding 4 bytes to get to start of 802.11 frame after phy_ppdu_id */
	size = (ppdu_info->msdu_info.first_msdu_payload -
		qdf_nbuf_data(nbuf)) + 4;
	ppdu_info->msdu_info.first_msdu_payload = NULL;

	if (qdf_nbuf_pull_head(nbuf, size) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "[%s]:[%d] No header present",
			  __func__, __LINE__);
		return 1;
	}

	/* Only retain RX MSDU payload in the skb */
	qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) -
			   ppdu_info->msdu_info.payload_len);
	if (!qdf_nbuf_update_radiotap(&pdev->ppdu_info.rx_status, nbuf,
				      qdf_nbuf_headroom(nbuf))) {
		DP_STATS_INC(pdev, dropped.mon_radiotap_update_err, 1);
		return 1;
	}

	pdev->monitor_vdev->osif_rx_mon(pdev->monitor_vdev->osif_vdev,
					nbuf, NULL);
	pdev->ppdu_info.rx_status.monitor_direct_used = 0;
	return 0;
}

/**
* dp_rx_handle_ppdu_stats() - Allocate and deliver ppdu stats to cdp layer
* @soc: core txrx main context
* @pdev: pdev structure
* @ppdu_info: structure for rx ppdu ring
*
* Return: none
*/
#ifdef FEATURE_PERPKT_INFO
static inline void
dp_rx_handle_ppdu_stats(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info)
{
	qdf_nbuf_t ppdu_nbuf;
	struct dp_peer *peer;
	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;

	/*
	 * Do not allocate if fcs error,
	 * ast idx invalid / fctl invalid
	 */
	if (ppdu_info->com_info.mpdu_cnt_fcs_ok == 0)
		return;

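	/*
	 * For smart monitor, if the frame is headed to the DS and its
	 * transmitter matches a registered neighbour peer, record the rssi.
	 */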
	if (ppdu_info->nac_info.fc_valid &&
	    ppdu_info->nac_info.to_ds_flag &&
	    ppdu_info->nac_info.mac_addr2_valid) {
		struct dp_neighbour_peer *peer = NULL;
		uint8_t rssi = ppdu_info->rx_status.rssi_comb;

		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
		if (pdev->neighbour_peers_added) {
			TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
				      neighbour_peer_list_elem) {
				if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr,
						 &ppdu_info->nac_info.mac_addr2,
						 QDF_MAC_ADDR_SIZE)) {
					peer->rssi = rssi;
					break;
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
	}

	/* need not generate wdi event when mcopy and
	 * enhanced stats are not enabled
	 */
	if (!pdev->mcopy_mode && !pdev->enhanced_stats_en)
		return;

	if (!pdev->mcopy_mode) {
		if (!ppdu_info->rx_status.frame_control_info_valid)
			return;

		if (ppdu_info->rx_status.ast_index == HAL_AST_IDX_INVALID)
			return;
	}
	ppdu_nbuf = qdf_nbuf_alloc(soc->osdev,
				   sizeof(struct cdp_rx_indication_ppdu),
				   0, 0, FALSE);
	if (ppdu_nbuf) {
		dp_rx_populate_cdp_indication_ppdu(pdev, ppdu_info, ppdu_nbuf);
		qdf_nbuf_put_tail(ppdu_nbuf,
				  sizeof(struct cdp_rx_indication_ppdu));
		cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)ppdu_nbuf->data;
		peer = dp_peer_find_by_id(soc, cdp_rx_ppdu->peer_id);
		if (peer) {
			cdp_rx_ppdu->cookie = (void *)peer->wlanstats_ctx;
			dp_rx_stats_update(pdev, peer, cdp_rx_ppdu);
			dp_peer_unref_del_find_by_id(peer);
		}
		if (cdp_rx_ppdu->peer_id != HTT_INVALID_PEER) {
			dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC,
					     soc, ppdu_nbuf,
					     cdp_rx_ppdu->peer_id,
					     WDI_NO_VAL, pdev->pdev_id);
		} else if (pdev->mcopy_mode) {
			dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, soc,
					     ppdu_nbuf, HTT_INVALID_PEER,
					     WDI_NO_VAL, pdev->pdev_id);
		} else {
			qdf_nbuf_free(ppdu_nbuf);
		}
	}
}
#else
static inline void
dp_rx_handle_ppdu_stats(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info)
{
}
#endif

/**
* dp_rx_process_peer_based_pktlog() - Process Rx pktlog if peer based
* filtering enabled
* @soc: core txrx main context
* @ppdu_info: Structure for rx ppdu info
* @status_nbuf: Qdf nbuf abstraction for linux skb
* @mac_id: mac_id/pdev_id correspondingly for MCL and WIN
*
* Return: none
*/
static inline void
dp_rx_process_peer_based_pktlog(struct dp_soc *soc,
				struct hal_rx_ppdu_info *ppdu_info,
				qdf_nbuf_t status_nbuf, uint32_t mac_id)
{
	struct dp_peer *peer;
	struct dp_ast_entry *ast_entry;
	uint32_t ast_index;

	ast_index = ppdu_info->rx_status.ast_index;
	if (ast_index < (WLAN_UMAC_PSOC_MAX_PEERS * 2)) {
		ast_entry = soc->ast_table[ast_index];
		if (ast_entry) {
			peer = ast_entry->peer;
			if (peer && (peer->peer_ids[0] != HTT_INVALID_PEER)) {
				if (peer->peer_based_pktlog_filter) {
					dp_wdi_event_handler(
							WDI_EVENT_RX_DESC, soc,
							status_nbuf,
							peer->peer_ids[0],
							WDI_NO_VAL, mac_id);
				}
			}
		}
	}
}

#if defined(HTT_UL_OFDMA_USER_INFO_V0_W0_VALID_M)
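/*
 * dp_rx_ul_ofdma_ru_size_to_width() - Convert ru_size enum to RU width
 * @ru_size: HTT_UL_OFDMA_V0_RU_SIZE_RU_* encoding from UL OFDMA user info
 * @ru_width: RU width expressed in multiples of a 26-tone RU
 */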
static inline void
dp_rx_ul_ofdma_ru_size_to_width(
	uint32_t ru_size,
	uint32_t *ru_width)
{
	uint32_t width;

	width = 0;
	switch (ru_size) {
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_26:
		width = 1;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_52:
		width = 2;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_106:
		width = 4;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_242:
		width = 9;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_484:
		width = 18;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_996:
		width = 37;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_996x2:
		width = 74;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "RU size to width convert err");
		break;
	}
	*ru_width = width;
}

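/*
 * dp_rx_mon_handle_ofdma_info() - Populate per-user rx status from the
 * UL OFDMA user info words
 * @ppdu_info: ppdu info structure from ppdu ring
 *
 * For MU OFDMA receptions, extract mcs, nss, RU start index and RU width
 * from the valid version-0 user info words into each mon_rx_user_status.
 */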
static inline void
dp_rx_mon_handle_ofdma_info(struct hal_rx_ppdu_info *ppdu_info)
{
	struct mon_rx_user_status *mon_rx_user_status;
	uint32_t num_users;
	uint32_t i;
	uint32_t ul_ofdma_user_v0_word0;
	uint32_t ul_ofdma_user_v0_word1;
	uint32_t ru_width;

	if (ppdu_info->rx_status.reception_type != HAL_RX_TYPE_MU_OFDMA)
		return;

	num_users = ppdu_info->com_info.num_users;
	if (num_users > HAL_MAX_UL_MU_USERS)
		num_users = HAL_MAX_UL_MU_USERS;
	for (i = 0; i < num_users; i++) {
		mon_rx_user_status = &ppdu_info->rx_user_status[i];
		ul_ofdma_user_v0_word0 =
			mon_rx_user_status->ul_ofdma_user_v0_word0;
		ul_ofdma_user_v0_word1 =
			mon_rx_user_status->ul_ofdma_user_v0_word1;

		if (HTT_UL_OFDMA_USER_INFO_V0_W0_VALID_GET(
			ul_ofdma_user_v0_word0) &&
		    !HTT_UL_OFDMA_USER_INFO_V0_W0_VER_GET(
			ul_ofdma_user_v0_word0)) {
			mon_rx_user_status->mcs =
				HTT_UL_OFDMA_USER_INFO_V0_W1_MCS_GET(
					ul_ofdma_user_v0_word1);
			mon_rx_user_status->nss =
				HTT_UL_OFDMA_USER_INFO_V0_W1_NSS_GET(
					ul_ofdma_user_v0_word1);

			mon_rx_user_status->ofdma_info_valid = 1;
			mon_rx_user_status->dl_ofdma_ru_start_index =
				HTT_UL_OFDMA_USER_INFO_V0_W1_RU_START_GET(
					ul_ofdma_user_v0_word1);

			dp_rx_ul_ofdma_ru_size_to_width(
				HTT_UL_OFDMA_USER_INFO_V0_W1_RU_SIZE_GET(
					ul_ofdma_user_v0_word1),
				&ru_width);
			mon_rx_user_status->dl_ofdma_ru_width = ru_width;
		}
	}
}
#else
static inline void
dp_rx_mon_handle_ofdma_info(struct hal_rx_ppdu_info *ppdu_info)
{
}
#endif

/**
* dp_rx_mon_status_process_tlv() - Process status TLV in status
* buffer on Rx status Queue posted by status SRNG processing.
* @soc: core txrx main context
* @mac_id: mac_id which is one of 3 mac_ids
* @quota: No. of ring entries that can be serviced in one shot
*
* Return: none
*/
static inline void
dp_rx_mon_status_process_tlv(struct dp_soc *soc, uint32_t mac_id,
			     uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	struct hal_rx_ppdu_info *ppdu_info;
	qdf_nbuf_t status_nbuf;
	uint8_t *rx_tlv;
	uint8_t *rx_tlv_start;
	uint32_t tlv_status = HAL_TLV_STATUS_BUF_DONE;
	QDF_STATUS enh_log_status = QDF_STATUS_SUCCESS;
	struct cdp_pdev_mon_stats *rx_mon_stats;
	int smart_mesh_status;
	enum WDI_EVENT pktlog_mode = WDI_NO_VAL;
	bool nbuf_used;
	uint32_t rx_enh_capture_mode;

	ppdu_info = &pdev->ppdu_info;
	rx_mon_stats = &pdev->rx_mon_stats;

	if (pdev->mon_ppdu_status != DP_PPDU_STATUS_START)
		return;

	rx_enh_capture_mode = pdev->rx_enh_capture_mode;

	while (!qdf_nbuf_is_queue_empty(&pdev->rx_status_q)) {

		status_nbuf = qdf_nbuf_queue_remove(&pdev->rx_status_q);

		rx_tlv = qdf_nbuf_data(status_nbuf);
		rx_tlv_start = rx_tlv;
		nbuf_used = false;

		if ((pdev->monitor_vdev) || (pdev->enhanced_stats_en) ||
		    pdev->mcopy_mode ||
		    (rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED)) {
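			/* Walk the status buffer TLV by TLV,
			 * accumulating this PPDU's state in ppdu_info
			 */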
			do {
				tlv_status = hal_rx_status_get_tlv_info(rx_tlv,
						ppdu_info, pdev->soc->hal_soc,
						status_nbuf);

				dp_rx_mon_update_dbg_ppdu_stats(ppdu_info,
								rx_mon_stats);

				dp_rx_mon_enh_capture_process(pdev, tlv_status,
					status_nbuf, ppdu_info,
					&nbuf_used);

				rx_tlv = hal_rx_status_get_next_tlv(rx_tlv);

				if ((rx_tlv - rx_tlv_start) >= RX_BUFFER_SIZE)
					break;

			} while ((tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE) ||
				 (tlv_status == HAL_TLV_STATUS_HEADER) ||
				 (tlv_status == HAL_TLV_STATUS_MPDU_END) ||
				 (tlv_status == HAL_TLV_STATUS_MSDU_END));
		}
		if (pdev->dp_peer_based_pktlog) {
			dp_rx_process_peer_based_pktlog(soc, ppdu_info,
							status_nbuf, mac_id);
		} else {
			if (pdev->rx_pktlog_mode == DP_RX_PKTLOG_FULL)
				pktlog_mode = WDI_EVENT_RX_DESC;
			else if (pdev->rx_pktlog_mode == DP_RX_PKTLOG_LITE)
				pktlog_mode = WDI_EVENT_LITE_RX;

			if (pktlog_mode != WDI_NO_VAL)
				dp_wdi_event_handler(pktlog_mode, soc,
						     status_nbuf,
						     HTT_INVALID_PEER,
						     WDI_NO_VAL, mac_id);
		}

		/* smart monitor vap and m_copy cannot co-exist */
		if (ppdu_info->rx_status.monitor_direct_used &&
		    pdev->neighbour_peers_added && pdev->monitor_vdev) {
			smart_mesh_status = dp_rx_handle_smart_mesh_mode(soc,
						pdev, ppdu_info, status_nbuf);
			if (smart_mesh_status)
				qdf_nbuf_free(status_nbuf);
		} else if (qdf_unlikely(pdev->mcopy_mode)) {
			dp_rx_process_mcopy_mode(soc, pdev,
						 ppdu_info, tlv_status,
						 status_nbuf);
		} else if (rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED) {
			if (!nbuf_used)
				qdf_nbuf_free(status_nbuf);

			if (tlv_status == HAL_TLV_STATUS_PPDU_DONE)
				enh_log_status =
					dp_rx_handle_enh_capture(soc,
								 pdev, ppdu_info);
		} else {
			qdf_nbuf_free(status_nbuf);
		}

		if (tlv_status == HAL_TLV_STATUS_PPDU_NON_STD_DONE) {
			dp_rx_mon_deliver_non_std(soc, mac_id);
		} else if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
			rx_mon_stats->status_ppdu_done++;
			dp_rx_mon_handle_ofdma_info(ppdu_info);
			if (pdev->enhanced_stats_en ||
			    pdev->mcopy_mode || pdev->neighbour_peers_added)
				dp_rx_handle_ppdu_stats(soc, pdev, ppdu_info);

			pdev->mon_ppdu_status = DP_PPDU_STATUS_DONE;
			dp_rx_mon_dest_process(soc, mac_id, quota);
			pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
		}
	}
	return;
}

/*
 * dp_rx_mon_status_srng_process() - Process monitor status ring
 * post the status ring buffer to Rx status Queue for later
 * processing when status ring is filled with status TLV.
 * Allocate a new buffer to status ring if the filled buffer
 * is posted.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: No. of ring entry that can be serviced in one shot.
 *
 * Return: uint32_t: No. of ring entry that is processed.
 */
static inline uint32_t
dp_rx_mon_status_srng_process(struct dp_soc *soc, uint32_t mac_id,
			      uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	hal_soc_handle_t hal_soc;
	void *mon_status_srng;
	void *rxdma_mon_status_ring_entry;
	QDF_STATUS status;
	uint32_t work_done = 0;
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);

	mon_status_srng = pdev->rxdma_mon_status_ring[mac_for_pdev].hal_srng;

	qdf_assert(mon_status_srng);
	if (!mon_status_srng || !hal_srng_initialized(mon_status_srng)) {

		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Status Ring Init Failed -- %pK",
			  __func__, __LINE__, mon_status_srng);
		return work_done;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_status_srng)))
		goto done;

	/* mon_status_ring_desc => WBM_BUFFER_RING STRUCT =>
	 * BUFFER_ADDR_INFO STRUCT
	 */
	while (qdf_likely((rxdma_mon_status_ring_entry =
		hal_srng_src_peek(hal_soc, mon_status_srng))
			&& quota--)) {
		uint32_t rx_buf_cookie;
		qdf_nbuf_t status_nbuf;
		struct dp_rx_desc *rx_desc;
		uint8_t *status_buf;
		qdf_dma_addr_t paddr;
		uint64_t buf_addr;

		buf_addr =
			(HAL_RX_BUFFER_ADDR_31_0_GET(
				rxdma_mon_status_ring_entry) |
			((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(
				rxdma_mon_status_ring_entry)) << 32));

		if (qdf_likely(buf_addr)) {

			rx_buf_cookie =
				HAL_RX_BUF_COOKIE_GET(
					rxdma_mon_status_ring_entry);
			rx_desc = dp_rx_cookie_2_va_mon_status(soc,
							       rx_buf_cookie);

			qdf_assert(rx_desc);

			status_nbuf = rx_desc->nbuf;

			qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
					      QDF_DMA_FROM_DEVICE);

			status_buf = qdf_nbuf_data(status_nbuf);

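			/* Reap the buffer only after the target has set
			 * the "status done" flag in the TLV data
			 */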
			status = hal_get_rx_status_done(status_buf);

			if (status != QDF_STATUS_SUCCESS) {
				uint32_t hp, tp;
				hal_get_sw_hptp(hal_soc, mon_status_srng,
						&tp, &hp);
				dp_info_rl("tlv tag status error hp:%u, tp:%u",
					   hp, tp);
				pdev->rx_mon_stats.tlv_tag_status_err++;
				/* WAR for missing status: Skip status entry */
				hal_srng_src_get_next(hal_soc, mon_status_srng);
				continue;
			}
			qdf_nbuf_set_pktlen(status_nbuf, RX_BUFFER_SIZE);

			qdf_nbuf_unmap_single(soc->osdev, status_nbuf,
					      QDF_DMA_FROM_DEVICE);

			/* Put the status_nbuf to queue */
			qdf_nbuf_queue_add(&pdev->rx_status_q, status_nbuf);

		} else {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			struct rx_desc_pool *rx_desc_pool;
			uint32_t num_alloc_desc;

			rx_desc_pool = &soc->rx_desc_status[mac_id];

			num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
								  rx_desc_pool,
								  1,
								  &desc_list,
								  &tail);
			/*
			 * No free descriptors available
			 */
			if (qdf_unlikely(num_alloc_desc == 0)) {
				work_done++;
				break;
			}

			rx_desc = &desc_list->rx_desc;
		}

		status_nbuf = dp_rx_nbuf_prepare(soc, pdev);

		/*
		 * qdf_nbuf alloc or map failed,
		 * free the dp rx desc to free list,
		 * fill in NULL dma address at current HP entry,
		 * keep HP in mon_status_ring unchanged,
		 * wait next time dp_rx_mon_status_srng_process
		 * to fill in buffer at current HP.
		 */
		if (qdf_unlikely(!status_nbuf)) {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			struct rx_desc_pool *rx_desc_pool;

			rx_desc_pool = &soc->rx_desc_status[mac_id];

			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: fail to allocate or map qdf_nbuf",
				  __func__);
			dp_rx_add_to_free_desc_list(&desc_list,
						    &tail, rx_desc);
			dp_rx_add_desc_list_to_free_list(soc, &desc_list,
							 &tail, mac_id,
							 rx_desc_pool);

			hal_rxdma_buff_addr_info_set(
					rxdma_mon_status_ring_entry,
					0, 0, HAL_RX_BUF_RBM_SW3_BM);
			work_done++;
			break;
		}

		paddr = qdf_nbuf_get_frag_paddr(status_nbuf, 0);

		rx_desc->nbuf = status_nbuf;
		rx_desc->in_use = 1;

		hal_rxdma_buff_addr_info_set(rxdma_mon_status_ring_entry,
			paddr, rx_desc->cookie, HAL_RX_BUF_RBM_SW3_BM);

		hal_srng_src_get_next(hal_soc, mon_status_srng);
		work_done++;
	}
done:

	hal_srng_access_end(hal_soc, mon_status_srng);

	return work_done;

}
/*
 * dp_rx_mon_status_process() - Process monitor status ring and
 * TLV in status ring.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: No. of ring entry that can be serviced in one shot.
 *
 * Return: uint32_t: No. of ring entry that is processed.
 */
static inline uint32_t
dp_rx_mon_status_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) {
	uint32_t work_done;

	work_done = dp_rx_mon_status_srng_process(soc, mac_id, quota);
	quota -= work_done;
	dp_rx_mon_status_process_tlv(soc, mac_id, quota);

	return work_done;
}
/**
 * dp_mon_process() - Main monitor mode processing routine.
 * This calls the monitor status ring process, then the monitor
 * destination ring process.
 * Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: No. of status ring entry that can be serviced in one shot.
 *
 * Return: uint32_t: No. of ring entry that is processed.
 */
uint32_t
dp_mon_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) {
	return dp_rx_mon_status_process(soc, mac_id, quota);
}

/**
 * dp_rx_pdev_mon_status_detach() - detach dp rx for status ring
 * @pdev: core txrx pdev context
 * @mac_id: mac_id/pdev_id correspondingly for MCL and WIN
 *
 * This function will detach the DP RX status ring from the
 * main device context and free the DP Rx resources for
 * the status ring.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS
dp_rx_pdev_mon_status_detach(struct dp_pdev *pdev, int mac_id)
{
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_status[mac_id];
	if (rx_desc_pool->pool_size != 0) {
		if (!dp_is_soc_reinit(soc))
			dp_rx_desc_nbuf_and_pool_free(soc, mac_id,
						      rx_desc_pool);
		else
			dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	}

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_mon_status_buffers_replenish() - replenish monitor status ring with
 * rx nbufs called during dp rx
 * monitor status ring initialization
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp monitor status circular ring
 * @rx_desc_pool: Pointer to Rx descriptor pool
 * @num_req_buffers: number of buffer to be replenished
 * @desc_list: list of descs if called from dp rx monitor status
 *	       process or NULL during dp rx initialization or
 *	       out of buffer interrupt
 * @tail: tail of descs list
 * @owner: who owns the nbuf (host, NSS etc...)
 *
 * Return: return success or failure
 */
static inline
QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
	uint32_t mac_id,
	struct dp_srng *dp_rxdma_srng,
	struct rx_desc_pool *rx_desc_pool,
	uint32_t num_req_buffers,
	union dp_rx_desc_list_elem_t **desc_list,
	union dp_rx_desc_list_elem_t **tail,
	uint8_t owner)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	uint32_t num_entries_avail;
	uint32_t count = 0;
	int sync_hw_ptr = 1;
	qdf_dma_addr_t paddr;
	qdf_nbuf_t rx_netbuf;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_srng;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(dp_soc, mac_id);

	rxdma_srng = dp_rxdma_srng->hal_srng;

	qdf_assert(rxdma_srng);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "[%s][%d] requested %d buffers for replenish",
		  __func__, __LINE__, num_req_buffers);

	/*
	 * If *desc_list is NULL, allocate the descriptors from the
	 * pool freelist.
	 */
	if (!(*desc_list)) {

		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "[%s][%d] no free rx_descs in freelist",
				  __func__, __LINE__);
			return QDF_STATUS_E_NOMEM;
		}

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "[%s][%d] %d rx desc allocated", __func__, __LINE__,
			  num_alloc_desc);

		num_req_buffers = num_alloc_desc;
	}

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng, sync_hw_ptr);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "[%s][%d] no. of available entries in rxdma ring: %d",
		  __func__, __LINE__, num_entries_avail);

	if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	}

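	/*
	 * Replenish one buffer per loop iteration. If the ring exposed
	 * fewer free entries than requested, the surplus descriptors
	 * stay on *desc_list and are handed back to the free list at
	 * the end of this function.
	 */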
	while (count < num_req_buffers) {
		rx_netbuf = dp_rx_nbuf_prepare(dp_soc, dp_pdev);

		/*
		 * If the qdf_nbuf allocation or mapping failed, keep the
		 * HP of the mon_status_ring unchanged and wait for
		 * dp_rx_mon_status_srng_process() to fill in a buffer at
		 * the current HP.
		 */
		if (qdf_unlikely(!rx_netbuf)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: qdf_nbuf allocate or map fail, count %d",
				  __func__, count);
			break;
		}

		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);

		next = (*desc_list)->next;
		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
							 rxdma_srng);

		if (qdf_unlikely(!rxdma_ring_entry)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "[%s][%d] rxdma_ring_entry is NULL, count - %d",
				  __func__, __LINE__, count);
			qdf_nbuf_unmap_single(dp_soc->osdev, rx_netbuf,
					      QDF_DMA_FROM_DEVICE);
			qdf_nbuf_free(rx_netbuf);
			break;
		}

		(*desc_list)->rx_desc.nbuf = rx_netbuf;
		(*desc_list)->rx_desc.in_use = 1;
		count++;

		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
					     (*desc_list)->rx_desc.cookie,
					     owner);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "[%s][%d] rx_desc=%pK, cookie=%d, nbuf=%pK, paddr=%pK",
			  __func__, __LINE__, &(*desc_list)->rx_desc,
			  (*desc_list)->rx_desc.cookie, rx_netbuf,
			  (void *)paddr);

		*desc_list = next;
	}

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "successfully replenished %d buffers", num_req_buffers);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%d rx desc added back to free list", num_desc_to_free);

	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list) {
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);
	}

	return QDF_STATUS_SUCCESS;
}
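
/*
 * Usage sketch (hypothetical, for illustration): at init time the
 * caller passes NULL-initialized desc_list/tail so the descriptors
 * come from the pool freelist, exactly as dp_rx_pdev_mon_status_attach()
 * does below; a status-ring processing path would instead pass the
 * descriptors it has already reaped.
 */
static inline QDF_STATUS
dp_rx_mon_status_replenish_example(struct dp_soc *soc, uint32_t mac_id,
				   struct dp_srng *srng,
				   struct rx_desc_pool *pool,
				   uint32_t nbufs)
{
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	/* NULL desc_list => replenish allocates from the freelist */
	return dp_rx_mon_status_buffers_replenish(soc, mac_id, srng, pool,
						  nbufs, &desc_list, &tail,
						  HAL_RX_BUF_RBM_SW3_BM);
}
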
/**
 * dp_rx_pdev_mon_status_attach() - attach DP RX monitor status ring
 * @pdev: core txrx pdev context
 * @ring_id: ring number
 *
 * This function attaches a DP RX monitor status ring to the pdev
 * and replenishes the monitor status ring with buffers.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *	   QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS
dp_rx_pdev_mon_status_attach(struct dp_pdev *pdev, int ring_id) {
	struct dp_soc *soc = pdev->soc;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *mon_status_ring;
	uint32_t num_entries;
	uint32_t i;
	struct rx_desc_pool *rx_desc_pool;
	QDF_STATUS status;
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, ring_id);

	mon_status_ring = &pdev->rxdma_mon_status_ring[mac_for_pdev];

	num_entries = mon_status_ring->num_entries;

	rx_desc_pool = &soc->rx_desc_status[ring_id];

	dp_info("Mon RX Status Pool[%d] entries=%d",
		ring_id, num_entries);

	status = dp_rx_desc_pool_alloc(soc, ring_id, num_entries + 1,
				       rx_desc_pool);
	if (!QDF_IS_STATUS_SUCCESS(status))
		return status;

	dp_debug("Mon RX Status Buffers Replenish ring_id=%d", ring_id);

	status = dp_rx_mon_status_buffers_replenish(soc, ring_id,
						    mon_status_ring,
						    rx_desc_pool,
						    num_entries,
						    &desc_list, &tail,
						    HAL_RX_BUF_RBM_SW3_BM);

	if (!QDF_IS_STATUS_SUCCESS(status))
		return status;

	qdf_nbuf_queue_init(&pdev->rx_status_q);
	qdf_nbuf_queue_init(&pdev->rx_ppdu_buf_q);

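	/* Status TLV parsing begins by waiting for the start of a PPDU */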
	pdev->mon_ppdu_status = DP_PPDU_STATUS_START;

	qdf_mem_zero(&pdev->ppdu_info.rx_status,
		     sizeof(pdev->ppdu_info.rx_status));

	qdf_mem_zero(&pdev->rx_mon_stats,
		     sizeof(pdev->rx_mon_stats));

	dp_rx_mon_init_dbg_ppdu_stats(&pdev->ppdu_info,
				      &pdev->rx_mon_stats);

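	/*
	 * Per-MU-user MPDU queues and MPDU-header tracking used by the
	 * RX enhanced capture path
	 */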
	for (i = 0; i < MAX_MU_USERS; i++) {
		qdf_nbuf_queue_init(&pdev->mpdu_q[i]);
		pdev->is_mpdu_hdr[i] = true;
	}
	qdf_mem_zero(pdev->msdu_list, sizeof(pdev->msdu_list));

	pdev->rx_enh_capture_mode = CDP_RX_ENH_CAPTURE_DISABLED;

	return QDF_STATUS_SUCCESS;
}
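
/*
 * Illustrative caller (hypothetical, not part of this driver):
 * monitor status rings are attached per mac; on failure the rings
 * already attached are unwound with dp_rx_pdev_mon_status_detach().
 */
static inline QDF_STATUS
dp_mon_status_attach_all_example(struct dp_pdev *pdev, int num_macs)
{
	int ring_id;
	QDF_STATUS status;

	for (ring_id = 0; ring_id < num_macs; ring_id++) {
		status = dp_rx_pdev_mon_status_attach(pdev, ring_id);
		if (!QDF_IS_STATUS_SUCCESS(status)) {
			while (--ring_id >= 0)
				dp_rx_pdev_mon_status_detach(pdev, ring_id);
			return status;
		}
	}

	return QDF_STATUS_SUCCESS;
}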