/*
 * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "hal_api_mon.h"
#include "dp_rx_mon.h"
#include "dp_internal.h"
#include "qdf_mem.h"   /* qdf_mem_malloc,free */

#include "htt.h"

#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif

#ifdef WLAN_RX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#else
static QDF_STATUS
dp_rx_handle_enh_capture(struct dp_soc *soc, struct dp_pdev *pdev,
			 struct hal_rx_ppdu_info *ppdu_info)
{
	return QDF_STATUS_SUCCESS;
}

static void
dp_rx_mon_enh_capture_process(struct dp_pdev *pdev, uint32_t tlv_status,
			      qdf_nbuf_t status_nbuf,
			      struct hal_rx_ppdu_info *ppdu_info,
			      bool *nbuf_used)
{
}
#endif

#ifdef FEATURE_PERPKT_INFO
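/**
 * dp_rx_populate_rx_rssi_chain() - Populate rssi chain from ppdu info
 * @ppdu_info: ppdu info structure from ppdu ring
 * @cdp_rx_ppdu: rx ppdu indication structure
 *
 * Copies the per-chain, per-bandwidth RSSI readings into the cdp
 * indication; invalid readings (DP_RSSI_INVAL) are reported as 0.
 */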
static inline void
dp_rx_populate_rx_rssi_chain(struct hal_rx_ppdu_info *ppdu_info,
			     struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
	uint8_t chain, bw;
	int8_t rssi;

	for (chain = 0; chain < SS_COUNT; chain++) {
		for (bw = 0; bw < MAX_BW; bw++) {
			rssi = ppdu_info->rx_status.rssi_chain[chain][bw];
			if (rssi != DP_RSSI_INVAL)
				cdp_rx_ppdu->rssi_chain[chain][bw] = rssi;
			else
				cdp_rx_ppdu->rssi_chain[chain][bw] = 0;
		}
	}
}

/*
 * dp_rx_populate_su_evm_details() - Populate su evm info
 * @ppdu_info: ppdu info structure from ppdu ring
 * @cdp_rx_ppdu: rx ppdu indication structure
 */
static inline void
dp_rx_populate_su_evm_details(struct hal_rx_ppdu_info *ppdu_info,
			      struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
	uint8_t pilot_evm;
	uint8_t nss_count;
	uint8_t pilot_count;

	nss_count = ppdu_info->evm_info.nss_count;
	pilot_count = ppdu_info->evm_info.pilot_count;

	if ((nss_count * pilot_count) > DP_RX_MAX_SU_EVM_COUNT) {
		qdf_err("pilot evm count is more than expected");
		return;
	}
	cdp_rx_ppdu->evm_info.pilot_count = pilot_count;
	cdp_rx_ppdu->evm_info.nss_count = nss_count;

	/* Populate evm for pilot_evm = nss_count*pilot_count */
	for (pilot_evm = 0; pilot_evm < nss_count * pilot_count; pilot_evm++) {
		cdp_rx_ppdu->evm_info.pilot_evm[pilot_evm] =
			ppdu_info->evm_info.pilot_evm[pilot_evm];
	}
}

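/*
 * Note: pilot_evm[] above is copied as a flat array of
 * nss_count * pilot_count entries (presumably one EVM reading per
 * (nss, pilot) pair), bounded by DP_RX_MAX_SU_EVM_COUNT.
 */
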
/**
 * dp_rx_inc_rusize_cnt() - increment pdev stats based on RU size
 * @pdev: pdev ctx
 * @rx_user_status: mon rx user status
 *
 * Return: true if the frame is a data frame, false otherwise
 */
static inline bool
dp_rx_inc_rusize_cnt(struct dp_pdev *pdev,
		     struct mon_rx_user_status *rx_user_status)
{
	uint32_t ru_size;
	bool is_data;

	ru_size = rx_user_status->dl_ofdma_ru_size;

	if (dp_is_subtype_data(rx_user_status->frame_control)) {
		DP_STATS_INC(pdev,
			     ul_ofdma.data_rx_ru_size[ru_size], 1);
		is_data = true;
	} else {
		DP_STATS_INC(pdev,
			     ul_ofdma.nondata_rx_ru_size[ru_size], 1);
		is_data = false;
	}

	return is_data;
}

/**
 * dp_rx_populate_cdp_indication_ppdu_user() - Populate per user cdp indication
 * @pdev: pdev ctx
 * @ppdu_info: ppdu info structure from ppdu ring
 * @ppdu_nbuf: qdf nbuf abstraction for linux skb
 *
 * Return: none
 */
static inline void
dp_rx_populate_cdp_indication_ppdu_user(struct dp_pdev *pdev,
					struct hal_rx_ppdu_info *ppdu_info,
					qdf_nbuf_t ppdu_nbuf)
{
	struct dp_peer *peer;
	struct dp_soc *soc = pdev->soc;
	struct dp_ast_entry *ast_entry;
	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;
	uint32_t ast_index;
	int i;
	struct mon_rx_user_status *rx_user_status;
	struct cdp_rx_stats_ppdu_user *rx_stats_peruser;
	int ru_size;
	bool is_data = false;
	uint32_t num_users;

	cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)ppdu_nbuf->data;

	num_users = ppdu_info->com_info.num_users;
	for (i = 0; i < num_users; i++) {
		if (i > OFDMA_NUM_USERS)
			return;

		rx_user_status = &ppdu_info->rx_user_status[i];
		rx_stats_peruser = &cdp_rx_ppdu->user[i];

		ast_index = rx_user_status->ast_index;
		if (ast_index >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
			rx_stats_peruser->peer_id = HTT_INVALID_PEER;
			return;
		}

		ast_entry = soc->ast_table[ast_index];
		if (!ast_entry) {
			rx_stats_peruser->peer_id = HTT_INVALID_PEER;
			return;
		}

		peer = ast_entry->peer;
		if (!peer || peer->peer_ids[0] == HTT_INVALID_PEER) {
			rx_stats_peruser->peer_id = HTT_INVALID_PEER;
			return;
		}

		rx_stats_peruser->first_data_seq_ctrl =
			rx_user_status->first_data_seq_ctrl;

		rx_stats_peruser->frame_control =
			rx_user_status->frame_control;

		rx_stats_peruser->tcp_msdu_count =
			rx_user_status->tcp_msdu_count;
		rx_stats_peruser->udp_msdu_count =
			rx_user_status->udp_msdu_count;
		rx_stats_peruser->other_msdu_count =
			rx_user_status->other_msdu_count;
		rx_stats_peruser->preamble_type =
			rx_user_status->preamble_type;
		rx_stats_peruser->mpdu_cnt_fcs_ok =
			rx_user_status->mpdu_cnt_fcs_ok;
		rx_stats_peruser->mpdu_cnt_fcs_err =
			rx_user_status->mpdu_cnt_fcs_err;
		qdf_mem_copy(&rx_stats_peruser->mpdu_fcs_ok_bitmap,
			     &rx_user_status->mpdu_fcs_ok_bitmap,
			     HAL_RX_NUM_WORDS_PER_PPDU_BITMAP *
			     sizeof(rx_user_status->mpdu_fcs_ok_bitmap[0]));
		rx_stats_peruser->mpdu_ok_byte_count =
			rx_user_status->mpdu_ok_byte_count;
		rx_stats_peruser->mpdu_err_byte_count =
			rx_user_status->mpdu_err_byte_count;

		cdp_rx_ppdu->num_mpdu += rx_user_status->mpdu_cnt_fcs_ok;
		cdp_rx_ppdu->num_msdu +=
			(rx_stats_peruser->tcp_msdu_count +
			 rx_stats_peruser->udp_msdu_count +
			 rx_stats_peruser->other_msdu_count);
		rx_stats_peruser->retries =
			CDP_FC_IS_RETRY_SET(rx_stats_peruser->frame_control) ?
			rx_stats_peruser->mpdu_cnt_fcs_ok : 0;

		if (rx_stats_peruser->mpdu_cnt_fcs_ok > 1)
			rx_stats_peruser->is_ampdu = 1;
		else
			rx_stats_peruser->is_ampdu = 0;

		rx_stats_peruser->tid = ppdu_info->rx_status.tid;

		qdf_mem_copy(rx_stats_peruser->mac_addr,
			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
		rx_stats_peruser->peer_id = peer->peer_ids[0];
		cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id;
		rx_stats_peruser->vdev_id = peer->vdev->vdev_id;

		if (cdp_rx_ppdu->u.ppdu_type == HAL_RX_TYPE_MU_OFDMA) {
			if (rx_user_status->ofdma_info_valid) {
				rx_stats_peruser->nss = rx_user_status->nss;
				rx_stats_peruser->mcs = rx_user_status->mcs;
				rx_stats_peruser->ofdma_info_valid =
					rx_user_status->ofdma_info_valid;
				rx_stats_peruser->ofdma_ru_start_index =
					rx_user_status->dl_ofdma_ru_start_index;
				rx_stats_peruser->ofdma_ru_width =
					rx_user_status->dl_ofdma_ru_width;
				rx_stats_peruser->user_index = i;
				ru_size = rx_user_status->dl_ofdma_ru_size;
				/*
				 * max RU size will be equal to
				 * HTT_UL_OFDMA_V0_RU_SIZE_RU_996x2
				 */
				if (ru_size >= OFDMA_NUM_RU_SIZE) {
					dp_err("invalid ru_size %d\n",
					       ru_size);
					return;
				}
				is_data = dp_rx_inc_rusize_cnt(pdev,
							       rx_user_status);
			} else {
				rx_stats_peruser->ofdma_info_valid = 0;
			}
			if (is_data) {
				/* counter to get number of MU OFDMA */
				pdev->stats.ul_ofdma.data_rx_ppdu++;
				pdev->stats.ul_ofdma.data_users[num_users]++;
			}
		}
	}
}

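/*
 * Note: dp_rx_populate_cdp_indication_ppdu_user() returns on the first
 * user whose AST index or peer lookup fails, after marking that user's
 * peer_id as HTT_INVALID_PEER; any remaining users of the PPDU are left
 * unfilled.
 */
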
/**
* dp_rx_populate_cdp_indication_ppdu() - Populate cdp rx indication structure
* @pdev: pdev ctx
* @ppdu_info: ppdu info structure from ppdu ring
* @ppdu_nbuf: qdf nbuf abstraction for linux skb
*
* Return: none
*/
static inline void
dp_rx_populate_cdp_indication_ppdu(struct dp_pdev *pdev,
				   struct hal_rx_ppdu_info *ppdu_info,
				   qdf_nbuf_t ppdu_nbuf)
{
	struct dp_peer *peer;
	struct dp_soc *soc = pdev->soc;
	struct dp_ast_entry *ast_entry;
	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;
	uint32_t ast_index;
	uint32_t i;

	cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)ppdu_nbuf->data;

	cdp_rx_ppdu->first_data_seq_ctrl =
		ppdu_info->rx_status.first_data_seq_ctrl;
	cdp_rx_ppdu->frame_ctrl =
		ppdu_info->rx_status.frame_control;
	cdp_rx_ppdu->tcp_msdu_count = ppdu_info->rx_status.tcp_msdu_count;
	cdp_rx_ppdu->udp_msdu_count = ppdu_info->rx_status.udp_msdu_count;
	cdp_rx_ppdu->other_msdu_count = ppdu_info->rx_status.other_msdu_count;
	cdp_rx_ppdu->u.preamble = ppdu_info->rx_status.preamble_type;
	/* num mpdu is consolidated and added together in num user loop */
	cdp_rx_ppdu->num_mpdu = ppdu_info->com_info.mpdu_cnt_fcs_ok;
	/* num msdu is consolidated and added together in num user loop */
	cdp_rx_ppdu->num_msdu = (cdp_rx_ppdu->tcp_msdu_count +
				 cdp_rx_ppdu->udp_msdu_count +
				 cdp_rx_ppdu->other_msdu_count);

	cdp_rx_ppdu->retries = CDP_FC_IS_RETRY_SET(cdp_rx_ppdu->frame_ctrl) ?
		ppdu_info->com_info.mpdu_cnt_fcs_ok : 0;

	if (ppdu_info->com_info.mpdu_cnt_fcs_ok > 1)
		cdp_rx_ppdu->is_ampdu = 1;
	else
		cdp_rx_ppdu->is_ampdu = 0;
	cdp_rx_ppdu->tid = ppdu_info->rx_status.tid;

	ast_index = ppdu_info->rx_status.ast_index;
	if (ast_index >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
		cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
		return;
	}

	ast_entry = soc->ast_table[ast_index];
	if (!ast_entry) {
		cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
		return;
	}
	peer = ast_entry->peer;
	if (!peer || peer->peer_ids[0] == HTT_INVALID_PEER) {
		cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
		return;
	}

	qdf_mem_copy(cdp_rx_ppdu->mac_addr,
		     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
	cdp_rx_ppdu->peer_id = peer->peer_ids[0];
	cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id;

	cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id;
	cdp_rx_ppdu->length = ppdu_info->rx_status.ppdu_len;
	cdp_rx_ppdu->duration = ppdu_info->rx_status.duration;
	cdp_rx_ppdu->u.bw = ppdu_info->rx_status.bw;
	cdp_rx_ppdu->u.nss = ppdu_info->rx_status.nss;
	cdp_rx_ppdu->u.mcs = ppdu_info->rx_status.mcs;
	if ((ppdu_info->rx_status.sgi == VHT_SGI_NYSM) &&
	    (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC))
		cdp_rx_ppdu->u.gi = CDP_SGI_0_4_US;
	else
		cdp_rx_ppdu->u.gi = ppdu_info->rx_status.sgi;
	cdp_rx_ppdu->u.ldpc = ppdu_info->rx_status.ldpc;
	cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type;
	cdp_rx_ppdu->u.ltf_size = (ppdu_info->rx_status.he_data5 >>
				   QDF_MON_STATUS_HE_LTF_SIZE_SHIFT) & 0x3;
	cdp_rx_ppdu->rssi = ppdu_info->rx_status.rssi_comb;
	cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft;
	cdp_rx_ppdu->channel = ppdu_info->rx_status.chan_num;
	cdp_rx_ppdu->beamformed = ppdu_info->rx_status.beamformed;
	cdp_rx_ppdu->num_bytes = ppdu_info->rx_status.ppdu_len;
	cdp_rx_ppdu->lsig_a = ppdu_info->rx_status.rate;
	cdp_rx_ppdu->u.ltf_size = ppdu_info->rx_status.ltf_size;

	dp_rx_populate_rx_rssi_chain(ppdu_info, cdp_rx_ppdu);
	dp_rx_populate_su_evm_details(ppdu_info, cdp_rx_ppdu);
	cdp_rx_ppdu->rx_antenna = ppdu_info->rx_status.rx_antenna;

	cdp_rx_ppdu->nf = ppdu_info->rx_status.chan_noise_floor;
	for (i = 0; i < MAX_CHAIN; i++)
		cdp_rx_ppdu->per_chain_rssi[i] = ppdu_info->rx_status.rssi[i];

	cdp_rx_ppdu->is_mcast_bcast = ppdu_info->nac_info.mcast_bcast;

	cdp_rx_ppdu->num_users = ppdu_info->com_info.num_users;

	cdp_rx_ppdu->num_mpdu = 0;
	cdp_rx_ppdu->num_msdu = 0;

	dp_rx_populate_cdp_indication_ppdu_user(pdev, ppdu_info, ppdu_nbuf);
}
#else
static inline void
dp_rx_populate_cdp_indication_ppdu(struct dp_pdev *pdev,
				   struct hal_rx_ppdu_info *ppdu_info,
				   qdf_nbuf_t ppdu_nbuf)
{
}
#endif
/**
 * dp_rx_stats_update() - Update per-peer statistics
 * @soc: Datapath SOC handle
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 *
 * Return: None
 */
#ifdef FEATURE_PERPKT_INFO
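/**
 * dp_rx_rate_stats_update() - Update per-peer rate statistics
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 *
 * Derives the rate in kbps from (gi, mcs, nss, preamble, bw) and updates
 * the peer's last/average rx rate.
 *
 * Return: None
 */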
static inline void dp_rx_rate_stats_update(struct dp_peer *peer,
					   struct cdp_rx_indication_ppdu *ppdu)
{
	uint32_t ratekbps = 0;
	uint32_t ppdu_rx_rate = 0;
	uint32_t nss = 0;
	uint32_t rix;
	uint16_t ratecode;

	if (!peer || !ppdu)
		return;

	if (ppdu->u.nss == 0)
		nss = 0;
	else
		nss = ppdu->u.nss - 1;

	ratekbps = dp_getrateindex(ppdu->u.gi,
				   ppdu->u.mcs,
				   nss,
				   ppdu->u.preamble,
				   ppdu->u.bw,
				   &rix,
				   &ratecode);

	if (!ratekbps)
		return;

	ppdu->rix = rix;
	DP_STATS_UPD(peer, rx.last_rx_rate, ratekbps);
	dp_ath_rate_lpf(peer->stats.rx.avg_rx_rate, ratekbps);
	ppdu_rx_rate = dp_ath_rate_out(peer->stats.rx.avg_rx_rate);
	DP_STATS_UPD(peer, rx.rnd_avg_rx_rate, ppdu_rx_rate);
	ppdu->rx_ratekbps = ratekbps;
	ppdu->rx_ratecode = ratecode;

	if (peer->vdev)
		peer->vdev->stats.rx.last_rx_rate = ratekbps;
}

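/*
 * Note on dp_rx_rate_stats_update() above: dp_ath_rate_lpf() and
 * dp_ath_rate_out() appear to maintain and read back a low-pass-filtered
 * running average in peer->stats.rx.avg_rx_rate; this is inferred from
 * their usage here rather than from their definitions.
 */
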
static void dp_rx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
			       struct cdp_rx_indication_ppdu *ppdu)
{
	struct dp_soc *soc = NULL;
	uint8_t mcs, preamble, ac = 0;
	uint16_t num_msdu;
	bool is_invalid_peer = false;

	mcs = ppdu->u.mcs;
	preamble = ppdu->u.preamble;
	num_msdu = ppdu->num_msdu;

	if (pdev)
		soc = pdev->soc;
	else
		return;

	if (!peer) {
		is_invalid_peer = true;
		peer = pdev->invalid_peer;
	}

	if (!soc || soc->process_rx_status)
		return;

	DP_STATS_UPD(peer, rx.rssi, ppdu->rssi);
	if (peer->stats.rx.avg_rssi == INVALID_RSSI)
		peer->stats.rx.avg_rssi = ppdu->rssi;
	else
		peer->stats.rx.avg_rssi =
			DP_GET_AVG_RSSI(peer->stats.rx.avg_rssi, ppdu->rssi);

	if ((preamble == DOT11_A) || (preamble == DOT11_B))
		ppdu->u.nss = 1;

	if (ppdu->u.nss)
		DP_STATS_INC(peer, rx.nss[ppdu->u.nss - 1], num_msdu);

	DP_STATS_INC(peer, rx.sgi_count[ppdu->u.gi], num_msdu);
	DP_STATS_INC(peer, rx.bw[ppdu->u.bw], num_msdu);
	DP_STATS_INC(peer, rx.reception_type[ppdu->u.ppdu_type], num_msdu);
	DP_STATS_INCC(peer, rx.ampdu_cnt, num_msdu, ppdu->is_ampdu);
	DP_STATS_INCC(peer, rx.non_ampdu_cnt, num_msdu, !(ppdu->is_ampdu));
	DP_STATS_UPD(peer, rx.rx_rate, mcs);
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	/*
	 * If invalid TID, it could be a non-qos frame, hence do not update
	 * any AC counters
	 */
	ac = TID_TO_WME_AC(ppdu->tid);
	if (ppdu->tid != HAL_TID_INVALID)
		DP_STATS_INC(peer, rx.wme_ac_type[ac], num_msdu);
	dp_peer_stats_notify(peer);
	DP_STATS_UPD(peer, rx.last_rssi, ppdu->rssi);

	if (is_invalid_peer)
		return;

	if (dp_is_subtype_data(ppdu->frame_ctrl))
		dp_rx_rate_stats_update(peer, ppdu);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
			     &peer->stats, ppdu->peer_id,
			     UPDATE_PEER_STATS, pdev->pdev_id);
#endif
}
#endif

/*
 * dp_rx_get_fcs_ok_msdu() - get ppdu status buffer containing fcs_ok msdu
 * @pdev: pdev object
 * @ppdu_info: ppdu info object
 *
 * Return: nbuf
 */
static inline qdf_nbuf_t
dp_rx_get_fcs_ok_msdu(struct dp_pdev *pdev,
		      struct hal_rx_ppdu_info *ppdu_info)
{
	uint16_t mpdu_fcs_ok;
	qdf_nbuf_t status_nbuf = NULL;
	unsigned long *fcs_ok_bitmap;

	/* Obtain the fcs_ok passed index from the bitmap;
	 * this index is used to get the first msdu payload
	 * of an mpdu that passed fcs
	 */
	fcs_ok_bitmap =
		(unsigned long *)&ppdu_info->com_info.mpdu_fcs_ok_bitmap[0];
	mpdu_fcs_ok = qdf_find_first_bit(fcs_ok_bitmap,
					 HAL_RX_MAX_MPDU);

	if (mpdu_fcs_ok >= HAL_RX_MAX_MPDU)
		goto end;

	/* Get the status buffer indexed by mpdu_fcs_ok, which
	 * contains the first msdu payload that passed fcs,
	 * and clone the buffer
	 */
	status_nbuf = ppdu_info->ppdu_msdu_info[mpdu_fcs_ok].nbuf;
	/* Take a ref on the status nbuf, as this nbuf is to be
	 * freed by the upper layer.
	 */
	qdf_nbuf_ref(status_nbuf);

end:
	/* Free the ppdu status buffer queue */
	qdf_nbuf_queue_free(&pdev->rx_ppdu_buf_q);

	return status_nbuf;
}
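/**
 * dp_rx_handle_ppdu_status_buf() - Hold the PPDU status buffers in a
 * per-pdev queue until the complete PPDU is received
 * @pdev: pdev object
 * @ppdu_info: ppdu info object (unused here)
 * @status_nbuf: rx status buffer to enqueue
 *
 * Return: none
 */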
static inline void
dp_rx_handle_ppdu_status_buf(struct dp_pdev *pdev,
			     struct hal_rx_ppdu_info *ppdu_info,
			     qdf_nbuf_t status_nbuf)
{
	qdf_nbuf_queue_add(&pdev->rx_ppdu_buf_q, status_nbuf);
}
/**
 * dp_rx_handle_mcopy_mode() - Allocate and deliver first MSDU payload
 * @soc: core txrx main context
 * @pdev: pdev structure
 * @ppdu_info: structure for rx ppdu ring
 * @nbuf: status nbuf carrying the first msdu payload
 *
 * Return: QDF_STATUS_SUCCESS - If nbuf to be freed by caller
 *         QDF_STATUS_E_ALREADY - If nbuf not to be freed by caller
 */
#ifdef FEATURE_PERPKT_INFO
static inline QDF_STATUS
dp_rx_handle_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info, qdf_nbuf_t nbuf)
{
	uint8_t size = 0;
	struct ieee80211_frame *wh;
	uint32_t *nbuf_data;

	if (!ppdu_info->fcs_ok_msdu_info.first_msdu_payload)
		return QDF_STATUS_SUCCESS;

	if (pdev->m_copy_id.rx_ppdu_id == ppdu_info->com_info.ppdu_id)
		return QDF_STATUS_SUCCESS;

	pdev->m_copy_id.rx_ppdu_id = ppdu_info->com_info.ppdu_id;

	/* Skip the 4-byte phy_ppdu_id to reach the 802.11 frame header */
	wh = (struct ieee80211_frame *)
		(ppdu_info->fcs_ok_msdu_info.first_msdu_payload + 4);

	size = (ppdu_info->fcs_ok_msdu_info.first_msdu_payload -
		qdf_nbuf_data(nbuf));
	ppdu_info->fcs_ok_msdu_info.first_msdu_payload = NULL;

	if (qdf_nbuf_pull_head(nbuf, size) == NULL)
		return QDF_STATUS_SUCCESS;

	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
	     IEEE80211_FC0_TYPE_MGT) ||
	    ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
	     IEEE80211_FC0_TYPE_CTL)) {
		return QDF_STATUS_SUCCESS;
	}

	nbuf_data = (uint32_t *)qdf_nbuf_data(nbuf);
	*nbuf_data = pdev->ppdu_info.com_info.ppdu_id;
	/* only retain RX MSDU payload in the skb */
	qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) -
			   ppdu_info->fcs_ok_msdu_info.payload_len);
	dp_wdi_event_handler(WDI_EVENT_RX_DATA, soc,
			     nbuf, HTT_INVALID_PEER, WDI_NO_VAL, pdev->pdev_id);
	return QDF_STATUS_E_ALREADY;
}
#else
static inline QDF_STATUS
dp_rx_handle_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info, qdf_nbuf_t nbuf)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#ifdef FEATURE_PERPKT_INFO
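/**
 * dp_rx_process_mcopy_mode() - Queue status buffers for a PPDU and, once
 * HAL_TLV_STATUS_PPDU_DONE is seen, deliver the buffer holding an fcs_ok
 * msdu via dp_rx_handle_mcopy_mode()
 * @soc: core txrx main context
 * @pdev: pdev structure
 * @ppdu_info: structure for rx ppdu ring
 * @tlv_status: TLV status returned by the status-TLV parser
 * @status_nbuf: rx status buffer
 *
 * Return: none
 */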
static inline void
dp_rx_process_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			 struct hal_rx_ppdu_info *ppdu_info,
			 uint32_t tlv_status,
			 qdf_nbuf_t status_nbuf)
{
	QDF_STATUS mcopy_status;

	if (qdf_unlikely(!ppdu_info->com_info.mpdu_cnt)) {
		qdf_nbuf_free(status_nbuf);
		return;
	}
	/* Add buffers to queue until we receive
	 * HAL_TLV_STATUS_PPDU_DONE
	 */
	dp_rx_handle_ppdu_status_buf(pdev, ppdu_info, status_nbuf);

	/* If tlv_status is PPDU_DONE, process rx_ppdu_buf_q
	 * and deliver the fcs_ok msdu buffer
	 */
	if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
		/* Get rx ppdu status buffer having fcs ok msdu */
		status_nbuf = dp_rx_get_fcs_ok_msdu(pdev, ppdu_info);
		if (status_nbuf) {
			mcopy_status = dp_rx_handle_mcopy_mode(soc, pdev,
							       ppdu_info,
							       status_nbuf);
			if (mcopy_status == QDF_STATUS_SUCCESS)
				qdf_nbuf_free(status_nbuf);
		}
	}
}
#else
static inline void
dp_rx_process_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			 struct hal_rx_ppdu_info *ppdu_info,
			 uint32_t tlv_status,
			 qdf_nbuf_t status_nbuf)
{
}
#endif

/**
 * dp_rx_handle_smart_mesh_mode() - Deliver header for smart mesh
 * @soc: Datapath SOC handle
 * @pdev: Datapath PDEV handle
 * @ppdu_info: Structure for rx ppdu info
 * @nbuf: Qdf nbuf abstraction for linux skb
 *
 * Return: 0 on success, 1 on failure
 */
static inline int
dp_rx_handle_smart_mesh_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			     struct hal_rx_ppdu_info *ppdu_info,
			     qdf_nbuf_t nbuf)
{
	uint8_t size = 0;

	if (!pdev->monitor_vdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "[%s]:[%d] Monitor vdev is NULL !!",
			  __func__, __LINE__);
		return 1;
	}
	if (!ppdu_info->msdu_info.first_msdu_payload) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "[%s]:[%d] First msdu payload not present",
			  __func__, __LINE__);
		return 1;
	}

	/* Adding 4 bytes to get to start of 802.11 frame after phy_ppdu_id */
	size = (ppdu_info->msdu_info.first_msdu_payload -
		qdf_nbuf_data(nbuf)) + 4;
	ppdu_info->msdu_info.first_msdu_payload = NULL;

	if (qdf_nbuf_pull_head(nbuf, size) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "[%s]:[%d] No header present",
			  __func__, __LINE__);
		return 1;
	}

	/* Only retain RX MSDU payload in the skb */
	qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) -
			   ppdu_info->msdu_info.payload_len);
	if (!qdf_nbuf_update_radiotap(&pdev->ppdu_info.rx_status, nbuf,
				      qdf_nbuf_headroom(nbuf))) {
		DP_STATS_INC(pdev, dropped.mon_radiotap_update_err, 1);
		return 1;
	}

	pdev->monitor_vdev->osif_rx_mon(pdev->monitor_vdev->osif_vdev,
					nbuf, NULL);
	pdev->ppdu_info.rx_status.monitor_direct_used = 0;
	return 0;
}

/**
* dp_rx_handle_ppdu_stats() - Allocate and deliver ppdu stats to cdp layer
* @soc: core txrx main context
* @pdev: pdev structure
* @ppdu_info: structure for rx ppdu ring
*
* Return: none
*/
#ifdef FEATURE_PERPKT_INFO
static inline void
dp_rx_handle_ppdu_stats(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info)
{
	qdf_nbuf_t ppdu_nbuf;
	struct dp_peer *peer;
	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;

	/*
	 * Do not allocate if fcs error,
	 * ast idx invalid / fctl invalid
	 */
	if (ppdu_info->com_info.mpdu_cnt_fcs_ok == 0)
		return;

	if (ppdu_info->nac_info.fc_valid &&
	    ppdu_info->nac_info.to_ds_flag &&
	    ppdu_info->nac_info.mac_addr2_valid) {
		struct dp_neighbour_peer *peer = NULL;
		uint8_t rssi = ppdu_info->rx_status.rssi_comb;

		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
		if (pdev->neighbour_peers_added) {
			TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
				      neighbour_peer_list_elem) {
				if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr,
						 &ppdu_info->nac_info.mac_addr2,
						 QDF_MAC_ADDR_SIZE)) {
					peer->rssi = rssi;
					break;
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
	}

	/* need not generate wdi event when mcopy and
	 * enhanced stats are not enabled
	 */
	if (!pdev->mcopy_mode && !pdev->enhanced_stats_en)
		return;

	if (!pdev->mcopy_mode) {
		if (!ppdu_info->rx_status.frame_control_info_valid)
			return;

		if (ppdu_info->rx_status.ast_index == HAL_AST_IDX_INVALID)
			return;
	}
	ppdu_nbuf = qdf_nbuf_alloc(soc->osdev,
				   sizeof(struct cdp_rx_indication_ppdu),
				   0, 0, FALSE);
	if (ppdu_nbuf) {
		dp_rx_populate_cdp_indication_ppdu(pdev, ppdu_info, ppdu_nbuf);
		qdf_nbuf_put_tail(ppdu_nbuf,
				  sizeof(struct cdp_rx_indication_ppdu));
		cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)ppdu_nbuf->data;
		peer = dp_peer_find_by_id(soc, cdp_rx_ppdu->peer_id);
		if (peer) {
			cdp_rx_ppdu->cookie = (void *)peer->wlanstats_ctx;
			dp_rx_stats_update(pdev, peer, cdp_rx_ppdu);
			dp_peer_unref_del_find_by_id(peer);
		}
		if (cdp_rx_ppdu->peer_id != HTT_INVALID_PEER) {
			dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC,
					     soc, ppdu_nbuf,
					     cdp_rx_ppdu->peer_id,
					     WDI_NO_VAL, pdev->pdev_id);
		} else if (pdev->mcopy_mode) {
			dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, soc,
					     ppdu_nbuf, HTT_INVALID_PEER,
					     WDI_NO_VAL, pdev->pdev_id);
		} else {
			qdf_nbuf_free(ppdu_nbuf);
		}
	}
}
#else
static inline void
dp_rx_handle_ppdu_stats(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info)
{
}
#endif

/**
* dp_rx_process_peer_based_pktlog() - Process Rx pktlog if peer based
* filtering enabled
* @soc: core txrx main context
* @ppdu_info: Structure for rx ppdu info
* @status_nbuf: Qdf nbuf abstraction for linux skb
* @mac_id: mac_id/pdev_id correspondingly for MCL and WIN
*
* Return: none
*/
static inline void
dp_rx_process_peer_based_pktlog(struct dp_soc *soc,
				struct hal_rx_ppdu_info *ppdu_info,
				qdf_nbuf_t status_nbuf, uint32_t mac_id)
{
	struct dp_peer *peer;
	struct dp_ast_entry *ast_entry;
	uint32_t ast_index;

	ast_index = ppdu_info->rx_status.ast_index;
	if (ast_index < (WLAN_UMAC_PSOC_MAX_PEERS * 2)) {
		ast_entry = soc->ast_table[ast_index];
		if (ast_entry) {
			peer = ast_entry->peer;
			if (peer && (peer->peer_ids[0] != HTT_INVALID_PEER)) {
				if (peer->peer_based_pktlog_filter) {
					dp_wdi_event_handler(
						WDI_EVENT_RX_DESC, soc,
						status_nbuf,
						peer->peer_ids[0],
						WDI_NO_VAL, mac_id);
				}
			}
		}
	}
}

#if defined(HTT_UL_OFDMA_USER_INFO_V0_W0_VALID_M)
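/**
 * dp_rx_ul_ofdma_ru_size_to_width() - Convert an HTT UL OFDMA RU size
 * enum to an RU width
 * @ru_size: HTT_UL_OFDMA_V0_RU_SIZE_* value
 * @ru_width: filled with the width, seemingly in units of 26-tone RUs
 * (e.g. RU_106 -> 4, RU_996x2 -> 74); set to 0 on an unknown size
 *
 * Return: none
 */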
static inline void
dp_rx_ul_ofdma_ru_size_to_width(
	uint32_t ru_size,
	uint32_t *ru_width)
{
	uint32_t width;

	width = 0;
	switch (ru_size) {
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_26:
		width = 1;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_52:
		width = 2;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_106:
		width = 4;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_242:
		width = 9;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_484:
		width = 18;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_996:
		width = 37;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_996x2:
		width = 74;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "RU size to width convert err");
		break;
	}
	*ru_width = width;
}

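/**
 * dp_rx_mon_handle_ofdma_info() - For an MU OFDMA reception, extract
 * per-user mcs, nss, RU start index and RU width from the HTT UL OFDMA
 * user info words into mon_rx_user_status
 * @ppdu_info: ppdu info structure from ppdu ring
 *
 * Only version-0 user info words marked valid are decoded.
 */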
static inline void
dp_rx_mon_handle_ofdma_info(struct hal_rx_ppdu_info *ppdu_info)
{
	struct mon_rx_user_status *mon_rx_user_status;
	uint32_t num_users;
	uint32_t i;
	uint32_t ul_ofdma_user_v0_word0;
	uint32_t ul_ofdma_user_v0_word1;
	uint32_t ru_width;

	if (ppdu_info->rx_status.reception_type != HAL_RX_TYPE_MU_OFDMA)
		return;

	num_users = ppdu_info->com_info.num_users;
	if (num_users > HAL_MAX_UL_MU_USERS)
		num_users = HAL_MAX_UL_MU_USERS;
	for (i = 0; i < num_users; i++) {
		mon_rx_user_status = &ppdu_info->rx_user_status[i];
		ul_ofdma_user_v0_word0 =
			mon_rx_user_status->ul_ofdma_user_v0_word0;
		ul_ofdma_user_v0_word1 =
			mon_rx_user_status->ul_ofdma_user_v0_word1;

		if (HTT_UL_OFDMA_USER_INFO_V0_W0_VALID_GET(
			ul_ofdma_user_v0_word0) &&
		    !HTT_UL_OFDMA_USER_INFO_V0_W0_VER_GET(
			ul_ofdma_user_v0_word0)) {
			mon_rx_user_status->mcs =
				HTT_UL_OFDMA_USER_INFO_V0_W1_MCS_GET(
					ul_ofdma_user_v0_word1);
			mon_rx_user_status->nss =
				HTT_UL_OFDMA_USER_INFO_V0_W1_NSS_GET(
					ul_ofdma_user_v0_word1);

			mon_rx_user_status->ofdma_info_valid = 1;
			mon_rx_user_status->dl_ofdma_ru_start_index =
				HTT_UL_OFDMA_USER_INFO_V0_W1_RU_START_GET(
					ul_ofdma_user_v0_word1);

			dp_rx_ul_ofdma_ru_size_to_width(
				HTT_UL_OFDMA_USER_INFO_V0_W1_RU_SIZE_GET(
					ul_ofdma_user_v0_word1),
				&ru_width);
			mon_rx_user_status->dl_ofdma_ru_width = ru_width;
		}
	}
}
#else
static inline void
dp_rx_mon_handle_ofdma_info(struct hal_rx_ppdu_info *ppdu_info)
{
}
#endif

/**
* dp_rx_mon_status_process_tlv() - Process status TLV in status
* buffer on Rx status Queue posted by status SRNG processing.
* @soc: core txrx main context
* @mac_id: mac_id which is one of 3 mac_ids
* @quota: No. of ring entry that can be serviced in one shot.
*
* Return: none
*/
static inline void
dp_rx_mon_status_process_tlv(struct dp_soc *soc, uint32_t mac_id,
			     uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	struct hal_rx_ppdu_info *ppdu_info;
	qdf_nbuf_t status_nbuf;
	uint8_t *rx_tlv;
	uint8_t *rx_tlv_start;
	uint32_t tlv_status = HAL_TLV_STATUS_BUF_DONE;
	QDF_STATUS enh_log_status = QDF_STATUS_SUCCESS;
	struct cdp_pdev_mon_stats *rx_mon_stats;
	int smart_mesh_status;
	enum WDI_EVENT pktlog_mode = WDI_NO_VAL;
	bool nbuf_used;
	uint32_t rx_enh_capture_mode;

	ppdu_info = &pdev->ppdu_info;
	rx_mon_stats = &pdev->rx_mon_stats;

	if (pdev->mon_ppdu_status != DP_PPDU_STATUS_START)
		return;

	rx_enh_capture_mode = pdev->rx_enh_capture_mode;

	while (!qdf_nbuf_is_queue_empty(&pdev->rx_status_q)) {

		status_nbuf = qdf_nbuf_queue_remove(&pdev->rx_status_q);

		rx_tlv = qdf_nbuf_data(status_nbuf);
		rx_tlv_start = rx_tlv;
		nbuf_used = false;

		if ((pdev->monitor_vdev) || (pdev->enhanced_stats_en) ||
		    pdev->mcopy_mode ||
		    (rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED)) {
			do {
				tlv_status = hal_rx_status_get_tlv_info(rx_tlv,
						ppdu_info, pdev->soc->hal_soc,
						status_nbuf);

				dp_rx_mon_update_dbg_ppdu_stats(ppdu_info,
								rx_mon_stats);

				dp_rx_mon_enh_capture_process(pdev, tlv_status,
					status_nbuf, ppdu_info,
					&nbuf_used);

				rx_tlv = hal_rx_status_get_next_tlv(rx_tlv);

				if ((rx_tlv - rx_tlv_start) >= RX_BUFFER_SIZE)
					break;

			} while ((tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE) ||
				 (tlv_status == HAL_TLV_STATUS_HEADER) ||
				 (tlv_status == HAL_TLV_STATUS_MPDU_END) ||
				 (tlv_status == HAL_TLV_STATUS_MSDU_END));
		}
		if (pdev->dp_peer_based_pktlog) {
			dp_rx_process_peer_based_pktlog(soc, ppdu_info,
							status_nbuf, mac_id);
		} else {
			if (pdev->rx_pktlog_mode == DP_RX_PKTLOG_FULL)
				pktlog_mode = WDI_EVENT_RX_DESC;
			else if (pdev->rx_pktlog_mode == DP_RX_PKTLOG_LITE)
				pktlog_mode = WDI_EVENT_LITE_RX;

			if (pktlog_mode != WDI_NO_VAL)
				dp_wdi_event_handler(pktlog_mode, soc,
						     status_nbuf,
						     HTT_INVALID_PEER,
						     WDI_NO_VAL, mac_id);
		}

		/* smart monitor vap and m_copy cannot co-exist */
		if (ppdu_info->rx_status.monitor_direct_used &&
		    pdev->neighbour_peers_added &&
		    pdev->monitor_vdev) {
			smart_mesh_status = dp_rx_handle_smart_mesh_mode(soc,
						pdev, ppdu_info, status_nbuf);
			if (smart_mesh_status)
				qdf_nbuf_free(status_nbuf);
		} else if (qdf_unlikely(pdev->mcopy_mode)) {
			dp_rx_process_mcopy_mode(soc, pdev,
						 ppdu_info, tlv_status,
						 status_nbuf);
		} else if (rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED) {
			if (!nbuf_used)
				qdf_nbuf_free(status_nbuf);

			if (tlv_status == HAL_TLV_STATUS_PPDU_DONE)
				enh_log_status =
					dp_rx_handle_enh_capture(soc,
							pdev, ppdu_info);
		} else {
			qdf_nbuf_free(status_nbuf);
		}

		if (tlv_status == HAL_TLV_STATUS_PPDU_NON_STD_DONE) {
			dp_rx_mon_deliver_non_std(soc, mac_id);
		} else if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
			rx_mon_stats->status_ppdu_done++;
			dp_rx_mon_handle_ofdma_info(ppdu_info);
			if (pdev->enhanced_stats_en ||
			    pdev->mcopy_mode || pdev->neighbour_peers_added)
				dp_rx_handle_ppdu_stats(soc, pdev, ppdu_info);

			pdev->mon_ppdu_status = DP_PPDU_STATUS_DONE;
			dp_rx_mon_dest_process(soc, mac_id, quota);
			pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
		}
	}
	return;
}

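/*
 * Note: on HAL_TLV_STATUS_PPDU_DONE above, mon_ppdu_status is flipped to
 * DP_PPDU_STATUS_DONE for the duration of dp_rx_mon_dest_process() and
 * then back to DP_PPDU_STATUS_START, so destination-ring processing runs
 * once per fully parsed PPDU.
 */
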
/*
 * dp_rx_mon_status_srng_process() - Process monitor status ring
 * post the status ring buffer to Rx status Queue for later
 * processing when status ring is filled with status TLV.
 * Allocate a new buffer to status ring if the filled buffer
 * is posted.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: No. of ring entry that can be serviced in one shot.
 *
 * Return: uint32_t: No. of ring entry that is processed.
 */
static inline uint32_t
dp_rx_mon_status_srng_process(struct dp_soc *soc, uint32_t mac_id,
			      uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	hal_soc_handle_t hal_soc;
	void *mon_status_srng;
	void *rxdma_mon_status_ring_entry;
	QDF_STATUS status;
	uint32_t work_done = 0;
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);

	mon_status_srng = pdev->rxdma_mon_status_ring[mac_for_pdev].hal_srng;

	qdf_assert(mon_status_srng);
	if (!mon_status_srng || !hal_srng_initialized(mon_status_srng)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Status Ring Init Failed -- %pK",
			  __func__, __LINE__, mon_status_srng);
		return work_done;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_status_srng)))
		goto done;

	/* mon_status_ring_desc => WBM_BUFFER_RING STRUCT =>
	 * BUFFER_ADDR_INFO STRUCT
	 */
	while (qdf_likely((rxdma_mon_status_ring_entry =
		hal_srng_src_peek(hal_soc, mon_status_srng))
			&& quota--)) {
		uint32_t rx_buf_cookie;
		qdf_nbuf_t status_nbuf;
		struct dp_rx_desc *rx_desc;
		uint8_t *status_buf;
		qdf_dma_addr_t paddr;
		uint64_t buf_addr;

		buf_addr =
			(HAL_RX_BUFFER_ADDR_31_0_GET(
				rxdma_mon_status_ring_entry) |
			((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(
				rxdma_mon_status_ring_entry)) << 32));

		if (qdf_likely(buf_addr)) {
			rx_buf_cookie =
				HAL_RX_BUF_COOKIE_GET(
					rxdma_mon_status_ring_entry);
			rx_desc = dp_rx_cookie_2_va_mon_status(soc,
							       rx_buf_cookie);

			qdf_assert(rx_desc);

			status_nbuf = rx_desc->nbuf;

			qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
					      QDF_DMA_FROM_DEVICE);

			status_buf = qdf_nbuf_data(status_nbuf);

			status = hal_get_rx_status_done(status_buf);

			if (status != QDF_STATUS_SUCCESS) {
				uint32_t hp, tp;

				hal_get_sw_hptp(hal_soc, mon_status_srng,
						&tp, &hp);
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  "[%s][%d] status not done - hp:%u, tp:%u",
					  __func__, __LINE__, hp, tp);
				/* WAR for missing status: Skip status entry */
				hal_srng_src_get_next(hal_soc, mon_status_srng);
				continue;
			}
			qdf_nbuf_set_pktlen(status_nbuf, RX_BUFFER_SIZE);

			qdf_nbuf_unmap_single(soc->osdev, status_nbuf,
					      QDF_DMA_FROM_DEVICE);

			/* Put the status_nbuf to queue */
			qdf_nbuf_queue_add(&pdev->rx_status_q, status_nbuf);

		} else {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			struct rx_desc_pool *rx_desc_pool;
			uint32_t num_alloc_desc;

			rx_desc_pool = &soc->rx_desc_status[mac_id];

			num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
								  rx_desc_pool,
								  1,
								  &desc_list,
								  &tail);
			/*
			 * No free descriptors available
			 */
			if (qdf_unlikely(num_alloc_desc == 0)) {
				work_done++;
				break;
			}

			rx_desc = &desc_list->rx_desc;
		}

		status_nbuf = dp_rx_nbuf_prepare(soc, pdev);

		/*
		 * qdf_nbuf alloc or map failed,
		 * free the dp rx desc to free list,
		 * fill in NULL dma address at current HP entry,
		 * keep HP in mon_status_ring unchanged,
		 * wait next time dp_rx_mon_status_srng_process
		 * to fill in buffer at current HP.
		 */
		if (qdf_unlikely(!status_nbuf)) {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			struct rx_desc_pool *rx_desc_pool;

			rx_desc_pool = &soc->rx_desc_status[mac_id];

			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: fail to allocate or map qdf_nbuf",
				  __func__);
			dp_rx_add_to_free_desc_list(&desc_list,
						    &tail, rx_desc);
			dp_rx_add_desc_list_to_free_list(soc, &desc_list,
							 &tail, mac_id,
							 rx_desc_pool);

			hal_rxdma_buff_addr_info_set(
				rxdma_mon_status_ring_entry,
				0, 0, HAL_RX_BUF_RBM_SW3_BM);
			work_done++;
			break;
		}

		paddr = qdf_nbuf_get_frag_paddr(status_nbuf, 0);

		rx_desc->nbuf = status_nbuf;
		rx_desc->in_use = 1;

		hal_rxdma_buff_addr_info_set(rxdma_mon_status_ring_entry,
					     paddr, rx_desc->cookie,
					     HAL_RX_BUF_RBM_SW3_BM);

		hal_srng_src_get_next(hal_soc, mon_status_srng);
		work_done++;
	}
done:

	hal_srng_access_end(hal_soc, mon_status_srng);

	return work_done;
}
/*
 * dp_rx_mon_status_process() - Process monitor status ring and
 * TLV in status ring.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: No. of ring entry that can be serviced in one shot.
 *
 * Return: uint32_t: No. of ring entry that is processed.
 */
static inline uint32_t
dp_rx_mon_status_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) {
	uint32_t work_done;

	work_done = dp_rx_mon_status_srng_process(soc, mac_id, quota);
	quota -= work_done;
	dp_rx_mon_status_process_tlv(soc, mac_id, quota);

	return work_done;
}
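/*
 * Note: the quota above is shared sequentially - entries consumed while
 * reaping the status SRNG are subtracted before the remainder is passed
 * on to TLV processing (and, through it, destination-ring processing).
 */
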
/**
 * dp_mon_process() - Main monitor mode processing routine.
 * This calls the monitor status ring processing first and then the
 * monitor destination ring processing.
 * Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: No. of status ring entry that can be serviced in one shot.
 *
 * Return: uint32_t: No. of ring entry that is processed.
 */
uint32_t
dp_mon_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) {
	return dp_rx_mon_status_process(soc, mac_id, quota);
}

/**
 * dp_rx_pdev_mon_status_detach() - detach dp rx for status ring
 * @pdev: core txrx pdev context
 * @mac_id: mac_id/pdev_id correspondingly for MCL and WIN
 *
 * This function will detach the DP RX status ring from the
 * main device context and free DP Rx resources for
 * the status ring.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS
dp_rx_pdev_mon_status_detach(struct dp_pdev *pdev, int mac_id)
{
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_status[mac_id];
	if (rx_desc_pool->pool_size != 0) {
		if (!dp_is_soc_reinit(soc))
			dp_rx_desc_nbuf_and_pool_free(soc, mac_id,
						      rx_desc_pool);
		else
			dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	}

	return QDF_STATUS_SUCCESS;
}

1334/*
1335 * dp_rx_buffers_replenish() - replenish monitor status ring with
1336 * rx nbufs called during dp rx
1337 * monitor status ring initialization
1338 *
1339 * @soc: core txrx main context
1340 * @mac_id: mac_id which is one of 3 mac_ids
1341 * @dp_rxdma_srng: dp monitor status circular ring
1342 * @rx_desc_pool; Pointer to Rx descriptor pool
1343 * @num_req_buffers: number of buffer to be replenished
1344 * @desc_list: list of descs if called from dp rx monitor status
1345 * process or NULL during dp rx initialization or
1346 * out of buffer interrupt
1347 * @tail: tail of descs list
1348 * @owner: who owns the nbuf (host, NSS etc...)
1349 * Return: return success or failure
1350 */
1351static inline
1352QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
1353 uint32_t mac_id,
1354 struct dp_srng *dp_rxdma_srng,
1355 struct rx_desc_pool *rx_desc_pool,
1356 uint32_t num_req_buffers,
1357 union dp_rx_desc_list_elem_t **desc_list,
1358 union dp_rx_desc_list_elem_t **tail,
1359 uint8_t owner)
1360{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	uint32_t num_entries_avail;
	uint32_t count = 0;
	int sync_hw_ptr = 1;
	qdf_dma_addr_t paddr;
	qdf_nbuf_t rx_netbuf;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_srng;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(dp_soc, mac_id);

	rxdma_srng = dp_rxdma_srng->hal_srng;

	qdf_assert(rxdma_srng);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "[%s][%d] requested %d buffers for replenish",
		  __func__, __LINE__, num_req_buffers);

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "[%s][%d] no free rx_descs in freelist",
				  __func__, __LINE__);
			return QDF_STATUS_E_NOMEM;
		}

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "[%s][%d] %d rx desc allocated", __func__, __LINE__,
			  num_alloc_desc);

		num_req_buffers = num_alloc_desc;
	}

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng, sync_hw_ptr);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "[%s][%d] no of available entries in rxdma ring: %d",
		  __func__, __LINE__, num_entries_avail);

	if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	}
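
	/*
	 * Worked example (illustrative numbers): if 128 buffers were
	 * requested but only 100 ring entries are free, num_req_buffers
	 * is trimmed to 100 and num_desc_to_free becomes 28; the 28
	 * surplus descriptors are handed back to the free list at the
	 * bottom of this function.
	 */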

	while (count < num_req_buffers) {
		rx_netbuf = dp_rx_nbuf_prepare(dp_soc, dp_pdev);

		/*
		 * If the qdf_nbuf alloc or map failed, keep HP in
		 * mon_status_ring unchanged and wait for
		 * dp_rx_mon_status_srng_process to fill in the buffer
		 * at the current HP.
		 */
		if (qdf_unlikely(!rx_netbuf)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: qdf_nbuf allocate or map fail, count %d",
				  __func__, count);
			break;
		}

		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);

		next = (*desc_list)->next;
		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
							 rxdma_srng);

		if (qdf_unlikely(!rxdma_ring_entry)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "[%s][%d] rxdma_ring_entry is NULL, count - %d",
				  __func__, __LINE__, count);
			qdf_nbuf_unmap_single(dp_soc->osdev, rx_netbuf,
					      QDF_DMA_FROM_DEVICE);
			qdf_nbuf_free(rx_netbuf);
			break;
		}
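
		/*
		 * Note on the error path above: dp_rx_nbuf_prepare()
		 * returns an nbuf that is already DMA-mapped, so it
		 * must be unmapped before qdf_nbuf_free() to avoid
		 * leaking the mapping.
		 */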

		(*desc_list)->rx_desc.nbuf = rx_netbuf;
		(*desc_list)->rx_desc.in_use = 1;
		count++;

		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
					     (*desc_list)->rx_desc.cookie,
					     owner);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "[%s][%d] rx_desc=%pK, cookie=%d, nbuf=%pK, paddr=%pK",
			  __func__, __LINE__, &(*desc_list)->rx_desc,
			  (*desc_list)->rx_desc.cookie, rx_netbuf,
			  (void *)paddr);

		*desc_list = next;
	}

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "successfully replenished %d buffers", num_req_buffers);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%d rx desc added back to free list", num_desc_to_free);

	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list) {
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);
	}

	return QDF_STATUS_SUCCESS;
}
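
/*
 * Usage sketch (illustrative; not an actual caller in this file, and
 * the ring/pool indexing shown is an assumption): when the replenish
 * is driven by an out-of-buffer condition rather than by attach,
 * desc_list/tail start out NULL and the descriptors are pulled from
 * the per-mac freelist inside the function:
 *
 *	union dp_rx_desc_list_elem_t *desc_list = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *
 *	dp_rx_mon_status_buffers_replenish(soc, mac_id,
 *			&pdev->rxdma_mon_status_ring[mac_id],
 *			&soc->rx_desc_status[mac_id], 1,
 *			&desc_list, &tail, HAL_RX_BUF_RBM_SW3_BM);
 */
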
/**
 * dp_rx_pdev_mon_status_attach() - attach DP RX monitor status ring
 * @pdev: core txrx pdev context
 * @ring_id: ring number
 *
 * This function will attach a DP RX monitor status ring into pDEV
 * and replenish monitor status ring with buffer.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *	   QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS
dp_rx_pdev_mon_status_attach(struct dp_pdev *pdev, int ring_id)
{
	struct dp_soc *soc = pdev->soc;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *mon_status_ring;
	uint32_t num_entries;
	uint32_t i;
	struct rx_desc_pool *rx_desc_pool;
	QDF_STATUS status;
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, ring_id);

	mon_status_ring = &pdev->rxdma_mon_status_ring[mac_for_pdev];

	num_entries = mon_status_ring->num_entries;

	rx_desc_pool = &soc->rx_desc_status[ring_id];

	dp_info("Mon RX Status Pool[%d] entries=%d",
		ring_id, num_entries);

	status = dp_rx_desc_pool_alloc(soc, ring_id, num_entries + 1,
				       rx_desc_pool);
	if (!QDF_IS_STATUS_SUCCESS(status))
		return status;

	dp_debug("Mon RX Status Buffers Replenish ring_id=%d", ring_id);

	status = dp_rx_mon_status_buffers_replenish(soc, ring_id,
						    mon_status_ring,
						    rx_desc_pool,
						    num_entries,
						    &desc_list, &tail,
						    HAL_RX_BUF_RBM_SW3_BM);

	if (!QDF_IS_STATUS_SUCCESS(status))
		return status;

	qdf_nbuf_queue_init(&pdev->rx_status_q);
	qdf_nbuf_queue_init(&pdev->rx_ppdu_buf_q);

	pdev->mon_ppdu_status = DP_PPDU_STATUS_START;

	qdf_mem_zero(&pdev->ppdu_info.rx_status,
		     sizeof(pdev->ppdu_info.rx_status));

	qdf_mem_zero(&pdev->rx_mon_stats,
		     sizeof(pdev->rx_mon_stats));

	dp_rx_mon_init_dbg_ppdu_stats(&pdev->ppdu_info,
				      &pdev->rx_mon_stats);

	for (i = 0; i < MAX_MU_USERS; i++) {
		qdf_nbuf_queue_init(&pdev->mpdu_q[i]);
		pdev->is_mpdu_hdr[i] = true;
	}

	/* zero the whole per-user msdu_list array, not just one element */
	qdf_mem_zero(pdev->msdu_list, sizeof(pdev->msdu_list));

	pdev->rx_enh_capture_mode = CDP_RX_ENH_CAPTURE_DISABLED;

	return QDF_STATUS_SUCCESS;
}
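
/*
 * Usage sketch (illustrative; the caller shown is an assumption, not
 * part of this file): the attach above pairs with
 * dp_rx_pdev_mon_status_detach() on the teardown or error path, one
 * call per status ring. Detach is safe after a partial attach because
 * it checks the pool size before freeing:
 *
 *	status = dp_rx_pdev_mon_status_attach(pdev, ring_id);
 *	if (!QDF_IS_STATUS_SUCCESS(status)) {
 *		dp_rx_pdev_mon_status_detach(pdev, ring_id);
 *		return status;
 *	}
 */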