/*
 * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "hal_api_mon.h"
#include "dp_rx_mon.h"
#include "dp_internal.h"
#include "qdf_mem.h" /* qdf_mem_malloc,free */

#include "htt.h"

#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif

#ifdef WLAN_RX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#else
static QDF_STATUS
dp_rx_handle_enh_capture(struct dp_soc *soc, struct dp_pdev *pdev,
			 struct hal_rx_ppdu_info *ppdu_info)
{
	return QDF_STATUS_SUCCESS;
}

static void
dp_rx_mon_enh_capture_process(struct dp_pdev *pdev, uint32_t tlv_status,
			      qdf_nbuf_t status_nbuf,
			      struct hal_rx_ppdu_info *ppdu_info,
			      bool *nbuf_used)
{
}
#endif

#ifdef FEATURE_PERPKT_INFO
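/*
 * dp_rx_populate_rx_rssi_chain() - Populate per-chain, per-bandwidth RSSI
 * @ppdu_info: ppdu info structure from ppdu ring
 * @cdp_rx_ppdu: rx ppdu indication structure
 */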
static inline void
dp_rx_populate_rx_rssi_chain(struct hal_rx_ppdu_info *ppdu_info,
			     struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
	uint8_t chain, bw;
	int8_t rssi;

	for (chain = 0; chain < SS_COUNT; chain++) {
		for (bw = 0; bw < MAX_BW; bw++) {
			rssi = ppdu_info->rx_status.rssi_chain[chain][bw];
			if (rssi != DP_RSSI_INVAL)
				cdp_rx_ppdu->rssi_chain[chain][bw] = rssi;
			else
				cdp_rx_ppdu->rssi_chain[chain][bw] = 0;
		}
	}
}

/*
 * dp_rx_populate_su_evm_details() - Populate su evm info
 * @ppdu_info: ppdu info structure from ppdu ring
 * @cdp_rx_ppdu: rx ppdu indication structure
 */
static inline void
dp_rx_populate_su_evm_details(struct hal_rx_ppdu_info *ppdu_info,
			      struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
	uint8_t pilot_evm;
	uint8_t nss_count;
	uint8_t pilot_count;

	nss_count = ppdu_info->evm_info.nss_count;
	pilot_count = ppdu_info->evm_info.pilot_count;

	if ((nss_count * pilot_count) > DP_RX_MAX_SU_EVM_COUNT) {
		qdf_err("pilot evm count is more than expected");
		return;
	}
	cdp_rx_ppdu->evm_info.pilot_count = pilot_count;
	cdp_rx_ppdu->evm_info.nss_count = nss_count;

	/* Populate evm for pilot_evm = nss_count * pilot_count */
	for (pilot_evm = 0; pilot_evm < nss_count * pilot_count; pilot_evm++) {
		cdp_rx_ppdu->evm_info.pilot_evm[pilot_evm] =
			ppdu_info->evm_info.pilot_evm[pilot_evm];
	}
}

/**
 * dp_rx_inc_rusize_cnt() - increment pdev stats based on RU size
 * @pdev: pdev ctx
 * @rx_user_status: mon rx user status
 *
 * Return: bool
 */
static inline bool
dp_rx_inc_rusize_cnt(struct dp_pdev *pdev,
		     struct mon_rx_user_status *rx_user_status)
{
	uint32_t ru_size;
	bool is_data;

	ru_size = rx_user_status->dl_ofdma_ru_size;

	if (dp_is_subtype_data(rx_user_status->frame_control)) {
		DP_STATS_INC(pdev,
			     ul_ofdma.data_rx_ru_size[ru_size], 1);
		is_data = true;
	} else {
		DP_STATS_INC(pdev,
			     ul_ofdma.nondata_rx_ru_size[ru_size], 1);
		is_data = false;
	}

	return is_data;
}

/**
 * dp_rx_populate_cdp_indication_ppdu_user() - Populate per user cdp indication
 * @pdev: pdev ctx
 * @ppdu_info: ppdu info structure from ppdu ring
 * @ppdu_nbuf: qdf nbuf abstraction for linux skb
 *
 * Return: none
 */
static inline void
dp_rx_populate_cdp_indication_ppdu_user(struct dp_pdev *pdev,
					struct hal_rx_ppdu_info *ppdu_info,
					qdf_nbuf_t ppdu_nbuf)
{
	struct dp_peer *peer;
	struct dp_soc *soc = pdev->soc;
	struct dp_ast_entry *ast_entry;
	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;
	uint32_t ast_index;
	int i;
	struct mon_rx_user_status *rx_user_status;
	struct cdp_rx_stats_ppdu_user *rx_stats_peruser;
	int ru_size;
	bool is_data = false;
	uint32_t num_users;

	cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)ppdu_nbuf->data;

	num_users = ppdu_info->com_info.num_users;
	for (i = 0; i < num_users; i++) {
		if (i > OFDMA_NUM_USERS)
			return;

		rx_user_status = &ppdu_info->rx_user_status[i];
		rx_stats_peruser = &cdp_rx_ppdu->user[i];

		ast_index = rx_user_status->ast_index;
		if (ast_index >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
			rx_stats_peruser->peer_id = HTT_INVALID_PEER;
			return;
		}

		ast_entry = soc->ast_table[ast_index];
		if (!ast_entry) {
			rx_stats_peruser->peer_id = HTT_INVALID_PEER;
			return;
		}

		peer = ast_entry->peer;
		if (!peer || peer->peer_ids[0] == HTT_INVALID_PEER) {
			rx_stats_peruser->peer_id = HTT_INVALID_PEER;
			return;
		}

		rx_stats_peruser->first_data_seq_ctrl =
			rx_user_status->first_data_seq_ctrl;

		rx_stats_peruser->frame_control =
			rx_user_status->frame_control;

		rx_stats_peruser->tcp_msdu_count =
			rx_user_status->tcp_msdu_count;
		rx_stats_peruser->udp_msdu_count =
			rx_user_status->udp_msdu_count;
		rx_stats_peruser->other_msdu_count =
			rx_user_status->other_msdu_count;
		rx_stats_peruser->preamble_type =
			rx_user_status->preamble_type;
		rx_stats_peruser->mpdu_cnt_fcs_ok =
			rx_user_status->mpdu_cnt_fcs_ok;
		rx_stats_peruser->mpdu_cnt_fcs_err =
			rx_user_status->mpdu_cnt_fcs_err;
		rx_stats_peruser->mpdu_fcs_ok_bitmap =
			rx_user_status->mpdu_fcs_ok_bitmap;
		rx_stats_peruser->mpdu_ok_byte_count =
			rx_user_status->mpdu_ok_byte_count;
		rx_stats_peruser->mpdu_err_byte_count =
			rx_user_status->mpdu_err_byte_count;

		cdp_rx_ppdu->num_mpdu += rx_user_status->mpdu_cnt_fcs_ok;
		cdp_rx_ppdu->num_msdu +=
			(rx_stats_peruser->tcp_msdu_count +
			 rx_stats_peruser->udp_msdu_count +
			 rx_stats_peruser->other_msdu_count);
		rx_stats_peruser->retries =
			CDP_FC_IS_RETRY_SET(rx_stats_peruser->frame_control) ?
			rx_stats_peruser->mpdu_cnt_fcs_ok : 0;

		if (rx_stats_peruser->mpdu_cnt_fcs_ok > 1)
			rx_stats_peruser->is_ampdu = 1;
		else
			rx_stats_peruser->is_ampdu = 0;

		rx_stats_peruser->tid = ppdu_info->rx_status.tid;

		qdf_mem_copy(rx_stats_peruser->mac_addr,
			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
		rx_stats_peruser->peer_id = peer->peer_ids[0];
		cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id;
		rx_stats_peruser->vdev_id = peer->vdev->vdev_id;

		if (cdp_rx_ppdu->u.ppdu_type == HAL_RX_TYPE_MU_OFDMA) {
			if (rx_user_status->ofdma_info_valid) {
				rx_stats_peruser->nss = rx_user_status->nss;
				rx_stats_peruser->mcs = rx_user_status->mcs;
				rx_stats_peruser->ofdma_info_valid =
					rx_user_status->ofdma_info_valid;
				rx_stats_peruser->ofdma_ru_start_index =
					rx_user_status->dl_ofdma_ru_start_index;
				rx_stats_peruser->ofdma_ru_width =
					rx_user_status->dl_ofdma_ru_width;
				rx_stats_peruser->user_index = i;
				ru_size = rx_user_status->dl_ofdma_ru_size;
				/*
				 * max RU size will be equal to
				 * HTT_UL_OFDMA_V0_RU_SIZE_RU_996x2
				 */
				if (ru_size >= OFDMA_NUM_RU_SIZE) {
					dp_err("invalid ru_size %d\n",
					       ru_size);
					return;
				}
				is_data = dp_rx_inc_rusize_cnt(pdev,
							       rx_user_status);
			} else {
				rx_stats_peruser->ofdma_info_valid = 0;
			}
			if (is_data) {
				/* counter to get number of MU OFDMA */
				pdev->stats.ul_ofdma.data_rx_ppdu++;
				pdev->stats.ul_ofdma.data_users[num_users]++;
			}
		}
	}
}

/**
 * dp_rx_populate_cdp_indication_ppdu() - Populate cdp rx indication structure
 * @pdev: pdev ctx
 * @ppdu_info: ppdu info structure from ppdu ring
 * @ppdu_nbuf: qdf nbuf abstraction for linux skb
 *
 * Return: none
 */
static inline void
dp_rx_populate_cdp_indication_ppdu(struct dp_pdev *pdev,
				   struct hal_rx_ppdu_info *ppdu_info,
				   qdf_nbuf_t ppdu_nbuf)
{
	struct dp_peer *peer;
	struct dp_soc *soc = pdev->soc;
	struct dp_ast_entry *ast_entry;
	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;
	uint32_t ast_index;
	uint32_t i;

	cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)ppdu_nbuf->data;

	cdp_rx_ppdu->first_data_seq_ctrl =
		ppdu_info->rx_status.first_data_seq_ctrl;
	cdp_rx_ppdu->frame_ctrl =
		ppdu_info->rx_status.frame_control;
	cdp_rx_ppdu->tcp_msdu_count = ppdu_info->rx_status.tcp_msdu_count;
	cdp_rx_ppdu->udp_msdu_count = ppdu_info->rx_status.udp_msdu_count;
	cdp_rx_ppdu->other_msdu_count = ppdu_info->rx_status.other_msdu_count;
	cdp_rx_ppdu->u.preamble = ppdu_info->rx_status.preamble_type;
	/* num mpdu is consolidated and added together in num user loop */
	cdp_rx_ppdu->num_mpdu = ppdu_info->com_info.mpdu_cnt_fcs_ok;
	/* num msdu is consolidated and added together in num user loop */
	cdp_rx_ppdu->num_msdu = (cdp_rx_ppdu->tcp_msdu_count +
				 cdp_rx_ppdu->udp_msdu_count +
				 cdp_rx_ppdu->other_msdu_count);

	cdp_rx_ppdu->retries = CDP_FC_IS_RETRY_SET(cdp_rx_ppdu->frame_ctrl) ?
		ppdu_info->com_info.mpdu_cnt_fcs_ok : 0;

	if (ppdu_info->com_info.mpdu_cnt_fcs_ok > 1)
		cdp_rx_ppdu->is_ampdu = 1;
	else
		cdp_rx_ppdu->is_ampdu = 0;
	cdp_rx_ppdu->tid = ppdu_info->rx_status.tid;

	ast_index = ppdu_info->rx_status.ast_index;
	if (ast_index >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
		cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
		return;
	}

	ast_entry = soc->ast_table[ast_index];
	if (!ast_entry) {
		cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
		return;
	}
	peer = ast_entry->peer;
	if (!peer || peer->peer_ids[0] == HTT_INVALID_PEER) {
		cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
		return;
	}

	qdf_mem_copy(cdp_rx_ppdu->mac_addr,
		     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
	cdp_rx_ppdu->peer_id = peer->peer_ids[0];
	cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id;

	cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id;
	cdp_rx_ppdu->length = ppdu_info->rx_status.ppdu_len;
	cdp_rx_ppdu->duration = ppdu_info->rx_status.duration;
	cdp_rx_ppdu->u.bw = ppdu_info->rx_status.bw;
	cdp_rx_ppdu->u.nss = ppdu_info->rx_status.nss;
	cdp_rx_ppdu->u.mcs = ppdu_info->rx_status.mcs;
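	/*
	 * VHT_SGI_NYSM denotes the short-GI-with-NSYM-disambiguation
	 * encoding; for 11AC it is normalized below to the plain 0.4 us
	 * short GI value (interpretation inferred from the macro name).
	 */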
	if ((ppdu_info->rx_status.sgi == VHT_SGI_NYSM) &&
	    (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC))
		cdp_rx_ppdu->u.gi = CDP_SGI_0_4_US;
	else
		cdp_rx_ppdu->u.gi = ppdu_info->rx_status.sgi;
	cdp_rx_ppdu->u.ldpc = ppdu_info->rx_status.ldpc;
	cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type;
	cdp_rx_ppdu->u.ltf_size = (ppdu_info->rx_status.he_data5 >>
				   QDF_MON_STATUS_HE_LTF_SIZE_SHIFT) & 0x3;
	cdp_rx_ppdu->rssi = ppdu_info->rx_status.rssi_comb;
	cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft;
	cdp_rx_ppdu->channel = ppdu_info->rx_status.chan_num;
	cdp_rx_ppdu->beamformed = ppdu_info->rx_status.beamformed;
	cdp_rx_ppdu->num_bytes = ppdu_info->rx_status.ppdu_len;
	cdp_rx_ppdu->lsig_a = ppdu_info->rx_status.rate;
	cdp_rx_ppdu->u.ltf_size = ppdu_info->rx_status.ltf_size;

	dp_rx_populate_rx_rssi_chain(ppdu_info, cdp_rx_ppdu);
	dp_rx_populate_su_evm_details(ppdu_info, cdp_rx_ppdu);
	cdp_rx_ppdu->rx_antenna = ppdu_info->rx_status.rx_antenna;

	cdp_rx_ppdu->nf = ppdu_info->rx_status.chan_noise_floor;
	for (i = 0; i < MAX_CHAIN; i++)
		cdp_rx_ppdu->per_chain_rssi[i] = ppdu_info->rx_status.rssi[i];

	cdp_rx_ppdu->is_mcast_bcast = ppdu_info->nac_info.mcast_bcast;

	cdp_rx_ppdu->num_users = ppdu_info->com_info.num_users;

	cdp_rx_ppdu->num_mpdu = 0;
	cdp_rx_ppdu->num_msdu = 0;

	dp_rx_populate_cdp_indication_ppdu_user(pdev, ppdu_info, ppdu_nbuf);
}
#else
static inline void
dp_rx_populate_cdp_indication_ppdu(struct dp_pdev *pdev,
				   struct hal_rx_ppdu_info *ppdu_info,
				   qdf_nbuf_t ppdu_nbuf)
{
}
#endif
/**
 * dp_rx_stats_update() - Update per-peer statistics
 * @soc: Datapath SOC handle
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 *
 * Return: None
 */
#ifdef FEATURE_PERPKT_INFO
static inline void dp_rx_rate_stats_update(struct dp_peer *peer,
					   struct cdp_rx_indication_ppdu *ppdu)
{
	uint32_t ratekbps = 0;
	uint32_t ppdu_rx_rate = 0;
	uint32_t nss = 0;
	uint32_t rix;

	if (!peer || !ppdu)
		return;

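	/* dp_getrateindex() appears to expect a zero-based NSS, hence the -1 */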
	if (ppdu->u.nss == 0)
		nss = 0;
	else
		nss = ppdu->u.nss - 1;

	ratekbps = dp_getrateindex(ppdu->u.gi,
				   ppdu->u.mcs,
				   nss,
				   ppdu->u.preamble,
				   ppdu->u.bw,
				   &rix);

	if (!ratekbps)
		return;

	ppdu->rix = rix;
	DP_STATS_UPD(peer, rx.last_rx_rate, ratekbps);
	dp_ath_rate_lpf(peer->stats.rx.avg_rx_rate, ratekbps);
	ppdu_rx_rate = dp_ath_rate_out(peer->stats.rx.avg_rx_rate);
	DP_STATS_UPD(peer, rx.rnd_avg_rx_rate, ppdu_rx_rate);
	ppdu->rx_ratekbps = ratekbps;
	ppdu->rx_ratecode = CDP_TXRX_RATECODE(ppdu->u.mcs,
					      nss,
					      ppdu->u.preamble);

	if (peer->vdev)
		peer->vdev->stats.rx.last_rx_rate = ratekbps;
}

static void dp_rx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
			       struct cdp_rx_indication_ppdu *ppdu)
{
	struct dp_soc *soc = NULL;
	uint8_t mcs, preamble, ac = 0;
	uint16_t num_msdu;
	bool is_invalid_peer = false;

	mcs = ppdu->u.mcs;
	preamble = ppdu->u.preamble;
	num_msdu = ppdu->num_msdu;

	if (pdev)
		soc = pdev->soc;
	else
		return;

	if (!peer) {
		is_invalid_peer = true;
		peer = pdev->invalid_peer;
	}

	if (!soc || soc->process_rx_status)
		return;

	DP_STATS_UPD(peer, rx.rssi, ppdu->rssi);
	if (peer->stats.rx.avg_rssi == INVALID_RSSI)
		peer->stats.rx.avg_rssi = ppdu->rssi;
	else
		peer->stats.rx.avg_rssi =
			DP_GET_AVG_RSSI(peer->stats.rx.avg_rssi, ppdu->rssi);

	if ((preamble == DOT11_A) || (preamble == DOT11_B))
		ppdu->u.nss = 1;

	if (ppdu->u.nss)
		DP_STATS_INC(peer, rx.nss[ppdu->u.nss - 1], num_msdu);

	DP_STATS_INC(peer, rx.sgi_count[ppdu->u.gi], num_msdu);
	DP_STATS_INC(peer, rx.bw[ppdu->u.bw], num_msdu);
	DP_STATS_INC(peer, rx.reception_type[ppdu->u.ppdu_type], num_msdu);
	DP_STATS_INCC(peer, rx.ampdu_cnt, num_msdu, ppdu->is_ampdu);
	DP_STATS_INCC(peer, rx.non_ampdu_cnt, num_msdu, !(ppdu->is_ampdu));
	DP_STATS_UPD(peer, rx.rx_rate, mcs);
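	/*
	 * Per-preamble MCS histograms: an MCS that is out of range for the
	 * given preamble type is counted in the MAX_MCS - 1 overflow bucket.
	 */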
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	/*
	 * If invalid TID, it could be a non-qos frame, hence do not update
	 * any AC counters
	 */
	ac = TID_TO_WME_AC(ppdu->tid);
	if (ppdu->tid != HAL_TID_INVALID)
		DP_STATS_INC(peer, rx.wme_ac_type[ac], num_msdu);
	dp_peer_stats_notify(peer);
	DP_STATS_UPD(peer, rx.last_rssi, ppdu->rssi);

	if (is_invalid_peer)
		return;

	if (dp_is_subtype_data(ppdu->frame_ctrl))
		dp_rx_rate_stats_update(peer, ppdu);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
			     &peer->stats, ppdu->peer_id,
			     UPDATE_PEER_STATS, pdev->pdev_id);
#endif
}
#endif

/*
 * dp_rx_get_fcs_ok_msdu() - get ppdu status buffer containing fcs_ok msdu
 * @pdev: pdev object
 * @ppdu_info: ppdu info object
 *
 * Return: nbuf
 */

static inline qdf_nbuf_t
dp_rx_get_fcs_ok_msdu(struct dp_pdev *pdev,
		      struct hal_rx_ppdu_info *ppdu_info)
{
	uint16_t mpdu_fcs_ok;
	qdf_nbuf_t status_nbuf = NULL;
	unsigned long int fcs_ok_bitmap;

	/* If fcs_ok_bitmap is zero, no need to process further */
	if (qdf_unlikely(!ppdu_info->com_info.mpdu_fcs_ok_bitmap))
		return NULL;

	/* Obtain the fcs-passed index from the bitmap;
	 * this index is used to get the first msdu payload
	 * of an mpdu that passed FCS
	 */

	fcs_ok_bitmap = ppdu_info->com_info.mpdu_fcs_ok_bitmap;
	mpdu_fcs_ok = qdf_find_first_bit(&fcs_ok_bitmap, HAL_RX_MAX_MPDU);

	/* Get the status buffer indexed by mpdu_fcs_ok,
	 * which contains the first msdu payload that passed FCS,
	 * and clone the buffer
	 */
	status_nbuf = ppdu_info->ppdu_msdu_info[mpdu_fcs_ok].nbuf;
	/* Take a ref of the status nbuf as this nbuf is to be
	 * freed by the upper layer.
	 */
	qdf_nbuf_ref(status_nbuf);

	/* Free the ppdu status buffer queue */
	qdf_nbuf_queue_free(&pdev->rx_ppdu_buf_q);

	return status_nbuf;
}

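/*
 * dp_rx_handle_ppdu_status_buf() - queue a ppdu status buffer until the
 * complete PPDU has been received
 * @pdev: pdev object
 * @ppdu_info: ppdu info object
 * @status_nbuf: ppdu status buffer to queue
 */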
static inline void
dp_rx_handle_ppdu_status_buf(struct dp_pdev *pdev,
			     struct hal_rx_ppdu_info *ppdu_info,
			     qdf_nbuf_t status_nbuf)
{
	qdf_nbuf_queue_add(&pdev->rx_ppdu_buf_q, status_nbuf);
}
/**
 * dp_rx_handle_mcopy_mode() - Allocate and deliver first MSDU payload
 * @soc: core txrx main context
 * @pdev: pdev structure
 * @ppdu_info: structure for rx ppdu ring
 * @nbuf: nbuf containing the first msdu payload
 *
 * Return: QDF_STATUS_SUCCESS - If nbuf to be freed by caller
 *         QDF_STATUS_E_ALREADY - If nbuf not to be freed by caller
 */
#ifdef FEATURE_PERPKT_INFO
static inline QDF_STATUS
dp_rx_handle_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info, qdf_nbuf_t nbuf)
{
	uint8_t size = 0;
	struct ieee80211_frame *wh;
	uint32_t *nbuf_data;

	if (!ppdu_info->fcs_ok_msdu_info.first_msdu_payload)
		return QDF_STATUS_SUCCESS;

	if (pdev->m_copy_id.rx_ppdu_id == ppdu_info->com_info.ppdu_id)
		return QDF_STATUS_SUCCESS;

	pdev->m_copy_id.rx_ppdu_id = ppdu_info->com_info.ppdu_id;

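	/*
	 * The first MSDU payload in the status buffer is preceded by a
	 * 4-byte phy_ppdu_id (the smart mesh path below documents the
	 * same layout), so offset by 4 to land on the 802.11 header.
	 */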
	wh = (struct ieee80211_frame *)
		(ppdu_info->fcs_ok_msdu_info.first_msdu_payload + 4);

	size = (ppdu_info->fcs_ok_msdu_info.first_msdu_payload -
		qdf_nbuf_data(nbuf));
	ppdu_info->fcs_ok_msdu_info.first_msdu_payload = NULL;

	if (qdf_nbuf_pull_head(nbuf, size) == NULL)
		return QDF_STATUS_SUCCESS;

	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
	     IEEE80211_FC0_TYPE_MGT) ||
	    ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
	     IEEE80211_FC0_TYPE_CTL)) {
		return QDF_STATUS_SUCCESS;
	}

	nbuf_data = (uint32_t *)qdf_nbuf_data(nbuf);
	*nbuf_data = pdev->ppdu_info.com_info.ppdu_id;
	/* only retain RX MSDU payload in the skb */
	qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) -
			   ppdu_info->fcs_ok_msdu_info.payload_len);
	dp_wdi_event_handler(WDI_EVENT_RX_DATA, soc,
			     nbuf, HTT_INVALID_PEER, WDI_NO_VAL, pdev->pdev_id);
	return QDF_STATUS_E_ALREADY;
}
#else
static inline QDF_STATUS
dp_rx_handle_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info, qdf_nbuf_t nbuf)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#ifdef FEATURE_PERPKT_INFO
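/*
 * dp_rx_process_mcopy_mode() - queue rx status buffers and, once the
 * PPDU completes, deliver the first FCS-passed MSDU payload for m_copy
 * @soc: core txrx main context
 * @pdev: pdev structure
 * @ppdu_info: structure for rx ppdu ring
 * @tlv_status: TLV status returned by status TLV processing
 * @status_nbuf: status buffer being processed
 */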
static inline void
dp_rx_process_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			 struct hal_rx_ppdu_info *ppdu_info,
			 uint32_t tlv_status,
			 qdf_nbuf_t status_nbuf)
{
	QDF_STATUS mcopy_status;

	if (qdf_unlikely(!ppdu_info->com_info.mpdu_cnt)) {
		qdf_nbuf_free(status_nbuf);
		return;
	}
	/* Add buffers to queue until we receive
	 * HAL_TLV_STATUS_PPDU_DONE
	 */
	dp_rx_handle_ppdu_status_buf(pdev, ppdu_info, status_nbuf);

	/* If tlv_status is PPDU_DONE, process rx_ppdu_buf_q
	 * and deliver the fcs_ok msdu buffer
	 */
	if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
		/* Get rx ppdu status buffer having fcs ok msdu */
		status_nbuf = dp_rx_get_fcs_ok_msdu(pdev, ppdu_info);
		if (status_nbuf) {
			mcopy_status = dp_rx_handle_mcopy_mode(soc, pdev,
							       ppdu_info,
							       status_nbuf);
			if (mcopy_status == QDF_STATUS_SUCCESS)
				qdf_nbuf_free(status_nbuf);
		}
	}
}
#else
static inline void
dp_rx_process_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			 struct hal_rx_ppdu_info *ppdu_info,
			 uint32_t tlv_status,
			 qdf_nbuf_t status_nbuf)
{
}
#endif

/**
 * dp_rx_handle_smart_mesh_mode() - Deliver header for smart mesh
 * @soc: Datapath SOC handle
 * @pdev: Datapath PDEV handle
 * @ppdu_info: Structure for rx ppdu info
 * @nbuf: Qdf nbuf abstraction for linux skb
 *
 * Return: 0 on success, 1 on failure
 */
static inline int
dp_rx_handle_smart_mesh_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			     struct hal_rx_ppdu_info *ppdu_info,
			     qdf_nbuf_t nbuf)
{
	uint8_t size = 0;

	if (!pdev->monitor_vdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "[%s]:[%d] Monitor vdev is NULL !!",
			  __func__, __LINE__);
		return 1;
	}
	if (!ppdu_info->msdu_info.first_msdu_payload) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "[%s]:[%d] First msdu payload not present",
			  __func__, __LINE__);
		return 1;
	}

	/* Adding 4 bytes to get to start of 802.11 frame after phy_ppdu_id */
	size = (ppdu_info->msdu_info.first_msdu_payload -
		qdf_nbuf_data(nbuf)) + 4;
	ppdu_info->msdu_info.first_msdu_payload = NULL;

	if (qdf_nbuf_pull_head(nbuf, size) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "[%s]:[%d] No header present",
			  __func__, __LINE__);
		return 1;
	}

	/* Only retain RX MSDU payload in the skb */
	qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) -
			   ppdu_info->msdu_info.payload_len);
	if (!qdf_nbuf_update_radiotap(&pdev->ppdu_info.rx_status, nbuf,
				      qdf_nbuf_headroom(nbuf))) {
		DP_STATS_INC(pdev, dropped.mon_radiotap_update_err, 1);
		return 1;
	}

	pdev->monitor_vdev->osif_rx_mon(pdev->monitor_vdev->osif_vdev,
					nbuf, NULL);
	pdev->ppdu_info.rx_status.monitor_direct_used = 0;
	return 0;
}

/**
 * dp_rx_handle_ppdu_stats() - Allocate and deliver ppdu stats to cdp layer
 * @soc: core txrx main context
 * @pdev: pdev structure
 * @ppdu_info: structure for rx ppdu ring
 *
 * Return: none
 */
#ifdef FEATURE_PERPKT_INFO
static inline void
dp_rx_handle_ppdu_stats(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info)
{
	qdf_nbuf_t ppdu_nbuf;
	struct dp_peer *peer;
	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;

	/*
	 * Do not allocate if fcs error,
	 * ast idx invalid / fctl invalid
	 */
	if (ppdu_info->com_info.mpdu_cnt_fcs_ok == 0)
		return;

	if (ppdu_info->nac_info.fc_valid &&
	    ppdu_info->nac_info.to_ds_flag &&
	    ppdu_info->nac_info.mac_addr2_valid) {
		struct dp_neighbour_peer *peer = NULL;
		uint8_t rssi = ppdu_info->rx_status.rssi_comb;

		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
		if (pdev->neighbour_peers_added) {
			TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
				      neighbour_peer_list_elem) {
				if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr,
						 &ppdu_info->nac_info.mac_addr2,
						 QDF_MAC_ADDR_SIZE)) {
					peer->rssi = rssi;
					break;
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
	}

	/* need not generate wdi event when mcopy and
	 * enhanced stats are not enabled
	 */
	if (!pdev->mcopy_mode && !pdev->enhanced_stats_en)
		return;

	if (!pdev->mcopy_mode) {
		if (!ppdu_info->rx_status.frame_control_info_valid)
			return;

		if (ppdu_info->rx_status.ast_index == HAL_AST_IDX_INVALID)
			return;
	}
	ppdu_nbuf = qdf_nbuf_alloc(soc->osdev,
				   sizeof(struct cdp_rx_indication_ppdu),
				   0, 0, FALSE);
	if (ppdu_nbuf) {
		dp_rx_populate_cdp_indication_ppdu(pdev, ppdu_info, ppdu_nbuf);
		qdf_nbuf_put_tail(ppdu_nbuf,
				  sizeof(struct cdp_rx_indication_ppdu));
		cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)ppdu_nbuf->data;
		peer = dp_peer_find_by_id(soc, cdp_rx_ppdu->peer_id);
		if (peer) {
			cdp_rx_ppdu->cookie = (void *)peer->wlanstats_ctx;
			dp_rx_stats_update(pdev, peer, cdp_rx_ppdu);
			dp_peer_unref_del_find_by_id(peer);
		}
		if (cdp_rx_ppdu->peer_id != HTT_INVALID_PEER) {
			dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC,
					     soc, ppdu_nbuf,
					     cdp_rx_ppdu->peer_id,
					     WDI_NO_VAL, pdev->pdev_id);
		} else if (pdev->mcopy_mode) {
			dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, soc,
					     ppdu_nbuf, HTT_INVALID_PEER,
					     WDI_NO_VAL, pdev->pdev_id);
		} else {
			qdf_nbuf_free(ppdu_nbuf);
		}
	}
}
#else
static inline void
dp_rx_handle_ppdu_stats(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info)
{
}
#endif

/**
 * dp_rx_process_peer_based_pktlog() - Process Rx pktlog if peer based
 * filtering is enabled
 * @soc: core txrx main context
 * @ppdu_info: Structure for rx ppdu info
 * @status_nbuf: Qdf nbuf abstraction for linux skb
 * @mac_id: mac_id/pdev_id correspondingly for MCL and WIN
 *
 * Return: none
 */
static inline void
dp_rx_process_peer_based_pktlog(struct dp_soc *soc,
				struct hal_rx_ppdu_info *ppdu_info,
				qdf_nbuf_t status_nbuf, uint32_t mac_id)
{
	struct dp_peer *peer;
	struct dp_ast_entry *ast_entry;
	uint32_t ast_index;

	ast_index = ppdu_info->rx_status.ast_index;
	if (ast_index < (WLAN_UMAC_PSOC_MAX_PEERS * 2)) {
		ast_entry = soc->ast_table[ast_index];
		if (ast_entry) {
			peer = ast_entry->peer;
			if (peer && (peer->peer_ids[0] != HTT_INVALID_PEER)) {
				if (peer->peer_based_pktlog_filter) {
					dp_wdi_event_handler(
						WDI_EVENT_RX_DESC, soc,
						status_nbuf,
						peer->peer_ids[0],
						WDI_NO_VAL, mac_id);
				}
			}
		}
	}
}

#if defined(HTT_UL_OFDMA_USER_INFO_V0_W0_VALID_M)
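/*
 * dp_rx_ul_ofdma_ru_size_to_width() - map the HTT UL OFDMA RU size enum
 * to an RU width; the width values appear to be expressed in units of
 * 26-tone RU slots (e.g. RU_52 -> 2, RU_996 -> 37, RU_996x2 -> 74)
 * @ru_size: HTT UL OFDMA RU size
 * @ru_width: filled with the converted RU width
 */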
static inline void
dp_rx_ul_ofdma_ru_size_to_width(
	uint32_t ru_size,
	uint32_t *ru_width)
{
	uint32_t width;

	width = 0;
	switch (ru_size) {
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_26:
		width = 1;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_52:
		width = 2;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_106:
		width = 4;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_242:
		width = 9;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_484:
		width = 18;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_996:
		width = 37;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_996x2:
		width = 74;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "RU size to width convert err");
		break;
	}
	*ru_width = width;
}

static inline void
dp_rx_mon_handle_ofdma_info(struct hal_rx_ppdu_info *ppdu_info)
{
	struct mon_rx_user_status *mon_rx_user_status;
	uint32_t num_users;
	uint32_t i;
	uint32_t ul_ofdma_user_v0_word0;
	uint32_t ul_ofdma_user_v0_word1;
	uint32_t ru_width;

	if (ppdu_info->rx_status.reception_type != HAL_RX_TYPE_MU_OFDMA)
		return;

	num_users = ppdu_info->com_info.num_users;
	if (num_users > HAL_MAX_UL_MU_USERS)
		num_users = HAL_MAX_UL_MU_USERS;
	for (i = 0; i < num_users; i++) {
		mon_rx_user_status = &ppdu_info->rx_user_status[i];
		ul_ofdma_user_v0_word0 =
			mon_rx_user_status->ul_ofdma_user_v0_word0;
		ul_ofdma_user_v0_word1 =
			mon_rx_user_status->ul_ofdma_user_v0_word1;

		if (HTT_UL_OFDMA_USER_INFO_V0_W0_VALID_GET(
			ul_ofdma_user_v0_word0) &&
		    !HTT_UL_OFDMA_USER_INFO_V0_W0_VER_GET(
			ul_ofdma_user_v0_word0)) {
			mon_rx_user_status->mcs =
				HTT_UL_OFDMA_USER_INFO_V0_W1_MCS_GET(
					ul_ofdma_user_v0_word1);
			mon_rx_user_status->nss =
				HTT_UL_OFDMA_USER_INFO_V0_W1_NSS_GET(
					ul_ofdma_user_v0_word1);

			mon_rx_user_status->ofdma_info_valid = 1;
			mon_rx_user_status->dl_ofdma_ru_start_index =
				HTT_UL_OFDMA_USER_INFO_V0_W1_RU_START_GET(
					ul_ofdma_user_v0_word1);

			dp_rx_ul_ofdma_ru_size_to_width(
				HTT_UL_OFDMA_USER_INFO_V0_W1_RU_SIZE_GET(
					ul_ofdma_user_v0_word1),
				&ru_width);
			mon_rx_user_status->dl_ofdma_ru_width = ru_width;
		}
	}
}
#else
static inline void
dp_rx_mon_handle_ofdma_info(struct hal_rx_ppdu_info *ppdu_info)
{
}
#endif

/**
 * dp_rx_mon_status_process_tlv() - Process status TLV in status
 * buffer on Rx status Queue posted by status SRNG processing.
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: No. of ring entries that can be serviced in one shot.
 *
 * Return: none
 */
static inline void
dp_rx_mon_status_process_tlv(struct dp_soc *soc, uint32_t mac_id,
			     uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	struct hal_rx_ppdu_info *ppdu_info;
	qdf_nbuf_t status_nbuf;
	uint8_t *rx_tlv;
	uint8_t *rx_tlv_start;
	uint32_t tlv_status = HAL_TLV_STATUS_BUF_DONE;
	QDF_STATUS enh_log_status = QDF_STATUS_SUCCESS;
	struct cdp_pdev_mon_stats *rx_mon_stats;
	int smart_mesh_status;
	enum WDI_EVENT pktlog_mode = WDI_NO_VAL;
	bool nbuf_used;
	uint32_t rx_enh_capture_mode;

	ppdu_info = &pdev->ppdu_info;
	rx_mon_stats = &pdev->rx_mon_stats;

	if (pdev->mon_ppdu_status != DP_PPDU_STATUS_START)
		return;

	rx_enh_capture_mode = pdev->rx_enh_capture_mode;

	while (!qdf_nbuf_is_queue_empty(&pdev->rx_status_q)) {

		status_nbuf = qdf_nbuf_queue_remove(&pdev->rx_status_q);

		rx_tlv = qdf_nbuf_data(status_nbuf);
		rx_tlv_start = rx_tlv;
		nbuf_used = false;

		if ((pdev->monitor_vdev) || (pdev->enhanced_stats_en) ||
		    pdev->mcopy_mode ||
		    (rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED)) {
			do {
				tlv_status = hal_rx_status_get_tlv_info(rx_tlv,
						ppdu_info, pdev->soc->hal_soc,
						status_nbuf);

				dp_rx_mon_update_dbg_ppdu_stats(ppdu_info,
								rx_mon_stats);

				dp_rx_mon_enh_capture_process(pdev, tlv_status,
					status_nbuf, ppdu_info,
					&nbuf_used);

				rx_tlv = hal_rx_status_get_next_tlv(rx_tlv);

				if ((rx_tlv - rx_tlv_start) >= RX_BUFFER_SIZE)
					break;

			} while ((tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE) ||
				 (tlv_status == HAL_TLV_STATUS_HEADER) ||
				 (tlv_status == HAL_TLV_STATUS_MPDU_END) ||
				 (tlv_status == HAL_TLV_STATUS_MSDU_END));
		}
		if (pdev->dp_peer_based_pktlog) {
			dp_rx_process_peer_based_pktlog(soc, ppdu_info,
							status_nbuf, mac_id);
		} else {
			if (pdev->rx_pktlog_mode == DP_RX_PKTLOG_FULL)
				pktlog_mode = WDI_EVENT_RX_DESC;
			else if (pdev->rx_pktlog_mode == DP_RX_PKTLOG_LITE)
				pktlog_mode = WDI_EVENT_LITE_RX;

			if (pktlog_mode != WDI_NO_VAL)
				dp_wdi_event_handler(pktlog_mode, soc,
						     status_nbuf,
						     HTT_INVALID_PEER,
						     WDI_NO_VAL, mac_id);
		}

		/* smart monitor vap and m_copy cannot co-exist */
		if (ppdu_info->rx_status.monitor_direct_used &&
		    pdev->neighbour_peers_added &&
		    pdev->monitor_vdev) {
			smart_mesh_status = dp_rx_handle_smart_mesh_mode(soc,
						pdev, ppdu_info, status_nbuf);
			if (smart_mesh_status)
				qdf_nbuf_free(status_nbuf);
		} else if (qdf_unlikely(pdev->mcopy_mode)) {
			dp_rx_process_mcopy_mode(soc, pdev,
						 ppdu_info, tlv_status,
						 status_nbuf);
		} else if (rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED) {
			if (!nbuf_used)
				qdf_nbuf_free(status_nbuf);

			if (tlv_status == HAL_TLV_STATUS_PPDU_DONE)
				enh_log_status =
					dp_rx_handle_enh_capture(soc,
								 pdev,
								 ppdu_info);
		} else {
			qdf_nbuf_free(status_nbuf);
		}

		if (tlv_status == HAL_TLV_STATUS_PPDU_NON_STD_DONE) {
			dp_rx_mon_deliver_non_std(soc, mac_id);
		} else if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
			rx_mon_stats->status_ppdu_done++;
			dp_rx_mon_handle_ofdma_info(ppdu_info);
			if (pdev->enhanced_stats_en ||
			    pdev->mcopy_mode || pdev->neighbour_peers_added)
				dp_rx_handle_ppdu_stats(soc, pdev, ppdu_info);

			pdev->mon_ppdu_status = DP_PPDU_STATUS_DONE;
			dp_rx_mon_dest_process(soc, mac_id, quota);
			pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
		}
	}
	return;
}

/*
 * dp_rx_mon_status_srng_process() - Process monitor status ring
 *	post the status ring buffer to Rx status Queue for later
 *	processing when status ring is filled with status TLV.
 *	Allocate a new buffer to status ring if the filled buffer
 *	is posted.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: No. of ring entries that can be serviced in one shot.
 *
 * Return: uint32_t: No. of ring entries processed.
 */
static inline uint32_t
dp_rx_mon_status_srng_process(struct dp_soc *soc, uint32_t mac_id,
			      uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	hal_soc_handle_t hal_soc;
	void *mon_status_srng;
	void *rxdma_mon_status_ring_entry;
	QDF_STATUS status;
	uint32_t work_done = 0;
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);

	mon_status_srng = pdev->rxdma_mon_status_ring[mac_for_pdev].hal_srng;

	qdf_assert(mon_status_srng);
	if (!mon_status_srng || !hal_srng_initialized(mon_status_srng)) {

		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Status Ring Init Failed -- %pK",
			  __func__, __LINE__, mon_status_srng);
		return work_done;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_status_srng)))
		goto done;

	/* mon_status_ring_desc => WBM_BUFFER_RING STRUCT =>
	 * BUFFER_ADDR_INFO STRUCT
	 */
	while (qdf_likely((rxdma_mon_status_ring_entry =
		hal_srng_src_peek(hal_soc, mon_status_srng))
			&& quota--)) {
		uint32_t rx_buf_cookie;
		qdf_nbuf_t status_nbuf;
		struct dp_rx_desc *rx_desc;
		uint8_t *status_buf;
		qdf_dma_addr_t paddr;
		uint64_t buf_addr;

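		/*
		 * Reassemble the 40-bit buffer DMA address from the low
		 * 32 bits and high 8 bits carried in the ring descriptor.
		 */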
		buf_addr =
			(HAL_RX_BUFFER_ADDR_31_0_GET(
				rxdma_mon_status_ring_entry) |
			((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(
				rxdma_mon_status_ring_entry)) << 32));

		if (qdf_likely(buf_addr)) {

			rx_buf_cookie =
				HAL_RX_BUF_COOKIE_GET(
					rxdma_mon_status_ring_entry);
			rx_desc = dp_rx_cookie_2_va_mon_status(soc,
							       rx_buf_cookie);

			qdf_assert(rx_desc);

			status_nbuf = rx_desc->nbuf;

			qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
					      QDF_DMA_FROM_DEVICE);

			status_buf = qdf_nbuf_data(status_nbuf);

			status = hal_get_rx_status_done(status_buf);

			if (status != QDF_STATUS_SUCCESS) {
				uint32_t hp, tp;

				hal_get_sw_hptp(hal_soc, mon_status_srng,
						&tp, &hp);
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  "[%s][%d] status not done - hp:%u, tp:%u",
					  __func__, __LINE__, hp, tp);
				/* WAR for missing status: Skip status entry */
				hal_srng_src_get_next(hal_soc, mon_status_srng);
				continue;
			}
			qdf_nbuf_set_pktlen(status_nbuf, RX_BUFFER_SIZE);

			qdf_nbuf_unmap_single(soc->osdev, status_nbuf,
					      QDF_DMA_FROM_DEVICE);

			/* Put the status_nbuf to queue */
			qdf_nbuf_queue_add(&pdev->rx_status_q, status_nbuf);

		} else {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			struct rx_desc_pool *rx_desc_pool;
			uint32_t num_alloc_desc;

			rx_desc_pool = &soc->rx_desc_status[mac_id];

			num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
								  rx_desc_pool,
								  1,
								  &desc_list,
								  &tail);
			/*
			 * No free descriptors available
			 */
			if (qdf_unlikely(num_alloc_desc == 0)) {
				work_done++;
				break;
			}

			rx_desc = &desc_list->rx_desc;
		}

		status_nbuf = dp_rx_nbuf_prepare(soc, pdev);

		/*
		 * qdf_nbuf alloc or map failed,
		 * free the dp rx desc to free list,
		 * fill in NULL dma address at current HP entry,
		 * keep HP in mon_status_ring unchanged,
		 * wait next time dp_rx_mon_status_srng_process
		 * to fill in buffer at current HP.
		 */
		if (qdf_unlikely(!status_nbuf)) {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			struct rx_desc_pool *rx_desc_pool;

			rx_desc_pool = &soc->rx_desc_status[mac_id];

			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: fail to allocate or map qdf_nbuf",
				  __func__);
			dp_rx_add_to_free_desc_list(&desc_list,
						    &tail, rx_desc);
			dp_rx_add_desc_list_to_free_list(soc, &desc_list,
							 &tail, mac_id,
							 rx_desc_pool);

			hal_rxdma_buff_addr_info_set(
				rxdma_mon_status_ring_entry,
				0, 0, HAL_RX_BUF_RBM_SW3_BM);
			work_done++;
			break;
		}

		paddr = qdf_nbuf_get_frag_paddr(status_nbuf, 0);

		rx_desc->nbuf = status_nbuf;
		rx_desc->in_use = 1;

		hal_rxdma_buff_addr_info_set(rxdma_mon_status_ring_entry,
			paddr, rx_desc->cookie, HAL_RX_BUF_RBM_SW3_BM);

		hal_srng_src_get_next(hal_soc, mon_status_srng);
		work_done++;
	}
done:

	hal_srng_access_end(hal_soc, mon_status_srng);

	return work_done;

}
/*
 * dp_rx_mon_status_process() - Process monitor status ring and
 *	TLV in status ring.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: No. of ring entries that can be serviced in one shot.
 *
 * Return: uint32_t: No. of ring entries processed.
 */
static inline uint32_t
dp_rx_mon_status_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) {
	uint32_t work_done;

	work_done = dp_rx_mon_status_srng_process(soc, mac_id, quota);
	quota -= work_done;
	dp_rx_mon_status_process_tlv(soc, mac_id, quota);

	return work_done;
}
/**
 * dp_mon_process() - Main monitor mode processing routine.
 *	This calls the monitor status ring processing, followed by the
 *	monitor destination ring processing.
 *	Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: No. of status ring entries that can be serviced in one shot.
 *
 * Return: uint32_t: No. of ring entries processed.
 */
uint32_t
dp_mon_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) {
	return dp_rx_mon_status_process(soc, mac_id, quota);
}

/**
 * dp_rx_pdev_mon_status_detach() - detach dp rx for status ring
 * @pdev: core txrx pdev context
 * @mac_id: mac_id/pdev_id correspondingly for MCL and WIN
 *
 * This function will detach the DP RX status ring from the
 * main device context and free DP Rx resources for
 * the status ring.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS
dp_rx_pdev_mon_status_detach(struct dp_pdev *pdev, int mac_id)
{
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_status[mac_id];
	if (rx_desc_pool->pool_size != 0) {
		if (!dp_is_soc_reinit(soc))
			dp_rx_desc_nbuf_and_pool_free(soc, mac_id,
						      rx_desc_pool);
		else
			dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	}

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_mon_status_buffers_replenish() - replenish monitor status ring with
 *	rx nbufs; called during dp rx
 *	monitor status ring initialization
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp monitor status circular ring
 * @rx_desc_pool: Pointer to Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp rx monitor status
 *	process or NULL during dp rx initialization or
 *	out of buffer interrupt
 * @tail: tail of descs list
 * @owner: who owns the nbuf (host, NSS etc...)
 *
 * Return: return success or failure
 */
1347static inline
1348QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
1349 uint32_t mac_id,
1350 struct dp_srng *dp_rxdma_srng,
1351 struct rx_desc_pool *rx_desc_pool,
1352 uint32_t num_req_buffers,
1353 union dp_rx_desc_list_elem_t **desc_list,
1354 union dp_rx_desc_list_elem_t **tail,
1355 uint8_t owner)
1356{
1357 uint32_t num_alloc_desc;
1358 uint16_t num_desc_to_free = 0;
1359 uint32_t num_entries_avail;
jinweic chenc3546322018-02-02 15:03:41 +08001360 uint32_t count = 0;
Kai Chen6eca1a62017-01-12 10:17:53 -08001361 int sync_hw_ptr = 1;
1362 qdf_dma_addr_t paddr;
1363 qdf_nbuf_t rx_netbuf;
1364 void *rxdma_ring_entry;
1365 union dp_rx_desc_list_elem_t *next;
1366 void *rxdma_srng;
jinweic chenc3546322018-02-02 15:03:41 +08001367 struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(dp_soc, mac_id);
Kai Chen6eca1a62017-01-12 10:17:53 -08001368
1369 rxdma_srng = dp_rxdma_srng->hal_srng;
1370
1371 qdf_assert(rxdma_srng);
1372
Houston Hoffmanae850c62017-08-11 16:47:50 -07001373 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Aditya Sathishded018e2018-07-02 16:25:21 +05301374 "[%s][%d] requested %d buffers for replenish",
Kai Chen6eca1a62017-01-12 10:17:53 -08001375 __func__, __LINE__, num_req_buffers);
1376
1377 /*
1378 * if desc_list is NULL, allocate the descs from freelist
1379 */
1380 if (!(*desc_list)) {
1381
1382 num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
1383 rx_desc_pool,
1384 num_req_buffers,
1385 desc_list,
1386 tail);
1387
1388 if (!num_alloc_desc) {
1389 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +05301390 "[%s][%d] no free rx_descs in freelist",
Kai Chen6eca1a62017-01-12 10:17:53 -08001391 __func__, __LINE__);
1392 return QDF_STATUS_E_NOMEM;
1393 }
1394
Houston Hoffmanae850c62017-08-11 16:47:50 -07001395 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Aditya Sathishded018e2018-07-02 16:25:21 +05301396 "[%s][%d] %d rx desc allocated", __func__, __LINE__,
Kai Chen6eca1a62017-01-12 10:17:53 -08001397 num_alloc_desc);
Houston Hoffmanae850c62017-08-11 16:47:50 -07001398
Kai Chen6eca1a62017-01-12 10:17:53 -08001399 num_req_buffers = num_alloc_desc;
1400 }
1401
1402 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
1403 num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
1404 rxdma_srng, sync_hw_ptr);
1405
Houston Hoffmanae850c62017-08-11 16:47:50 -07001406 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Aditya Sathishded018e2018-07-02 16:25:21 +05301407 "[%s][%d] no of available entries in rxdma ring: %d",
Kai Chen6eca1a62017-01-12 10:17:53 -08001408 __func__, __LINE__, num_entries_avail);
1409
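	/*
	 * Clamp the request to the space actually free in the ring; the
	 * surplus descriptors are tracked in num_desc_to_free and handed
	 * back to the free list at the end of this function.
	 */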
1410 if (num_entries_avail < num_req_buffers) {
1411 num_desc_to_free = num_req_buffers - num_entries_avail;
1412 num_req_buffers = num_entries_avail;
1413 }
1414
jinweic chenc3546322018-02-02 15:03:41 +08001415 while (count < num_req_buffers) {
1416 rx_netbuf = dp_rx_nbuf_prepare(dp_soc, dp_pdev);
Kai Chen6eca1a62017-01-12 10:17:53 -08001417
jinweic chenc3546322018-02-02 15:03:41 +08001418		/*
1419		 * If the qdf_nbuf alloc or map failed, keep the HP in
1420		 * mon_status_ring unchanged and wait for
1421		 * dp_rx_mon_status_srng_process() to fill in a buffer
1422		 * at the current HP.
1423		 */
Jeff Johnsona8edf332019-03-18 09:51:52 -07001424 if (qdf_unlikely(!rx_netbuf)) {
jinweic chenc3546322018-02-02 15:03:41 +08001425 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1426 "%s: qdf_nbuf allocate or map fail, count %d",
1427 __func__, count);
1428 break;
1429 }
Kai Chen6eca1a62017-01-12 10:17:53 -08001430
1431 paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);
1432
1433 next = (*desc_list)->next;
sumedh baikadyeca2de62018-04-11 14:20:38 -07001434 rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
1435 rxdma_srng);
1436
Jeff Johnsona8edf332019-03-18 09:51:52 -07001437 if (qdf_unlikely(!rxdma_ring_entry)) {
sumedh baikadyeca2de62018-04-11 14:20:38 -07001438 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +05301439 "[%s][%d] rxdma_ring_entry is NULL, count - %d",
sumedh baikadyeca2de62018-04-11 14:20:38 -07001440 __func__, __LINE__, count);
1441 qdf_nbuf_unmap_single(dp_soc->osdev, rx_netbuf,
Ankit Kumar0ae4abc2019-05-02 15:08:42 +05301442 QDF_DMA_FROM_DEVICE);
sumedh baikadyeca2de62018-04-11 14:20:38 -07001443 qdf_nbuf_free(rx_netbuf);
1444 break;
1445 }
Kai Chen6eca1a62017-01-12 10:17:53 -08001446
1447 (*desc_list)->rx_desc.nbuf = rx_netbuf;
Pramod Simha59fcb312017-06-22 17:43:16 -07001448 (*desc_list)->rx_desc.in_use = 1;
jinweic chenc3546322018-02-02 15:03:41 +08001449 count++;
jinweic chenc3546322018-02-02 15:03:41 +08001450
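		/*
		 * Program the ring entry with the buffer's DMA address, the
		 * software descriptor cookie (used to locate this descriptor
		 * again at reap time) and the owner (return buffer manager).
		 */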
Kai Chen6eca1a62017-01-12 10:17:53 -08001451 hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
1452 (*desc_list)->rx_desc.cookie, owner);
1453
Karunakar Dasineni40555682017-03-26 22:44:39 -07001454 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Jeff Johnson3f217e22017-09-18 10:13:35 -07001455			  "[%s][%d] rx_desc=%pK, cookie=%d, nbuf=%pK, "
Aditya Sathishded018e2018-07-02 16:25:21 +05301456			  "paddr=%pK",
Kai Chen6eca1a62017-01-12 10:17:53 -08001457 __func__, __LINE__, &(*desc_list)->rx_desc,
1458 (*desc_list)->rx_desc.cookie, rx_netbuf,
jinweic chenc3546322018-02-02 15:03:41 +08001459 (void *)paddr);
Kai Chen6eca1a62017-01-12 10:17:53 -08001460
1461 *desc_list = next;
1462 }
1463
1464 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
1465
Houston Hoffmanae850c62017-08-11 16:47:50 -07001466 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Aditya Sathishded018e2018-07-02 16:25:21 +05301467		  "successfully replenished %d buffers", count);
Kai Chen6eca1a62017-01-12 10:17:53 -08001468
Houston Hoffmanae850c62017-08-11 16:47:50 -07001469 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Aditya Sathishded018e2018-07-02 16:25:21 +05301470 "%d rx desc added back to free list", num_desc_to_free);
Kai Chen6eca1a62017-01-12 10:17:53 -08001471
Kai Chen6eca1a62017-01-12 10:17:53 -08001472 /*
1473 * add any available free desc back to the free list
1474 */
1475 if (*desc_list) {
1476 dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
1477 mac_id, rx_desc_pool);
1478 }
1479
1480 return QDF_STATUS_SUCCESS;
1481}
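
/*
 * Illustrative call sequence (a sketch, not additional driver code): at
 * attach time the caller passes NULL desc_list/tail so that descriptors
 * are pulled from the pool freelist, as dp_rx_pdev_mon_status_attach()
 * does below.
 *
 *	union dp_rx_desc_list_elem_t *desc_list = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *
 *	status = dp_rx_mon_status_buffers_replenish(soc, ring_id,
 *						    mon_status_ring,
 *						    rx_desc_pool,
 *						    num_entries,
 *						    &desc_list, &tail,
 *						    HAL_RX_BUF_RBM_SW3_BM);
 */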
1482/**
1483 * dp_rx_pdev_mon_status_attach() - attach DP RX monitor status ring
1484 * @pdev: core txrx pdev context
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07001485 * @ring_id: ring number
 *
Kai Chen6eca1a62017-01-12 10:17:53 -08001486 * This function will attach a DP RX monitor status ring into pdev
1487 * and replenish the monitor status ring with buffers.
1488 *
1489 * Return: QDF_STATUS_SUCCESS: success
1490 * QDF_STATUS_E_RESOURCES: Error return
1491 */
1492QDF_STATUS
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08001493dp_rx_pdev_mon_status_attach(struct dp_pdev *pdev, int ring_id) {
Kai Chen6eca1a62017-01-12 10:17:53 -08001494 struct dp_soc *soc = pdev->soc;
1495 union dp_rx_desc_list_elem_t *desc_list = NULL;
1496 union dp_rx_desc_list_elem_t *tail = NULL;
Mohit Khanna70514992018-11-12 18:39:03 -08001497 struct dp_srng *mon_status_ring;
1498 uint32_t num_entries;
Kai Chen52ef33f2019-03-05 18:33:40 -08001499 uint32_t i;
Kai Chen6eca1a62017-01-12 10:17:53 -08001500 struct rx_desc_pool *rx_desc_pool;
Ravi Joshia9ebe0a2017-06-17 16:43:02 -07001501 QDF_STATUS status;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08001502 int mac_for_pdev = dp_get_mac_id_for_mac(soc, ring_id);
Kai Chen6eca1a62017-01-12 10:17:53 -08001503
Mohit Khanna70514992018-11-12 18:39:03 -08001504 mon_status_ring = &pdev->rxdma_mon_status_ring[mac_for_pdev];
Kai Chen6eca1a62017-01-12 10:17:53 -08001505
Mohit Khanna70514992018-11-12 18:39:03 -08001506 num_entries = mon_status_ring->num_entries;
Kai Chen6eca1a62017-01-12 10:17:53 -08001507
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08001508 rx_desc_pool = &soc->rx_desc_status[ring_id];
Kai Chen6eca1a62017-01-12 10:17:53 -08001509
Mohit Khanna70514992018-11-12 18:39:03 -08001510 dp_info("Mon RX Status Pool[%d] entries=%d",
1511 ring_id, num_entries);
Kai Chen6eca1a62017-01-12 10:17:53 -08001512
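	/*
	 * One descriptor more than the ring size is allocated (assumed
	 * intent: keep a spare software descriptor available even while
	 * every ring entry holds a buffer).
	 */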
Mohit Khanna70514992018-11-12 18:39:03 -08001513 status = dp_rx_desc_pool_alloc(soc, ring_id, num_entries + 1,
1514 rx_desc_pool);
1515 if (!QDF_IS_STATUS_SUCCESS(status))
Ravi Joshia9ebe0a2017-06-17 16:43:02 -07001516 return status;
Kai Chen6eca1a62017-01-12 10:17:53 -08001517
Mohit Khanna70514992018-11-12 18:39:03 -08001518 dp_debug("Mon RX Status Buffers Replenish ring_id=%d", ring_id);
Kai Chen6eca1a62017-01-12 10:17:53 -08001519
Mohit Khanna70514992018-11-12 18:39:03 -08001520 status = dp_rx_mon_status_buffers_replenish(soc, ring_id,
1521 mon_status_ring,
1522 rx_desc_pool,
1523 num_entries,
1524 &desc_list, &tail,
1525 HAL_RX_BUF_RBM_SW3_BM);
1526
1527 if (!QDF_IS_STATUS_SUCCESS(status))
Ravi Joshia9ebe0a2017-06-17 16:43:02 -07001528 return status;
Kai Chen6eca1a62017-01-12 10:17:53 -08001529
1530 qdf_nbuf_queue_init(&pdev->rx_status_q);
Amir Patel57e7e052019-05-15 20:49:57 +05301531 qdf_nbuf_queue_init(&pdev->rx_ppdu_buf_q);
Kai Chen6eca1a62017-01-12 10:17:53 -08001532
1533 pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
Kai Chen783e0382018-01-25 16:29:08 -08001534
Karunakar Dasineni40555682017-03-26 22:44:39 -07001535 qdf_mem_zero(&(pdev->ppdu_info.rx_status),
Mohit Khanna70514992018-11-12 18:39:03 -08001536 sizeof(pdev->ppdu_info.rx_status));
Kai Chen6eca1a62017-01-12 10:17:53 -08001537
Kai Chen783e0382018-01-25 16:29:08 -08001538 qdf_mem_zero(&pdev->rx_mon_stats,
1539 sizeof(pdev->rx_mon_stats));
1540
1541 dp_rx_mon_init_dbg_ppdu_stats(&pdev->ppdu_info,
1542 &pdev->rx_mon_stats);
1543
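	/*
	 * Reset per-MU-user MPDU state: an empty pending-MPDU queue per
	 * user, with the next TLV for each user expected to be an MPDU
	 * header.
	 */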
Kai Chen52ef33f2019-03-05 18:33:40 -08001544 for (i = 0; i < MAX_MU_USERS; i++) {
1545 qdf_nbuf_queue_init(&pdev->mpdu_q[i]);
1546 pdev->is_mpdu_hdr[i] = true;
1547 }
1548	qdf_mem_zero(pdev->msdu_list, sizeof(pdev->msdu_list));
1549
1550 pdev->rx_enh_capture_mode = CDP_RX_ENH_CAPTURE_DISABLED;
1551
Kai Chen6eca1a62017-01-12 10:17:53 -08001552 return QDF_STATUS_SUCCESS;
1553}
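
/*
 * Illustrative usage (a sketch; the loop bound and caller context are
 * assumptions, not code from this driver): the attach is typically run
 * once per status ring during pdev initialization.
 *
 *	for (ring_id = 0; ring_id < num_status_rings; ring_id++) {
 *		if (dp_rx_pdev_mon_status_attach(pdev, ring_id) !=
 *		    QDF_STATUS_SUCCESS)
 *			return QDF_STATUS_E_RESOURCES;
 *	}
 */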