blob: 918dfdb3b5d8b44e94eae590d2609b259107ee1a [file] [log] [blame]
Debashis Dutt390645c2016-10-04 17:31:45 -07001/*
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05302 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
Debashis Dutt390645c2016-10-04 17:31:45 -07003 *
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16 * PERFORMANCE OF THIS SOFTWARE.
17 */
18
Balamurugan Mahalingamd0159642018-07-11 15:02:29 +053019#include "hal_hw_headers.h"
Debashis Dutt390645c2016-10-04 17:31:45 -070020#include "dp_types.h"
21#include "dp_rx.h"
22#include "dp_peer.h"
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -080023#include "dp_internal.h"
Debashis Dutt390645c2016-10-04 17:31:45 -070024#include "hal_api.h"
25#include "qdf_trace.h"
26#include "qdf_nbuf.h"
Venkata Sharath Chandra Manchala8e8d8f12017-01-13 00:00:58 -080027#ifdef CONFIG_MCL
28#include <cds_ieee80211_common.h>
Venkata Sharath Chandra Manchala8e8d8f12017-01-13 00:00:58 -080029#endif
Ravi Joshi36f68ad2016-11-09 17:09:47 -080030#include "dp_rx_defrag.h"
Amir Patelcb990262019-05-28 15:12:48 +053031#ifdef FEATURE_WDS
32#include "dp_txrx_wds.h"
33#endif
Ravi Joshi36f68ad2016-11-09 17:09:47 -080034#include <enet.h> /* LLC_SNAP_HDR_LEN */
sumedh baikadyc2fa7c92018-12-28 15:26:08 -080035#include "qdf_net_types.h"
Debashis Dutt390645c2016-10-04 17:31:45 -070036
37/**
Tallapragada Kalyan94034632017-12-07 17:29:13 +053038 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loop
39 * back on same vap or a different vap.
40 *
41 * @soc: core DP main context
42 * @peer: dp peer handler
43 * @rx_tlv_hdr: start of the rx TLV header
44 * @nbuf: pkt buffer
45 *
46 * Return: bool (true if it is a looped back pkt else false)
47 *
48 */
49static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
50 struct dp_peer *peer,
51 uint8_t *rx_tlv_hdr,
52 qdf_nbuf_t nbuf)
53{
54 struct dp_vdev *vdev = peer->vdev;
Amir Patelcb990262019-05-28 15:12:48 +053055 struct dp_ast_entry *ase = NULL;
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +053056 uint16_t sa_idx = 0;
Tallapragada Kalyan94034632017-12-07 17:29:13 +053057 uint8_t *data;
58
59 /*
60 * Multicast Echo Check is required only if vdev is STA and
61 * received pkt is a multicast/broadcast pkt. otherwise
62 * skip the MEC check.
63 */
64 if (vdev->opmode != wlan_op_mode_sta)
65 return false;
66
67 if (!hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))
68 return false;
69
70 data = qdf_nbuf_data(nbuf);
71 /*
72 * if the received pkts src mac addr matches with vdev
73 * mac address then drop the pkt as it is looped back
74 */
Srinivas Girigowda2751b6d2019-02-27 12:28:13 -080075 if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
Tallapragada Kalyan94034632017-12-07 17:29:13 +053076 vdev->mac_addr.raw,
Srinivas Girigowda2751b6d2019-02-27 12:28:13 -080077 QDF_MAC_ADDR_SIZE)))
Tallapragada Kalyan94034632017-12-07 17:29:13 +053078 return true;
79
Nandha Kishore Easwaran3053dee2018-03-12 18:27:10 +053080 /*
81 * In case of qwrap isolation mode, donot drop loopback packets.
82 * In isolation mode, all packets from the wired stations need to go
83 * to rootap and loop back to reach the wireless stations and
84 * vice-versa.
85 */
86 if (qdf_unlikely(vdev->isolation_vdev))
87 return false;
88
Tallapragada Kalyan94034632017-12-07 17:29:13 +053089 /* if the received pkts src mac addr matches with the
90 * wired PCs MAC addr which is behind the STA or with
91 * wireless STAs MAC addr which are behind the Repeater,
92 * then drop the pkt as it is looped back
93 */
94 qdf_spin_lock_bh(&soc->ast_lock);
95 if (hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr)) {
Balamurugan Mahalingam96d2d412018-07-10 10:11:58 +053096 sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);
Tallapragada Kalyan94034632017-12-07 17:29:13 +053097
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +053098 if ((sa_idx < 0) ||
Tallapragada Kalyana7023622018-12-03 19:29:52 +053099 (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
Tallapragada Kalyan94034632017-12-07 17:29:13 +0530100 qdf_spin_unlock_bh(&soc->ast_lock);
101 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
102 "invalid sa_idx: %d", sa_idx);
103 qdf_assert_always(0);
104 }
105
106 ase = soc->ast_table[sa_idx];
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530107 if (!ase) {
108 /* We do not get a peer map event for STA and without
109 * this event we don't know what is STA's sa_idx.
110 * For this reason the AST is still not associated to
111 * any index postion in ast_table.
112 * In these kind of scenarios where sa is valid but
113 * ast is not in ast_table, we use the below API to get
114 * AST entry for STA's own mac_address.
115 */
Chaithanya Garrepalli7c8cf122018-09-07 19:23:52 +0530116 ase = dp_peer_ast_list_find(soc, peer,
Srinivas Girigowda2751b6d2019-02-27 12:28:13 -0800117 &data[QDF_MAC_ADDR_SIZE]);
Chaithanya Garrepalli7c8cf122018-09-07 19:23:52 +0530118 if (ase) {
119 ase->ast_idx = sa_idx;
120 soc->ast_table[sa_idx] = ase;
Chaithanya Garrepallie10f87b2018-10-18 00:14:11 +0530121 ase->is_mapped = TRUE;
Chaithanya Garrepalli7c8cf122018-09-07 19:23:52 +0530122 }
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530123 }
Amir Patelcb990262019-05-28 15:12:48 +0530124 } else {
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530125 ase = dp_peer_ast_hash_find_by_pdevid(soc,
Srinivas Girigowda2751b6d2019-02-27 12:28:13 -0800126 &data[QDF_MAC_ADDR_SIZE],
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530127 vdev->pdev->pdev_id);
Amir Patelcb990262019-05-28 15:12:48 +0530128 }
Tallapragada Kalyan94034632017-12-07 17:29:13 +0530129
130 if (ase) {
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530131
132 if (ase->pdev_id != vdev->pdev->pdev_id) {
133 qdf_spin_unlock_bh(&soc->ast_lock);
134 QDF_TRACE(QDF_MODULE_ID_DP,
135 QDF_TRACE_LEVEL_INFO,
Pamidipati, Vijayd3478ef2018-02-06 23:52:29 +0530136 "Detected DBDC Root AP %pM, %d %d",
Srinivas Girigowda2751b6d2019-02-27 12:28:13 -0800137 &data[QDF_MAC_ADDR_SIZE], vdev->pdev->pdev_id,
Pamidipati, Vijayd3478ef2018-02-06 23:52:29 +0530138 ase->pdev_id);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530139 return false;
140 }
141
Tallapragada Kalyan94034632017-12-07 17:29:13 +0530142 if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
143 (ase->peer != peer)) {
144 qdf_spin_unlock_bh(&soc->ast_lock);
145 QDF_TRACE(QDF_MODULE_ID_DP,
146 QDF_TRACE_LEVEL_INFO,
147 "received pkt with same src mac %pM",
Srinivas Girigowda2751b6d2019-02-27 12:28:13 -0800148 &data[QDF_MAC_ADDR_SIZE]);
Tallapragada Kalyan94034632017-12-07 17:29:13 +0530149
150 return true;
151 }
152 }
153 qdf_spin_unlock_bh(&soc->ast_lock);
154 return false;
155}
156
157/**
Aniruddha Paul80f52e72017-10-28 18:16:58 +0530158 * dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to
159 * (WBM) by address
Tallapragada Kalyana7010a62017-10-20 12:20:19 +0530160 *
161 * @soc: core DP main context
Aniruddha Paul80f52e72017-10-28 18:16:58 +0530162 * @link_desc_addr: link descriptor addr
Tallapragada Kalyana7010a62017-10-20 12:20:19 +0530163 *
164 * Return: QDF_STATUS
165 */
psimha223883f2017-11-16 17:18:51 -0800166QDF_STATUS
Aniruddha Paul80f52e72017-10-28 18:16:58 +0530167dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr,
168 uint8_t bm_action)
Tallapragada Kalyana7010a62017-10-20 12:20:19 +0530169{
Tallapragada Kalyana7010a62017-10-20 12:20:19 +0530170 struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
171 void *wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
172 void *hal_soc = soc->hal_soc;
173 QDF_STATUS status = QDF_STATUS_E_FAILURE;
174 void *src_srng_desc;
175
176 if (!wbm_rel_srng) {
177 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
178 "WBM RELEASE RING not initialized");
179 return status;
180 }
181
182 if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
183
184 /* TODO */
185 /*
186 * Need API to convert from hal_ring pointer to
187 * Ring Type / Ring Id combo
188 */
189 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
190 FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
191 wbm_rel_srng);
192 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
193 goto done;
194 }
195 src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
196 if (qdf_likely(src_srng_desc)) {
197 /* Return link descriptor through WBM ring (SW2WBM)*/
198 hal_rx_msdu_link_desc_set(hal_soc,
Aniruddha Paul80f52e72017-10-28 18:16:58 +0530199 src_srng_desc, link_desc_addr, bm_action);
Tallapragada Kalyana7010a62017-10-20 12:20:19 +0530200 status = QDF_STATUS_SUCCESS;
201 } else {
202 struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;
203 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
204 FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
205 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
206 "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
207 *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
208 *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp);
209 }
210done:
211 hal_srng_access_end(hal_soc, wbm_rel_srng);
212 return status;
Aniruddha Paul80f52e72017-10-28 18:16:58 +0530213
214}
215
216/**
217 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
218 * (WBM), following error handling
219 *
220 * @soc: core DP main context
221 * @ring_desc: opaque pointer to the REO error ring descriptor
222 *
223 * Return: QDF_STATUS
224 */
225QDF_STATUS
226dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action)
227{
228 void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);
229 return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
Tallapragada Kalyana7010a62017-10-20 12:20:19 +0530230}
231
/**
 * dp_rx_msdus_drop() - Drops all MSDU's per MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: output - pool id (pdev) that owned the dropped buffers
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDU in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
		struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		uint8_t *mac_id,
		uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct dp_pdev *pdev;
	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	/* Translate the link descriptor's physical address (from the REO
	 * ring entry) into a virtual address we can parse.
	 */
	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &mpdu_desc_info->msdu_count);

	/* Walk every MSDU of the MPDU, bounded by the caller's quota */
	for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va_rxdma_buf(soc,
			msdu_list.sw_cookie[i]);

		qdf_assert_always(rx_desc);

		/* all buffers from a MSDU link link belong to same pdev */
		*mac_id = rx_desc->pool_id;
		pdev = soc->pdev_list[rx_desc->pool_id];

		if (!dp_rx_desc_check_magic(rx_desc)) {
			/* NOTE(review): this early return skips the
			 * dp_rx_link_desc_return() below, so the link
			 * descriptor is not handed back to WBM on this
			 * path - confirm whether that leak is intended.
			 */
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					FL("Invalid rx_desc cookie=%d"),
					msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		/* Unmap before touching the buffer contents via the CPU */
		qdf_nbuf_unmap_single(soc->osdev,
				      rx_desc->nbuf, QDF_DMA_FROM_DEVICE);

		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
						rx_desc->rx_buf_start);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Packet received with PN error for tid :%d", tid);

		/* Dump the PN from the TLVs when encryption info is valid,
		 * purely for diagnostics.
		 */
		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(rx_tlv_hdr))
			hal_rx_print_pn(rx_tlv_hdr);

		/* Just free the buffers */
		qdf_nbuf_free(rx_desc->nbuf);

		/* Recycle the sw descriptor into the owning pdev free list */
		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
					    &pdev->free_list_tail, rx_desc);
	}

	/* Return link descriptor through WBM ring (SW2WBM)*/
	dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

	return rx_bufs_used;
}
313
/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: output - pool id of the pdev owning the dropped buffers
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling
 * If the peer is configured to ignore the PN check errors
 * or if DP feels, that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO e.g. duplicate detection/routing to other cores
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      uint8_t *mac_id,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	/* Placeholder: currently never set, so the frame is always
	 * dropped below (see the TODO inside the peer branch).
	 */
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);


	/* Takes a reference on success; released below */
	peer = dp_peer_find_by_id(soc, peer_id);

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"discard rx due to PN error for peer  %pK  "
			"(%02x:%02x:%02x:%02x:%02x:%02x)",
			peer,
			peer->mac_addr.raw[0], peer->mac_addr.raw[1],
			peer->mac_addr.raw[2], peer->mac_addr.raw[3],
			peer->mac_addr.raw[4], peer->mac_addr.raw[5]);

		dp_peer_unref_del_find_by_id(peer);
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"Packet received with PN error");

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						mac_id, quota);

	return rx_bufs_used;
}
374
375/**
376 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
377 *
378 * @soc: core txrx main context
379 * @ring_desc: opaque pointer to the REO error ring descriptor
380 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
381 * @head: head of the local descriptor free-list
382 * @tail: tail of the local descriptor free-list
383 * @quota: No. of units (packets) that can be serviced in one shot.
384 *
385 * This function implements the error handling when sequence number
386 * of the MPDU jumps suddenly by 2K.Today there are 2 cases that
387 * need to be handled:
388 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
389 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
390 * For case A) the protocol stack is invoked to generate DELBA/DEAUTH frame
391 * For case B), the frame is normally dropped, no more action is taken
392 *
393 * Return: uint32_t: No. of elements processed
394 */
Jeff Johnson8a4fd9b2017-01-05 16:33:40 -0800395static uint32_t
Debashis Dutt390645c2016-10-04 17:31:45 -0700396dp_rx_2k_jump_handle(struct dp_soc *soc, void *ring_desc,
397 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
Pamidipati, Vijayb75e8102018-11-05 12:41:18 +0530398 uint8_t *mac_id, uint32_t quota)
Debashis Dutt390645c2016-10-04 17:31:45 -0700399{
400 return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
Pamidipati, Vijayb75e8102018-11-05 12:41:18 +0530401 mac_id, quota);
Debashis Dutt390645c2016-10-04 17:31:45 -0700402}
403
#ifdef CONFIG_MCL
/*
 * DP_PDEV_INVALID_PEER_MSDU_CHECK - debug aid for MCL builds: assert
 * that the pdev invalid-peer MSDU list head/tail are both empty at the
 * point of the check. Compiles to nothing on non-MCL builds.
 */
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
	do {                                \
		qdf_assert_always(!(head)); \
		qdf_assert_always(!(tail)); \
	} while (0)
#else
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
#endif
413
/**
 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu
 * to pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id (index into soc->pdev_list)
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		  uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	/* TODO: Currently only single radio is supported, hence
	 * pdev hard coded to '0' index
	 */
	struct dp_pdev *dp_pdev = soc->pdev_list[mac_id];

	/* first_nbuf false => this nbuf starts a new MPDU chain */
	if (!dp_pdev->first_nbuf) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		/* Remember the ppdu id so later msdus can be matched */
		dp_pdev->ppdu_id = HAL_RX_HW_DESC_GET_PPDUID_GET(rx_tlv_hdr);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * new a-msdu even before the previous a-msdu is completely
		 * received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			qdf_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;
		hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr,
				&(dp_pdev->ppdu_info.rx_status));

	}

	/* Same ppdu id and msdu_done set => this is the last msdu of
	 * the MPDU; close the chain.
	 */
	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
	    hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		qdf_assert_always(dp_pdev->first_nbuf == true);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	/*
	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
	 * should be NULL here, add the checking for debugging purpose
	 * in case some corner case.
	 */
	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
					dp_pdev->invalid_peer_tail_msdu);
	/* Append the current nbuf to the pdev invalid-peer msdu list */
	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}
486
/**
 * dp_2k_jump_handle() - Function to handle 2k jump exception
 * on WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer_id: peer id of first msdu
 * @tid: Tid for which exception occurred
 *
 * This function handles 2k jump violations arising out
 * of receiving aggregates in non BA case. This typically
 * may happen if aggregates are received on a QOS enabled TID
 * while Rx window size is still initialized to value of 2. Or
 * it may also happen if negotiated window size is 1 but peer
 * sends aggregates.
 *
 * Return: none (nbuf is always consumed/freed before returning)
 */

void
dp_2k_jump_handle(struct dp_soc *soc,
		  qdf_nbuf_t nbuf,
		  uint8_t *rx_tlv_hdr,
		  uint16_t peer_id,
		  uint8_t tid)
{
	uint32_t ppdu_id;
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;

	/* Takes a reference when found; released at free_nbuf */
	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		goto free_nbuf;
	}
	rx_tid = &peer->rx_tid[tid];
	/* NOTE(review): rx_tid is the address of an array element of a
	 * non-NULL peer, so this check can never fire; dead code kept
	 * as-is. (tid itself is not range-checked here - confirm callers
	 * guarantee tid < array size.)
	 */
	if (qdf_unlikely(!rx_tid)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "rx_tid is NULL!!");
		goto free_nbuf;
	}
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	ppdu_id = hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr);

	/*
	 * If BA session is created and a non-aggregate packet is
	 * landing here then the issue is with sequence number mismatch.
	 * Proceed with delba even in that case
	 */
	if (rx_tid->ppdu_id_2k != ppdu_id &&
	    rx_tid->ba_status != DP_RX_BA_ACTIVE) {
		/* First 2k-jump seen for this ppdu: record and drop only */
		rx_tid->ppdu_id_2k = ppdu_id;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		goto free_nbuf;
	}
	if (!rx_tid->delba_tx_status) {
		/* No DELBA in flight yet: mark one pending and send it.
		 * tid_lock is dropped before the (potentially slow)
		 * control-path callback.
		 */
		rx_tid->delba_tx_retry++;
		rx_tid->delba_tx_status = 1;
		rx_tid->delba_rcode =
			IEEE80211_REASON_QOS_SETUP_REQUIRED;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		soc->cdp_soc.ol_ops->send_delba(peer->vdev->pdev->ctrl_pdev,
						peer->ctrl_peer,
						peer->mac_addr.raw,
						tid,
						peer->vdev->ctrl_vdev,
						rx_tid->delba_rcode);
	} else {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}

free_nbuf:
	/* Drop the peer reference (if taken) and always consume nbuf */
	if (peer)
		dp_peer_unref_del_find_by_id(peer);
	qdf_nbuf_free(nbuf);
	return;
}
565
#ifdef QCA_WIFI_QCA6390
/**
 * dp_rx_null_q_handle_invalid_peer_id_exception() - to find exception
 * @soc: pointer to dp_soc struct
 * @pool_id: Pool id to find dp_pdev
 * @rx_tlv_hdr: TLV header of received packet
 * @nbuf: SKB
 *
 * In certain types of packets if peer_id is not correct then
 * driver may not be able find. Try finding peer by addr_2 of
 * received MPDU. If you find the peer then most likely sw_peer_id &
 * ast_idx is corrupted.
 *
 * Return: True if you find the peer by addr_2 of received MPDU else false
 */
static bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	uint8_t local_id;
	struct dp_peer *peer = NULL;
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
	struct dp_pdev *pdev = soc->pdev_list[pool_id];
	/* Interpret the 802.11 header at the start of the payload so we
	 * can read addr2 (transmitter address).
	 */
	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;

	/*
	 * WAR- In certain types of packets if peer_id is not correct then
	 * driver may not be able find. Try finding peer by addr_2 of
	 * received MPDU
	 */
	if (wh)
		peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev,
					    wh->i_addr2, &local_id);
	if (peer) {
		/* Peer exists but hw metadata pointed elsewhere: count the
		 * corruption, dump TLVs for debug, and consume the nbuf.
		 */
		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
				 1, qdf_nbuf_len(nbuf));
		qdf_nbuf_free(nbuf);

		return true;
	}
	return false;
}

/**
 * dp_rx_null_q_check_pkt_len_exception() - Check for pktlen validity
 * @soc: DP SOC context
 * @pkt_len: computed length of the pkt from caller in bytes
 *
 * Return: true if pktlen > RX_BUFFER_SIZE, else return false
 *
 */
static inline
bool dp_rx_null_q_check_pkt_len_exception(struct dp_soc *soc, uint32_t pkt_len)
{
	if (qdf_unlikely(pkt_len > RX_BUFFER_SIZE)) {
		/* Oversized length from hw metadata: count it as invalid */
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
				 1, pkt_len);
		return true;
	} else {
		return false;
	}
}

#else
/* Non-QCA6390 targets: exceptions never taken, both checks are no-ops */
static inline bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	return false;
}

static inline
bool dp_rx_null_q_check_pkt_len_exception(struct dp_soc *soc, uint32_t pkt_len)
{
	return false;
}

#endif
651
sumedh baikadydf4a57c2018-04-08 22:19:22 -0700652/**
Tallapragada Kalyan084a46d2018-03-15 10:51:35 +0530653 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
654 * descriptor violation on either a
655 * REO or WBM ring
656 *
657 * @soc: core DP main context
658 * @nbuf: buffer pointer
659 * @rx_tlv_hdr: start of rx tlv header
660 * @pool_id: mac id
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +0530661 * @peer: peer handle
Tallapragada Kalyan084a46d2018-03-15 10:51:35 +0530662 *
663 * This function handles NULL queue descriptor violations arising out
664 * a missing REO queue for a given peer or a given TID. This typically
665 * may happen if a packet is received on a QOS enabled TID before the
666 * ADDBA negotiation for that TID, when the TID queue is setup. Or
667 * it may also happen for MC/BC frames if they are not routed to the
668 * non-QOS TID queue, in the absence of any other default TID queue.
669 * This error can show up both in a REO destination or WBM release ring.
670 *
Mohit Khannaf085b612019-04-02 14:43:10 -0700671 * Return: QDF_STATUS_SUCCESS, if nbuf handled successfully. QDF status code
672 * if nbuf could not be handled or dropped.
Tallapragada Kalyan084a46d2018-03-15 10:51:35 +0530673 */
Mohit Khannaf085b612019-04-02 14:43:10 -0700674static QDF_STATUS
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +0530675dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
676 uint8_t *rx_tlv_hdr, uint8_t pool_id,
677 struct dp_peer *peer)
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +0530678{
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +0530679 uint32_t pkt_len, l2_hdr_offset;
680 uint16_t msdu_len;
Tallapragada Kalyanaae8c412017-02-13 12:00:17 +0530681 struct dp_vdev *vdev;
Tallapragada Kalyan94034632017-12-07 17:29:13 +0530682 uint8_t tid;
Srinivas Girigowda03bd4b62019-02-25 10:57:08 -0800683 qdf_ether_header_t *eh;
Pratik Gandhi76139082017-07-28 19:18:02 +0530684
Tallapragada Kalyanc5ac6382017-12-11 15:40:04 +0530685 qdf_nbuf_set_rx_chfrag_start(nbuf,
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +0530686 hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr));
Tallapragada Kalyanc5ac6382017-12-11 15:40:04 +0530687 qdf_nbuf_set_rx_chfrag_end(nbuf,
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +0530688 hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr));
689 qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr));
690 qdf_nbuf_set_da_valid(nbuf,
691 hal_rx_msdu_end_da_is_valid_get(rx_tlv_hdr));
692 qdf_nbuf_set_sa_valid(nbuf,
693 hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr));
Tallapragada Kalyanc5ac6382017-12-11 15:40:04 +0530694
Tallapragada Kalyan94034632017-12-07 17:29:13 +0530695 l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
696 msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +0530697 pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
698
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +0530699 if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
Mohit Khannaf085b612019-04-02 14:43:10 -0700700 if (dp_rx_null_q_check_pkt_len_exception(soc, pkt_len))
701 goto drop_nbuf;
702
703 /* Set length in nbuf */
704 qdf_nbuf_set_pktlen(nbuf,
705 qdf_min(pkt_len, (uint32_t)RX_BUFFER_SIZE));
706 qdf_assert_always(nbuf->data == rx_tlv_hdr);
707 }
708
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +0530709 /*
710 * Check if DMA completed -- msdu_done is the last bit
711 * to be written
712 */
Tallapragada Kalyan94034632017-12-07 17:29:13 +0530713 if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +0530714
Mohit Khannaf085b612019-04-02 14:43:10 -0700715 dp_err_rl("MSDU DONE failure");
Balamurugan Mahalingamd0159642018-07-11 15:02:29 +0530716 hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
717 QDF_TRACE_LEVEL_INFO);
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +0530718 qdf_assert(0);
719 }
720
Krunal Soni53add652018-10-05 22:42:35 -0700721 if (!peer &&
722 dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
723 rx_tlv_hdr, nbuf))
Mohit Khannaf085b612019-04-02 14:43:10 -0700724 return QDF_STATUS_E_FAILURE;
Krunal Soni53add652018-10-05 22:42:35 -0700725
Tallapragada Kalyanaae8c412017-02-13 12:00:17 +0530726 if (!peer) {
Pratik Gandhi3da3bc72017-03-16 18:20:22 +0530727 bool mpdu_done = false;
Tallapragada Kalyan084a46d2018-03-15 10:51:35 +0530728 struct dp_pdev *pdev = soc->pdev_list[pool_id];
Pratik Gandhi3da3bc72017-03-16 18:20:22 +0530729
Mohit Khannaf085b612019-04-02 14:43:10 -0700730 dp_err_rl("peer is NULL");
731 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
Tallapragada Kalyan5deeef22018-08-24 18:19:41 +0530732 qdf_nbuf_len(nbuf));
733
Tallapragada Kalyan94034632017-12-07 17:29:13 +0530734 mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
chenguo91c90102017-12-12 16:16:37 +0800735 /* Trigger invalid peer handler wrapper */
nobelj7dfc8cc2019-03-06 18:25:56 -0800736 dp_rx_process_invalid_peer_wrapper(soc,
737 pdev->invalid_peer_head_msdu,
738 mpdu_done);
Pratik Gandhi3da3bc72017-03-16 18:20:22 +0530739
Tallapragada Kalyan084a46d2018-03-15 10:51:35 +0530740 if (mpdu_done) {
741 pdev->invalid_peer_head_msdu = NULL;
742 pdev->invalid_peer_tail_msdu = NULL;
743 }
Mohit Khannaf085b612019-04-02 14:43:10 -0700744 return QDF_STATUS_E_FAILURE;
Tallapragada Kalyanaae8c412017-02-13 12:00:17 +0530745 }
746
Tallapragada Kalyan3a0005c2017-03-10 15:22:57 +0530747 vdev = peer->vdev;
748 if (!vdev) {
Mohit Khannaf085b612019-04-02 14:43:10 -0700749 dp_err_rl("Null vdev!");
Tallapragada Kalyan3a0005c2017-03-10 15:22:57 +0530750 DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
Mohit Khannaf085b612019-04-02 14:43:10 -0700751 goto drop_nbuf;
Tallapragada Kalyan3a0005c2017-03-10 15:22:57 +0530752 }
753
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +0530754 /*
755 * Advance the packet start pointer by total size of
756 * pre-header TLV's
757 */
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +0530758 if (qdf_nbuf_is_frag(nbuf))
Pramod Simha21e69f52018-07-03 16:45:00 -0700759 qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
760 else
761 qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +0530762
Tallapragada Kalyan94034632017-12-07 17:29:13 +0530763 if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
764 /* this is a looped back MCBC pkt, drop it */
Tallapragada Kalyan5deeef22018-08-24 18:19:41 +0530765 DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
Mohit Khannaf085b612019-04-02 14:43:10 -0700766 goto drop_nbuf;
Tallapragada Kalyan71cc01b2017-08-23 12:47:06 +0530767 }
Mohit Khannaf085b612019-04-02 14:43:10 -0700768
Nandha Kishore Easwaran47e74162017-12-12 11:54:01 +0530769 /*
770 * In qwrap mode if the received packet matches with any of the vdev
771 * mac addresses, drop it. Donot receive multicast packets originated
772 * from any proxysta.
773 */
774 if (check_qwrap_multicast_loopback(vdev, nbuf)) {
Tallapragada Kalyan5deeef22018-08-24 18:19:41 +0530775 DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
Mohit Khannaf085b612019-04-02 14:43:10 -0700776 goto drop_nbuf;
Nandha Kishore Easwaran47e74162017-12-12 11:54:01 +0530777 }
778
Tallapragada Kalyan71cc01b2017-08-23 12:47:06 +0530779
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +0530780 if (qdf_unlikely((peer->nawds_enabled == true) &&
Tallapragada Kalyan94034632017-12-07 17:29:13 +0530781 hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
Mohit Khannaf085b612019-04-02 14:43:10 -0700782 dp_err_rl("free buffer for multicast packet");
Ruchi, Agrawal27550482018-02-20 19:43:41 +0530783 DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
Mohit Khannaf085b612019-04-02 14:43:10 -0700784 goto drop_nbuf;
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +0530785 }
786
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +0530787 if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
Mohit Khannaf085b612019-04-02 14:43:10 -0700788 dp_err_rl("mcast Policy Check Drop pkt");
789 goto drop_nbuf;
Tallapragada Kalyan2a5fc622017-12-08 21:07:43 +0530790 }
Tallapragada Kalyan274eb9e2017-05-16 18:59:10 +0530791 /* WDS Source Port Learning */
Pamidipati, Vijayd3478ef2018-02-06 23:52:29 +0530792 if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
793 vdev->wds_enabled))
Tallapragada Kalyan94034632017-12-07 17:29:13 +0530794 dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf);
Tallapragada Kalyan274eb9e2017-05-16 18:59:10 +0530795
Tallapragada Kalyan94034632017-12-07 17:29:13 +0530796 if (hal_rx_mpdu_start_mpdu_qos_control_valid_get(rx_tlv_hdr)) {
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -0800797 /* TODO: Assuming that qos_control_valid also indicates
798 * unicast. Should we check this?
799 */
Balamurugan Mahalingamd0159642018-07-11 15:02:29 +0530800 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
Tallapragada Kalyan8c93d5d2018-05-28 05:02:53 +0530801 if (peer && !peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -0800802 /* IEEE80211_SEQ_MAX indicates invalid start_seq */
803 dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
804 }
805 }
806
Tallapragada Kalyan3a0005c2017-03-10 15:22:57 +0530807 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
Tallapragada Kalyanaae8c412017-02-13 12:00:17 +0530808 qdf_nbuf_set_next(nbuf, NULL);
Pamidipati, Vijayeb8a92c2017-05-01 00:55:56 +0530809 dp_rx_deliver_raw(vdev, nbuf, peer);
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +0530810 } else {
Tallapragada Kalyan3a0005c2017-03-10 15:22:57 +0530811 if (vdev->osif_rx) {
Tallapragada Kalyan3a0005c2017-03-10 15:22:57 +0530812 qdf_nbuf_set_next(nbuf, NULL);
Amir Patel3217ade2018-09-07 12:21:35 +0530813 DP_STATS_INC_PKT(peer, rx.to_stack, 1,
814 qdf_nbuf_len(nbuf));
Neil Zhaofca09192018-11-27 15:16:19 -0800815
Karunakar Dasineni142f9ba2019-03-19 23:04:59 -0700816 /*
817 * Update the protocol tag in SKB based on
818 * CCE metadata
819 */
Karunakar Dasinenid8c7ad22019-04-18 18:15:02 -0700820 dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
821 EXCEPTION_DEST_RING_ID,
822 true, true);
Karunakar Dasineni142f9ba2019-03-19 23:04:59 -0700823
Amir Patel3217ade2018-09-07 12:21:35 +0530824 if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
825 rx_tlv_hdr) &&
826 (vdev->rx_decap_type ==
827 htt_cmn_pkt_type_ethernet))) {
Srinivas Girigowda03bd4b62019-02-25 10:57:08 -0800828 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
Amir Patel3217ade2018-09-07 12:21:35 +0530829
830 DP_STATS_INC_PKT(peer, rx.multicast, 1,
831 qdf_nbuf_len(nbuf));
Srinivas Girigowda79502972019-02-11 12:25:12 -0800832 if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
Amir Patel3217ade2018-09-07 12:21:35 +0530833 DP_STATS_INC_PKT(peer, rx.bcast, 1,
834 qdf_nbuf_len(nbuf));
835 }
836 }
Neil Zhaofca09192018-11-27 15:16:19 -0800837
838 vdev->osif_rx(vdev->osif_vdev, nbuf);
839
Tallapragada Kalyan3a0005c2017-03-10 15:22:57 +0530840 } else {
Mohit Khannaf085b612019-04-02 14:43:10 -0700841 dp_err_rl("INVALID osif_rx. vdev %pK", vdev);
Tallapragada Kalyan3a0005c2017-03-10 15:22:57 +0530842 DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
Mohit Khannaf085b612019-04-02 14:43:10 -0700843 goto drop_nbuf;
Tallapragada Kalyan3a0005c2017-03-10 15:22:57 +0530844 }
845 }
Mohit Khannaf085b612019-04-02 14:43:10 -0700846 return QDF_STATUS_SUCCESS;
847
848drop_nbuf:
849 qdf_nbuf_free(nbuf);
850 return QDF_STATUS_E_FAILURE;
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +0530851}
852
/**
 * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
 *			       frames to OS or wifi parse errors.
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer reference (may be NULL; handled via invalid-peer path)
 * @err_code: rxdma err code
 *
 * Ownership: this function always consumes @nbuf — it is either freed
 * here or handed off (invalid-peer wrapper, raw delivery, or the OS
 * shim via vdev->osif_rx) on every path.
 *
 * Return: None
 */
void
dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr, struct dp_peer *peer,
			uint8_t err_code)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	qdf_ether_header_t *eh;
	bool is_broadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));

		/* Dump the full RX TLVs for post-mortem before asserting */
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	/* Total frame length = payload + L3 pad + pre-header TLVs */
	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/*
	 * Set length in nbuf.
	 * NOTE(review): unlike the null-queue path, pkt_len is not clamped
	 * to RX_BUFFER_SIZE here — presumably rxdma error frames cannot
	 * exceed one buffer; confirm against the HW programming guide.
	 */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	/* Mark as a complete (unfragmented, un-chained) MSDU */
	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!peer) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper (consumes nbuf) */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true);
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLV's
	 */
	qdf_nbuf_pull_head(nbuf, l2_hdr_offset + RX_PKT_TLVS_LEN);

	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
		uint8_t *pkt_type;

		/*
		 * Ethertype sits right after the two MAC addresses.
		 * Let VLAN-tagged STP frames through (delivered via the
		 * mesh/raw path); drop all other wifi-parse failures.
		 */
		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q) &&
		    *(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
							htons(QDF_LLC_STP)) {
			DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
			goto process_mesh;
		} else {
			DP_STATS_INC(vdev->pdev, dropped.wifi_parse, 1);
			qdf_nbuf_free(nbuf);
			return;
		}
	}

	/* Raw decap frames skip the rekey-frame exemptions below */
	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
		goto process_mesh;

	/*
	 * WAPI cert AP sends rekey frames as unencrypted.
	 * Thus RXDMA will report unencrypted frame error.
	 * To pass WAPI cert case, SW needs to pass unencrypted
	 * rekey frame to stack.
	 */
	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
		qdf_nbuf_cb_update_peer_local_id(nbuf, peer->local_id);
		goto process_rx;
	}
	/*
	 * In dynamic WEP case rekey frames are not encrypted
	 * similar to WAPI. Allow EAPOL when 8021+wep is enabled and
	 * key install is already done
	 */
	if ((vdev->sec_type == cdp_sec_type_wep104) &&
	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
		goto process_rx;

process_mesh:

	/* Non-mesh vdev never accepts unencrypted-error frames */
	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	if (vdev->mesh_vdev) {
		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
				== QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
				  FL("mesh pkt filtered"));
			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

			qdf_nbuf_free(nbuf);
			return;
		}
		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
	}
process_rx:
	/* Multicast/broadcast accounting for ethernet-decap frames */
	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
			 (vdev->rx_decap_type ==
				htt_cmn_pkt_type_ethernet))) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		is_broadcast = (QDF_IS_ADDR_BROADCAST
				(eh->ether_dhost)) ? 1 : 0;
		DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf));
		if (is_broadcast) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1,
					 qdf_nbuf_len(nbuf));
		}
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  EXCEPTION_DEST_RING_ID, true, true);
		DP_STATS_INC(peer, rx.to_stack.num, 1);
		/* Hand the frame up to the OS shim; nbuf ownership passes */
		vdev->osif_rx(vdev->osif_vdev, nbuf);
	}

	return;
}
1009
1010/**
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301011 * dp_rx_process_mic_error(): Function to pass mic error indication to umac
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05301012 * @soc: core DP main context
1013 * @nbuf: buffer pointer
1014 * @rx_tlv_hdr: start of rx tlv header
1015 * @peer: peer handle
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301016 *
1017 * return: void
1018 */
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05301019void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
1020 uint8_t *rx_tlv_hdr, struct dp_peer *peer)
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301021{
1022 struct dp_vdev *vdev = NULL;
1023 struct dp_pdev *pdev = NULL;
1024 struct ol_if_ops *tops = NULL;
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301025 struct ieee80211_frame *wh;
1026 uint8_t *rx_pkt_hdr;
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05301027 uint16_t rx_seq, fragno;
Pramod Simha366c1e02018-06-20 11:55:50 -07001028 unsigned int tid;
1029 QDF_STATUS status;
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301030
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301031 if (!hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr))
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301032 return;
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301033
1034 rx_pkt_hdr = hal_rx_pkt_hdr_get(qdf_nbuf_data(nbuf));
1035 wh = (struct ieee80211_frame *)rx_pkt_hdr;
1036
jiad9dee72a2017-12-05 13:39:25 +08001037 if (!peer) {
Mohit Khannaf085b612019-04-02 14:43:10 -07001038 dp_err_rl("peer not found");
jiad9dee72a2017-12-05 13:39:25 +08001039 goto fail;
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301040 }
1041
jiad9dee72a2017-12-05 13:39:25 +08001042 vdev = peer->vdev;
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301043 if (!vdev) {
Mohit Khannaf085b612019-04-02 14:43:10 -07001044 dp_err_rl("VDEV not found");
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301045 goto fail;
1046 }
1047
jiad9dee72a2017-12-05 13:39:25 +08001048 pdev = vdev->pdev;
1049 if (!pdev) {
Mohit Khannaf085b612019-04-02 14:43:10 -07001050 dp_err_rl("PDEV not found");
jiad9dee72a2017-12-05 13:39:25 +08001051 goto fail;
1052 }
1053
Balamurugan Mahalingam96d2d412018-07-10 10:11:58 +05301054 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, qdf_nbuf_data(nbuf));
Pramod Simha366c1e02018-06-20 11:55:50 -07001055 rx_seq = (((*(uint16_t *)wh->i_seq) &
1056 IEEE80211_SEQ_SEQ_MASK) >>
1057 IEEE80211_SEQ_SEQ_SHIFT);
1058
1059 fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));
1060
1061 /* Can get only last fragment */
1062 if (fragno) {
1063 status = dp_rx_defrag_add_last_frag(soc, peer,
1064 tid, rx_seq, nbuf);
Mohit Khannaf085b612019-04-02 14:43:10 -07001065 dp_info_rl("Frag pkt seq# %d frag# %d consumed status %d !",
1066 rx_seq, fragno, status);
Pramod Simha366c1e02018-06-20 11:55:50 -07001067 return;
1068 }
1069
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301070 tops = pdev->soc->cdp_soc.ol_ops;
1071 if (tops->rx_mic_error)
Akshay Kosigi1a9c6d12018-04-26 00:54:23 +05301072 tops->rx_mic_error(pdev->ctrl_pdev, vdev->vdev_id, wh);
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301073
1074fail:
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301075 qdf_nbuf_free(nbuf);
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301076 return;
1077}
1078
/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	uint32_t count = 0;
	uint32_t rx_bufs_used = 0;
	/* Per-pdev count of buffers reaped, replenished after the loop */
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	uint8_t buf_type;
	uint8_t error, rbm;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t cookie = 0;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
	uint16_t num_msdus;
	struct dp_rx_desc *rx_desc = NULL;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	/* Reap ring entries until quota is exhausted or the ring is empty */
	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		DP_STATS_INC(soc, rx.err_ring_pkts, 1);

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		qdf_assert(error == HAL_REO_ERROR_DETECTED);

		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
		/*
		 * For REO error ring, expect only MSDU LINK DESC
		 */
		qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

		cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
						LINK_DESC_ID_START);

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		/* Resolve the link descriptor and its MSDU list */
		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
				     &num_msdus);

		if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
				 (msdu_list.rbm[0] !=
					HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) {
			/* TODO */
			/* Call appropriate handler */
			/* In NSS offload mode foreign RBMs are expected,
			 * so only count/log when offload is disabled.
			 */
			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("Invalid RBM %d"),
					  msdu_list.rbm[0]);
			}

			/* Return link descriptor through WBM ring (SW2WBM)*/
			dp_rx_link_desc_return(soc, ring_desc,
					HAL_BM_ACTION_RELEASE_MSDU_LIST);
			continue;
		}

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc,
						      msdu_list.sw_cookie[0]);
		qdf_assert_always(rx_desc);

		mac_id = rx_desc->pool_id;

		/* Get the MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/*
			 * We only handle one msdu per link desc for fragmented
			 * case. We drop the msdus and release the link desc
			 * back if there are more than one msdu in link desc.
			 */
			if (qdf_unlikely(num_msdus > 1)) {
				count = dp_rx_msdus_drop(soc, ring_desc,
							 &mpdu_desc_info,
							 &mac_id, quota);
				rx_bufs_reaped[mac_id] += count;
				continue;
			}

			count = dp_rx_frag_handle(soc,
						  ring_desc, &mpdu_desc_info,
						  rx_desc, &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			DP_STATS_INC(soc, rx.rx_frags, 1);
			continue;
		}

		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TOD0 */
			DP_STATS_INC(soc,
				rx.err.
				reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
				1);
			count = dp_rx_pn_error_handle(soc,
						      ring_desc,
						      &mpdu_desc_info, &mac_id,
						      quota);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}

		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TOD0 */
			DP_STATS_INC(soc,
				rx.err.
				reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
				1);

			count = dp_rx_2k_jump_handle(soc,
						     ring_desc,
						     &mpdu_desc_info,
						     &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}
	}

done:
	hal_srng_access_end(hal_soc, hal_ring);

	/* Opportunistically flush the defrag waitlist on timeout */
	if (soc->rx.flags.defrag_timeout_check) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		if (now_ms >= soc->rx.defrag.next_flush_ms)
			dp_rx_defrag_waitlist_flush(soc);
	}

	/* Replenish every pool the loop consumed buffers from */
	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&dp_pdev->free_list_head,
						&dp_pdev->free_list_tail);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}
1276
1277/**
1278 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
1279 *
1280 * @soc: core txrx main context
1281 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
1282 * @quota: No. of units (packets) that can be serviced in one shot.
1283 *
1284 * This function implements error processing and top level demultiplexer
1285 * for all the frames routed to WBM2HOST sw release ring.
1286 *
1287 * Return: uint32_t: No. of elements processed
1288 */
1289uint32_t
1290dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
1291{
1292 void *hal_soc;
1293 void *ring_desc;
1294 struct dp_rx_desc *rx_desc;
Tallapragada Kalyanaae8c412017-02-13 12:00:17 +05301295 union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
1296 union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301297 uint32_t rx_bufs_used = 0;
1298 uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
Debashis Dutt390645c2016-10-04 17:31:45 -07001299 uint8_t buf_type, rbm;
Debashis Dutt390645c2016-10-04 17:31:45 -07001300 uint32_t rx_buf_cookie;
Tallapragada Kalyanaae8c412017-02-13 12:00:17 +05301301 uint8_t mac_id;
Kai Chen6eca1a62017-01-12 10:17:53 -08001302 struct dp_pdev *dp_pdev;
1303 struct dp_srng *dp_rxdma_srng;
1304 struct rx_desc_pool *rx_desc_pool;
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301305 uint8_t *rx_tlv_hdr;
1306 qdf_nbuf_t nbuf_head = NULL;
1307 qdf_nbuf_t nbuf_tail = NULL;
1308 qdf_nbuf_t nbuf, next;
1309 struct hal_wbm_err_desc_info wbm_err_info = { 0 };
1310 uint8_t pool_id;
sumedh baikadydf4a57c2018-04-08 22:19:22 -07001311 uint8_t tid = 0;
Debashis Dutt390645c2016-10-04 17:31:45 -07001312
1313 /* Debug -- Remove later */
1314 qdf_assert(soc && hal_ring);
1315
1316 hal_soc = soc->hal_soc;
1317
1318 /* Debug -- Remove later */
1319 qdf_assert(hal_soc);
1320
1321 if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
1322
1323 /* TODO */
1324 /*
1325 * Need API to convert from hal_ring pointer to
1326 * Ring Type / Ring Id combo
1327 */
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301328 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Jeff Johnson3f217e22017-09-18 10:13:35 -07001329 FL("HAL RING Access Failed -- %pK"), hal_ring);
Debashis Dutt390645c2016-10-04 17:31:45 -07001330 goto done;
1331 }
1332
Pamidipati, Vijay3379c742017-11-01 18:50:43 +05301333 while (qdf_likely(quota-- && (ring_desc =
1334 hal_srng_dst_get_next(hal_soc, hal_ring)))) {
Debashis Dutt390645c2016-10-04 17:31:45 -07001335
1336 /* XXX */
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301337 buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);
Debashis Dutt390645c2016-10-04 17:31:45 -07001338
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301339 /*
1340 * For WBM ring, expect only MSDU buffers
1341 */
1342 qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);
1343
1344 qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
1345 == HAL_RX_WBM_ERR_SRC_RXDMA) ||
1346 (HAL_RX_WBM_ERR_SRC_GET(ring_desc)
1347 == HAL_RX_WBM_ERR_SRC_REO));
Debashis Dutt390645c2016-10-04 17:31:45 -07001348
1349 /*
1350 * Check if the buffer is to be processed on this processor
1351 */
1352 rbm = hal_rx_ret_buf_manager_get(ring_desc);
1353
1354 if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
1355 /* TODO */
1356 /* Call appropriate handler */
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301357 DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301358 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1359 FL("Invalid RBM %d"), rbm);
Debashis Dutt390645c2016-10-04 17:31:45 -07001360 continue;
1361 }
1362
Tallapragada Kalyanaae8c412017-02-13 12:00:17 +05301363 rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);
1364
Kai Chen6eca1a62017-01-12 10:17:53 -08001365 rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
Pamidipati, Vijayb75e8102018-11-05 12:41:18 +05301366 qdf_assert_always(rx_desc);
Tallapragada Kalyanaae8c412017-02-13 12:00:17 +05301367
Pamidipati, Vijay53794742017-06-03 11:24:32 +05301368 if (!dp_rx_desc_check_magic(rx_desc)) {
1369 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1370 FL("Invalid rx_desc cookie=%d"),
1371 rx_buf_cookie);
1372 continue;
1373 }
1374
Tallapragada Kalyaneff377a2019-01-09 19:13:19 +05301375 /*
1376 * this is a unlikely scenario where the host is reaping
1377 * a descriptor which it already reaped just a while ago
1378 * but is yet to replenish it back to HW.
1379 * In this case host will dump the last 128 descriptors
1380 * including the software descriptor rx_desc and assert.
1381 */
1382 if (qdf_unlikely(!rx_desc->in_use)) {
1383 DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
1384 dp_rx_dump_info_and_assert(soc, hal_ring,
1385 ring_desc, rx_desc);
1386 }
1387
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301388 nbuf = rx_desc->nbuf;
Ankit Kumar0ae4abc2019-05-02 15:08:42 +05301389 qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE);
Pamidipati, Vijay53794742017-06-03 11:24:32 +05301390
Debashis Dutt390645c2016-10-04 17:31:45 -07001391 /*
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301392 * save the wbm desc info in nbuf TLV. We will need this
1393 * info when we do the actual nbuf processing
Debashis Dutt390645c2016-10-04 17:31:45 -07001394 */
Balamurugan Mahalingam764219e2018-09-17 15:34:25 +05301395 hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301396 wbm_err_info.pool_id = rx_desc->pool_id;
1397 hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
1398 &wbm_err_info);
Debashis Dutt390645c2016-10-04 17:31:45 -07001399
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301400 rx_bufs_reaped[rx_desc->pool_id]++;
Debashis Dutt390645c2016-10-04 17:31:45 -07001401
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301402 DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
1403 dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
1404 &tail[rx_desc->pool_id],
1405 rx_desc);
1406 }
1407done:
1408 hal_srng_access_end(hal_soc, hal_ring);
Debashis Dutt390645c2016-10-04 17:31:45 -07001409
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301410 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
1411 if (rx_bufs_reaped[mac_id]) {
1412 dp_pdev = soc->pdev_list[mac_id];
1413 dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
1414 rx_desc_pool = &soc->rx_desc_buf[mac_id];
Debashis Dutt390645c2016-10-04 17:31:45 -07001415
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301416 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
1417 rx_desc_pool, rx_bufs_reaped[mac_id],
Venkata Sharath Chandra Manchala16fcceb2018-01-03 11:27:15 -08001418 &head[mac_id], &tail[mac_id]);
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301419 rx_bufs_used += rx_bufs_reaped[mac_id];
1420 }
1421 }
Ishank Jaine73c4032017-03-16 11:48:15 +05301422
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301423 nbuf = nbuf_head;
1424 while (nbuf) {
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05301425 struct dp_peer *peer;
1426 uint16_t peer_id;
1427
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301428 rx_tlv_hdr = qdf_nbuf_data(nbuf);
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05301429
1430 peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
1431 peer = dp_peer_find_by_id(soc, peer_id);
1432
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301433 /*
1434 * retrieve the wbm desc info from nbuf TLV, so we can
1435 * handle error cases appropriately
1436 */
1437 hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);
1438
Prathyusha Guduri02ed9482018-04-17 19:06:30 +05301439 /* Set queue_mapping in nbuf to 0 */
1440 dp_set_rx_queue(nbuf, 0);
1441
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301442 next = nbuf->next;
Mohit Khannaf085b612019-04-02 14:43:10 -07001443
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301444 if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
1445 if (wbm_err_info.reo_psh_rsn
1446 == HAL_RX_WBM_REO_PSH_RSN_ERROR) {
1447
1448 DP_STATS_INC(soc,
1449 rx.err.reo_error
1450 [wbm_err_info.reo_err_code], 1);
1451
1452 switch (wbm_err_info.reo_err_code) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301453 /*
1454 * Handling for packets which have NULL REO
1455 * queue descriptor
1456 */
1457 case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301458 pool_id = wbm_err_info.pool_id;
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05301459 dp_rx_null_q_desc_handle(soc, nbuf,
1460 rx_tlv_hdr,
1461 pool_id, peer);
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301462 nbuf = next;
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05301463 if (peer)
1464 dp_peer_unref_del_find_by_id(
1465 peer);
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301466 continue;
Debashis Dutt390645c2016-10-04 17:31:45 -07001467 /* TODO */
1468 /* Add per error code accounting */
sumedh baikadydf4a57c2018-04-08 22:19:22 -07001469 case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
1470 pool_id = wbm_err_info.pool_id;
Mohit Khannaf085b612019-04-02 14:43:10 -07001471
sumedh baikadydf4a57c2018-04-08 22:19:22 -07001472 if (hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)) {
1473 peer_id =
1474 hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
1475 tid =
1476 hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
1477 }
1478 dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr,
1479 peer_id, tid);
1480 nbuf = next;
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05301481 if (peer)
1482 dp_peer_unref_del_find_by_id(
1483 peer);
sumedh baikadydf4a57c2018-04-08 22:19:22 -07001484 continue;
Debashis Dutt390645c2016-10-04 17:31:45 -07001485 default:
Mohit Khannaf085b612019-04-02 14:43:10 -07001486 dp_err_rl("Got pkt with REO ERROR: %d",
1487 wbm_err_info.reo_err_code);
1488 break;
Debashis Dutt390645c2016-10-04 17:31:45 -07001489 }
1490 }
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301491 } else if (wbm_err_info.wbm_err_src ==
1492 HAL_RX_WBM_ERR_SRC_RXDMA) {
1493 if (wbm_err_info.rxdma_psh_rsn
1494 == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
1495 DP_STATS_INC(soc,
1496 rx.err.rxdma_error
1497 [wbm_err_info.rxdma_err_code], 1);
Debashis Dutt390645c2016-10-04 17:31:45 -07001498
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301499 switch (wbm_err_info.rxdma_err_code) {
Pamidipati, Vijayeb8a92c2017-05-01 00:55:56 +05301500 case HAL_RXDMA_ERR_UNENCRYPTED:
sumedh baikadyc2fa7c92018-12-28 15:26:08 -08001501
1502 case HAL_RXDMA_ERR_WIFI_PARSE:
1503 dp_rx_process_rxdma_err(soc, nbuf,
1504 rx_tlv_hdr, peer,
1505 wbm_err_info.rxdma_err_code);
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301506 nbuf = next;
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05301507 if (peer)
sumedh baikadyc2fa7c92018-12-28 15:26:08 -08001508 dp_peer_unref_del_find_by_id(peer);
Pamidipati, Vijayeb8a92c2017-05-01 00:55:56 +05301509 continue;
Debashis Dutt390645c2016-10-04 17:31:45 -07001510
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301511 case HAL_RXDMA_ERR_TKIP_MIC:
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05301512 dp_rx_process_mic_error(soc, nbuf,
1513 rx_tlv_hdr,
1514 peer);
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301515 nbuf = next;
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05301516 if (peer) {
Gurumoorthi Gnanasambandhan306de842017-12-19 08:47:40 +05301517 DP_STATS_INC(peer, rx.err.mic_err, 1);
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05301518 dp_peer_unref_del_find_by_id(
1519 peer);
1520 }
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301521 continue;
1522
1523 case HAL_RXDMA_ERR_DECRYPT:
Gurumoorthi Gnanasambandhan306de842017-12-19 08:47:40 +05301524 if (peer)
1525 DP_STATS_INC(peer, rx.err.decrypt_err, 1);
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301526 QDF_TRACE(QDF_MODULE_ID_DP,
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001527 QDF_TRACE_LEVEL_DEBUG,
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301528 "Packet received with Decrypt error");
1529 break;
1530
Debashis Dutt390645c2016-10-04 17:31:45 -07001531 default:
Mohit Khannaf085b612019-04-02 14:43:10 -07001532 dp_err_rl("RXDMA error %d",
1533 wbm_err_info.rxdma_err_code);
Debashis Dutt390645c2016-10-04 17:31:45 -07001534 }
1535 }
1536 } else {
1537 /* Should not come here */
1538 qdf_assert(0);
1539 }
Debashis Dutt390645c2016-10-04 17:31:45 -07001540
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05301541 if (peer)
1542 dp_peer_unref_del_find_by_id(peer);
1543
Balamurugan Mahalingamd0159642018-07-11 15:02:29 +05301544 hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
1545 QDF_TRACE_LEVEL_DEBUG);
chenguod70b7d92018-01-13 17:40:27 +08001546 qdf_nbuf_free(nbuf);
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301547 nbuf = next;
Debashis Dutt390645c2016-10-04 17:31:45 -07001548 }
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301549 return rx_bufs_used; /* Assume no scale factor for now */
Debashis Dutt390645c2016-10-04 17:31:45 -07001550}
Pramod Simhae382ff82017-06-05 18:09:26 -07001551
1552/**
nwzhaoea2ffbb2019-01-31 11:43:17 -08001553 * dup_desc_dbg() - dump and assert if duplicate rx desc found
1554 *
1555 * @soc: core DP main context
1556 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
1557 * @rx_desc: void pointer to rx descriptor
1558 *
1559 * Return: void
1560 */
1561static void dup_desc_dbg(struct dp_soc *soc,
1562 void *rxdma_dst_ring_desc,
1563 void *rx_desc)
1564{
1565 DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
1566 dp_rx_dump_info_and_assert(soc,
1567 soc->rx_rel_ring.hal_srng,
1568 rxdma_dst_ring_desc,
1569 rx_desc);
1570}
1571
1572/**
Pramod Simhae382ff82017-06-05 18:09:26 -07001573 * dp_rx_err_mpdu_pop() - extract the MSDU's from link descs
1574 *
1575 * @soc: core DP main context
1576 * @mac_id: mac id which is one of 3 mac_ids
1577 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
1578 * @head: head of descs list to be freed
1579 * @tail: tail of decs list to be freed
1580
1581 * Return: number of msdu in MPDU to be popped
1582 */
1583static inline uint32_t
1584dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
1585 void *rxdma_dst_ring_desc,
1586 union dp_rx_desc_list_elem_t **head,
1587 union dp_rx_desc_list_elem_t **tail)
1588{
1589 void *rx_msdu_link_desc;
1590 qdf_nbuf_t msdu;
1591 qdf_nbuf_t last;
1592 struct hal_rx_msdu_list msdu_list;
Karunakar Dasineni80cded82017-07-10 10:49:55 -07001593 uint16_t num_msdus;
Pramod Simhae382ff82017-06-05 18:09:26 -07001594 struct hal_buf_info buf_info;
1595 void *p_buf_addr_info;
1596 void *p_last_buf_addr_info;
1597 uint32_t rx_bufs_used = 0;
1598 uint32_t msdu_cnt;
1599 uint32_t i;
Karunakar Dasineni15a3d482017-07-12 19:19:40 -07001600 uint8_t push_reason;
1601 uint8_t rxdma_error_code = 0;
Tallapragada Kalyan00172912017-09-26 21:04:24 +05301602 uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08001603 struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
nwzhaoea2ffbb2019-01-31 11:43:17 -08001604 void *ring_desc;
Pramod Simhae382ff82017-06-05 18:09:26 -07001605
1606 msdu = 0;
1607
1608 last = NULL;
1609
1610 hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
Kai Chen634d53f2017-07-15 18:49:02 -07001611 &p_last_buf_addr_info, &msdu_cnt);
Pramod Simhae382ff82017-06-05 18:09:26 -07001612
Karunakar Dasineni15a3d482017-07-12 19:19:40 -07001613 push_reason =
1614 hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
1615 if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
1616 rxdma_error_code =
1617 hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
1618 }
1619
Pramod Simhae382ff82017-06-05 18:09:26 -07001620 do {
1621 rx_msdu_link_desc =
1622 dp_rx_cookie_2_link_desc_va(soc, &buf_info);
1623
1624 qdf_assert(rx_msdu_link_desc);
1625
Balamurugan Mahalingam3715aa42018-08-22 02:13:14 +05301626 hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
1627 &msdu_list, &num_msdus);
Pramod Simhae382ff82017-06-05 18:09:26 -07001628
Karunakar Dasineni15a3d482017-07-12 19:19:40 -07001629 if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
Tallapragada Kalyan00172912017-09-26 21:04:24 +05301630 /* if the msdus belongs to NSS offloaded radio &&
Venkata Sharath Chandra Manchala16fcceb2018-01-03 11:27:15 -08001631 * the rbm is not SW1_BM then return the msdu_link
Tallapragada Kalyan00172912017-09-26 21:04:24 +05301632 * descriptor without freeing the msdus (nbufs). let
1633 * these buffers be given to NSS completion ring for
1634 * NSS to free them.
1635 * else iterate through the msdu link desc list and
1636 * free each msdu in the list.
1637 */
1638 if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
1639 wlan_cfg_get_dp_pdev_nss_enabled(
1640 pdev->wlan_cfg_ctx))
1641 bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
1642 else {
1643 for (i = 0; i < num_msdus; i++) {
1644 struct dp_rx_desc *rx_desc =
1645 dp_rx_cookie_2_va_rxdma_buf(soc,
1646 msdu_list.sw_cookie[i]);
Pamidipati, Vijayb75e8102018-11-05 12:41:18 +05301647 qdf_assert_always(rx_desc);
Tallapragada Kalyan00172912017-09-26 21:04:24 +05301648 msdu = rx_desc->nbuf;
nwzhaoea2ffbb2019-01-31 11:43:17 -08001649 /*
1650 * this is a unlikely scenario
1651 * where the host is reaping
1652 * a descriptor which
1653 * it already reaped just a while ago
1654 * but is yet to replenish
1655 * it back to HW.
1656 * In this case host will dump
1657 * the last 128 descriptors
1658 * including the software descriptor
1659 * rx_desc and assert.
1660 */
1661 ring_desc = rxdma_dst_ring_desc;
1662 if (qdf_unlikely(!rx_desc->in_use)) {
1663 dup_desc_dbg(soc,
1664 ring_desc,
1665 rx_desc);
1666 continue;
1667 }
Pramod Simhae382ff82017-06-05 18:09:26 -07001668
Tallapragada Kalyan00172912017-09-26 21:04:24 +05301669 qdf_nbuf_unmap_single(soc->osdev, msdu,
1670 QDF_DMA_FROM_DEVICE);
Pramod Simhae382ff82017-06-05 18:09:26 -07001671
Tallapragada Kalyan00172912017-09-26 21:04:24 +05301672 QDF_TRACE(QDF_MODULE_ID_DP,
1673 QDF_TRACE_LEVEL_DEBUG,
Aditya Sathishded018e2018-07-02 16:25:21 +05301674 "[%s][%d] msdu_nbuf=%pK ",
Tallapragada Kalyan00172912017-09-26 21:04:24 +05301675 __func__, __LINE__, msdu);
Pramod Simhae382ff82017-06-05 18:09:26 -07001676
Tallapragada Kalyan00172912017-09-26 21:04:24 +05301677 qdf_nbuf_free(msdu);
1678 rx_bufs_used++;
1679 dp_rx_add_to_free_desc_list(head,
1680 tail, rx_desc);
1681 }
Karunakar Dasineni15a3d482017-07-12 19:19:40 -07001682 }
1683 } else {
1684 rxdma_error_code = HAL_RXDMA_ERR_WAR;
Pramod Simhae382ff82017-06-05 18:09:26 -07001685 }
1686
1687 hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info,
1688 &p_buf_addr_info);
1689
Tallapragada Kalyan00172912017-09-26 21:04:24 +05301690 dp_rx_link_desc_return(soc, p_last_buf_addr_info, bm_action);
Pramod Simhae382ff82017-06-05 18:09:26 -07001691 p_last_buf_addr_info = p_buf_addr_info;
1692
Karunakar Dasinenia7ee2c62017-10-31 09:05:44 -07001693 } while (buf_info.paddr);
Pramod Simhae382ff82017-06-05 18:09:26 -07001694
Karunakar Dasineni15a3d482017-07-12 19:19:40 -07001695 DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
1696
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301697 if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
1698 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1699 "Packet received with Decrypt error");
1700 }
1701
Pramod Simhae382ff82017-06-05 18:09:26 -07001702 return rx_bufs_used;
1703}
1704
1705/**
1706* dp_rxdma_err_process() - RxDMA error processing functionality
1707*
1708* @soc: core txrx main contex
1709* @mac_id: mac id which is one of 3 mac_ids
1710* @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
1711* @quota: No. of units (packets) that can be serviced in one shot.
1712
1713* Return: num of buffers processed
1714*/
1715uint32_t
1716dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
1717{
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08001718 struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08001719 int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);
Pramod Simhae382ff82017-06-05 18:09:26 -07001720 void *hal_soc;
1721 void *rxdma_dst_ring_desc;
1722 void *err_dst_srng;
1723 union dp_rx_desc_list_elem_t *head = NULL;
1724 union dp_rx_desc_list_elem_t *tail = NULL;
1725 struct dp_srng *dp_rxdma_srng;
1726 struct rx_desc_pool *rx_desc_pool;
1727 uint32_t work_done = 0;
1728 uint32_t rx_bufs_used = 0;
1729
Pramod Simhae382ff82017-06-05 18:09:26 -07001730 if (!pdev)
1731 return 0;
Nandha Kishore Easwarane03102f2018-08-22 22:23:00 +05301732
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08001733 err_dst_srng = pdev->rxdma_err_dst_ring[mac_for_pdev].hal_srng;
Pramod Simhae382ff82017-06-05 18:09:26 -07001734
1735 if (!err_dst_srng) {
1736 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1737 "%s %d : HAL Monitor Destination Ring Init \
Aditya Sathishded018e2018-07-02 16:25:21 +05301738 Failed -- %pK",
Pramod Simhae382ff82017-06-05 18:09:26 -07001739 __func__, __LINE__, err_dst_srng);
1740 return 0;
1741 }
1742
1743 hal_soc = soc->hal_soc;
1744
1745 qdf_assert(hal_soc);
1746
1747 if (qdf_unlikely(hal_srng_access_start(hal_soc, err_dst_srng))) {
1748 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1749 "%s %d : HAL Monitor Destination Ring Init \
Aditya Sathishded018e2018-07-02 16:25:21 +05301750 Failed -- %pK",
Pramod Simhae382ff82017-06-05 18:09:26 -07001751 __func__, __LINE__, err_dst_srng);
1752 return 0;
1753 }
1754
Pamidipati, Vijay3379c742017-11-01 18:50:43 +05301755 while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
1756 hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {
Pramod Simhae382ff82017-06-05 18:09:26 -07001757
1758 rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
1759 rxdma_dst_ring_desc,
1760 &head, &tail);
1761 }
1762
1763 hal_srng_access_end(hal_soc, err_dst_srng);
1764
1765 if (rx_bufs_used) {
1766 dp_rxdma_srng = &pdev->rx_refill_buf_ring;
1767 rx_desc_pool = &soc->rx_desc_buf[mac_id];
1768
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08001769 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
Venkata Sharath Chandra Manchala16fcceb2018-01-03 11:27:15 -08001770 rx_desc_pool, rx_bufs_used, &head, &tail);
1771
Pramod Simhae382ff82017-06-05 18:09:26 -07001772 work_done += rx_bufs_used;
1773 }
1774
1775 return work_done;
1776}