/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "dp_rx_defrag.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include <enet.h>	/* LLC_SNAP_HDR_LEN */
#include "qdf_net_types.h"

/* Max number of buffers in the invalid peer SG list */
#define DP_MAX_INVALID_BUFFERS 10

/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loop
 *			      back on same vap or a different vap.
 *
 * @soc: core DP main context
 * @peer: dp peer handler
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
					  struct dp_peer *peer,
					  uint8_t *rx_tlv_hdr,
					  qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_ast_entry *ase = NULL;
	uint16_t sa_idx = 0;
	uint8_t *data;

	/*
	 * Multicast Echo Check is required only if vdev is STA and
	 * received pkt is a multicast/broadcast pkt. Otherwise
	 * skip the MEC check.
	 */
	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
		return false;

	data = qdf_nbuf_data(nbuf);
	/*
	 * if the received pkt's src mac addr matches the vdev
	 * mac address, then drop the pkt as it is looped back
	 */
	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
			  vdev->mac_addr.raw,
			  QDF_MAC_ADDR_SIZE)))
		return true;

	/*
	 * In case of qwrap isolation mode, do not drop loopback packets.
	 * In isolation mode, all packets from the wired stations need to go
	 * to rootap and loop back to reach the wireless stations and
	 * vice-versa.
	 */
	if (qdf_unlikely(vdev->isolation_vdev))
		return false;

	/* if the received pkt's src mac addr matches the MAC addr of a
	 * wired PC behind the STA, or of a wireless STA behind the
	 * Repeater, then drop the pkt as it is looped back
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);

		if ((sa_idx < 0) ||
		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "invalid sa_idx: %d", sa_idx);
			qdf_assert_always(0);
		}

		ase = soc->ast_table[sa_idx];
		if (!ase) {
			/* We do not get a peer map event for STA and without
			 * this event we don't know what the STA's sa_idx is.
			 * For this reason the AST is still not associated to
			 * any index position in ast_table.
			 * In these kinds of scenarios where sa is valid but
			 * ast is not in ast_table, we use the below API to get
			 * the AST entry for STA's own mac_address.
			 */
			ase = dp_peer_ast_list_find(soc, peer,
						    &data[QDF_MAC_ADDR_SIZE]);
			if (ase) {
				ase->ast_idx = sa_idx;
				soc->ast_table[sa_idx] = ase;
				ase->is_mapped = TRUE;
			}
		}
	} else {
		ase = dp_peer_ast_hash_find_by_pdevid(soc,
						      &data[QDF_MAC_ADDR_SIZE],
						      vdev->pdev->pdev_id);
	}

	if (ase) {
		if (ase->pdev_id != vdev->pdev->pdev_id) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "Detected DBDC Root AP %pM, %d %d",
				  &data[QDF_MAC_ADDR_SIZE],
				  vdev->pdev->pdev_id, ase->pdev_id);
			return false;
		}

		if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
		    (ase->peer != peer)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "received pkt with same src mac %pM",
				  &data[QDF_MAC_ADDR_SIZE]);

			return true;
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
	return false;
}
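
/*
 * Illustrative caller-side sketch (not part of the driver): this mirrors
 * how the NULL-queue handler later in this file consumes the MEC verdict;
 * all identifiers are ones already used in this file.
 *
 *	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
 *		// looped back MC/BC frame: count it and drop it
 *		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
 *		qdf_nbuf_free(nbuf);
 *	}
 */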

/**
 * dp_rx_link_desc_return_by_addr() - Return an MPDU link descriptor to
 *				      HW (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: buffer manager action to program in the release descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
			       hal_buff_addrinfo_t link_desc_addr,
			       uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	hal_soc_handle_t hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "WBM RELEASE RING not initialized");
		return status;
	}

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
			  wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM)*/
		hal_rx_msdu_link_desc_set(hal_soc,
				src_srng_desc, link_desc_addr, bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			  *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
			  *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}

/**
 * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: buffer manager action to program in the release descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		       uint8_t bm_action)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}
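
/*
 * Usage sketch (for reference only): callers that still hold the REO
 * error ring descriptor use the wrapper above, e.g.
 *
 *	dp_rx_link_desc_return(soc, ring_desc,
 *			       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
 *
 * as dp_rx_msdus_drop() does below, while callers that only saved the
 * buffer address info call dp_rx_link_desc_return_by_addr() directly
 * with the same bm_action.
 */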

/**
 * dp_rx_msdus_drop() - Drops all MSDUs per MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: pointer updated with the pool id of the dropped buffers
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		 uint8_t *mac_id,
		 uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct dp_pdev *pdev;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &mpdu_desc_info->msdu_count);

	for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va_rxdma_buf(soc,
						    msdu_list.sw_cookie[i]);

		qdf_assert_always(rx_desc);

		/* all buffers from an MSDU link belong to same pdev */
		*mac_id = rx_desc->pool_id;
		pdev = soc->pdev_list[rx_desc->pool_id];

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		qdf_nbuf_unmap_single(soc->osdev,
				      rx_desc->nbuf, QDF_DMA_FROM_DEVICE);

		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
						rx_desc->rx_buf_start);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with PN error for tid :%d", tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
			hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);

		/* Just free the buffers */
		qdf_nbuf_free(rx_desc->nbuf);

		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
					    &pdev->free_list_tail, rx_desc);
	}

	/* Return link descriptor through WBM ring (SW2WBM)*/
	dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

	return rx_bufs_used;
}

/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: pointer updated with the pool id of the dropped buffers
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors,
 * or if DP decides that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO e.g. duplicate detection/routing to other cores
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      uint8_t *mac_id,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);

	peer = dp_peer_find_by_id(soc, peer_id);

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "discard rx due to PN error for peer %pK "
			  "(%02x:%02x:%02x:%02x:%02x:%02x)",
			  peer,
			  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
			  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
			  peer->mac_addr.raw[4], peer->mac_addr.raw[5]);

		dp_peer_unref_del_find_by_id(peer);
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Packet received with PN error");

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						mac_id, quota);

	return rx_bufs_used;
}

/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: pointer updated with the pool id of the dropped buffers
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling when the sequence number
 * of the MPDU jumps suddenly by 2K. Today there are 2 cases that
 * need to be handled:
 *	A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 *	B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A) the protocol stack is invoked to generate a DELBA/DEAUTH frame
 * For case B), the frame is normally dropped, no more action is taken
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		     struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		     uint8_t *mac_id, uint32_t quota)
{
	return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
				mac_id, quota);
}
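
/*
 * Dispatch sketch (an assumption, shown for orientation only): in this
 * driver generation the REO error-code switch in dp_rx_err_process() is
 * expected to route 2K-jump errors here, keyed on the HAL error code
 * HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
 *
 *	case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
 *		count = dp_rx_2k_jump_handle(soc, ring_desc,
 *					     &mpdu_desc_info,
 *					     &mac_id, quota);
 *		break;
 */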

#ifdef DP_INVALID_PEER_ASSERT
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
		do {                                \
			qdf_assert_always(!(head)); \
			qdf_assert_always(!(tail)); \
		} while (0)
#else
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
#endif

/**
 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu
 *			 to pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		  uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	/* TODO: Currently only single radio is supported, hence
	 * pdev hard coded to '0' index
	 */
	struct dp_pdev *dp_pdev = soc->pdev_list[mac_id];

	/* If the invalid peer SG list already holds the max number of
	 * buffers, free the buffers in the list and treat the current
	 * buffer as the start of a new list.
	 *
	 * The current logic to detect the last buffer from attn_tlv is not
	 * reliable in OFDMA UL scenarios, hence add a max buffers check to
	 * avoid list pile-up.
	 */
	if (!dp_pdev->first_nbuf ||
	    QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
		(dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->ppdu_id = hal_rx_hw_desc_get_ppduid_get(soc->hal_soc,
								 rx_tlv_hdr);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * a new a-msdu even before the previous a-msdu is completely
		 * received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			qdf_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;
		hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr,
				&(dp_pdev->ppdu_info.rx_status));

	}

	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
	    hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		qdf_assert_always(dp_pdev->first_nbuf == true);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	/*
	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
	 * should be NULL here; keep the check for debugging purposes
	 * in case of some corner case.
	 */
	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
					dp_pdev->invalid_peer_tail_msdu);
	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}
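
/*
 * Caller-side sketch (mirrors dp_rx_null_q_desc_handle() below): the
 * returned mpdu_done flag gates delivery of the accumulated invalid-peer
 * chain and the subsequent reset of the list head/tail.
 *
 *	mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
 *	dp_rx_process_invalid_peer_wrapper(soc,
 *					   pdev->invalid_peer_head_msdu,
 *					   mpdu_done, pool_id);
 */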

static
void dp_rx_wbm_err_handle_bar(struct dp_soc *soc,
			      struct dp_peer *peer,
			      qdf_nbuf_t nbuf)
{
	uint8_t *rx_tlv_hdr;
	unsigned char type, subtype;
	uint16_t start_seq_num;
	uint32_t tid;
	struct ieee80211_frame_bar *bar;

	/*
	 * 1. Is this a BAR frame? If not, discard it.
	 * 2. If it is, get the peer id, tid, ssn
	 * 2a. Do a tid update
	 */

	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr +
			sizeof(struct rx_pkt_tlvs));

	type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	if (!(type == IEEE80211_FC0_TYPE_CTL &&
	      subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
		dp_err_rl("Not a BAR frame!");
		return;
	}

	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
	qdf_assert_always(tid < DP_MAX_TIDS);

	start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;

	dp_info_rl("tid %u window_size %u start_seq_num %u",
		   tid, peer->rx_tid[tid].ba_win_size, start_seq_num);

	dp_rx_tid_update_wifi3(peer, tid,
			       peer->rx_tid[tid].ba_win_size,
			       start_seq_num);
}
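
/*
 * Worked example for the start_seq_num extraction above: the 16-bit BAR
 * sequence-control field i_seq carries the fragment number in bits 0..3
 * and the 12-bit starting sequence number in bits 4..15
 * (IEEE80211_SEQ_SEQ_SHIFT is 4). For a little-endian i_seq of 0x01b0,
 * le16toh() returns 0x01b0 and 0x01b0 >> 4 == 0x1b, so the BA window for
 * the tid is moved to start at sequence number 27.
 */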

/**
 * dp_2k_jump_handle() - Function to handle 2k jump exception
 *			 on WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer_id: peer id of first msdu
 * @tid: Tid for which exception occurred
 *
 * This function handles 2k jump violations arising out
 * of receiving aggregates in the non BA case. This typically
 * may happen if aggregates are received on a QOS enabled TID
 * while the Rx window size is still initialized to a value of 2. Or
 * it may also happen if the negotiated window size is 1 but the peer
 * sends aggregates.
 *
 */
void
dp_2k_jump_handle(struct dp_soc *soc,
		  qdf_nbuf_t nbuf,
		  uint8_t *rx_tlv_hdr,
		  uint16_t peer_id,
		  uint8_t tid)
{
	uint32_t ppdu_id;
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;

	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		goto free_nbuf;
	}
	rx_tid = &peer->rx_tid[tid];
	if (qdf_unlikely(!rx_tid)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "rx_tid is NULL!!");
		goto free_nbuf;
	}
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	ppdu_id = hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr);

	/*
	 * If a BA session is created and a non-aggregate packet
	 * lands here, then the issue is a sequence number mismatch.
	 * Proceed with delba even in that case.
	 */
	if (rx_tid->ppdu_id_2k != ppdu_id &&
	    rx_tid->ba_status != DP_RX_BA_ACTIVE) {
		rx_tid->ppdu_id_2k = ppdu_id;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		goto free_nbuf;
	}
	if (!rx_tid->delba_tx_status) {
		rx_tid->delba_tx_retry++;
		rx_tid->delba_tx_status = 1;
		rx_tid->delba_rcode =
			IEEE80211_REASON_QOS_SETUP_REQUIRED;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		if (soc->cdp_soc.ol_ops->send_delba)
			soc->cdp_soc.ol_ops->send_delba(
					peer->vdev->pdev->soc->ctrl_psoc,
					peer->vdev->vdev_id,
					peer->mac_addr.raw,
					tid,
					rx_tid->delba_rcode);
	} else {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}

free_nbuf:
	if (peer)
		dp_peer_unref_del_find_by_id(peer);
	qdf_nbuf_free(nbuf);
	return;
}

#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490)
/**
 * dp_rx_null_q_handle_invalid_peer_id_exception() - to find exception
 * @soc: pointer to dp_soc struct
 * @pool_id: Pool id to find dp_pdev
 * @rx_tlv_hdr: TLV header of received packet
 * @nbuf: SKB
 *
 * In certain types of packets, if the peer_id is not correct, the
 * driver may not be able to find the peer. Try finding the peer by
 * addr_2 of the received MPDU. If the peer is found that way, then most
 * likely the sw_peer_id & ast_idx are corrupted.
 *
 * Return: True if the peer is found by addr_2 of the received MPDU,
 *	   else false
 */
static bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	struct dp_peer *peer = NULL;
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
	struct dp_pdev *pdev = soc->pdev_list[pool_id];
	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;

	/*
	 * WAR: In certain types of packets, if the peer_id is not correct,
	 * the driver may not be able to find the peer. Try finding the peer
	 * by addr_2 of the received MPDU.
	 */
	if (wh)
		peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev,
					    wh->i_addr2);
	if (peer) {
		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
				 1, qdf_nbuf_len(nbuf));
		qdf_nbuf_free(nbuf);

		return true;
	}
	return false;
}

/**
 * dp_rx_null_q_check_pkt_len_exception() - Check for pktlen validity
 * @soc: DP SOC context
 * @pkt_len: computed length of the pkt from caller in bytes
 *
 * Return: true if pktlen > RX_BUFFER_SIZE, else return false
 *
 */
static inline
bool dp_rx_null_q_check_pkt_len_exception(struct dp_soc *soc, uint32_t pkt_len)
{
	if (qdf_unlikely(pkt_len > RX_BUFFER_SIZE)) {
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
				 1, pkt_len);
		return true;
	} else {
		return false;
	}
}

#else
static inline bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	return false;
}

static inline
bool dp_rx_null_q_check_pkt_len_exception(struct dp_soc *soc, uint32_t pkt_len)
{
	return false;
}

#endif
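
/*
 * Worked example for dp_rx_null_q_check_pkt_len_exception() (all numbers
 * illustrative; RX_PKT_TLVS_LEN and RX_BUFFER_SIZE are build-time
 * constants of this driver): with msdu_len = 1500, l2_hdr_offset = 2 and
 * a hypothetical RX_PKT_TLVS_LEN of 384, the caller computes
 * pkt_len = 1500 + 2 + 384 = 1886, which passes a 2048-byte
 * RX_BUFFER_SIZE check; any pkt_len above RX_BUFFER_SIZE bumps
 * rx.err.rx_invalid_pkt_len and the frame is dropped.
 */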

/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *				descriptor violation on either a
 *				REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 * @peer: peer handle
 *
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID, when the TID queue is set up. Or
 * it may also happen for MC/BC frames if they are not routed to the
 * non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination or WBM release ring.
 *
 * Return: QDF_STATUS_SUCCESS, if nbuf handled successfully. QDF status code
 *	   if nbuf could not be handled or was dropped.
 */
static QDF_STATUS
dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr, uint8_t pool_id,
			 struct dp_peer *peer)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint8_t tid;
	qdf_ether_header_t *eh;

	qdf_nbuf_set_rx_chfrag_start(nbuf,
				hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
							       rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
								 rx_tlv_hdr));
	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								  rx_tlv_hdr));
	qdf_nbuf_set_da_valid(nbuf,
			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));
	qdf_nbuf_set_sa_valid(nbuf,
			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
							   rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
		if (dp_rx_null_q_check_pkt_len_exception(soc, pkt_len))
			goto drop_nbuf;

		/* Set length in nbuf */
		qdf_nbuf_set_pktlen(nbuf,
				    qdf_min(pkt_len, (uint32_t)RX_BUFFER_SIZE));
		qdf_assert_always(nbuf->data == rx_tlv_hdr);
	}

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {

		dp_err_rl("MSDU DONE failure");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	if (!peer &&
	    dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
							  rx_tlv_hdr, nbuf))
		return QDF_STATUS_E_FAILURE;

	if (!peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = soc->pdev_list[pool_id];

		dp_err_rl("peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));

		mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc,
						   pdev->invalid_peer_head_msdu,
						   mpdu_done, pool_id);

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}
		return QDF_STATUS_E_FAILURE;
	}

	vdev = peer->vdev;
	if (!vdev) {
		dp_err_rl("Null vdev!");
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto drop_nbuf;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLV's
	 */
	if (qdf_nbuf_is_frag(nbuf))
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
	else
		qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		goto drop_nbuf;
	}

	/*
	 * In qwrap mode if the received packet matches with any of the vdev
	 * mac addresses, drop it. Do not receive multicast packets originated
	 * from any proxysta.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		goto drop_nbuf;
	}

	if (qdf_unlikely((peer->nawds_enabled == true) &&
			 hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr))) {
		dp_err_rl("free buffer for multicast packet");
		DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
		goto drop_nbuf;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
		dp_err_rl("mcast Policy Check Drop pkt");
		goto drop_nbuf;
	}

	/* WDS Source Port Learning */
	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		       vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf);

	if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
		tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
		if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned)
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		if (vdev->osif_rx) {
			qdf_nbuf_set_next(nbuf, NULL);
			DP_STATS_INC_PKT(peer, rx.to_stack, 1,
					 qdf_nbuf_len(nbuf));

			/*
			 * Update the protocol tag in SKB based on
			 * CCE metadata
			 */
			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
						  EXCEPTION_DEST_RING_ID,
						  true, true);

			/* Update the flow tag in SKB based on FSE metadata */
			dp_rx_update_flow_tag(soc, vdev, nbuf,
					      rx_tlv_hdr, true);

			if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
					 soc->hal_soc, rx_tlv_hdr) &&
					 (vdev->rx_decap_type ==
					  htt_cmn_pkt_type_ethernet))) {
				eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

				DP_STATS_INC_PKT(peer, rx.multicast, 1,
						 qdf_nbuf_len(nbuf));
				if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
					DP_STATS_INC_PKT(peer, rx.bcast, 1,
							 qdf_nbuf_len(nbuf));
				}
			}

			vdev->osif_rx(vdev->osif_vdev, nbuf);

		} else {
			dp_err_rl("INVALID osif_rx. vdev %pK", vdev);
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
			goto drop_nbuf;
		}
	}
	return QDF_STATUS_SUCCESS;

drop_nbuf:
	qdf_nbuf_free(nbuf);
	return QDF_STATUS_E_FAILURE;
}
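
/*
 * Note on the return contract (an observation from the paths above; the
 * WBM/REO caller itself is outside this section): on QDF_STATUS_SUCCESS
 * the nbuf was delivered to the OS or raw path; on QDF_STATUS_E_FAILURE
 * it was freed or parked on the pdev invalid-peer chain. Either way,
 * ownership of @nbuf has passed, so the caller must not touch it again.
 */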
916
917/**
sumedh baikadyc2fa7c92018-12-28 15:26:08 -0800918 * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
919 * frames to OS or wifi parse errors.
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +0530920 * @soc: core DP main context
921 * @nbuf: buffer pointer
922 * @rx_tlv_hdr: start of rx tlv header
923 * @peer: peer reference
sumedh baikadyc2fa7c92018-12-28 15:26:08 -0800924 * @err_code: rxdma err code
Keyur Parekhb8149a52019-04-16 21:30:25 -0700925 * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and
926 * pool_id has same mapping)
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +0530927 *
928 * Return: None
929 */
sumedh baikadyc2fa7c92018-12-28 15:26:08 -0800930void
931dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
932 uint8_t *rx_tlv_hdr, struct dp_peer *peer,
Keyur Parekhb8149a52019-04-16 21:30:25 -0700933 uint8_t err_code, uint8_t mac_id)
Pamidipati, Vijayeb8a92c2017-05-01 00:55:56 +0530934{
Pamidipati, Vijayeb8a92c2017-05-01 00:55:56 +0530935 uint32_t pkt_len, l2_hdr_offset;
936 uint16_t msdu_len;
Pamidipati, Vijayeb8a92c2017-05-01 00:55:56 +0530937 struct dp_vdev *vdev;
Srinivas Girigowda03bd4b62019-02-25 10:57:08 -0800938 qdf_ether_header_t *eh;
Gurumoorthi Gnanasambandhane1334b32018-12-07 12:14:17 +0530939 bool is_broadcast;
Pamidipati, Vijayeb8a92c2017-05-01 00:55:56 +0530940
Pamidipati, Vijayeb8a92c2017-05-01 00:55:56 +0530941 /*
942 * Check if DMA completed -- msdu_done is the last bit
943 * to be written
944 */
Tallapragada Kalyan94034632017-12-07 17:29:13 +0530945 if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
Pamidipati, Vijayeb8a92c2017-05-01 00:55:56 +0530946
947 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
948 FL("MSDU DONE failure"));
949
Balamurugan Mahalingamd0159642018-07-11 15:02:29 +0530950 hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
951 QDF_TRACE_LEVEL_INFO);
Pamidipati, Vijayeb8a92c2017-05-01 00:55:56 +0530952 qdf_assert(0);
953 }
954
Venkata Sharath Chandra Manchalaf05b2ae2019-09-20 17:25:21 -0700955 l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
956 rx_tlv_hdr);
Gurumoorthi Gnanasambandhan83873112018-03-15 14:42:51 +0530957 msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
958 pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
959
960 /* Set length in nbuf */
961 qdf_nbuf_set_pktlen(nbuf, pkt_len);
962
963 qdf_nbuf_set_next(nbuf, NULL);
964
965 qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
966 qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
967
Pamidipati, Vijayeb8a92c2017-05-01 00:55:56 +0530968 if (!peer) {
Jinwei Chena1f53042018-08-29 15:54:29 +0800969 QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL");
Pamidipati, Vijayeb8a92c2017-05-01 00:55:56 +0530970 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
971 qdf_nbuf_len(nbuf));
Gurumoorthi Gnanasambandhan83873112018-03-15 14:42:51 +0530972 /* Trigger invalid peer handler wrapper */
Keyur Parekhb8149a52019-04-16 21:30:25 -0700973 dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
Tallapragada Kalyan94034632017-12-07 17:29:13 +0530974 return;
Pamidipati, Vijayeb8a92c2017-05-01 00:55:56 +0530975 }
976
977 vdev = peer->vdev;
978 if (!vdev) {
979 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Jeff Johnson3f217e22017-09-18 10:13:35 -0700980 FL("INVALID vdev %pK OR osif_rx"), vdev);
Pamidipati, Vijayeb8a92c2017-05-01 00:55:56 +0530981 /* Drop & free packet */
982 qdf_nbuf_free(nbuf);
983 DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
Tallapragada Kalyan94034632017-12-07 17:29:13 +0530984 return;
Pamidipati, Vijayeb8a92c2017-05-01 00:55:56 +0530985 }
986
jiad268579c2018-11-28 16:42:28 +0800987 /*
988 * Advance the packet start pointer by total size of
989 * pre-header TLV's
990 */
991 qdf_nbuf_pull_head(nbuf, l2_hdr_offset + RX_PKT_TLVS_LEN);
992
sumedh baikadyc2fa7c92018-12-28 15:26:08 -0800993 if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
994 uint8_t *pkt_type;
995
996 pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
Tallapragada Kalyanab357a92019-08-14 20:28:03 +0530997 if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
998 if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
999 htons(QDF_LLC_STP)) {
1000 DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
1001 goto process_mesh;
1002 } else {
1003 goto process_rx;
1004 }
sumedh baikadyc2fa7c92018-12-28 15:26:08 -08001005 }
1006 }
Gurumoorthi Gnanasambandhane1334b32018-12-07 12:14:17 +05301007 if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
1008 goto process_mesh;
1009
jiad268579c2018-11-28 16:42:28 +08001010 /*
1011 * WAPI cert AP sends rekey frames as unencrypted.
1012 * Thus RXDMA will report unencrypted frame error.
1013 * To pass WAPI cert case, SW needs to pass unencrypted
1014 * rekey frame to stack.
1015 */
1016 if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
Gurumoorthi Gnanasambandhane1334b32018-12-07 12:14:17 +05301017 goto process_rx;
jiad268579c2018-11-28 16:42:28 +08001018 }
Gurumoorthi Gnanasambandhane1334b32018-12-07 12:14:17 +05301019 /*
1020 * In dynamic WEP case rekey frames are not encrypted
1021 * similar to WAPI. Allow EAPOL when 8021+wep is enabled and
1022 * key install is already done
1023 */
1024 if ((vdev->sec_type == cdp_sec_type_wep104) &&
1025 (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
1026 goto process_rx;
1027
1028process_mesh:
jiad268579c2018-11-28 16:42:28 +08001029
sumedh baikadyc2fa7c92018-12-28 15:26:08 -08001030 if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
Pamidipati, Vijayeb8a92c2017-05-01 00:55:56 +05301031 qdf_nbuf_free(nbuf);
1032 DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301033 return;
Pamidipati, Vijayeb8a92c2017-05-01 00:55:56 +05301034 }
1035
sumedh baikadyc2fa7c92018-12-28 15:26:08 -08001036 if (vdev->mesh_vdev) {
1037 if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
1038 == QDF_STATUS_SUCCESS) {
1039 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
1040 FL("mesh pkt filtered"));
1041 DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);
Venkateswara Swamy Bandaru1fecd152017-07-04 17:26:18 +05301042
sumedh baikadyc2fa7c92018-12-28 15:26:08 -08001043 qdf_nbuf_free(nbuf);
1044 return;
1045 }
1046 dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
Venkateswara Swamy Bandaru1fecd152017-07-04 17:26:18 +05301047 }
Gurumoorthi Gnanasambandhane1334b32018-12-07 12:14:17 +05301048process_rx:
Venkata Sharath Chandra Manchalaee909382019-09-20 10:52:37 -07001049 if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1050 rx_tlv_hdr) &&
1051 (vdev->rx_decap_type ==
Pranita Solankea5a3ae72018-01-18 21:45:27 +05301052 htt_cmn_pkt_type_ethernet))) {
Srinivas Girigowda03bd4b62019-02-25 10:57:08 -08001053 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
Srinivas Girigowda79502972019-02-11 12:25:12 -08001054 is_broadcast = (QDF_IS_ADDR_BROADCAST
Pranita Solankea5a3ae72018-01-18 21:45:27 +05301055 (eh->ether_dhost)) ? 1 : 0 ;
Amir Patel3217ade2018-09-07 12:21:35 +05301056 DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf));
Gurumoorthi Gnanasambandhane1334b32018-12-07 12:14:17 +05301057 if (is_broadcast) {
Pranita Solankea5a3ae72018-01-18 21:45:27 +05301058 DP_STATS_INC_PKT(peer, rx.bcast, 1,
1059 qdf_nbuf_len(nbuf));
1060 }
1061 }
1062
Pamidipati, Vijayeb8a92c2017-05-01 00:55:56 +05301063 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
1064 dp_rx_deliver_raw(vdev, nbuf, peer);
1065 } else {
Karunakar Dasineni142f9ba2019-03-19 23:04:59 -07001066 /* Update the protocol tag in SKB based on CCE metadata */
Karunakar Dasinenid8c7ad22019-04-18 18:15:02 -07001067 dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
1068 EXCEPTION_DEST_RING_ID, true, true);
Sumeet Raoc4fa4df2019-07-05 02:11:19 -07001069 /* Update the flow tag in SKB based on FSE metadata */
1070 dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
Tallapragada Kalyan5deeef22018-08-24 18:19:41 +05301071 DP_STATS_INC(peer, rx.to_stack.num, 1);
Pamidipati, Vijayeb8a92c2017-05-01 00:55:56 +05301072 vdev->osif_rx(vdev->osif_vdev, nbuf);
1073 }
Pamidipati, Vijayeb8a92c2017-05-01 00:55:56 +05301074
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301075 return;
Pamidipati, Vijayeb8a92c2017-05-01 00:55:56 +05301076}
1077
1078/**
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301079 * dp_rx_process_mic_error(): Function to pass mic error indication to umac
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05301080 * @soc: core DP main context
1081 * @nbuf: buffer pointer
1082 * @rx_tlv_hdr: start of rx tlv header
1083 * @peer: peer handle
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301084 *
1085 * return: void
1086 */
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05301087void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
1088 uint8_t *rx_tlv_hdr, struct dp_peer *peer)
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301089{
1090 struct dp_vdev *vdev = NULL;
1091 struct dp_pdev *pdev = NULL;
1092 struct ol_if_ops *tops = NULL;
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05301093 uint16_t rx_seq, fragno;
Prathyusha Guduri244eeac2019-10-23 13:13:58 +05301094 uint8_t is_raw;
Pramod Simha366c1e02018-06-20 11:55:50 -07001095 unsigned int tid;
1096 QDF_STATUS status;
Rakshith Suresh Patkard863f8d2019-07-16 16:30:59 +05301097 struct cdp_rx_mic_err_info mic_failure_info;
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301098
Venkata Sharath Chandra Manchalacb255b42019-09-21 11:03:38 -07001099 if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1100 rx_tlv_hdr))
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301101 return;
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301102
jiad9dee72a2017-12-05 13:39:25 +08001103 if (!peer) {
Mohit Khannaf085b612019-04-02 14:43:10 -07001104 dp_err_rl("peer not found");
jiad9dee72a2017-12-05 13:39:25 +08001105 goto fail;
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301106 }
1107
jiad9dee72a2017-12-05 13:39:25 +08001108 vdev = peer->vdev;
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301109 if (!vdev) {
Mohit Khannaf085b612019-04-02 14:43:10 -07001110 dp_err_rl("VDEV not found");
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301111 goto fail;
1112 }
1113
jiad9dee72a2017-12-05 13:39:25 +08001114 pdev = vdev->pdev;
1115 if (!pdev) {
Mohit Khannaf085b612019-04-02 14:43:10 -07001116 dp_err_rl("PDEV not found");
jiad9dee72a2017-12-05 13:39:25 +08001117 goto fail;
1118 }
1119
Prathyusha Guduri244eeac2019-10-23 13:13:58 +05301120 is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
1121 if (is_raw) {
1122 fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));
1123 /* Can get only last fragment */
1124 if (fragno) {
1125 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
1126 qdf_nbuf_data(nbuf));
1127 rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
1128 qdf_nbuf_data(nbuf));
Shashikala Prabhuecec78c2019-09-17 12:01:24 +05301129
Prathyusha Guduri244eeac2019-10-23 13:13:58 +05301130 status = dp_rx_defrag_add_last_frag(soc, peer,
1131 tid, rx_seq, nbuf);
1132 dp_info_rl("Frag pkt seq# %d frag# %d consumed "
1133 "status %d !", rx_seq, fragno, status);
1134 return;
1135 }
Pramod Simha366c1e02018-06-20 11:55:50 -07001136 }
1137
Venkata Sharath Chandra Manchalaaa762832019-09-21 15:13:47 -07001138 if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
Shashikala Prabhuecec78c2019-09-17 12:01:24 +05301139 &mic_failure_info.da_mac_addr.bytes[0])) {
1140 dp_err_rl("Failed to get da_mac_addr");
1141 goto fail;
1142 }
1143
Venkata Sharath Chandra Manchalaaa762832019-09-21 15:13:47 -07001144 if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
Shashikala Prabhuecec78c2019-09-17 12:01:24 +05301145 &mic_failure_info.ta_mac_addr.bytes[0])) {
1146 dp_err_rl("Failed to get ta_mac_addr");
1147 goto fail;
1148 }
1149
Rakshith Suresh Patkard863f8d2019-07-16 16:30:59 +05301150 mic_failure_info.key_id = 0;
1151 mic_failure_info.multicast =
Shashikala Prabhuecec78c2019-09-17 12:01:24 +05301152 IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
Rakshith Suresh Patkard863f8d2019-07-16 16:30:59 +05301153 qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
1154 mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
Shashikala Prabhuecec78c2019-09-17 12:01:24 +05301155 mic_failure_info.data = NULL;
Rakshith Suresh Patkard863f8d2019-07-16 16:30:59 +05301156 mic_failure_info.vdev_id = vdev->vdev_id;
1157
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301158 tops = pdev->soc->cdp_soc.ol_ops;
1159 if (tops->rx_mic_error)
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +05301160 tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id,
1161 &mic_failure_info);
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301162
1163fail:
Tallapragada Kalyan94034632017-12-07 17:29:13 +05301164 qdf_nbuf_free(nbuf);
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301165 return;
1166}

uint32_t
dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		  hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	uint32_t count = 0;
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	uint8_t buf_type;
	uint8_t error, rbm;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t cookie = 0;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
	uint16_t num_msdus;
	struct dp_rx_desc *rx_desc = NULL;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

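	/* Reap entries from the REO exception ring until either the quota
	 * is exhausted or the ring runs dry.
	 */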
	while (qdf_likely(quota-- && (ring_desc =
				      hal_srng_dst_get_next(hal_soc,
							    hal_ring_hdl)))) {

		DP_STATS_INC(soc, rx.err_ring_pkts, 1);

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		qdf_assert(error == HAL_REO_ERROR_DETECTED);

		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
		/*
		 * For REO error ring, expect only MSDU LINK DESC
		 */
		qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

		cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
					LINK_DESC_ID_START);

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
				     &num_msdus);

		if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
				 (msdu_list.rbm[0] !=
				  HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) {
			/* TODO */
			/* Call appropriate handler */
			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("Invalid RBM %d"),
					  msdu_list.rbm[0]);
			}

			/* Return link descriptor through WBM ring (SW2WBM)*/
			dp_rx_link_desc_return(soc, ring_desc,
					HAL_BM_ACTION_RELEASE_MSDU_LIST);
			continue;
		}

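		/* The first MSDU's SW cookie identifies the rx descriptor
		 * pool, which in turn tells us which pdev/mac this MPDU
		 * arrived on.
		 */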
		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc,
						      msdu_list.sw_cookie[0]);
		qdf_assert_always(rx_desc);

		mac_id = rx_desc->pool_id;

		/* Get the MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/*
			 * We only handle one msdu per link desc for the
			 * fragmented case. If the link desc carries more
			 * than one msdu, drop the msdus and release the
			 * link desc back.
			 */
			if (qdf_unlikely(num_msdus > 1)) {
				count = dp_rx_msdus_drop(soc, ring_desc,
							 &mpdu_desc_info,
							 &mac_id, quota);
				rx_bufs_reaped[mac_id] += count;
				continue;
			}

			count = dp_rx_frag_handle(soc,
						  ring_desc, &mpdu_desc_info,
						  rx_desc, &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			DP_STATS_INC(soc, rx.rx_frags, 1);
			continue;
		}

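		/* REO flagged a PN (packet number) check failure for this
		 * MPDU; the handler below drops or otherwise disposes of
		 * the offending MSDUs.
		 */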
		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.
				     reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
				     1);
			/* increment @pdev level */
			dp_pdev = dp_get_pdev_for_mac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);
			count = dp_rx_pn_error_handle(soc,
						      ring_desc,
						      &mpdu_desc_info, &mac_id,
						      quota);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}

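		/* REO observed a sequence number jump beyond the 2K window;
		 * hand off to the 2K-jump handler.
		 */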
		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.
				     reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
				     1);
			/* increment @pdev level */
			dp_pdev = dp_get_pdev_for_mac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);

			count = dp_rx_2k_jump_handle(soc,
						     ring_desc, &mpdu_desc_info,
						     &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}
	}

done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

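	/* Opportunistically flush fragments that have waited on the defrag
	 * waitlist past their timeout.
	 */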
	if (soc->rx.flags.defrag_timeout_check) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		if (now_ms >= soc->rx.defrag.next_flush_ms)
			dp_rx_defrag_waitlist_flush(soc);
	}

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&dp_pdev->free_list_head,
						&dp_pdev->free_list_tail);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}

uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t buf_type, rbm;
	uint32_t rx_buf_cookie;
	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf, next;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t pool_id;
	uint8_t tid = 0;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				      hal_srng_dst_get_next(hal_soc,
							    hal_ring_hdl)))) {

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_RXDMA) ||
			   (HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_REO));

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), rbm);
			continue;
		}

		rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert_always(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  rx_buf_cookie);
			continue;
		}

		/*
		 * This is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */
		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
		}

		nbuf = rx_desc->nbuf;
		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE);

		/*
		 * save the wbm desc info in nbuf TLV. We will need this
		 * info when we do the actual nbuf processing
		 */
		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
		wbm_err_info.pool_id = rx_desc->pool_id;
		hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
					       &wbm_err_info);

		rx_bufs_reaped[rx_desc->pool_id]++;

		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id],
					    rx_desc);
	}
done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&head[mac_id], &tail[mac_id]);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

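	/* Second pass: walk the saved nbuf list and dispatch each buffer
	 * according to the WBM error source and push reason recorded in
	 * its TLV area above.
	 */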
	nbuf = nbuf_head;
	while (nbuf) {
		struct dp_peer *peer;
		uint16_t peer_id;
		uint8_t e_code;
		uint8_t *tlv_hdr;

		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		/*
		 * retrieve the wbm desc info from nbuf TLV, so we can
		 * handle error cases appropriately
		 */
		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);

		peer_id = hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
							   rx_tlv_hdr);
		peer = dp_peer_find_by_id(soc, peer_id);

		if (!peer)
			dp_err_rl("peer is null! peer_id %u err_src %u err_rsn %u",
				  peer_id, wbm_err_info.wbm_err_src,
				  wbm_err_info.reo_psh_rsn);

		/* Set queue_mapping in nbuf to 0 */
		dp_set_rx_queue(nbuf, 0);

		next = nbuf->next;

		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err_info.reo_psh_rsn
			    == HAL_RX_WBM_REO_PSH_RSN_ERROR) {

				DP_STATS_INC(soc,
					     rx.err.reo_error
					     [wbm_err_info.reo_err_code], 1);
				/* increment @pdev level */
				pool_id = wbm_err_info.pool_id;
				dp_pdev = dp_get_pdev_for_mac_id(soc, pool_id);
				if (dp_pdev)
					DP_STATS_INC(dp_pdev, err.reo_error,
						     1);

				switch (wbm_err_info.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err_info.pool_id;
					dp_rx_null_q_desc_handle(soc, nbuf,
								 rx_tlv_hdr,
								 pool_id, peer);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(
									peer);
					continue;
				/* TODO */
				/* Add per error code accounting */
				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
					pool_id = wbm_err_info.pool_id;

					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
									   rx_tlv_hdr)) {
						peer_id =
						hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
										 rx_tlv_hdr);
						tid =
						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
					}
					dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr,
							  peer_id, tid);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(
									peer);
					continue;
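				/*
				 * BAR frames that trip the 2K-jump or
				 * out-of-range checks update the peer/TID
				 * reorder state via the BAR handler; the
				 * nbuf itself is freed at the bottom of
				 * the loop.
				 */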
				case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
				case HAL_REO_ERR_BAR_FRAME_OOR:
					if (peer)
						dp_rx_wbm_err_handle_bar(soc,
									 peer,
									 nbuf);
					break;

				default:
					dp_err_rl("Got pkt with REO ERROR: %d",
						  wbm_err_info.reo_err_code);
					break;
				}
			}
		} else if (wbm_err_info.wbm_err_src ==
			   HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err_info.rxdma_psh_rsn
			    == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.rxdma_error
					     [wbm_err_info.rxdma_err_code], 1);
				/* increment @pdev level */
				pool_id = wbm_err_info.pool_id;
				dp_pdev = dp_get_pdev_for_mac_id(soc, pool_id);
				if (dp_pdev)
					DP_STATS_INC(dp_pdev,
						     err.rxdma_error, 1);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:
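				/* fall through: unencrypted and wifi-parse
				 * errors share one handler
				 */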
				case HAL_RXDMA_ERR_WIFI_PARSE:
					pool_id = wbm_err_info.pool_id;
					dp_rx_process_rxdma_err(soc, nbuf,
								rx_tlv_hdr,
								peer,
								wbm_err_info.
								rxdma_err_code,
								pool_id);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(peer);
					continue;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc, nbuf,
								rx_tlv_hdr,
								peer);
					nbuf = next;
					if (peer) {
						DP_STATS_INC(peer, rx.err.mic_err, 1);
						dp_peer_unref_del_find_by_id(
									peer);
					}
					continue;

				case HAL_RXDMA_ERR_DECRYPT:
					pool_id = wbm_err_info.pool_id;
					e_code = wbm_err_info.rxdma_err_code;
					tlv_hdr = rx_tlv_hdr;
					if (peer) {
						DP_STATS_INC(peer, rx.err.
							     decrypt_err, 1);
					} else {
						dp_rx_process_rxdma_err(soc,
									nbuf,
									tlv_hdr,
									NULL,
									e_code,
									pool_id);
						nbuf = next;
						continue;
					}

					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "Packet received with Decrypt error");
					break;

				default:
					dp_err_rl("RXDMA error %d",
						  wbm_err_info.rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}


		if (peer)
			dp_peer_unref_del_find_by_id(peer);

		hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}
	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dup_desc_dbg() - dump and assert if duplicate rx desc found
 *
 * @soc: core DP main context
 * @rxdma_dst_ring_desc: rxdma destination ring descriptor carrying the
 *	link descriptor buf addr info
 * @rx_desc: void pointer to rx descriptor
 *
 * Return: void
 */
static void dup_desc_dbg(struct dp_soc *soc,
			 hal_rxdma_desc_t rxdma_dst_ring_desc,
			 void *rx_desc)
{
	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
	dp_rx_dump_info_and_assert(
			soc,
			soc->rx_rel_ring.hal_srng,
			hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
			rx_desc);
}

/**
 * dp_rx_err_mpdu_pop() - extract the MSDU's from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: rxdma destination ring descriptor carrying the
 *	link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdu in MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   hal_rxdma_desc_t rxdma_dst_ring_desc,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	hal_rxdma_desc_t ring_desc;

	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}

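	/* Walk the chain of MSDU link descriptors: each iteration frees (or
	 * hands off) the MSDUs of one link descriptor, then follows its
	 * next-link pointer until the chain ends.
	 */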
	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert(rx_msdu_link_desc);

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/* If the msdus belong to an NSS offloaded radio &&
			 * the rbm is not SW1_BM, then return the msdu_link
			 * descriptor without freeing the msdus (nbufs). Let
			 * these buffers be given to the NSS completion ring
			 * for NSS to free them.
			 * Else iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
			    wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						dp_rx_cookie_2_va_rxdma_buf(soc,
							msdu_list.sw_cookie[i]);
					qdf_assert_always(rx_desc);
					msdu = rx_desc->nbuf;
					/*
					 * This is an unlikely scenario where
					 * the host is reaping a descriptor
					 * which it already reaped just a
					 * while ago but is yet to replenish
					 * it back to HW. In this case host
					 * will dump the last 128 descriptors
					 * including the software descriptor
					 * rx_desc and assert.
					 */
					ring_desc = rxdma_dst_ring_desc;
					if (qdf_unlikely(!rx_desc->in_use)) {
						dup_desc_dbg(soc,
							     ring_desc,
							     rx_desc);
						continue;
					}

					qdf_nbuf_unmap_single(soc->osdev, msdu,
							      QDF_DMA_FROM_DEVICE);

					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "[%s][%d] msdu_nbuf=%pK ",
						  __func__, __LINE__, msdu);

					qdf_nbuf_free(msdu);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
								    tail, rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		/*
		 * Store the current link buffer in the local structure
		 * to be used for release purpose.
		 */
		hal_rxdma_buff_addr_info_set(rx_link_buf_info, buf_info.paddr,
					     buf_info.sw_cookie, buf_info.rbm);

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info);
		dp_rx_link_desc_return_by_addr(soc,
					       (hal_buff_addrinfo_t)
						rx_link_buf_info,
					       bm_action);
	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
	if (pdev)
		DP_STATS_INC(pdev, err.rxdma_error, 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with Decrypt error");
	}

	return rx_bufs_used;
}

uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		     uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);
	hal_rxdma_desc_t rxdma_dst_ring_desc;
	hal_soc_handle_t hal_soc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

	if (!pdev)
		return 0;

	err_dst_srng = pdev->rxdma_err_dst_ring[mac_for_pdev].hal_srng;

	if (!err_dst_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL RXDMA err dest ring not initialized -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL RXDMA err dest ring access failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {

		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						   rxdma_dst_ring_desc,
						   &head, &tail);
	}

	dp_srng_access_end(int_ctx, soc, err_dst_srng);

	if (rx_bufs_used) {
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_used,
					&head, &tail);

		work_done += rx_bufs_used;
	}

	return work_done;
}

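/**
 * dp_wbm_int_err_mpdu_pop() - free the MSDUs of an MPDU that hit a WBM
 *	internal error and return their rx descriptors to the free list
 * @soc: core DP main context
 * @mac_id: mac id / pool id of the MPDU's buffers
 * @rxdma_dst_ring_desc: descriptor carrying the link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdus freed
 */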
static inline uint32_t
dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
			hal_rxdma_desc_t rxdma_dst_ring_desc,
			union dp_rx_desc_list_elem_t **head,
			union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	uint32_t rx_bufs_used = 0, msdu_cnt, i;
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];

	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &msdu_cnt);

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		if (!rx_msdu_link_desc) {
			DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], 1);
			break;
		}

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			for (i = 0; i < num_msdus; i++) {
				struct dp_rx_desc *rx_desc =
					dp_rx_cookie_2_va_rxdma_buf(
							soc,
							msdu_list.sw_cookie[i]);
				qdf_assert_always(rx_desc);
				msdu = rx_desc->nbuf;

				qdf_nbuf_unmap_single(soc->osdev, msdu,
						      QDF_DMA_FROM_DEVICE);

				qdf_nbuf_free(msdu);
				rx_bufs_used++;
				dp_rx_add_to_free_desc_list(head,
							    tail, rx_desc);
			}
		}

		/*
		 * Store the current link buffer in the local structure
		 * to be used for release purpose.
		 */
		hal_rxdma_buff_addr_info_set(rx_link_buf_info, buf_info.paddr,
					     buf_info.sw_cookie, buf_info.rbm);

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info);
		dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t)
					       rx_link_buf_info,
					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	} while (buf_info.paddr);

	return rx_bufs_used;
}

/**
 * dp_handle_wbm_internal_error() - handles wbm_internal_error case
 *
 * @soc: core DP main context
 * @hal_desc: hal descriptor
 * @buf_type: indicates if the buffer is of type link desc or msdu
 *
 * Return: None
 *
 * wbm_internal_error is seen in following scenarios :
 *
 * 1. Null pointers detected in WBM_RELEASE_RING descriptors
 * 2. Null pointers detected during delinking process
 *
 * Some null pointer cases:
 *
 * a. MSDU buffer pointer is NULL
 * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag
 * c. MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL
 */
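/*
 * Usage sketch (illustrative, not taken verbatim from a caller): the WBM
 * release-ring reap path is expected to call this once HAL reports a
 * wbm_internal_error on a descriptor. The HAL accessor named below is an
 * assumption for illustration only.
 *
 *	if (hal_rx_wbm_internal_error_get(hal_soc, ring_desc))
 *		dp_handle_wbm_internal_error(soc, ring_desc,
 *				HAL_WBM_RELEASE_RING_2_BUFFER_TYPE);
 */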
1993dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
1994 uint32_t buf_type)
1995{
1996 struct hal_buf_info buf_info = {0};
1997 struct dp_pdev *dp_pdev;
1998 struct dp_rx_desc *rx_desc = NULL;
1999 uint32_t rx_buf_cookie;
2000 uint32_t rx_bufs_reaped = 0;
2001 union dp_rx_desc_list_elem_t *head = NULL;
2002 union dp_rx_desc_list_elem_t *tail = NULL;
2003 uint8_t pool_id;
2004
2005 hal_rx_reo_buf_paddr_get(hal_desc, &buf_info);
2006
2007 if (!buf_info.paddr) {
2008 DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], 1);
2009 return;
2010 }
2011
2012 rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(hal_desc);
2013 pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(rx_buf_cookie);
2014
2015 if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
2016 DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1);
2017 rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
2018
2019 if (rx_desc && rx_desc->nbuf) {
2020 qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
2021 QDF_DMA_FROM_DEVICE);
2022
2023 rx_desc->unmapped = 1;
2024
2025 qdf_nbuf_free(rx_desc->nbuf);
2026 dp_rx_add_to_free_desc_list(&head,
2027 &tail,
2028 rx_desc);
2029
2030 rx_bufs_reaped++;
2031 }
2032 } else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
2033 rx_bufs_reaped = dp_wbm_int_err_mpdu_pop(soc, pool_id,
2034 hal_desc,
2035 &head, &tail);
2036 }
2037
2038 if (rx_bufs_reaped) {
2039 struct rx_desc_pool *rx_desc_pool;
2040 struct dp_srng *dp_rxdma_srng;
2041
2042 DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED], 1);
2043 dp_pdev = soc->pdev_list[pool_id];
2044 dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
2045 rx_desc_pool = &soc->rx_desc_buf[pool_id];
2046
2047 dp_rx_buffers_replenish(soc, pool_id, dp_rxdma_srng,
2048 rx_desc_pool,
2049 rx_bufs_reaped,
2050 &head, &tail);
2051 }
2052}