/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_rx_mon.h"
#include "dp_ipa.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
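/* Save the RX TID in the nbuf priority field when ATH_RX_PRI_SAVE is
 * enabled; otherwise this compiles to a no-op.
 */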
#ifdef ATH_RX_PRI_SAVE
#define DP_RX_TID_SAVE(_nbuf, _tid) \
	(qdf_nbuf_set_priority(_nbuf, _tid))
#else
#define DP_RX_TID_SAVE(_nbuf, _tid)
#endif
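/**
 * dp_rx_check_ndi_mdns_fwding() - check if an mDNS frame may be forwarded
 * @ta_peer: transmitter (source) peer
 * @nbuf: received frame
 *
 * With DP_RX_DISABLE_NDI_MDNS_FORWARDING, IPv6 mDNS packets received on an
 * NDI vdev are not intra-BSS forwarded and rx.intra_bss.mdns_no_fwd is
 * incremented; otherwise forwarding is always allowed.
 *
 * Return: false if the frame must not be forwarded, true otherwise
 */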
#ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING
static inline
bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
{
	if (ta_peer->vdev->opmode == wlan_op_mode_ndi &&
	    qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) {
		DP_STATS_INC(ta_peer, rx.intra_bss.mdns_no_fwd, 1);
		return false;
	}
	return true;
}
#else
static inline
bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
{
	return true;
}
#endif
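/**
 * dp_rx_check_ap_bridge() - check if intra-BSS forwarding (AP bridging)
 *			     is enabled on the vdev
 * @vdev: DP vdev handle
 *
 * Return: true if ap_bridge_enabled is set for this vdev
 */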
static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	return vdev->ap_bridge_enabled;
}
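/**
 * dp_rx_dump_info_and_assert() - dump RX ring and descriptor state on error
 * @soc: core txrx main context
 * @hal_ring_hdl: HAL ring handle of the ring being processed
 * @ring_desc: ring descriptor that triggered the dump
 * @rx_desc: software rx descriptor associated with the ring entry
 *
 * With DUP_RX_DESC_WAR the dump is informational only; otherwise the whole
 * SRNG is also dumped and the driver asserts.
 */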
#ifdef DUP_RX_DESC_WAR
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc)
{
	void *hal_soc = soc->hal_soc;

	hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
	dp_rx_desc_dump(rx_desc);
}
#else
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring_hdl,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc)
{
	hal_soc_handle_t hal_soc = soc->hal_soc;

	dp_rx_desc_dump(rx_desc);
	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
	hal_srng_dump_ring(hal_soc, hal_ring_hdl);
	qdf_assert_always(0);
}
#endif
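/**
 * dp_rx_desc_sanity() - sanity check the rx descriptor for a ring entry
 * @soc: core txrx main context
 * @hal_soc: HAL soc handle
 * @hal_ring_hdl: HAL ring handle
 * @ring_desc: ring descriptor being processed
 * @rx_desc: software rx descriptor looked up from the cookie
 *
 * When RX_DESC_SANITY_WAR is enabled, verify that the cookie resolved to a
 * valid rx_desc and that the return buffer manager is one of the SW RBMs
 * (SW1/SW3). On failure the ring descriptor is dumped and the
 * rx.err.invalid_cookie counter is incremented.
 *
 * Return: QDF_STATUS_SUCCESS if the descriptor is sane,
 *	   QDF_STATUS_E_NULL_VALUE otherwise
 */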
#ifdef RX_DESC_SANITY_WAR
static inline
QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
			     hal_ring_handle_t hal_ring_hdl,
			     hal_ring_desc_t ring_desc,
			     struct dp_rx_desc *rx_desc)
{
	uint8_t return_buffer_manager;

	if (qdf_unlikely(!rx_desc)) {
		/*
		 * This is an unlikely case where the cookie obtained
		 * from the ring_desc is invalid and hence we are not
		 * able to find the corresponding rx_desc
		 */
		goto fail;
	}

	return_buffer_manager = hal_rx_ret_buf_manager_get(ring_desc);
	if (qdf_unlikely(!(return_buffer_manager == HAL_RX_BUF_RBM_SW1_BM ||
			   return_buffer_manager == HAL_RX_BUF_RBM_SW3_BM))) {
		goto fail;
	}

	return QDF_STATUS_SUCCESS;

fail:
	DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
	dp_err("Ring Desc:");
	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl,
				ring_desc);
	return QDF_STATUS_E_NULL_VALUE;
}
#else
static inline
QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
			     hal_ring_handle_t hal_ring_hdl,
			     hal_ring_desc_t ring_desc,
			     struct dp_rx_desc *rx_desc)
{
	return QDF_STATUS_SUCCESS;
}
#endif
/*
 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 * Return: success or failure
 */
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				   struct dp_srng *dp_rxdma_srng,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t num_req_buffers,
				   union dp_rx_desc_list_elem_t **desc_list,
				   union dp_rx_desc_list_elem_t **tail)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
	uint32_t num_entries_avail;
	uint32_t count;
	int sync_hw_ptr = 1;
	qdf_dma_addr_t paddr;
	qdf_nbuf_t rx_netbuf;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	QDF_STATUS ret;
	uint16_t buf_size = rx_desc_pool->buf_size;
	uint8_t buf_alignment = rx_desc_pool->buf_alignment;

	void *rxdma_srng;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (!rxdma_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "rxdma srng not initialized");
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "requested %d buffers for replenish", num_req_buffers);

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "no of available entries in rxdma ring: %d",
		  num_entries_avail);
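	/*
	 * If no descriptor list was passed in and more than 3/4 of the ring
	 * entries are free, top up the whole available space; otherwise, if
	 * fewer entries are free than requested, trim the request and return
	 * the excess descriptors to the free list.
	 */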
	if (!(*desc_list) && (num_entries_avail >
		((dp_rxdma_srng->num_entries * 3) / 4))) {
		num_req_buffers = num_entries_avail;
	} else if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	}

	if (qdf_unlikely(!num_req_buffers)) {
		num_desc_to_free = num_req_buffers;
		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
		goto free_descs;
	}

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "no free rx_descs in freelist");
			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
				     num_req_buffers);
			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
			return QDF_STATUS_E_NOMEM;
		}

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%d rx desc allocated", num_alloc_desc);
		num_req_buffers = num_alloc_desc;
	}

	count = 0;
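	/*
	 * For each requested buffer: allocate an nbuf, DMA-map it, attach it
	 * to a free rx descriptor and program its physical address and
	 * cookie into the next RXDMA ring entry.
	 */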
	while (count < num_req_buffers) {
		rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
					   buf_size,
					   RX_BUFFER_RESERVATION,
					   buf_alignment,
					   FALSE);

		if (qdf_unlikely(!rx_netbuf)) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			break;
		}

		ret = qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
					  QDF_DMA_FROM_DEVICE);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(rx_netbuf);
			DP_STATS_INC(dp_pdev, replenish.map_err, 1);
			continue;
		}

		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);

		dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, rx_netbuf, true);
		/*
		 * Check if the physical address of nbuf->data is
		 * less than 0x50000000; if so, free the nbuf and try
		 * allocating a new one. We can retry up to 100 times.
		 * This is a temporary WAR till it is fixed properly.
		 */
		ret = check_x86_paddr(dp_soc, &rx_netbuf, &paddr, rx_desc_pool);
		if (ret == QDF_STATUS_E_FAILURE) {
			DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
			break;
		}

		count++;

		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
							 rxdma_srng);
		qdf_assert_always(rxdma_ring_entry);

		next = (*desc_list)->next;

		dp_rx_desc_prep(&((*desc_list)->rx_desc), rx_netbuf);

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);

		(*desc_list)->rx_desc.in_use = 1;

		dp_verbose_debug("rx_netbuf=%pK, buf=%pK, paddr=0x%llx, cookie=%d",
				 rx_netbuf, qdf_nbuf_data(rx_netbuf),
				 (unsigned long long)paddr,
				 (*desc_list)->rx_desc.cookie);

		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;
	}

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u",
			 count, num_desc_to_free);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);

free_descs:
	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}
/*
 * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
 *			 pkts to RAW mode simulation to
 *			 decapsulate the pkt.
 *
 * @vdev: vdev on which RAW mode is enabled
 * @nbuf_list: list of RAW pkts to process
 * @peer: peer object from which the pkt is rx
 *
 * Return: void
 */
void
dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		  struct dp_peer *peer)
{
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	qdf_nbuf_t nbuf;

	nbuf = nbuf_list;
	while (nbuf) {
		qdf_nbuf_t next = qdf_nbuf_next(nbuf);

		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);

		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
		DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf));
		/*
		 * reset the chfrag_start and chfrag_end bits in nbuf cb
		 * as this is a non-amsdu pkt and RAW mode simulation expects
		 * these bits to be 0 for non-amsdu pkts.
		 */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
		    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
			qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
		}

		nbuf = next;
	}

	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
				 &deliver_list_tail, peer->mac_addr.raw);

	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
}

#ifdef DP_LFR
/*
 * In case of LFR, data of a new peer might be sent up
 * even before peer is added.
 */
static inline struct dp_vdev *
dp_get_vdev_from_peer(struct dp_soc *soc,
		      uint16_t peer_id,
		      struct dp_peer *peer,
		      struct hal_rx_mpdu_desc_info mpdu_desc_info)
{
	struct dp_vdev *vdev;
	uint8_t vdev_id;

	if (unlikely(!peer)) {
		if (peer_id != HTT_INVALID_PEER) {
			vdev_id = DP_PEER_METADATA_VDEV_ID_GET(
					mpdu_desc_info.peer_meta_data);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("PeerID %d not found use vdevID %d"),
				  peer_id, vdev_id);
			vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
								  vdev_id);
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("Invalid PeerID %d"),
				  peer_id);
			return NULL;
		}
	} else {
		vdev = peer->vdev;
	}
	return vdev;
}
#else
static inline struct dp_vdev *
dp_get_vdev_from_peer(struct dp_soc *soc,
		      uint16_t peer_id,
		      struct dp_peer *peer,
		      struct hal_rx_mpdu_desc_info mpdu_desc_info)
{
	if (unlikely(!peer)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_DEBUG,
			  FL("Peer not found for peerID %d"),
			  peer_id);
		return NULL;
	} else {
		return peer->vdev;
	}
}
#endif
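/**
 * dp_rx_da_learn() - stub for destination address (DA) based learning
 * @soc: core txrx main context
 * @rx_tlv_hdr: start address of rx tlvs
 * @ta_peer: transmitter peer
 * @nbuf: received frame
 *
 * No-op when FEATURE_WDS is not enabled.
 */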
#ifndef FEATURE_WDS
static void
dp_rx_da_learn(struct dp_soc *soc,
	       uint8_t *rx_tlv_hdr,
	       struct dp_peer *ta_peer,
	       qdf_nbuf_t nbuf)
{
}
#endif
/*
 * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
 *
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: msdu metadata parsed from the rx descriptor
 *
 * Return: bool: true if it is forwarded else false
 */
static bool
dp_rx_intrabss_fwd(struct dp_soc *soc,
		   struct dp_peer *ta_peer,
		   uint8_t *rx_tlv_hdr,
		   qdf_nbuf_t nbuf,
		   struct hal_rx_msdu_metadata msdu_metadata)
{
	uint16_t len;
	uint8_t is_frag;
	struct dp_peer *da_peer;
	struct dp_ast_entry *ast_entry;
	qdf_nbuf_t nbuf_copy;
	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
	struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
					tid_stats.tid_rx_stats[ring_id][tid];

	/* check if the destination peer is available in peer table
	 * and also check if the source peer and destination peer
	 * belong to the same vap and destination peer is not bss peer.
	 */

	if ((qdf_nbuf_is_da_valid(nbuf) && !qdf_nbuf_is_da_mcbc(nbuf))) {

		ast_entry = soc->ast_table[msdu_metadata.da_idx];
		if (!ast_entry)
			return false;

		if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
			ast_entry->is_active = TRUE;
			return false;
		}

		da_peer = ast_entry->peer;

		if (!da_peer)
			return false;
		/* TA peer cannot be same as peer(DA) on which AST is present
		 * this indicates a change in topology and that AST entries
		 * are yet to be updated.
		 */
		if (da_peer == ta_peer)
			return false;

		if (da_peer->vdev == ta_peer->vdev && !da_peer->bss_peer) {
			len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			is_frag = qdf_nbuf_is_frag(nbuf);
			memset(nbuf->cb, 0x0, sizeof(nbuf->cb));

			/* linearize the nbuf just before we send to
			 * dp_tx_send()
			 */
			if (qdf_unlikely(is_frag)) {
				if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
					return false;

				nbuf = qdf_nbuf_unshare(nbuf);
				if (!nbuf) {
					DP_STATS_INC_PKT(ta_peer,
							 rx.intra_bss.fail,
							 1,
							 len);
					/* return true even though the pkt is
					 * not forwarded. Basically skb_unshare
					 * failed and we want to continue with
					 * next nbuf.
					 */
					tid_stats->fail_cnt[INTRABSS_DROP]++;
					return true;
				}
			}

			if (!dp_tx_send((struct cdp_soc_t *)soc,
					ta_peer->vdev->vdev_id, nbuf)) {
				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
						 len);
				return true;
			} else {
				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
						 len);
				tid_stats->fail_cnt[INTRABSS_DROP]++;
				return false;
			}
		}
	}
	/* if it is a broadcast pkt (eg: ARP) and it is not its own
	 * source, then clone the pkt and send the cloned pkt for
	 * intra BSS forwarding and original pkt up the network stack
	 * Note: how do we handle multicast pkts. do we forward
	 * all multicast pkts as is or let a higher layer module
	 * like igmpsnoop decide whether to forward or not with
	 * Mcast enhancement.
	 */
	else if (qdf_unlikely((qdf_nbuf_is_da_mcbc(nbuf) &&
			       !ta_peer->bss_peer))) {
		if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf))
			goto end;

		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (!nbuf_copy)
			goto end;

		len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
		memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));

		/* Set cb->ftype to intrabss FWD */
		qdf_nbuf_set_tx_ftype(nbuf_copy, CB_FTYPE_INTRABSS_FWD);
		if (dp_tx_send((struct cdp_soc_t *)soc,
			       ta_peer->vdev->vdev_id, nbuf_copy)) {
			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
			tid_stats->fail_cnt[INTRABSS_DROP]++;
			qdf_nbuf_free(nbuf_copy);
		} else {
			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
			tid_stats->intrabss_cnt++;
		}
	}

end:
	/* return false as we have to still send the original pkt
	 * up the stack
	 */
	return false;
}

#ifdef MESH_MODE_SUPPORT

/**
 * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: pointer to peer
 *
 * This function allocates memory for mesh receive stats and fills the
 * required stats. Stores the memory address in skb cb.
 *
 * Return: void
 */

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	struct mesh_recv_hdr_s *rx_info = NULL;
	uint32_t pkt_type;
	uint32_t nss;
	uint32_t rate_mcs;
	uint32_t bw;

	/* fill recv mesh stats */
	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));

	/* upper layers are responsible for freeing this memory */

	if (!rx_info) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Memory allocation failed for mesh rx stats");
		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
		return;
	}

	rx_info->rs_flags = MESH_RXHDR_VER1;
	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;

	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
		rx_info->rs_flags |= MESH_RX_LAST_MSDU;

	if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) {
		rx_info->rs_flags |= MESH_RX_DECRYPTED;
		rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr);
		if (vdev->osif_get_key)
			vdev->osif_get_key(vdev->osif_vdev,
					   &rx_info->rs_decryptkey[0],
					   &peer->mac_addr.raw[0],
					   rx_info->rs_keyix);
	}

	rx_info->rs_rssi = hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
	rx_info->rs_channel = hal_rx_msdu_start_get_freq(rx_tlv_hdr);
	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
	rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(vdev->pdev->soc->hal_soc, rx_tlv_hdr);
	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
				(bw << 24);

	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
		  FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x"),
		  rx_info->rs_flags,
		  rx_info->rs_rssi,
		  rx_info->rs_channel,
		  rx_info->rs_ratephy1,
		  rx_info->rs_keyix);

}

/**
 * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * This checks if the received packet matches any of the filter-out
 * categories and drops the packet if it matches.
 *
 * Return: status (0 indicates drop, 1 indicates no drop)
 */

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	union dp_align_mac_addr mac_addr;
	struct dp_soc *soc = vdev->pdev->soc;

	if (qdf_unlikely(vdev->mesh_rx_filter)) {
		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
			if (hal_rx_mpdu_get_fr_ds(soc->hal_soc,
						  rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
			if (hal_rx_mpdu_get_to_ds(soc->hal_soc,
						  rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
			if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc,
						   rx_tlv_hdr) &&
			    !hal_rx_mpdu_get_to_ds(soc->hal_soc,
						   rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
			if (hal_rx_mpdu_get_addr1(soc->hal_soc,
						  rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 QDF_MAC_ADDR_SIZE))
				return QDF_STATUS_SUCCESS;
		}

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
			if (hal_rx_mpdu_get_addr2(soc->hal_soc,
						  rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 QDF_MAC_ADDR_SIZE))
				return QDF_STATUS_SUCCESS;
		}
	}

	return QDF_STATUS_E_FAILURE;
}

#else
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
}

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	return QDF_STATUS_E_FAILURE;
}

#endif

#ifdef FEATURE_NAC_RSSI
/**
 * dp_rx_nac_filter(): Function to perform filtering of non-associated
 * clients
 * @pdev: DP pdev handle
 * @rx_pkt_hdr: Rx packet Header
 *
 * return: dp_vdev*
 */
static
struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
				 uint8_t *rx_pkt_hdr)
{
	struct ieee80211_frame *wh;
	struct dp_neighbour_peer *peer = NULL;

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
		return NULL;

	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
		      neighbour_peer_list_elem) {
		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) {
			QDF_TRACE(
				QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				FL("NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x"),
				peer->neighbour_peers_macaddr.raw[0],
				peer->neighbour_peers_macaddr.raw[1],
				peer->neighbour_peers_macaddr.raw[2],
				peer->neighbour_peers_macaddr.raw[3],
				peer->neighbour_peers_macaddr.raw[4],
				peer->neighbour_peers_macaddr.raw[5]);

			qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

			return pdev->monitor_vdev;
		}
	}
	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

	return NULL;
}

/**
 * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
 *	    pool_id have the same mapping)
 *
 * return: integer type
 */
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
				   uint8_t mac_id)
{
	struct dp_invalid_peer_msg msg;
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ieee80211_frame *wh;
	qdf_nbuf_t curr_nbuf, next_nbuf;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);

	rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);

	if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "Drop decapped frames");
		goto free;
	}

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "NAWDS valid only for data frames");
		goto free;
	}

	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid nbuf length");
		goto free;
	}

	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

	if (!pdev || qdf_unlikely(pdev->is_pdev_down)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "PDEV %s", !pdev ? "not found" : "down");
		goto free;
	}

	if (pdev->filter_neighbour_peers) {
		/* Next Hop scenario not yet handled */
		vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
		if (vdev) {
			dp_rx_mon_deliver(soc, pdev->pdev_id,
					  pdev->invalid_peer_head_msdu,
					  pdev->invalid_peer_tail_msdu);

			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;

			return 0;
		}
	}

	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

		if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
				QDF_MAC_ADDR_SIZE) == 0) {
			goto out;
		}
	}

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto free;
	}

out:
	msg.wh = wh;
	qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN);
	msg.nbuf = mpdu;
	msg.vdev_id = vdev->vdev_id;
	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer)
		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(
				(struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc,
				pdev->pdev_id, &msg);

free:
	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	return 0;
}

/**
 * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 * @mpdu_done: if an mpdu is completed
 * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
 *	    pool_id have the same mapping)
 *
 * return: integer type
 */
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done,
					uint8_t mac_id)
{
	/* Only trigger the process when mpdu is completed */
	if (mpdu_done)
		dp_rx_process_invalid_peer(soc, mpdu, mac_id);
}
#else
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
				   uint8_t mac_id)
{
	qdf_nbuf_t curr_nbuf, next_nbuf;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev = NULL;
	struct ieee80211_frame *wh;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
				   "only for data frames");
		goto free;
	}

	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid nbuf length");
		goto free;
	}

	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_ERROR,
			  "PDEV not found");
		goto free;
	}

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
		if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
				QDF_MAC_ADDR_SIZE) == 0) {
			qdf_spin_unlock_bh(&pdev->vdev_list_lock);
			goto out;
		}
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto free;
	}

out:
	if (soc->cdp_soc.ol_ops->rx_invalid_peer)
		soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
free:
	/* reset the head and tail pointers */
	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	if (pdev) {
		pdev->invalid_peer_head_msdu = NULL;
		pdev->invalid_peer_tail_msdu = NULL;
	}

	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	/* Reset the head and tail pointers */
	pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	if (pdev) {
		pdev->invalid_peer_head_msdu = NULL;
		pdev->invalid_peer_tail_msdu = NULL;
	}

	return 0;
}

void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done,
					uint8_t mac_id)
{
	/* Process the nbuf */
	dp_rx_process_invalid_peer(soc, mpdu, mac_id);
}
#endif

#ifdef RECEIVE_OFFLOAD
/**
 * dp_rx_print_offload_info() - Print offload info from RX TLV
 * @soc: dp soc handle
 * @rx_tlv: RX TLV for which offload information is to be printed
 *
 * Return: None
 */
static void dp_rx_print_offload_info(struct dp_soc *soc, uint8_t *rx_tlv)
{
	dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------");
	dp_verbose_debug("lro_eligible 0x%x", HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv));
	dp_verbose_debug("pure_ack 0x%x", HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv));
	dp_verbose_debug("chksum 0x%x", hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
								  rx_tlv));
	dp_verbose_debug("TCP seq num 0x%x", HAL_RX_TLV_GET_TCP_SEQ(rx_tlv));
	dp_verbose_debug("TCP ack num 0x%x", HAL_RX_TLV_GET_TCP_ACK(rx_tlv));
	dp_verbose_debug("TCP window 0x%x", HAL_RX_TLV_GET_TCP_WIN(rx_tlv));
	dp_verbose_debug("TCP protocol 0x%x", HAL_RX_TLV_GET_TCP_PROTO(rx_tlv));
	dp_verbose_debug("TCP offset 0x%x", HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv));
	dp_verbose_debug("toeplitz 0x%x", HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv));
	dp_verbose_debug("---------------------------------------------------------");
}

/**
 * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
 * @soc: DP SOC handle
 * @rx_tlv: RX TLV received for the msdu
 * @msdu: msdu for which GRO info needs to be filled
 * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets
 *
 * Return: None
 */
static
void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
{
	if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
		return;

	/* Filling up RX offload info only for TCP packets */
	if (!HAL_RX_TLV_GET_TCP_PROTO(rx_tlv))
		return;

	*rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1;

	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
		HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv);
	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
		HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv);
	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
		hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
					  rx_tlv);
	QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
		HAL_RX_TLV_GET_TCP_SEQ(rx_tlv);
	QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
		HAL_RX_TLV_GET_TCP_ACK(rx_tlv);
	QDF_NBUF_CB_RX_TCP_WIN(msdu) =
		HAL_RX_TLV_GET_TCP_WIN(rx_tlv);
	QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
		HAL_RX_TLV_GET_TCP_PROTO(rx_tlv);
	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
		HAL_RX_TLV_GET_IPV6(rx_tlv);
	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
		HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv);
	QDF_NBUF_CB_RX_FLOW_ID(msdu) =
		HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv);

	dp_rx_print_offload_info(soc, rx_tlv);
}
#else
static void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
				qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
{
}
#endif /* RECEIVE_OFFLOAD */

/**
 * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
 *
 * @nbuf: pointer to msdu.
 * @mpdu_len: mpdu length
 *
 * Return: returns true if nbuf is last msdu of mpdu else returns false.
 */
static inline bool dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len)
{
	bool last_nbuf;

	if (*mpdu_len > (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN)) {
		qdf_nbuf_set_pktlen(nbuf, RX_DATA_BUFFER_SIZE);
		last_nbuf = false;
	} else {
		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN));
		last_nbuf = true;
	}

	*mpdu_len -= (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN);

	return last_nbuf;
}

/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		       multiple nbufs.
 * @nbuf: pointer to the first msdu of an amsdu.
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf)
{
	qdf_nbuf_t parent, frag_list, next = NULL;
	uint16_t frag_list_len = 0;
	uint16_t mpdu_len;
	bool last_nbuf;

	/*
	 * Use the msdu length from the REO entry descriptor instead, since
	 * there are cases where the RX PKT TLV is corrupted while the
	 * msdu_len from the REO descriptor is correct for non-raw RX
	 * scatter msdus.
	 */
	mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
	/*
	 * this is a case where the complete msdu fits in one single nbuf.
	 * in this case HW sets both start and end bit and we only need to
	 * reset these bits for RAW mode simulator to decap the pkt
	 */
	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
	    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
		qdf_nbuf_set_pktlen(nbuf, mpdu_len + RX_PKT_TLVS_LEN);
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		return nbuf;
	}

	/*
	 * This is a case where we have multiple msdus (A-MSDU) spread across
	 * multiple nbufs. here we create a fraglist out of these nbufs.
	 *
	 * the moment we encounter a nbuf with continuation bit set we
	 * know for sure we have an MSDU which is spread across multiple
	 * nbufs. We loop through and reap nbufs till we reach last nbuf.
	 */
	parent = nbuf;
	frag_list = nbuf->next;
	nbuf = nbuf->next;

	/*
	 * set the start bit in the first nbuf we encounter with continuation
	 * bit set. This has the proper mpdu length set as it is the first
	 * msdu of the mpdu. this becomes the parent nbuf and the subsequent
	 * nbufs will form the frag_list of the parent nbuf.
	 */
	qdf_nbuf_set_rx_chfrag_start(parent, 1);
	last_nbuf = dp_rx_adjust_nbuf_len(parent, &mpdu_len);

	/*
	 * this is where we set the length of the fragments which are
	 * associated to the parent nbuf. We iterate through the frag_list
	 * till we hit the last_nbuf of the list.
	 */
	do {
		last_nbuf = dp_rx_adjust_nbuf_len(nbuf, &mpdu_len);
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		frag_list_len += qdf_nbuf_len(nbuf);

		if (last_nbuf) {
			next = nbuf->next;
			nbuf->next = NULL;
			break;
		}

		nbuf = nbuf->next;
	} while (!last_nbuf);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
	qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
	parent->next = next;

	qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
	return parent;
}

/**
 * dp_rx_compute_delay() - Compute and fill in all timestamps
 *			   to pass in correct fields
 *
 * @vdev: DP vdev handle
 * @nbuf: rx buffer for which the delays are computed
 *
 * Return: none
 */
void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
	int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get());
	uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
	uint32_t interframe_delay =
		(uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp);

	dp_update_delay_stats(vdev->pdev, to_stack, tid,
			      CDP_DELAY_STATS_REAP_STACK, ring_id);
	/*
	 * Update interframe delay stats calculated at deliver_data_ol point.
	 * Value of vdev->prev_rx_deliver_tstamp will be 0 for 1st frame, so
	 * interframe delay will not be calculated correctly for 1st frame.
	 * On the other hand, this avoids an extra per-packet check
	 * of vdev->prev_rx_deliver_tstamp.
	 */
	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
			      CDP_DELAY_STATS_RX_INTERFRAME, ring_id);
	vdev->prev_rx_deliver_tstamp = current_ts;
}

/**
 * dp_rx_drop_nbuf_list() - drop an nbuf list
 * @pdev: dp pdev reference
 * @buf_list: buffer list to be dropped
 *
 * Return: int (number of bufs dropped)
 */
static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev,
				       qdf_nbuf_t buf_list)
{
	struct cdp_tid_rx_stats *stats = NULL;
	uint8_t tid = 0, ring_id = 0;
	int num_dropped = 0;
	qdf_nbuf_t buf, next_buf;

	buf = buf_list;
	while (buf) {
		ring_id = QDF_NBUF_CB_RX_CTX_ID(buf);
		next_buf = qdf_nbuf_queue_next(buf);
		tid = qdf_nbuf_get_tid_val(buf);
		if (qdf_likely(pdev)) {
			stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
			stats->fail_cnt[INVALID_PEER_VDEV]++;
			stats->delivered_to_stack--;
		}
		qdf_nbuf_free(buf);
		buf = next_buf;
		num_dropped++;
	}

	return num_dropped;
}

1226#ifdef PEER_CACHE_RX_PKTS
1227/**
1228 * dp_rx_flush_rx_cached() - flush cached rx frames
1229 * @peer: peer
1230 * @drop: flag to drop frames or forward to net stack
1231 *
1232 * Return: None
1233 */
1234void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
1235{
1236 struct dp_peer_cached_bufq *bufqi;
1237 struct dp_rx_cached_buf *cache_buf = NULL;
1238 ol_txrx_rx_fp data_rx = NULL;
1239 int num_buff_elem;
1240 QDF_STATUS status;
1241
1242 if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
1243 qdf_atomic_dec(&peer->flush_in_progress);
1244 return;
1245 }
1246
1247 qdf_spin_lock_bh(&peer->peer_info_lock);
1248 if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx)
1249 data_rx = peer->vdev->osif_rx;
1250 else
1251 drop = true;
1252 qdf_spin_unlock_bh(&peer->peer_info_lock);
1253
1254 bufqi = &peer->bufq_info;
1255
1256 qdf_spin_lock_bh(&bufqi->bufq_lock);
Sravan Kumar Kairamebd627e2018-08-28 23:32:52 +05301257 qdf_list_remove_front(&bufqi->cached_bufq,
1258 (qdf_list_node_t **)&cache_buf);
1259 while (cache_buf) {
1260 num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(
1261 cache_buf->buf);
1262 bufqi->entries -= num_buff_elem;
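		/*
		 * Drop bufq_lock before handing the frames to the OSIF rx
		 * callback (or freeing them); that path is outside DP and
		 * should not run under this spinlock. The lock is taken
		 * again below before dequeuing the next cached buffer.
		 */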
1263 qdf_spin_unlock_bh(&bufqi->bufq_lock);
1264 if (drop) {
1265 bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1266 cache_buf->buf);
1267 } else {
1268 /* Flush the cached frames to OSIF DEV */
1269 status = data_rx(peer->vdev->osif_vdev, cache_buf->buf);
1270 if (status != QDF_STATUS_SUCCESS)
1271 bufqi->dropped = dp_rx_drop_nbuf_list(
1272 peer->vdev->pdev,
1273 cache_buf->buf);
1274 }
1275 qdf_mem_free(cache_buf);
1276 cache_buf = NULL;
1277 qdf_spin_lock_bh(&bufqi->bufq_lock);
1278 qdf_list_remove_front(&bufqi->cached_bufq,
1279 (qdf_list_node_t **)&cache_buf);
1280 }
1281 qdf_spin_unlock_bh(&bufqi->bufq_lock);
1282 qdf_atomic_dec(&peer->flush_in_progress);
1283}
1284
1285/**
1286 * dp_rx_enqueue_rx() - cache rx frames
1287 * @peer: peer
1288 * @rx_buf_list: cache buffer list
1289 *
1290 * Return: None
1291 */
1292static QDF_STATUS
1293dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
1294{
1295 struct dp_rx_cached_buf *cache_buf;
1296 struct dp_peer_cached_bufq *bufqi = &peer->bufq_info;
1297 int num_buff_elem;
1298
Nisha Menon4f633662020-01-21 18:17:28 -08001299 dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries,
1300 bufqi->dropped);
Sravan Kumar Kairamebd627e2018-08-28 23:32:52 +05301301 if (!peer->valid) {
1302 bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1303 rx_buf_list);
1304 return QDF_STATUS_E_INVAL;
1305 }
1306
1307 qdf_spin_lock_bh(&bufqi->bufq_lock);
1308 if (bufqi->entries >= bufqi->thresh) {
1309 bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1310 rx_buf_list);
1311 qdf_spin_unlock_bh(&bufqi->bufq_lock);
1312 return QDF_STATUS_E_RESOURCES;
1313 }
1314 qdf_spin_unlock_bh(&bufqi->bufq_lock);
1315
1316 num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list);
1317
1318 cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf));
1319 if (!cache_buf) {
1320 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1321 "Failed to allocate buf to cache rx frames");
1322 bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1323 rx_buf_list);
1324 return QDF_STATUS_E_NOMEM;
1325 }
1326
1327 cache_buf->buf = rx_buf_list;
1328
1329 qdf_spin_lock_bh(&bufqi->bufq_lock);
1330 qdf_list_insert_back(&bufqi->cached_bufq,
1331 &cache_buf->node);
1332 bufqi->entries += num_buff_elem;
1333 qdf_spin_unlock_bh(&bufqi->bufq_lock);
1334
1335 return QDF_STATUS_SUCCESS;
1336}
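/*
 * Usage note: dp_rx_enqueue_rx() is invoked from dp_rx_deliver_to_stack()
 * when the vdev has no osif_rx callback registered yet; the frames sit on
 * peer->bufq_info until dp_rx_flush_rx_cached() later forwards or drops
 * them (typically once the OSIF rx path is registered or the peer goes away).
 */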
1337
1338static inline
1339bool dp_rx_is_peer_cache_bufq_supported(void)
1340{
1341 return true;
1342}
1343#else
1344static inline
1345bool dp_rx_is_peer_cache_bufq_supported(void)
1346{
1347 return false;
1348}
1349
1350static inline QDF_STATUS
1351dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
1352{
1353 return QDF_STATUS_SUCCESS;
1354}
1355#endif
1356
Nisha Menon4f633662020-01-21 18:17:28 -08001357void dp_rx_deliver_to_stack(struct dp_soc *soc,
1358 struct dp_vdev *vdev,
1359 struct dp_peer *peer,
1360 qdf_nbuf_t nbuf_head,
1361 qdf_nbuf_t nbuf_tail)
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301362{
Rakesh Pillaic1aeb352020-01-14 13:06:15 +05301363 int num_nbuf = 0;
1364
1365 if (qdf_unlikely(!vdev || vdev->delete.pending)) {
1366 num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head);
1367 /*
1368 * This is a special case where vdev is invalid,
1369 * so we cannot know the pdev to which this packet
1370 * belonged. Hence we update the soc rx error stats.
1371 */
1372 DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf);
1373 return;
1374 }
1375
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301376 /*
Jeff Johnsonff2dfb22018-05-12 10:27:57 -07001377	 * It is highly unlikely to have a vdev without a registered rx
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301378	 * callback function; if so, let us free the nbuf_list.
1379 */
1380 if (qdf_unlikely(!vdev->osif_rx)) {
Nisha Menon4f633662020-01-21 18:17:28 -08001381 if (peer && dp_rx_is_peer_cache_bufq_supported()) {
Sravan Kumar Kairamebd627e2018-08-28 23:32:52 +05301382 dp_rx_enqueue_rx(peer, nbuf_head);
Nisha Menon4f633662020-01-21 18:17:28 -08001383 } else {
1384 num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev,
1385 nbuf_head);
1386 DP_STATS_DEC(peer, rx.to_stack.num, num_nbuf);
1387 }
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301388 return;
1389 }
1390
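	/*
	 * For raw / native-wifi decap types, let the OSIF rx-simulation
	 * decap hook process the nbuf list first (it may adjust the list
	 * in place via the head/tail pointers) before normal delivery.
	 */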
1391 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
Chaithanya Garrepalli72dc9132018-02-21 18:37:34 +05301392 (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
1393 vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
Pavankumar Nandeshwar0ce38702019-09-30 18:43:03 +05301394 &nbuf_tail, peer->mac_addr.raw);
Chaithanya Garrepalli72dc9132018-02-21 18:37:34 +05301395 }
Manjunathappa Prakash5d73e072020-01-08 16:50:25 -08001396
1397 /* Function pointer initialized only when FISA is enabled */
1398 if (vdev->osif_fisa_rx)
1399 /* on failure send it via regular path */
1400 vdev->osif_fisa_rx(soc, vdev, nbuf_head);
1401 else
1402 vdev->osif_rx(vdev->osif_vdev, nbuf_head);
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301403}
1404
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301405/**
 1406 * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
 * @pdev: pdev handle, used for the checksum error counters
 1407 * @nbuf: pointer to the first msdu of an amsdu.
 1408 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 1409 *
 1410 * The ip_summed field of the skb is set based on whether HW validated the
1411 * IP/TCP/UDP checksum.
1412 *
1413 * Return: void
1414 */
Tallapragada Kalyan51198fc2018-04-18 14:30:44 +05301415static inline void dp_rx_cksum_offload(struct dp_pdev *pdev,
1416 qdf_nbuf_t nbuf,
1417 uint8_t *rx_tlv_hdr)
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301418{
1419 qdf_nbuf_rx_cksum_t cksum = {0};
Tallapragada Kalyan51198fc2018-04-18 14:30:44 +05301420 bool ip_csum_err = hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr);
1421 bool tcp_udp_csum_er = hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr);
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301422
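	/*
	 * If HW flagged neither an IP nor a TCP/UDP checksum failure, mark
	 * the frame so the network stack can skip software checksum
	 * validation (on Linux this typically translates to
	 * CHECKSUM_UNNECESSARY); otherwise only the per-pdev error counters
	 * are bumped and the checksum state is left untouched.
	 */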
Tallapragada Kalyan51198fc2018-04-18 14:30:44 +05301423 if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) {
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301424 cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301425 qdf_nbuf_set_rx_cksum(nbuf, &cksum);
Tallapragada Kalyan51198fc2018-04-18 14:30:44 +05301426 } else {
1427 DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
1428 DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er);
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301429 }
1430}
1431
Adil Saeed Musthafabbc4de02019-12-12 14:34:44 -08001432#ifdef VDEV_PEER_PROTOCOL_COUNT
1433#define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer) \
1434{ \
1435 qdf_nbuf_t nbuf_local; \
1436 struct dp_peer *peer_local; \
1437 struct dp_vdev *vdev_local = vdev_hdl; \
1438 do { \
1439 if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
1440 break; \
1441 nbuf_local = nbuf; \
1442 peer_local = peer; \
1443 if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \
1444 break; \
1445 else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \
1446 break; \
1447 dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
1448 (nbuf_local), \
1449 (peer_local), 0, 1); \
1450 } while (0); \
1451}
1452#else
1453#define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer)
1454#endif
1455
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301456/**
1457 * dp_rx_msdu_stats_update() - update per msdu stats.
1458 * @soc: core txrx main context
1459 * @nbuf: pointer to the first msdu of an amsdu.
1460 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
1461 * @peer: pointer to the peer object.
1462 * @ring_id: reo dest ring number on which pkt is reaped.
Varsha Mishra9d42f122019-05-03 12:47:40 +05301463 * @tid_stats: per tid rx stats.
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301464 *
1465 * update all the per msdu stats for that nbuf.
1466 * Return: void
1467 */
1468static void dp_rx_msdu_stats_update(struct dp_soc *soc,
1469 qdf_nbuf_t nbuf,
1470 uint8_t *rx_tlv_hdr,
1471 struct dp_peer *peer,
Varsha Mishra9d42f122019-05-03 12:47:40 +05301472 uint8_t ring_id,
1473 struct cdp_tid_rx_stats *tid_stats)
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301474{
1475 bool is_ampdu, is_not_amsdu;
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301476 uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
1477 struct dp_vdev *vdev = peer->vdev;
Srinivas Girigowda03bd4b62019-02-25 10:57:08 -08001478 qdf_ether_header_t *eh;
Ankit Kumarf90c9442019-05-02 18:55:20 +05301479 uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301480
Adil Saeed Musthafabbc4de02019-12-12 14:34:44 -08001481 dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, peer);
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301482 is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
1483 qdf_nbuf_is_rx_chfrag_end(nbuf);
1484
Aditya Sathish6add3db2018-04-10 19:43:34 +05301485 DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len);
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301486 DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu);
1487 DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu);
Paul Zhang12758962019-08-23 14:52:47 +08001488 DP_STATS_INCC(peer, rx.rx_retries, 1, qdf_nbuf_is_rx_retry_flag(nbuf));
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301489
Varsha Mishra9d42f122019-05-03 12:47:40 +05301490 tid_stats->msdu_cnt++;
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05301491 if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) &&
Aditya Sathish6add3db2018-04-10 19:43:34 +05301492 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
Srinivas Girigowda03bd4b62019-02-25 10:57:08 -08001493 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
Amir Patel3217ade2018-09-07 12:21:35 +05301494 DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len);
Varsha Mishra9d42f122019-05-03 12:47:40 +05301495 tid_stats->mcast_msdu_cnt++;
Srinivas Girigowda79502972019-02-11 12:25:12 -08001496 if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
Aditya Sathish6add3db2018-04-10 19:43:34 +05301497 DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len);
Varsha Mishra9d42f122019-05-03 12:47:40 +05301498 tid_stats->bcast_msdu_cnt++;
Aditya Sathish6add3db2018-04-10 19:43:34 +05301499 }
1500 }
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301501
1502 /*
1503 * currently we can return from here as we have similar stats
1504 * updated at per ppdu level instead of msdu level
1505 */
1506 if (!soc->process_rx_status)
1507 return;
1508
1509 is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(rx_tlv_hdr);
1510 DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu);
1511 DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu));
1512
1513 sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr);
1514 mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
Ankit Kumare2227752019-04-30 00:16:04 +05301515 tid = qdf_nbuf_get_tid_val(nbuf);
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301516 bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
Balamurugan Mahalingamd0159642018-07-11 15:02:29 +05301517 reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
1518 rx_tlv_hdr);
1519 nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301520 pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
1521
Venkata Sharath Chandra Manchalafaa0d8b2018-04-09 14:39:43 -07001522 DP_STATS_INC(peer, rx.bw[bw], 1);
Jinwei Chene6611272019-04-22 18:38:51 +08001523 /*
 1524	 * Only if nss > 0 and pkt_type is 11N/AC/AX,
 1525	 * increase the counter at index [nss - 1] of the nss array.
1526 */
1527 if (nss > 0 && (pkt_type == DOT11_N ||
1528 pkt_type == DOT11_AC ||
1529 pkt_type == DOT11_AX))
1530 DP_STATS_INC(peer, rx.nss[nss - 1], 1);
1531
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301532 DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
1533 DP_STATS_INCC(peer, rx.err.mic_err, 1,
1534 hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr));
1535 DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
1536 hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr));
1537
1538 DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301539 DP_STATS_INC(peer, rx.reception_type[reception_type], 1);
1540
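	/*
	 * MCS histogram: for each PHY type, the first DP_STATS_INCC below
	 * counts out-of-range MCS values in the overflow bucket
	 * (MAX_MCS - 1), while the second counts in-range values in their
	 * own mcs bucket.
	 */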
Venkata Sharath Chandra Manchalad18887e2018-10-02 18:18:52 -07001541 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301542 ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
1543 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1544 ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A)));
Venkata Sharath Chandra Manchalad18887e2018-10-02 18:18:52 -07001545 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301546 ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
1547 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1548 ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B)));
Venkata Sharath Chandra Manchalad18887e2018-10-02 18:18:52 -07001549 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301550 ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
1551 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1552 ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N)));
Venkata Sharath Chandra Manchalad18887e2018-10-02 18:18:52 -07001553 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301554 ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
1555 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1556 ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
Venkata Sharath Chandra Manchalad18887e2018-10-02 18:18:52 -07001557 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301558 ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX)));
1559 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
phadiman6c3432b2019-01-09 12:45:28 +05301560 ((mcs < MAX_MCS) && (pkt_type == DOT11_AX)));
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301561
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301562 if ((soc->process_rx_status) &&
1563 hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) {
Amir Patel756d05e2018-10-10 12:35:30 +05301564#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
phadiman49757302018-12-18 16:13:59 +05301565 if (!vdev->pdev)
1566 return;
1567
Amir Patel756d05e2018-10-10 12:35:30 +05301568 dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
Tallapragada Kalyan9e4b36f2019-05-02 13:22:34 +05301569 &peer->stats, peer->peer_ids[0],
Amir Patel756d05e2018-10-10 12:35:30 +05301570 UPDATE_PEER_STATS,
1571 vdev->pdev->pdev_id);
1572#endif
1573
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301574 }
1575}
1576
Chaithanya Garrepalliaf34aae2019-02-18 20:44:27 +05301577static inline bool is_sa_da_idx_valid(struct dp_soc *soc,
Akshay Kosigi6eef9e32019-06-24 14:32:18 +05301578 uint8_t *rx_tlv_hdr,
syed touqeer pasha6997a372019-12-31 15:45:55 +05301579 qdf_nbuf_t nbuf,
1580 struct hal_rx_msdu_metadata msdu_info)
Chaithanya Garrepalliaf34aae2019-02-18 20:44:27 +05301581{
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05301582 if ((qdf_nbuf_is_sa_valid(nbuf) &&
syed touqeer pasha6997a372019-12-31 15:45:55 +05301583 (msdu_info.sa_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) ||
Mohit Khanna3ba93722019-06-13 18:32:50 -07001584 (!qdf_nbuf_is_da_mcbc(nbuf) &&
1585 qdf_nbuf_is_da_valid(nbuf) &&
syed touqeer pasha6997a372019-12-31 15:45:55 +05301586 (msdu_info.da_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))))
Chaithanya Garrepalliaf34aae2019-02-18 20:44:27 +05301587 return false;
1588
1589 return true;
1590}
1591
Amir Patelcb990262019-05-28 15:12:48 +05301592#ifndef WDS_VENDOR_EXTENSION
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05301593int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
1594 struct dp_vdev *vdev,
1595 struct dp_peer *peer)
Tallapragada Kalyan2a5fc622017-12-08 21:07:43 +05301596{
1597 return 1;
1598}
1599#endif
1600
Manjunathappa Prakash8f708622019-02-20 17:02:59 -08001601#ifdef RX_DESC_DEBUG_CHECK
1602/**
1603 * dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr
1604 * corruption
1605 *
1606 * @ring_desc: REO ring descriptor
1607 * @rx_desc: Rx descriptor
1608 *
1609 * Return: NONE
1610 */
Akshay Kosigi91c56522019-07-02 11:49:39 +05301611static inline
1612void dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t ring_desc,
1613 struct dp_rx_desc *rx_desc)
Manjunathappa Prakash8f708622019-02-20 17:02:59 -08001614{
1615 struct hal_buf_info hbi;
1616
1617 hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
1618 /* Sanity check for possible buffer paddr corruption */
1619 qdf_assert_always((&hbi)->paddr ==
1620 qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0));
1621}
1622#else
Akshay Kosigi91c56522019-07-02 11:49:39 +05301623static inline
1624void dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t ring_desc,
1625 struct dp_rx_desc *rx_desc)
Manjunathappa Prakash8f708622019-02-20 17:02:59 -08001626{
1627}
1628#endif
1629
Mohit Khannae5a6e942018-11-28 14:22:48 -08001630#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
1631static inline
1632bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
1633{
1634 bool limit_hit = false;
1635 struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
1636
1637 limit_hit =
1638 (num_reaped >= cfg->rx_reap_loop_pkt_limit) ? true : false;
1639
1640 if (limit_hit)
1641 DP_STATS_INC(soc, rx.reap_loop_pkt_limit_hit, 1)
1642
1643 return limit_hit;
1644}
1645
Mohit Khannae5a6e942018-11-28 14:22:48 -08001646static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
1647{
1648 return soc->wlan_cfg_ctx->rx_enable_eol_data_check;
1649}
1650
1651#else
1652static inline
1653bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
1654{
1655 return false;
1656}
1657
Mohit Khannae5a6e942018-11-28 14:22:48 -08001658static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
1659{
1660 return false;
1661}
1662
1663#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
Saket Jha7f890142019-07-10 18:31:36 -07001664
Dhanashri Atre0da31222017-03-23 12:30:58 -07001665/**
Jinwei Chen9d3f9852019-07-12 19:01:18 +08001666 * dp_is_special_data() - check is the pkt special like eapol, dhcp, etc
1667 *
1668 * @nbuf: pkt skb pointer
1669 *
1670 * Return: true if matched, false if not
1671 */
1672static inline
1673bool dp_is_special_data(qdf_nbuf_t nbuf)
1674{
1675 if (qdf_nbuf_is_ipv4_arp_pkt(nbuf) ||
1676 qdf_nbuf_is_ipv4_dhcp_pkt(nbuf) ||
1677 qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
1678 qdf_nbuf_is_ipv6_dhcp_pkt(nbuf))
1679 return true;
1680 else
1681 return false;
1682}
1683
1684#ifdef DP_RX_PKT_NO_PEER_DELIVER
1685/**
1686 * dp_rx_deliver_to_stack_no_peer() - try deliver rx data even if
1687 * no corresbonding peer found
1688 * @soc: core txrx main context
1689 * @nbuf: pkt skb pointer
1690 *
 1691 * This function will try to deliver some RX special frames to the stack
 1692 * even when no matching peer is found. For instance, in the LFR case,
 1693 * some eapol data will be sent to the host before peer_map is done.
1694 *
1695 * Return: None
1696 */
Rakesh Pillaif09f0b72020-03-02 14:09:18 +05301697static
Jinwei Chen9d3f9852019-07-12 19:01:18 +08001698void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
1699{
Jinwei Chen9d3f9852019-07-12 19:01:18 +08001700 uint16_t peer_id;
1701 uint8_t vdev_id;
1702 struct dp_vdev *vdev;
1703 uint32_t l2_hdr_offset = 0;
1704 uint16_t msdu_len = 0;
1705 uint32_t pkt_len = 0;
1706 uint8_t *rx_tlv_hdr;
1707
Chaithanya Garrepalli52511a12019-12-12 20:24:40 +05301708 peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
Jinwei Chen9d3f9852019-07-12 19:01:18 +08001709 if (peer_id > soc->max_peers)
1710 goto deliver_fail;
1711
Chaithanya Garrepalli52511a12019-12-12 20:24:40 +05301712 vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
Jinwei Chen9d3f9852019-07-12 19:01:18 +08001713 vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
Rakesh Pillaic1aeb352020-01-14 13:06:15 +05301714 if (!vdev || vdev->delete.pending || !vdev->osif_rx)
Jinwei Chen9d3f9852019-07-12 19:01:18 +08001715 goto deliver_fail;
1716
1717 rx_tlv_hdr = qdf_nbuf_data(nbuf);
1718 l2_hdr_offset =
Venkata Sharath Chandra Manchalaf05b2ae2019-09-20 17:25:21 -07001719 hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
Jinwei Chen9d3f9852019-07-12 19:01:18 +08001720
1721 msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
1722 pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
1723
Mohit Khannad2732502019-08-12 01:33:37 -07001724 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
1725 qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
1726 } else {
1727 qdf_nbuf_set_pktlen(nbuf, pkt_len);
1728 qdf_nbuf_pull_head(nbuf,
1729 RX_PKT_TLVS_LEN +
1730 l2_hdr_offset);
1731 }
Jinwei Chen9d3f9852019-07-12 19:01:18 +08001732
1733 /* only allow special frames */
1734 if (!dp_is_special_data(nbuf))
1735 goto deliver_fail;
1736
1737 vdev->osif_rx(vdev->osif_vdev, nbuf);
1738 DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1);
1739 return;
1740
1741deliver_fail:
1742 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1743 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
1744 qdf_nbuf_free(nbuf);
1745}
1746#else
1747static inline
1748void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
1749{
1750 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1751 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
1752 qdf_nbuf_free(nbuf);
1753}
1754#endif
1755
1756/**
Mohit Khanna80002652019-10-14 23:27:36 -07001757 * dp_rx_srng_get_num_pending() - get number of pending entries
1758 * @hal_soc: hal soc opaque pointer
 1759 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring
1760 * @num_entries: number of entries in the hal_ring.
1761 * @near_full: pointer to a boolean. This is set if ring is near full.
1762 *
1763 * The function returns the number of entries in a destination ring which are
1764 * yet to be reaped. The function also checks if the ring is near full.
1765 * If more than half of the ring needs to be reaped, the ring is considered
1766 * approaching full.
 1767 * The function uses hal_srng_dst_num_valid_locked to get the number of valid
1768 * entries. It should not be called within a SRNG lock. HW pointer value is
1769 * synced into cached_hp.
1770 *
1771 * Return: Number of pending entries if any
1772 */
1773static
1774uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
1775 hal_ring_handle_t hal_ring_hdl,
1776 uint32_t num_entries,
1777 bool *near_full)
1778{
1779 uint32_t num_pending = 0;
1780
1781 num_pending = hal_srng_dst_num_valid_locked(hal_soc,
1782 hal_ring_hdl,
1783 true);
1784
1785 if (num_entries && (num_pending >= num_entries >> 1))
1786 *near_full = true;
1787 else
1788 *near_full = false;
1789
1790 return num_pending;
1791}
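/*
 * For example (illustrative sizes): with a 1024-entry REO ring and 600
 * entries still to be reaped, num_pending is 600 and *near_full is set,
 * which lets the caller (dp_rx_process) loop for more data instead of
 * yielding.
 */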
1792
Manjunathappa Prakash5d73e072020-01-08 16:50:25 -08001793#ifdef WLAN_SUPPORT_RX_FISA
Manjunathappa Prakash9ee605c2020-02-10 19:35:18 -08001794void dp_rx_skip_tlvs(qdf_nbuf_t nbuf, uint32_t l3_padding)
Manjunathappa Prakash5d73e072020-01-08 16:50:25 -08001795{
Manjunathappa Prakash9ee605c2020-02-10 19:35:18 -08001796 QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
1797 qdf_nbuf_pull_head(nbuf, l3_padding + RX_PKT_TLVS_LEN);
Manjunathappa Prakash5d73e072020-01-08 16:50:25 -08001798}
Manjunathappa Prakash9ee605c2020-02-10 19:35:18 -08001799#else
1800void dp_rx_skip_tlvs(qdf_nbuf_t nbuf, uint32_t l3_padding)
Manjunathappa Prakash5d73e072020-01-08 16:50:25 -08001801{
Manjunathappa Prakash9ee605c2020-02-10 19:35:18 -08001802 qdf_nbuf_pull_head(nbuf, l3_padding + RX_PKT_TLVS_LEN);
Manjunathappa Prakash5d73e072020-01-08 16:50:25 -08001803}
Manjunathappa Prakash9ee605c2020-02-10 19:35:18 -08001804#endif
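/*
 * Note: the WLAN_SUPPORT_RX_FISA variant above stashes the L3 header
 * padding in the nbuf control block before stripping the TLVs, presumably
 * so the FISA aggregation path can later recover the IP header offset;
 * the non-FISA variant simply strips the TLVs and padding.
 */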
1805
Manjunathappa Prakash5d73e072020-01-08 16:50:25 -08001806
Mohit Khanna80002652019-10-14 23:27:36 -07001807/**
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001808 * dp_rx_process() - Brain of the Rx processing functionality
1809 * Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
Mohit Khanna80002652019-10-14 23:27:36 -07001810 * @int_ctx: per interrupt context
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001811 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
Mohit Khanna7ac554b2018-05-24 11:58:13 -07001812 * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001813 * @quota: No. of units (packets) that can be serviced in one shot.
1814 *
1815 * This function implements the core of Rx functionality. This is
1816 * expected to handle only non-error frames.
1817 *
1818 * Return: uint32_t: No. of elements processed
1819 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301820uint32_t dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
Mohit Khanna80002652019-10-14 23:27:36 -07001821 uint8_t reo_ring_num, uint32_t quota)
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001822{
Akshay Kosigi91c56522019-07-02 11:49:39 +05301823 hal_ring_desc_t ring_desc;
Akshay Kosigia870c612019-07-08 23:10:30 +05301824 hal_soc_handle_t hal_soc;
Dhanashri Atre0da31222017-03-23 12:30:58 -07001825 struct dp_rx_desc *rx_desc = NULL;
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301826 qdf_nbuf_t nbuf, next;
Mohit Khanna80002652019-10-14 23:27:36 -07001827 bool near_full;
Mohit Khannae5a6e942018-11-28 14:22:48 -08001828 union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT];
1829 union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT];
Mohit Khanna80002652019-10-14 23:27:36 -07001830 uint32_t num_pending;
Chaithanya Garrepalli8aaf9b62018-05-17 15:53:21 +05301831 uint32_t rx_bufs_used = 0, rx_buf_cookie;
Chaithanya Garrepallia173a182018-05-18 21:33:10 +05301832 uint16_t msdu_len = 0;
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001833 uint16_t peer_id;
Chaithanya Garrepalli52511a12019-12-12 20:24:40 +05301834 uint8_t vdev_id;
Mohit Khannae5a6e942018-11-28 14:22:48 -08001835 struct dp_peer *peer;
1836 struct dp_vdev *vdev;
Chaithanya Garrepallia173a182018-05-18 21:33:10 +05301837 uint32_t pkt_len = 0;
Mohit Khannae5a6e942018-11-28 14:22:48 -08001838 struct hal_rx_mpdu_desc_info mpdu_desc_info;
1839 struct hal_rx_msdu_desc_info msdu_desc_info;
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001840 enum hal_reo_error_status error;
Tallapragada Kalyanbb3bbcd2017-07-14 12:17:04 +05301841 uint32_t peer_mdata;
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05301842 uint8_t *rx_tlv_hdr;
Mohit Khannae5a6e942018-11-28 14:22:48 -08001843 uint32_t rx_bufs_reaped[MAX_PDEV_CNT];
Pamidipati, Vijay57a435a2017-10-17 11:03:39 +05301844 uint8_t mac_id = 0;
Varsha Mishra18281792019-03-06 17:57:23 +05301845 struct dp_pdev *rx_pdev;
Kai Chen6eca1a62017-01-12 10:17:53 -08001846 struct dp_srng *dp_rxdma_srng;
1847 struct rx_desc_pool *rx_desc_pool;
Dhanashri Atre0da31222017-03-23 12:30:58 -07001848 struct dp_soc *soc = int_ctx->soc;
Pamidipati, Vijay57a435a2017-10-17 11:03:39 +05301849 uint8_t ring_id = 0;
1850 uint8_t core_id = 0;
Varsha Mishra18281792019-03-06 17:57:23 +05301851 struct cdp_tid_rx_stats *tid_stats;
Mohit Khannae5a6e942018-11-28 14:22:48 -08001852 qdf_nbuf_t nbuf_head;
1853 qdf_nbuf_t nbuf_tail;
1854 qdf_nbuf_t deliver_list_head;
1855 qdf_nbuf_t deliver_list_tail;
1856 uint32_t num_rx_bufs_reaped = 0;
1857 uint32_t intr_id;
1858 struct hif_opaque_softc *scn;
Mohit Khannae5a6e942018-11-28 14:22:48 -08001859 int32_t tid = 0;
Chaithanya Garrepallid3d99db2018-12-19 21:54:03 +05301860 bool is_prev_msdu_last = true;
1861 uint32_t num_entries_avail = 0;
Mohit Khanna698987c2019-07-28 21:38:05 -07001862 uint32_t rx_ol_pkt_cnt = 0;
Mohit Khanna80002652019-10-14 23:27:36 -07001863 uint32_t num_entries = 0;
syed touqeer pasha6997a372019-12-31 15:45:55 +05301864 struct hal_rx_msdu_metadata msdu_metadata;
Rakesh Pillai79979d62020-02-29 20:42:28 +05301865 QDF_STATUS status;
Chaithanya Garrepallid3d99db2018-12-19 21:54:03 +05301866
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08001867 DP_HIST_INIT();
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001868
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301869 qdf_assert_always(soc && hal_ring_hdl);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001870 hal_soc = soc->hal_soc;
Mohit Khannae5a6e942018-11-28 14:22:48 -08001871 qdf_assert_always(hal_soc);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001872
Mohit Khannae5a6e942018-11-28 14:22:48 -08001873 scn = soc->hif_handle;
Sravan Kumar Kairamb96e5072019-08-21 20:59:51 +05301874 hif_pm_runtime_mark_dp_rx_busy(scn);
Mohit Khannae5a6e942018-11-28 14:22:48 -08001875 intr_id = int_ctx->dp_intr_id;
Mohit Khanna80002652019-10-14 23:27:36 -07001876 num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);
Mohit Khannae5a6e942018-11-28 14:22:48 -08001877
1878more_data:
1879 /* reset local variables here to be re-used in the function */
1880 nbuf_head = NULL;
1881 nbuf_tail = NULL;
1882 deliver_list_head = NULL;
1883 deliver_list_tail = NULL;
1884 peer = NULL;
1885 vdev = NULL;
1886 num_rx_bufs_reaped = 0;
1887
1888 qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
1889 qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
1890 qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
1891 qdf_mem_zero(head, sizeof(head));
1892 qdf_mem_zero(tail, sizeof(tail));
Yue Ma245b47b2017-02-21 16:35:31 -08001893
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301894 if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001895
1896 /*
1897 * Need API to convert from hal_ring pointer to
1898 * Ring Type / Ring Id combo
1899 */
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08001900 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001901 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301902 FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001903 goto done;
1904 }
1905
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05301906 /*
1907 * start reaping the buffers from reo ring and queue
1908 * them in per vdev queue.
1909 * Process the received pkts in a different per vdev loop.
1910 */
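	/*
	 * Overall flow of dp_rx_process(): (1) the reap loop below pops
	 * descriptors off the REO ring, stashes per-msdu info in nbuf->cb and
	 * links the nbufs onto a global list; (2) after 'done:' the reaped
	 * buffers are replenished back to the RXDMA refill ring; (3) the
	 * per-msdu loop then validates each nbuf, updates stats and builds
	 * per-vdev delivery lists which are finally handed to the stack.
	 */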
Chaithanya Garrepallid3d99db2018-12-19 21:54:03 +05301911 while (qdf_likely(quota &&
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301912 (ring_desc = hal_srng_dst_peek(hal_soc,
1913 hal_ring_hdl)))) {
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001914
1915 error = HAL_RX_ERROR_STATUS_GET(ring_desc);
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301916 ring_id = hal_srng_ring_id_get(hal_ring_hdl);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001917
1918 if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
1919 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301920 FL("HAL RING 0x%pK:error %d"), hal_ring_hdl, error);
Ishank Jain57c42a12017-04-12 10:42:22 +05301921 DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001922 /* Don't know how to deal with this -- assert */
1923 qdf_assert(0);
1924 }
1925
1926 rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
1927
Kai Chen6eca1a62017-01-12 10:17:53 -08001928 rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
Rakesh Pillai79979d62020-02-29 20:42:28 +05301929 status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
1930 ring_desc, rx_desc);
1931 if (QDF_IS_STATUS_ERROR(status)) {
1932 hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
1933 continue;
1934 }
Tallapragada Kalyaneff377a2019-01-09 19:13:19 +05301935
Saket Jha3aeabaa2020-03-03 16:21:12 -08001936 dp_rx_desc_nbuf_sanity_check(ring_desc, rx_desc);
1937
Tallapragada Kalyaneff377a2019-01-09 19:13:19 +05301938 /*
 1939		 * This is an unlikely scenario where the host is reaping
 1940		 * a descriptor which it already reaped just a while ago
 1941		 * but has not yet replenished back to HW.
1942 * In this case host will dump the last 128 descriptors
1943 * including the software descriptor rx_desc and assert.
1944 */
Gyanranjan Hazarikae8047262019-06-05 00:43:38 -07001945
Tallapragada Kalyaneff377a2019-01-09 19:13:19 +05301946 if (qdf_unlikely(!rx_desc->in_use)) {
1947 DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
Manjunathappa Prakash5f1b6982019-07-12 12:36:21 -07001948 dp_info_rl("Reaping rx_desc not in use!");
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301949 dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
Mohit Khanna16cd1b22019-01-25 10:46:00 -08001950 ring_desc, rx_desc);
Saket Jha7f890142019-07-10 18:31:36 -07001951 /* ignore duplicate RX desc and continue to process */
Manjunathappa Prakash5f1b6982019-07-12 12:36:21 -07001952 /* Pop out the descriptor */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301953 hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
Saket Jha7f890142019-07-10 18:31:36 -07001954 continue;
Mohit Khanna16cd1b22019-01-25 10:46:00 -08001955 }
1956
1957 if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
1958 dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
1959 DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301960 dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
Tallapragada Kalyaneff377a2019-01-09 19:13:19 +05301961 ring_desc, rx_desc);
1962 }
1963
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05301964 /* Get MPDU DESC info */
1965 hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001966
Chaithanya Garrepallid3d99db2018-12-19 21:54:03 +05301967 /* Get MSDU DESC info */
1968 hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info);
1969
Jinwei Chen0b924692020-01-14 13:52:06 +08001970 if (qdf_unlikely(msdu_desc_info.msdu_flags &
1971 HAL_MSDU_F_MSDU_CONTINUATION)) {
Chaithanya Garrepallid3d99db2018-12-19 21:54:03 +05301972 /* previous msdu has end bit set, so current one is
1973 * the new MPDU
1974 */
1975 if (is_prev_msdu_last) {
Chaithanya Garrepallid3d99db2018-12-19 21:54:03 +05301976 /* Get number of entries available in HW ring */
1977 num_entries_avail =
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301978 hal_srng_dst_num_valid(hal_soc,
1979 hal_ring_hdl, 1);
Chaithanya Garrepallid3d99db2018-12-19 21:54:03 +05301980
1981 /* For new MPDU check if we can read complete
1982 * MPDU by comparing the number of buffers
1983 * available and number of buffers needed to
1984 * reap this MPDU
1985 */
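				/*
				 * For example (illustrative sizes only): if
				 * each RX buffer carries roughly 1.6KB of
				 * payload after the RX TLVs, a 4KB scattered
				 * MSDU needs 3 ring entries, so we break out
				 * and wait until that many entries are
				 * available before reaping it.
				 */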
1986 if (((msdu_desc_info.msdu_len /
Shashikala Prabhu03a9f5b2020-01-28 19:11:30 +05301987 (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN) +
1988 1)) > num_entries_avail) {
Jinwei Chen0b924692020-01-14 13:52:06 +08001989 DP_STATS_INC(
1990 soc,
1991 rx.msdu_scatter_wait_break,
1992 1);
Chaithanya Garrepallid3d99db2018-12-19 21:54:03 +05301993 break;
Jinwei Chen0b924692020-01-14 13:52:06 +08001994 }
1995 is_prev_msdu_last = false;
Chaithanya Garrepallid3d99db2018-12-19 21:54:03 +05301996 }
Jinwei Chen0b924692020-01-14 13:52:06 +08001997
Chaithanya Garrepallid3d99db2018-12-19 21:54:03 +05301998 }
1999
Jinwei Chenb0c23052020-03-02 20:44:28 +08002000 /*
2001 * move unmap after scattered msdu waiting break logic
2002 * in case double skb unmap happened.
2003 */
2004 qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
2005 QDF_DMA_FROM_DEVICE);
2006 rx_desc->unmapped = 1;
2007
2008 core_id = smp_processor_id();
2009 DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1);
2010
2011 if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
2012 qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);
2013
Jinwei Chen0b924692020-01-14 13:52:06 +08002014 if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
2015 HAL_MPDU_F_RAW_AMPDU))
2016 qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);
2017
2018 if (!is_prev_msdu_last &&
2019 msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
2020 is_prev_msdu_last = true;
2021
Chaithanya Garrepallid3d99db2018-12-19 21:54:03 +05302022 /* Pop out the descriptor*/
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05302023 hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
Chaithanya Garrepallid3d99db2018-12-19 21:54:03 +05302024
2025 rx_bufs_reaped[rx_desc->pool_id]++;
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05302026 peer_mdata = mpdu_desc_info.peer_meta_data;
2027 QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
2028 DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
Chaithanya Garrepalli52511a12019-12-12 20:24:40 +05302029 QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) =
2030 DP_PEER_METADATA_VDEV_ID_GET(peer_mdata);
Tallapragada Kalyanbb3bbcd2017-07-14 12:17:04 +05302031
Kalyan Tallapragada277f45e2017-01-30 14:25:27 +05302032 /*
 2033		 * Save the first, last and continuation msdu flags in
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05302034		 * nbuf->cb; also save mcbc, is_da_valid, is_sa_valid and the
 2035		 * msdu length in nbuf->cb. This ensures the info required for
2036 * per pkt processing is always in the same cache line.
2037 * This helps in improving throughput for smaller pkt
2038 * sizes.
Kalyan Tallapragada277f45e2017-01-30 14:25:27 +05302039 */
2040 if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
Vivekde90e592017-11-30 17:24:18 +05302041 qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);
Kalyan Tallapragada277f45e2017-01-30 14:25:27 +05302042
2043 if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
Vivekde90e592017-11-30 17:24:18 +05302044 qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);
Kalyan Tallapragada277f45e2017-01-30 14:25:27 +05302045
2046 if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
Vivekde90e592017-11-30 17:24:18 +05302047 qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);
Kalyan Tallapragada277f45e2017-01-30 14:25:27 +05302048
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05302049 if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
2050 qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);
2051
2052 if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
2053 qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);
2054
2055 if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
2056 qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);
2057
Ankit Kumare2227752019-04-30 00:16:04 +05302058 qdf_nbuf_set_tid_val(rx_desc->nbuf,
2059 HAL_RX_REO_QUEUE_NUMBER_GET(ring_desc));
2060
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05302061 QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;
2062
Mohit Khanna7ac554b2018-05-24 11:58:13 -07002063 QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05302064
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302065 DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05302066
Tallapragada Kalyan52b45a12017-05-12 17:36:16 +05302067 /*
2068 * if continuation bit is set then we have MSDU spread
2069 * across multiple buffers, let us not decrement quota
2070 * till we reap all buffers of that MSDU.
2071 */
Vivekde90e592017-11-30 17:24:18 +05302072 if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
Tallapragada Kalyan52b45a12017-05-12 17:36:16 +05302073 quota -= 1;
2074
Tallapragada Kalyanaae8c412017-02-13 12:00:17 +05302075 dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
2076 &tail[rx_desc->pool_id],
2077 rx_desc);
Mohit Khannae5a6e942018-11-28 14:22:48 -08002078
2079 num_rx_bufs_reaped++;
Jinwei Chenb0c23052020-03-02 20:44:28 +08002080 /*
2081 * only if complete msdu is received for scatter case,
2082 * then allow break.
2083 */
2084 if (is_prev_msdu_last &&
2085 dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped))
Mohit Khannae5a6e942018-11-28 14:22:48 -08002086 break;
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002087 }
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05302088done:
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05302089 dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002090
Tallapragada Kalyanaae8c412017-02-13 12:00:17 +05302091 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
2092 /*
2093 * continue with next mac_id if no pkts were reaped
2094 * from that pool
2095 */
2096 if (!rx_bufs_reaped[mac_id])
2097 continue;
2098
Amit Shukla1edfe5a2019-10-24 14:03:39 -07002099 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
2100
Kai Chen6eca1a62017-01-12 10:17:53 -08002101 rx_desc_pool = &soc->rx_desc_buf[mac_id];
2102
2103 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
2104 rx_desc_pool, rx_bufs_reaped[mac_id],
Venkata Sharath Chandra Manchala16fcceb2018-01-03 11:27:15 -08002105 &head[mac_id], &tail[mac_id]);
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05302106 }
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002107
Mohit Khannae5a6e942018-11-28 14:22:48 -08002108 dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]);
psimha03f9a792017-10-17 10:42:58 -07002109 /* Peer can be NULL is case of LFR */
Jeff Johnsona8edf332019-03-18 09:51:52 -07002110 if (qdf_likely(peer))
psimha03f9a792017-10-17 10:42:58 -07002111 vdev = NULL;
2112
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05302113 /*
 2114	 * BIG loop where each nbuf is dequeued from the global queue,
 2115	 * processed and queued back on a per vdev basis. These nbufs
 2116	 * are sent to the stack as and when we run out of nbufs
 2117	 * or when a new nbuf dequeued from the global queue has a
 2118	 * different vdev compared to the previous nbuf.
2119 */
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302120 nbuf = nbuf_head;
2121 while (nbuf) {
2122 next = nbuf->next;
2123 rx_tlv_hdr = qdf_nbuf_data(nbuf);
Chaithanya Garrepalli52511a12019-12-12 20:24:40 +05302124 vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
2125
2126 if (deliver_list_head && vdev && (vdev->vdev_id != vdev_id)) {
Rakesh Pillaic1aeb352020-01-14 13:06:15 +05302127 dp_rx_deliver_to_stack(soc, vdev, peer,
2128 deliver_list_head,
Chaithanya Garrepalli52511a12019-12-12 20:24:40 +05302129 deliver_list_tail);
2130 deliver_list_head = NULL;
2131 deliver_list_tail = NULL;
2132 }
2133
Ankit Kumare2227752019-04-30 00:16:04 +05302134 /* Get TID from struct cb->tid_val, save to tid */
Varsha Mishra18281792019-03-06 17:57:23 +05302135 if (qdf_nbuf_is_rx_chfrag_start(nbuf))
Ankit Kumare2227752019-04-30 00:16:04 +05302136 tid = qdf_nbuf_get_tid_val(nbuf);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002137
Chaithanya Garrepalli52511a12019-12-12 20:24:40 +05302138 peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302139 peer = dp_peer_find_by_id(soc, peer_id);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002140
Mohit Khanna163c3172018-06-27 01:34:02 -07002141 if (peer) {
2142 QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
2143 qdf_dp_trace_set_track(nbuf, QDF_RX);
2144 QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
2145 QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
2146 QDF_NBUF_RX_PKT_DATA_TRACK;
2147 }
2148
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05302149 rx_bufs_used++;
2150
Jeff Johnsona8edf332019-03-18 09:51:52 -07002151 if (qdf_likely(peer)) {
psimha03f9a792017-10-17 10:42:58 -07002152 vdev = peer->vdev;
Chaithanya Garrepalli974da262018-02-22 20:32:19 +05302153 } else {
Manjunathappa Prakash97137592019-07-26 17:08:36 -07002154 nbuf->next = NULL;
Jinwei Chen9d3f9852019-07-12 19:01:18 +08002155 dp_rx_deliver_to_stack_no_peer(soc, nbuf);
Chaithanya Garrepalli974da262018-02-22 20:32:19 +05302156 nbuf = next;
2157 continue;
2158 }
2159
Jeff Johnsona8edf332019-03-18 09:51:52 -07002160 if (qdf_unlikely(!vdev)) {
Chaithanya Garrepalli974da262018-02-22 20:32:19 +05302161 qdf_nbuf_free(nbuf);
2162 nbuf = next;
2163 DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05302164 dp_peer_unref_del_find_by_id(peer);
Chaithanya Garrepalli974da262018-02-22 20:32:19 +05302165 continue;
2166 }
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05302167
Tallapragada Kalyancea9c932019-04-30 16:43:28 +05302168 rx_pdev = vdev->pdev;
2169 DP_RX_TID_SAVE(nbuf, tid);
2170 if (qdf_unlikely(rx_pdev->delay_stats_flag))
2171 qdf_nbuf_set_timestamp(nbuf);
2172
2173 ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
2174 tid_stats =
2175 &rx_pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
2176
2177 /*
2178 * Check if DMA completed -- msdu_done is the last bit
2179 * to be written
2180 */
Jinwei Chen0b924692020-01-14 13:52:06 +08002181 if (qdf_unlikely(!qdf_nbuf_is_rx_chfrag_cont(nbuf) &&
Tallapragada Kalyancea9c932019-04-30 16:43:28 +05302182 !hal_rx_attn_msdu_done_get(rx_tlv_hdr))) {
2183 dp_err("MSDU DONE failure");
2184 DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
2185 hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
2186 QDF_TRACE_LEVEL_INFO);
2187 tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
2188 qdf_nbuf_free(nbuf);
2189 qdf_assert(0);
2190 nbuf = next;
2191 continue;
2192 }
2193
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05302194 DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302195 /*
Chaithanya Garrepallia173a182018-05-18 21:33:10 +05302196 * First IF condition:
2197 * 802.11 Fragmented pkts are reinjected to REO
2198 * HW block as SG pkts and for these pkts we only
2199 * need to pull the RX TLVS header length.
2200 * Second IF condition:
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302201 * The below condition happens when an MSDU is spread
2202 * across multiple buffers. This can happen in two cases
 2203		 * 1. The nbuf size is smaller than the received msdu.
 2204		 *    ex: we have set the nbuf size to 2048 during
 2205		 *        nbuf_alloc, but we received an msdu which is
 2206		 *        2304 bytes in size; then this msdu is spread
2207 * across 2 nbufs.
2208 *
2209 * 2. AMSDUs when RAW mode is enabled.
2210 * ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
2211 * across 1st nbuf and 2nd nbuf and last MSDU is
2212 * spread across 2nd nbuf and 3rd nbuf.
2213 *
2214 * for these scenarios let us create a skb frag_list and
2215 * append these buffers till the last MSDU of the AMSDU
Chaithanya Garrepallia173a182018-05-18 21:33:10 +05302216 * Third condition:
2217 * This is the most likely case, we receive 802.3 pkts
2218 * decapsulated by HW, here we need to set the pkt length.
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302219 */
syed touqeer pasha6997a372019-12-31 15:45:55 +05302220 hal_rx_msdu_metadata_get(hal_soc, rx_tlv_hdr, &msdu_metadata);
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05302221 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
2222 bool is_mcbc, is_sa_vld, is_da_vld;
2223
Venkata Sharath Chandra Manchalaee909382019-09-20 10:52:37 -07002224 is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
2225 rx_tlv_hdr);
Venkata Sharath Chandra Manchala59ebd5e2019-09-20 15:52:55 -07002226 is_sa_vld =
2227 hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
2228 rx_tlv_hdr);
Venkata Sharath Chandra Manchala79055382019-09-21 11:22:30 -07002229 is_da_vld =
2230 hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
2231 rx_tlv_hdr);
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05302232
2233 qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
2234 qdf_nbuf_set_da_valid(nbuf, is_da_vld);
2235 qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);
2236
Chaithanya Garrepallia173a182018-05-18 21:33:10 +05302237 qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
Jinwei Chen0b924692020-01-14 13:52:06 +08002238 } else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05302239 msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
Jinwei Chen0b924692020-01-14 13:52:06 +08002240 nbuf = dp_rx_sg_create(nbuf);
Chaithanya Garrepalli72dc9132018-02-21 18:37:34 +05302241 next = nbuf->next;
Jinwei Chen0b924692020-01-14 13:52:06 +08002242
2243 if (qdf_nbuf_is_raw_frame(nbuf)) {
2244 DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
2245 DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len);
2246 } else {
2247 qdf_nbuf_free(nbuf);
2248 DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
2249 dp_info_rl("scatter msdu len %d, dropped",
2250 msdu_len);
2251 nbuf = next;
2252 dp_peer_unref_del_find_by_id(peer);
2253 continue;
2254 }
Chaithanya Garrepallia173a182018-05-18 21:33:10 +05302255 } else {
Chaithanya Garrepallia173a182018-05-18 21:33:10 +05302256
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05302257 msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
syed touqeer pasha6997a372019-12-31 15:45:55 +05302258 pkt_len = msdu_len +
2259 msdu_metadata.l3_hdr_pad +
2260 RX_PKT_TLVS_LEN;
Chaithanya Garrepallia173a182018-05-18 21:33:10 +05302261
2262 qdf_nbuf_set_pktlen(nbuf, pkt_len);
Manjunathappa Prakash9ee605c2020-02-10 19:35:18 -08002263 dp_rx_skip_tlvs(nbuf, msdu_metadata.l3_hdr_pad);
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05302264 }
2265
Varsha Mishra6e1760c2019-07-27 22:51:42 +05302266 /*
 2267		 * process the frame for multipass processing
2268 */
2269 if (qdf_unlikely(vdev->multipass_en)) {
Ankit Kumar53581e92020-01-02 10:15:16 +05302270 if (dp_rx_multipass_process(peer, nbuf, tid) == false) {
2271 DP_STATS_INC(peer, rx.multipass_rx_pkt_drop, 1);
2272 qdf_nbuf_free(nbuf);
2273 nbuf = next;
2274 dp_peer_unref_del_find_by_id(peer);
2275 continue;
2276 }
Varsha Mishra6e1760c2019-07-27 22:51:42 +05302277 }
2278
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05302279 if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
Tallapragada Kalyan2a5fc622017-12-08 21:07:43 +05302280 QDF_TRACE(QDF_MODULE_ID_DP,
2281 QDF_TRACE_LEVEL_ERROR,
2282 FL("Policy Check Drop pkt"));
Varsha Mishra18281792019-03-06 17:57:23 +05302283 tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
Tallapragada Kalyan2a5fc622017-12-08 21:07:43 +05302284 /* Drop & free packet */
2285 qdf_nbuf_free(nbuf);
2286 /* Statistics */
2287 nbuf = next;
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05302288 dp_peer_unref_del_find_by_id(peer);
Tallapragada Kalyan2a5fc622017-12-08 21:07:43 +05302289 continue;
2290 }
2291
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05302292 if (qdf_unlikely(peer && (peer->nawds_enabled) &&
2293 (qdf_nbuf_is_da_mcbc(nbuf)) &&
Venkata Sharath Chandra Manchala2a52d342019-09-21 11:52:54 -07002294 (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc,
2295 rx_tlv_hdr) ==
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05302296 false))) {
Varsha Mishra18281792019-03-06 17:57:23 +05302297 tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
Ruchi, Agrawal27550482018-02-20 19:43:41 +05302298 DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05302299 qdf_nbuf_free(nbuf);
2300 nbuf = next;
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05302301 dp_peer_unref_del_find_by_id(peer);
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05302302 continue;
2303 }
2304
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05302305 if (soc->process_rx_status)
2306 dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302307
Karunakar Dasineni142f9ba2019-03-19 23:04:59 -07002308 /* Update the protocol tag in SKB based on CCE metadata */
Karunakar Dasinenid8c7ad22019-04-18 18:15:02 -07002309 dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
2310 reo_ring_num, false, true);
Karunakar Dasineni142f9ba2019-03-19 23:04:59 -07002311
Sumeet Raoc4fa4df2019-07-05 02:11:19 -07002312 /* Update the flow tag in SKB based on FSE metadata */
2313 dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
2314
Varsha Mishra9d42f122019-05-03 12:47:40 +05302315 dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer,
2316 ring_id, tid_stats);
Aditya Sathish6add3db2018-04-10 19:43:34 +05302317
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302318 if (qdf_unlikely(vdev->mesh_vdev)) {
Varsha Mishra18281792019-03-06 17:57:23 +05302319 if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302320 == QDF_STATUS_SUCCESS) {
2321 QDF_TRACE(QDF_MODULE_ID_DP,
Varsha Mishra18281792019-03-06 17:57:23 +05302322 QDF_TRACE_LEVEL_INFO_MED,
2323 FL("mesh pkt filtered"));
2324 tid_stats->fail_cnt[MESH_FILTER_DROP]++;
2325 DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
2326 1);
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302327
2328 qdf_nbuf_free(nbuf);
2329 nbuf = next;
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05302330 dp_peer_unref_del_find_by_id(peer);
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302331 continue;
2332 }
2333 dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
2334 }
2335
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302336 if (qdf_likely(vdev->rx_decap_type ==
Sravan Kumar Kairamd7d1d672018-09-04 14:56:33 +05302337 htt_cmn_pkt_type_ethernet) &&
2338 qdf_likely(!vdev->mesh_vdev)) {
phadiman4213e9c2018-10-29 12:50:02 +05302339 /* WDS Destination Address Learning */
Nandha Kishore Easwaranf9c44ce2019-01-18 15:31:18 +05302340 dp_rx_da_learn(soc, rx_tlv_hdr, peer, nbuf);
phadiman4213e9c2018-10-29 12:50:02 +05302341
Chaithanya Garrepalliaf34aae2019-02-18 20:44:27 +05302342 /* Due to HW issue, sometimes we see that the sa_idx
2343 * and da_idx are invalid with sa_valid and da_valid
2344 * bits set
2345 *
2346 * in this case we also see that value of
2347 * sa_sw_peer_id is set as 0
2348 *
2349 * Drop the packet if sa_idx and da_idx OOB or
2350 * sa_sw_peerid is 0
2351 */
syed touqeer pasha6997a372019-12-31 15:45:55 +05302352 if (!is_sa_da_idx_valid(soc, rx_tlv_hdr, nbuf,
2353 msdu_metadata)) {
Chaithanya Garrepalliaf34aae2019-02-18 20:44:27 +05302354 qdf_nbuf_free(nbuf);
2355 nbuf = next;
2356 DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
Jinwei Chen5bcc30f2019-05-20 21:17:56 +08002357 dp_peer_unref_del_find_by_id(peer);
Chaithanya Garrepalliaf34aae2019-02-18 20:44:27 +05302358 continue;
2359 }
phadiman4213e9c2018-10-29 12:50:02 +05302360 /* WDS Source Port Learning */
Ankit Kumarf2526d42019-05-02 15:13:27 +05302361 if (qdf_likely(vdev->wds_enabled))
syed touqeer pasha6997a372019-12-31 15:45:55 +05302362 dp_rx_wds_srcport_learn(soc,
2363 rx_tlv_hdr,
2364 peer,
2365 nbuf,
2366 msdu_metadata);
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302367
2368 /* Intrabss-fwd */
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05302369 if (dp_rx_check_ap_bridge(vdev))
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302370 if (dp_rx_intrabss_fwd(soc,
2371 peer,
2372 rx_tlv_hdr,
syed touqeer pasha6997a372019-12-31 15:45:55 +05302373 nbuf,
2374 msdu_metadata)) {
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302375 nbuf = next;
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05302376 dp_peer_unref_del_find_by_id(peer);
Varsha Mishra18281792019-03-06 17:57:23 +05302377 tid_stats->intrabss_cnt++;
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302378 continue; /* Get next desc */
2379 }
2380 }
2381
Mohit Khanna698987c2019-07-28 21:38:05 -07002382 dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);
2383
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302384 DP_RX_LIST_APPEND(deliver_list_head,
Mohit Khanna7ac554b2018-05-24 11:58:13 -07002385 deliver_list_tail,
2386 nbuf);
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302387 DP_STATS_INC_PKT(peer, rx.to_stack, 1,
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05302388 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302389
Varsha Mishra18281792019-03-06 17:57:23 +05302390 tid_stats->delivered_to_stack++;
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302391 nbuf = next;
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05302392 dp_peer_unref_del_find_by_id(peer);
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05302393 }
Dhanashri Atre0da31222017-03-23 12:30:58 -07002394
Chaithanya Garrepalli79b64ac2020-01-07 17:28:49 +05302395 if (qdf_likely(deliver_list_head)) {
2396 if (qdf_likely(peer))
Rakesh Pillaic1aeb352020-01-14 13:06:15 +05302397 dp_rx_deliver_to_stack(soc, vdev, peer,
2398 deliver_list_head,
Chaithanya Garrepalli79b64ac2020-01-07 17:28:49 +05302399 deliver_list_tail);
2400 else {
2401 nbuf = deliver_list_head;
2402 while (nbuf) {
2403 next = nbuf->next;
2404 nbuf->next = NULL;
2405 dp_rx_deliver_to_stack_no_peer(soc, nbuf);
2406 nbuf = next;
2407 }
2408 }
2409 }
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302410
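	/*
	 * Before exiting the poll, re-check the REO ring: if interrupt
	 * quota remains and entries are still pending, loop back to
	 * more_data unless the context asks us to yield (a near-full
	 * ring always loops back). Any FISA/GRO aggregates built up in
	 * this pass are flushed before returning.
	 */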
Mohit Khanna698987c2019-07-28 21:38:05 -07002411 if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
Mohit Khanna80002652019-10-14 23:27:36 -07002412 if (quota) {
2413 num_pending =
2414 dp_rx_srng_get_num_pending(hal_soc,
2415 hal_ring_hdl,
2416 num_entries,
2417 &near_full);
2418 if (num_pending) {
2419 DP_STATS_INC(soc, rx.hp_oos2, 1);
2420
2421 if (!hif_exec_should_yield(scn, intr_id))
2422 goto more_data;
2423
2424 if (qdf_unlikely(near_full)) {
2425 DP_STATS_INC(soc, rx.near_full, 1);
2426 goto more_data;
2427 }
2428 }
Mohit Khannae5a6e942018-11-28 14:22:48 -08002429 }
Mohit Khanna698987c2019-07-28 21:38:05 -07002430
Manjunathappa Prakashb896f0e2020-01-20 18:45:36 -08002431 if (vdev && vdev->osif_fisa_flush)
Manjunathappa Prakash5d73e072020-01-08 16:50:25 -08002432 vdev->osif_fisa_flush(soc, reo_ring_num);
2433
Sravan Kumar Kairamafd707d2019-08-11 18:43:30 +05302434 if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
Mohit Khanna698987c2019-07-28 21:38:05 -07002435 vdev->osif_gro_flush(vdev->osif_vdev,
2436 reo_ring_num);
2437 }
Mohit Khannae5a6e942018-11-28 14:22:48 -08002438 }
Mohit Khanna698987c2019-07-28 21:38:05 -07002439
Mohit Khannae5a6e942018-11-28 14:22:48 -08002440 /* Update histogram statistics by looping through pdev's */
2441 DP_RX_HIST_STATS_PER_PDEV();
2442
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002443 return rx_bufs_used; /* Assume no scale factor for now */
2444}
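/*
 * Illustrative sketch only (not the actual helper in this file): per the
 * HW-issue comment in the fast path above, is_sa_da_idx_valid() is assumed
 * to reject frames whose sa_idx/da_idx fall outside the AST table even
 * though the corresponding "valid" bits are set. The parameter names and
 * the max_ast_idx bound below are placeholders for illustration.
 */
static inline bool dp_rx_sa_da_idx_valid_sketch(uint16_t sa_idx,
						uint16_t da_idx,
						uint16_t max_ast_idx)
{
	/* Indices reported by HW must index inside the AST table */
	if (sa_idx >= max_ast_idx || da_idx >= max_ast_idx)
		return false;

	return true;
}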
2445
Rakesh Pillai534a1432019-10-24 06:44:11 +05302446QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev)
2447{
2448 QDF_STATUS ret;
2449
2450 if (vdev->osif_rx_flush) {
2451 ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id);
2452		if (QDF_IS_STATUS_ERROR(ret)) {
2453 dp_err("Failed to flush rx pkts for vdev %d\n",
2454 vdev->vdev_id);
2455 return ret;
2456 }
2457 }
2458
2459 return QDF_STATUS_SUCCESS;
2460}
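/*
 * Minimal sketch (assumption) of the osif_rx_flush handler invoked by
 * dp_rx_vdev_detach() above: it receives the OS-shim vdev handle and the
 * vdev_id whose queued rx packets must be flushed and returns a QDF_STATUS.
 * The function name and the drained queue are hypothetical placeholders.
 */
static inline QDF_STATUS osif_rx_flush_sketch(void *osif_vdev, uint8_t vdev_id)
{
	if (!osif_vdev)
		return QDF_STATUS_E_INVAL;

	/* e.g. drop any rx packets still queued towards the stack
	 * for this vdev_id before the vdev context goes away
	 */

	return QDF_STATUS_SUCCESS;
}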
2461
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002462/**
Rakesh Pillai534a1432019-10-24 06:44:11 +05302463 * dp_rx_pdev_detach() - detach dp rx
Kai Chen6eca1a62017-01-12 10:17:53 -08002464 * @pdev: core txrx pdev context
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002465 *
2466 * This function will detach DP RX from the main device context
2467 * and free the DP Rx resources.
2468 *
2469 * Return: void
2470 */
2471void
2472dp_rx_pdev_detach(struct dp_pdev *pdev)
2473{
Amit Shukla1edfe5a2019-10-24 14:03:39 -07002474 uint8_t mac_for_pdev = pdev->lmac_id;
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002475 struct dp_soc *soc = pdev->soc;
Kai Chen6eca1a62017-01-12 10:17:53 -08002476 struct rx_desc_pool *rx_desc_pool;
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002477
Amit Shukla1edfe5a2019-10-24 14:03:39 -07002478 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
Kai Chen6eca1a62017-01-12 10:17:53 -08002479
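	/*
	 * On a regular detach both the SW rx descriptors and their
	 * attached nbufs are released. When dp_is_soc_reinit() is true
	 * (assumed to be the SSR/re-init path) only the nbufs are freed
	 * so that the descriptor pool memory can be reused.
	 */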
psimhaeae1b412017-08-25 16:10:13 -07002480 if (rx_desc_pool->pool_size != 0) {
phadiman449a2682019-02-20 14:00:00 +05302481 if (!dp_is_soc_reinit(soc))
Amit Shukla1edfe5a2019-10-24 14:03:39 -07002482 dp_rx_desc_nbuf_and_pool_free(soc, mac_for_pdev,
Varun Reddy Yeturua7c21dc2019-05-16 14:03:46 -07002483 rx_desc_pool);
phadiman449a2682019-02-20 14:00:00 +05302484 else
Varun Reddy Yeturua7c21dc2019-05-16 14:03:46 -07002485 dp_rx_desc_nbuf_free(soc, rx_desc_pool);
psimhaeae1b412017-08-25 16:10:13 -07002486 }
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05302487
2488 return;
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002489}
2490
jiad5679e392019-04-03 17:00:02 +08002491static QDF_STATUS
Varun Reddy Yeturuf31e44d2019-06-14 07:50:11 -07002492dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc, qdf_nbuf_t *nbuf,
Shashikala Prabhu03a9f5b2020-01-28 19:11:30 +05302493 struct dp_pdev *dp_pdev,
2494 struct rx_desc_pool *rx_desc_pool)
Varun Reddy Yeturuf31e44d2019-06-14 07:50:11 -07002495{
2496 qdf_dma_addr_t paddr;
2497 QDF_STATUS ret = QDF_STATUS_E_FAILURE;
2498
Shashikala Prabhu03a9f5b2020-01-28 19:11:30 +05302499 *nbuf = qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size,
2500 RX_BUFFER_RESERVATION,
2501 rx_desc_pool->buf_alignment, FALSE);
Varun Reddy Yeturuf31e44d2019-06-14 07:50:11 -07002502 if (!(*nbuf)) {
2503 dp_err("nbuf alloc failed");
2504 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
2505 return ret;
2506 }
2507
2508 ret = qdf_nbuf_map_single(dp_soc->osdev, *nbuf,
2509 QDF_DMA_FROM_DEVICE);
2510 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
2511 qdf_nbuf_free(*nbuf);
2512 dp_err("nbuf map failed");
2513 DP_STATS_INC(dp_pdev, replenish.map_err, 1);
2514 return ret;
2515 }
2516
2517 paddr = qdf_nbuf_get_frag_paddr(*nbuf, 0);
2518
Shashikala Prabhu03a9f5b2020-01-28 19:11:30 +05302519 ret = check_x86_paddr(dp_soc, nbuf, &paddr, rx_desc_pool);
Varun Reddy Yeturuf31e44d2019-06-14 07:50:11 -07002520 if (ret == QDF_STATUS_E_FAILURE) {
2521 qdf_nbuf_unmap_single(dp_soc->osdev, *nbuf,
2522 QDF_DMA_FROM_DEVICE);
2523 qdf_nbuf_free(*nbuf);
2524 dp_err("nbuf check x86 failed");
2525 DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
2526 return ret;
2527 }
2528
2529 return QDF_STATUS_SUCCESS;
2530}
2531
Kiran Venkatappa115309a2019-07-16 22:15:35 +05302532QDF_STATUS
jiad5679e392019-04-03 17:00:02 +08002533dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
2534 struct dp_srng *dp_rxdma_srng,
2535 struct rx_desc_pool *rx_desc_pool,
Varun Reddy Yeturuf31e44d2019-06-14 07:50:11 -07002536 uint32_t num_req_buffers)
jiad5679e392019-04-03 17:00:02 +08002537{
Amit Shukla1edfe5a2019-10-24 14:03:39 -07002538 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
Akshay Kosigia870c612019-07-08 23:10:30 +05302539 hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng;
jiad5679e392019-04-03 17:00:02 +08002540 union dp_rx_desc_list_elem_t *next;
2541 void *rxdma_ring_entry;
2542 qdf_dma_addr_t paddr;
Varun Reddy Yeturuf31e44d2019-06-14 07:50:11 -07002543 qdf_nbuf_t *rx_nbuf_arr;
2544 uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0;
2545 uint32_t buffer_index, nbuf_ptrs_per_page;
jiad5679e392019-04-03 17:00:02 +08002546 qdf_nbuf_t nbuf;
2547 QDF_STATUS ret;
Varun Reddy Yeturuf31e44d2019-06-14 07:50:11 -07002548 int page_idx, total_pages;
2549 union dp_rx_desc_list_elem_t *desc_list = NULL;
2550 union dp_rx_desc_list_elem_t *tail = NULL;
jiad5679e392019-04-03 17:00:02 +08002551
2552 if (qdf_unlikely(!rxdma_srng)) {
2553 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
2554 return QDF_STATUS_E_FAILURE;
2555 }
2556
Varun Reddy Yeturuf31e44d2019-06-14 07:50:11 -07002557 dp_debug("requested %u RX buffers for driver attach", num_req_buffers);
jiad5679e392019-04-03 17:00:02 +08002558
2559 nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool,
Varun Reddy Yeturuf31e44d2019-06-14 07:50:11 -07002560 num_req_buffers, &desc_list, &tail);
jiad5679e392019-04-03 17:00:02 +08002561 if (!nr_descs) {
Varun Reddy Yeturuf31e44d2019-06-14 07:50:11 -07002562 dp_err("no free rx_descs in freelist");
jiad5679e392019-04-03 17:00:02 +08002563 DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
2564 return QDF_STATUS_E_NOMEM;
2565 }
2566
Varun Reddy Yeturuf31e44d2019-06-14 07:50:11 -07002567 dp_debug("got %u RX descs for driver attach", nr_descs);
jiad5679e392019-04-03 17:00:02 +08002568
Varun Reddy Yeturuf31e44d2019-06-14 07:50:11 -07002569	/*
2570	 * Allocate the nbuf pointer array one page at a time:
2571	 * work out how many nbuf pointers fit in a single page of
2572	 * memory, then walk the total number of descriptors to be
2573	 * allocated page by page, reusing the same one-page array
2574	 * of pointers on every iteration to index into the nbufs
2575	 * of that page.
2576	 */
2577 total_pages = (nr_descs * sizeof(*rx_nbuf_arr)) / PAGE_SIZE;
2578
2579 /*
2580 * Add an extra page to store the remainder if any
2581 */
2582 if ((nr_descs * sizeof(*rx_nbuf_arr)) % PAGE_SIZE)
2583 total_pages++;
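	/*
	 * Worked example (illustrative figures only): with 8-byte nbuf
	 * pointers and a 4 KB PAGE_SIZE, one page holds 512 pointers.
	 * For nr_descs = 1000, (1000 * 8) / 4096 = 1 full page plus a
	 * remainder, so total_pages becomes 2 and the second pass fills
	 * only the remaining 488 pointers.
	 */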
2584 rx_nbuf_arr = qdf_mem_malloc(PAGE_SIZE);
jiad5679e392019-04-03 17:00:02 +08002585 if (!rx_nbuf_arr) {
Varun Reddy Yeturuf31e44d2019-06-14 07:50:11 -07002586 dp_err("failed to allocate nbuf array");
jiad5679e392019-04-03 17:00:02 +08002587 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
Varun Reddy Yeturuf31e44d2019-06-14 07:50:11 -07002588 QDF_BUG(0);
jiad5679e392019-04-03 17:00:02 +08002589 return QDF_STATUS_E_NOMEM;
2590 }
Varun Reddy Yeturuf31e44d2019-06-14 07:50:11 -07002591 nbuf_ptrs_per_page = PAGE_SIZE / sizeof(*rx_nbuf_arr);
jiad5679e392019-04-03 17:00:02 +08002592
Varun Reddy Yeturuf31e44d2019-06-14 07:50:11 -07002593 for (page_idx = 0; page_idx < total_pages; page_idx++) {
2594 qdf_mem_zero(rx_nbuf_arr, PAGE_SIZE);
2595
2596 for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) {
2597			/*
2598			 * The last page of buffer pointers may only be
2599			 * partially used, depending on the number of
2600			 * descriptors. The check below ensures that only the
2601			 * required number of buffers is allocated.
2602			 */
2603 if (nr_nbuf_total >= nr_descs)
2604 break;
2605 ret = dp_pdev_nbuf_alloc_and_map(dp_soc,
2606 &rx_nbuf_arr[nr_nbuf],
Shashikala Prabhu03a9f5b2020-01-28 19:11:30 +05302607 dp_pdev, rx_desc_pool);
Varun Reddy Yeturuf31e44d2019-06-14 07:50:11 -07002608 if (QDF_IS_STATUS_ERROR(ret))
2609 break;
2610
2611 nr_nbuf_total++;
jiad5679e392019-04-03 17:00:02 +08002612 }
2613
Varun Reddy Yeturuf31e44d2019-06-14 07:50:11 -07002614 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
2615
2616 for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) {
2617 rxdma_ring_entry =
2618 hal_srng_src_get_next(dp_soc->hal_soc,
2619 rxdma_srng);
2620 qdf_assert_always(rxdma_ring_entry);
2621
2622 next = desc_list->next;
2623 nbuf = rx_nbuf_arr[buffer_index];
2624 paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
2625
2626 dp_rx_desc_prep(&desc_list->rx_desc, nbuf);
2627 desc_list->rx_desc.in_use = 1;
2628
2629 hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
2630 desc_list->rx_desc.cookie,
2631 rx_desc_pool->owner);
2632
2633 dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, nbuf, true);
2634
2635 desc_list = next;
jiad5679e392019-04-03 17:00:02 +08002636 }
2637
Varun Reddy Yeturuf31e44d2019-06-14 07:50:11 -07002638 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
jiad5679e392019-04-03 17:00:02 +08002639 }
2640
Varun Reddy Yeturuf31e44d2019-06-14 07:50:11 -07002641 dp_info("filled %u RX buffers for driver attach", nr_nbuf_total);
jiad5679e392019-04-03 17:00:02 +08002642 qdf_mem_free(rx_nbuf_arr);
2643
Varun Reddy Yeturuf31e44d2019-06-14 07:50:11 -07002644 if (!nr_nbuf_total) {
2645		dp_err("No nbufs allocated");
2646 QDF_BUG(0);
2647 return QDF_STATUS_E_RESOURCES;
2648 }
Shashikala Prabhu03a9f5b2020-01-28 19:11:30 +05302649
2650 /* No need to count the number of bytes received during replenish.
2651 * Therefore set replenish.pkts.bytes as 0.
2652 */
2653 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0);
Varun Reddy Yeturuf31e44d2019-06-14 07:50:11 -07002654
jiad5679e392019-04-03 17:00:02 +08002655 return QDF_STATUS_SUCCESS;
2656}
2657
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002658/**
2659 * dp_rx_pdev_attach() - attach DP RX
Kai Chen6eca1a62017-01-12 10:17:53 -08002660 * @pdev: core txrx pdev context
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002661 *
2662 * This function will attach a DP RX instance into the main
2663 * device (SOC) context. It will allocate and initialize the
2664 * DP Rx resources.
2665 *
2666 * Return: QDF_STATUS_SUCCESS: success
2667 * QDF_STATUS_E_RESOURCES: Error return
2668 */
2669QDF_STATUS
2670dp_rx_pdev_attach(struct dp_pdev *pdev)
2671{
2672 uint8_t pdev_id = pdev->pdev_id;
2673 struct dp_soc *soc = pdev->soc;
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002674 uint32_t rxdma_entries;
Mainak Sen95502732019-07-25 00:48:59 +05302675 uint32_t rx_sw_desc_weight;
Kai Chen6eca1a62017-01-12 10:17:53 -08002676 struct dp_srng *dp_rxdma_srng;
2677 struct rx_desc_pool *rx_desc_pool;
Sumeet Raoc4fa4df2019-07-05 02:11:19 -07002678 QDF_STATUS ret_val;
Amit Shukla1edfe5a2019-10-24 14:03:39 -07002679 int mac_for_pdev;
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002680
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05302681 if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
Aditya Sathishded018e2018-07-02 16:25:21 +05302682 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2683			  "nss-wifi<4> skip Rx refill %d", pdev_id);
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05302684 return QDF_STATUS_SUCCESS;
2685 }
2686
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002687 pdev = soc->pdev_list[pdev_id];
Amit Shukla1edfe5a2019-10-24 14:03:39 -07002688 mac_for_pdev = pdev->lmac_id;
2689 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
2690
Mohit Khanna70514992018-11-12 18:39:03 -08002691 rxdma_entries = dp_rxdma_srng->num_entries;
2692
chenguo9bece1a2017-12-19 18:49:41 +08002693 soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002694
Amit Shukla1edfe5a2019-10-24 14:03:39 -07002695 rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
Mainak Sen95502732019-07-25 00:48:59 +05302696 rx_sw_desc_weight = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc->wlan_cfg_ctx);
2697
Amit Shukla1edfe5a2019-10-24 14:03:39 -07002698 dp_rx_desc_pool_alloc(soc, mac_for_pdev,
Mainak Sen95502732019-07-25 00:48:59 +05302699 rx_sw_desc_weight * rxdma_entries,
Mohit Khanna70514992018-11-12 18:39:03 -08002700 rx_desc_pool);
Venkata Sharath Chandra Manchala16fcceb2018-01-03 11:27:15 -08002701
2702 rx_desc_pool->owner = DP_WBM2SW_RBM;
Shashikala Prabhu03a9f5b2020-01-28 19:11:30 +05302703 rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE;
2704 rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
2705
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002706 /* For Rx buffers, WBM release ring is SW RING 3,for all pdev's */
Mohit Khanna70514992018-11-12 18:39:03 -08002707
Sumeet Raoc4fa4df2019-07-05 02:11:19 -07002708 ret_val = dp_rx_fst_attach(soc, pdev);
2709 if ((ret_val != QDF_STATUS_SUCCESS) &&
2710 (ret_val != QDF_STATUS_E_NOSUPPORT)) {
2711 QDF_TRACE(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_ERROR,
2712 "RX Flow Search Table attach failed: pdev %d err %d",
2713 pdev_id, ret_val);
2714 return ret_val;
2715 }
2716
Amit Shukla1edfe5a2019-10-24 14:03:39 -07002717 return dp_pdev_rx_buffers_attach(soc, mac_for_pdev, dp_rxdma_srng,
Varun Reddy Yeturuf31e44d2019-06-14 07:50:11 -07002718 rx_desc_pool, rxdma_entries - 1);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002719}
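/*
 * Sizing illustration for dp_rx_pdev_attach() above (example figures only;
 * the real values come from the wlan_cfg context): with rxdma_entries = 4096
 * and rx_sw_desc_weight = 3, the SW descriptor pool is allocated with
 * 3 * 4096 = 12288 entries, while the initial dp_pdev_rx_buffers_attach()
 * call maps at most rxdma_entries - 1 = 4095 buffers into the refill ring.
 */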
jinweic chenc3546322018-02-02 15:03:41 +08002720
2721/*
2722 * dp_rx_nbuf_prepare() - prepare RX nbuf
2723 * @soc: core txrx main context
2724 * @pdev: core txrx pdev context
2725 *
2726 * This function allocates and maps an nbuf for RX DMA use, retrying on
2727 * failure until it succeeds or the retry count reaches the max threshold.
2728 *
2729 * Return: qdf_nbuf_t pointer if succeeded, NULL if failed.
2730 */
2731qdf_nbuf_t
2732dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev)
2733{
2734 uint8_t *buf;
2735 int32_t nbuf_retry_count;
2736 QDF_STATUS ret;
2737 qdf_nbuf_t nbuf = NULL;
2738
2739 for (nbuf_retry_count = 0; nbuf_retry_count <
2740 QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD;
2741 nbuf_retry_count++) {
2742 /* Allocate a new skb */
2743 nbuf = qdf_nbuf_alloc(soc->osdev,
Shashikala Prabhu03a9f5b2020-01-28 19:11:30 +05302744 RX_DATA_BUFFER_SIZE,
jinweic chenc3546322018-02-02 15:03:41 +08002745 RX_BUFFER_RESERVATION,
Shashikala Prabhu03a9f5b2020-01-28 19:11:30 +05302746 RX_DATA_BUFFER_ALIGNMENT,
jinweic chenc3546322018-02-02 15:03:41 +08002747 FALSE);
2748
Jeff Johnsona8edf332019-03-18 09:51:52 -07002749 if (!nbuf) {
jinweic chenc3546322018-02-02 15:03:41 +08002750 DP_STATS_INC(pdev,
2751 replenish.nbuf_alloc_fail, 1);
2752 continue;
2753 }
2754
2755 buf = qdf_nbuf_data(nbuf);
2756
Shashikala Prabhu03a9f5b2020-01-28 19:11:30 +05302757 memset(buf, 0, RX_DATA_BUFFER_SIZE);
jinweic chenc3546322018-02-02 15:03:41 +08002758
2759 ret = qdf_nbuf_map_single(soc->osdev, nbuf,
Ankit Kumar0ae4abc2019-05-02 15:08:42 +05302760 QDF_DMA_FROM_DEVICE);
jinweic chenc3546322018-02-02 15:03:41 +08002761
2762 /* nbuf map failed */
2763 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
2764 qdf_nbuf_free(nbuf);
2765 DP_STATS_INC(pdev, replenish.map_err, 1);
2766 continue;
2767 }
2768 /* qdf_nbuf alloc and map succeeded */
2769 break;
2770 }
2771
2772 /* qdf_nbuf still alloc or map failed */
2773 if (qdf_unlikely(nbuf_retry_count >=
2774 QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD))
2775 return NULL;
2776
2777 return nbuf;
2778}
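/*
 * Minimal usage sketch (assumption, not code from this file): a replenish
 * path could use dp_rx_nbuf_prepare() to obtain an already-mapped nbuf and
 * program its physical address into a ring entry. The function name and the
 * cookie/owner parameters below are illustrative placeholders.
 */
static inline QDF_STATUS
dp_rx_replenish_one_entry_sketch(struct dp_soc *soc, struct dp_pdev *pdev,
				 void *rxdma_ring_entry, uint32_t cookie,
				 uint8_t owner)
{
	qdf_nbuf_t nbuf;
	qdf_dma_addr_t paddr;

	/* Allocate and DMA-map a receive buffer, with bounded retries */
	nbuf = dp_rx_nbuf_prepare(soc, pdev);
	if (!nbuf)
		return QDF_STATUS_E_NOMEM;

	/* Publish the buffer's physical address to the hardware ring entry */
	paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
	hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr, cookie, owner);

	return QDF_STATUS_SUCCESS;
}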