/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_rx_mon.h"
#include "dp_ipa.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif

#ifdef ATH_RX_PRI_SAVE
#define DP_RX_TID_SAVE(_nbuf, _tid) \
	(qdf_nbuf_set_priority(_nbuf, _tid))
#else
#define DP_RX_TID_SAVE(_nbuf, _tid)
#endif

#ifdef CONFIG_MCL
static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	if (vdev->opmode != wlan_op_mode_sta)
		return true;
	else
		return false;
}
#else
static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	return vdev->ap_bridge_enabled;
}
#endif

/*
 * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
 * @ring_desc: opaque pointer to the RX ring descriptor
 * @rx_desc: host rx descriptor
 *
 * Return: void
 */
void dp_rx_dump_info_and_assert(struct dp_soc *soc, void *hal_ring,
				void *ring_desc, struct dp_rx_desc *rx_desc)
{
	void *hal_soc = soc->hal_soc;

	dp_rx_desc_dump(rx_desc);
	hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
	hal_srng_dump_ring(hal_soc, hal_ring);
	qdf_assert_always(0);
}

/*
 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 * Return: return success or failure
 */
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				   struct dp_srng *dp_rxdma_srng,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t num_req_buffers,
				   union dp_rx_desc_list_elem_t **desc_list,
				   union dp_rx_desc_list_elem_t **tail)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(dp_soc, mac_id);
	uint32_t num_entries_avail;
	uint32_t count;
	int sync_hw_ptr = 1;
	qdf_dma_addr_t paddr;
	qdf_nbuf_t rx_netbuf;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	QDF_STATUS ret;

	void *rxdma_srng;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (!rxdma_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "rxdma srng not initialized");
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "requested %d buffers for replenish", num_req_buffers);

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "no of available entries in rxdma ring: %d",
		  num_entries_avail);

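	/*
	 * If no descriptor list was passed in and the ring is more than
	 * 3/4 empty, grow the request to fill all available entries;
	 * otherwise cap the request at the available entries and track
	 * how many descriptors must go back to the free list.
	 */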
	if (!(*desc_list) && (num_entries_avail >
		((dp_rxdma_srng->num_entries * 3) / 4))) {
		num_req_buffers = num_entries_avail;
	} else if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	}

	if (qdf_unlikely(!num_req_buffers)) {
		num_desc_to_free = num_req_buffers;
		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
		goto free_descs;
	}

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "no free rx_descs in freelist");
			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
				     num_req_buffers);
			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
			return QDF_STATUS_E_NOMEM;
		}

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%d rx desc allocated", num_alloc_desc);
		num_req_buffers = num_alloc_desc;
	}


	count = 0;

	while (count < num_req_buffers) {
		rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
					   RX_BUFFER_SIZE,
					   RX_BUFFER_RESERVATION,
					   RX_BUFFER_ALIGNMENT,
					   FALSE);

		if (qdf_unlikely(!rx_netbuf)) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			continue;
		}

		ret = qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
					  QDF_DMA_FROM_DEVICE);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(rx_netbuf);
			DP_STATS_INC(dp_pdev, replenish.map_err, 1);
			continue;
		}

		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);

		/*
		 * check if the physical address of nbuf->data is
		 * less than 0x50000000; if so, free the nbuf and try
		 * allocating a new nbuf. We can try for 100 times.
		 * this is a temp WAR till we fix it properly.
		 */
		ret = check_x86_paddr(dp_soc, &rx_netbuf, &paddr, dp_pdev);
		if (ret == QDF_STATUS_E_FAILURE) {
			DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
			break;
		}

		count++;

		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
							 rxdma_srng);
		qdf_assert_always(rxdma_ring_entry);

		next = (*desc_list)->next;

		dp_rx_desc_prep(&((*desc_list)->rx_desc), rx_netbuf);

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);

		(*desc_list)->rx_desc.in_use = 1;

		dp_verbose_debug("rx_netbuf=%pK, buf=%pK, paddr=0x%llx, cookie=%d",
				 rx_netbuf, qdf_nbuf_data(rx_netbuf),
				 (unsigned long long)paddr,
				 (*desc_list)->rx_desc.cookie);

		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;

		dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, rx_netbuf, true);
	}

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u",
			 num_req_buffers, num_desc_to_free);

	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, num_req_buffers,
			 (RX_BUFFER_SIZE * num_req_buffers));

free_descs:
	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
 *			 pkts to RAW mode simulation to
 *			 decapsulate the pkt.
 *
 * @vdev: vdev on which RAW mode is enabled
 * @nbuf_list: list of RAW pkts to process
 * @peer: peer object from which the pkt was received
 *
 * Return: void
 */
void
dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		  struct dp_peer *peer)
{
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	qdf_nbuf_t nbuf;

	nbuf = nbuf_list;
	while (nbuf) {
		qdf_nbuf_t next = qdf_nbuf_next(nbuf);

		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);

		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
		DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf));
		/*
		 * reset the chfrag_start and chfrag_end bits in nbuf cb
		 * as this is a non-amsdu pkt and RAW mode simulation expects
		 * these bits to be 0 for non-amsdu pkt.
		 */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
		    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
			qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
		}

		nbuf = next;
	}

	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
				 &deliver_list_tail, (struct cdp_peer *)peer);

	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
}


#ifdef DP_LFR
/*
 * In case of LFR, data of a new peer might be sent up
 * even before peer is added.
 */
static inline struct dp_vdev *
dp_get_vdev_from_peer(struct dp_soc *soc,
		      uint16_t peer_id,
		      struct dp_peer *peer,
		      struct hal_rx_mpdu_desc_info mpdu_desc_info)
{
	struct dp_vdev *vdev;
	uint8_t vdev_id;

	if (unlikely(!peer)) {
		if (peer_id != HTT_INVALID_PEER) {
			vdev_id = DP_PEER_METADATA_ID_GET(
					mpdu_desc_info.peer_meta_data);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("PeerID %d not found use vdevID %d"),
				  peer_id, vdev_id);
			vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
								  vdev_id);
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("Invalid PeerID %d"),
				  peer_id);
			return NULL;
		}
	} else {
		vdev = peer->vdev;
	}
	return vdev;
}
#else
static inline struct dp_vdev *
dp_get_vdev_from_peer(struct dp_soc *soc,
		      uint16_t peer_id,
		      struct dp_peer *peer,
		      struct hal_rx_mpdu_desc_info mpdu_desc_info)
{
	if (unlikely(!peer)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_DEBUG,
			  FL("Peer not found for peerID %d"),
			  peer_id);
		return NULL;
	} else {
		return peer->vdev;
	}
}
#endif

#ifndef FEATURE_WDS
static void
dp_rx_da_learn(struct dp_soc *soc,
	       uint8_t *rx_tlv_hdr,
	       struct dp_peer *ta_peer,
	       qdf_nbuf_t nbuf)
{
}
#endif
/*
 * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
 *
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 *
 * Return: bool: true if it is forwarded else false
 */
static bool
dp_rx_intrabss_fwd(struct dp_soc *soc,
		   struct dp_peer *ta_peer,
		   uint8_t *rx_tlv_hdr,
		   qdf_nbuf_t nbuf)
{
	uint16_t da_idx;
	uint16_t len;
	uint8_t is_frag;
	struct dp_peer *da_peer;
	struct dp_ast_entry *ast_entry;
	qdf_nbuf_t nbuf_copy;
	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
	struct cdp_tid_rx_stats *tid_stats =
		&ta_peer->vdev->pdev->stats.tid_stats.tid_rx_stats[tid];

	/* check if the destination peer is available in peer table
	 * and also check if the source peer and destination peer
	 * belong to the same vap and destination peer is not bss peer.
	 */

	if ((qdf_nbuf_is_da_valid(nbuf) && !qdf_nbuf_is_da_mcbc(nbuf))) {
		da_idx = hal_rx_msdu_end_da_idx_get(soc->hal_soc, rx_tlv_hdr);

		ast_entry = soc->ast_table[da_idx];
		if (!ast_entry)
			return false;

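		/*
		 * AST entries of type DA are only refreshed here; the frame
		 * is not intra-BSS forwarded through them.
		 */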
		if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
			ast_entry->is_active = TRUE;
			return false;
		}

		da_peer = ast_entry->peer;

		if (!da_peer)
			return false;
		/* TA peer cannot be same as peer(DA) on which AST is present;
		 * this indicates a change in topology and that AST entries
		 * are yet to be updated.
		 */
		if (da_peer == ta_peer)
			return false;

		if (da_peer->vdev == ta_peer->vdev && !da_peer->bss_peer) {
			len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			is_frag = qdf_nbuf_is_frag(nbuf);
			memset(nbuf->cb, 0x0, sizeof(nbuf->cb));

			/* linearize the nbuf just before we send to
			 * dp_tx_send()
			 */
			if (qdf_unlikely(is_frag)) {
				if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
					return false;

				nbuf = qdf_nbuf_unshare(nbuf);
				if (!nbuf) {
					DP_STATS_INC_PKT(ta_peer,
							 rx.intra_bss.fail,
							 1,
							 len);
					/* return true even though the pkt is
					 * not forwarded. Basically skb_unshare
					 * failed and we want to continue with
					 * next nbuf.
					 */
					tid_stats->fail_cnt[INTRABSS_DROP]++;
					return true;
				}
			}

			if (!dp_tx_send(ta_peer->vdev, nbuf)) {
				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
						 len);
				return true;
			} else {
				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
						 len);
				tid_stats->fail_cnt[INTRABSS_DROP]++;
				return false;
			}
		}
	}
	/* if it is a broadcast pkt (eg: ARP) and it is not its own
	 * source, then clone the pkt and send the cloned pkt for
	 * intra BSS forwarding and original pkt up the network stack
	 * Note: how do we handle multicast pkts. do we forward
	 * all multicast pkts as is or let a higher layer module
	 * like igmpsnoop decide whether to forward or not with
	 * Mcast enhancement.
	 */
	else if (qdf_unlikely((qdf_nbuf_is_da_mcbc(nbuf) &&
			       !ta_peer->bss_peer))) {
		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (!nbuf_copy)
			return false;

		len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
		memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));

		if (dp_tx_send(ta_peer->vdev, nbuf_copy)) {
			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
			tid_stats->fail_cnt[INTRABSS_DROP]++;
			qdf_nbuf_free(nbuf_copy);
		} else {
			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
			tid_stats->intrabss_cnt++;
		}
	}
	/* return false as we have to still send the original pkt
	 * up the stack
	 */
	return false;
}

#ifdef MESH_MODE_SUPPORT

/**
 * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: pointer to peer
 *
 * This function allocates memory for mesh receive stats and fills the
 * required stats. Stores the memory address in skb cb.
 *
 * Return: void
 */

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	struct mesh_recv_hdr_s *rx_info = NULL;
	uint32_t pkt_type;
	uint32_t nss;
	uint32_t rate_mcs;
	uint32_t bw;

	/* fill recv mesh stats */
	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));

	/* upper layers are responsible to free this memory */

	if (!rx_info) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Memory allocation failed for mesh rx stats");
		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
		return;
	}

	rx_info->rs_flags = MESH_RXHDR_VER1;
	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;

	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
		rx_info->rs_flags |= MESH_RX_LAST_MSDU;

	if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) {
		rx_info->rs_flags |= MESH_RX_DECRYPTED;
		rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr);
		if (vdev->osif_get_key)
			vdev->osif_get_key(vdev->osif_vdev,
					   &rx_info->rs_decryptkey[0],
					   &peer->mac_addr.raw[0],
					   rx_info->rs_keyix);
	}

	rx_info->rs_rssi = hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
	rx_info->rs_channel = hal_rx_msdu_start_get_freq(rx_tlv_hdr);
	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
	rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(vdev->pdev->soc->hal_soc, rx_tlv_hdr);
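	/* pack rate_mcs, nss, pkt_type and bw into a single ratephy word */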
	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
				(bw << 24);

	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
		  FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x"),
		  rx_info->rs_flags,
		  rx_info->rs_rssi,
		  rx_info->rs_channel,
		  rx_info->rs_ratephy1,
		  rx_info->rs_keyix);

}

/**
 * dp_rx_filter_mesh_packets() - Filters out unwanted mesh packets
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * This checks if the received packet matches any filter-out
 * category and drops the packet if it does.
 *
 * Return: status (0 indicates drop, 1 indicates no drop)
 */

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	union dp_align_mac_addr mac_addr;

	if (qdf_unlikely(vdev->mesh_rx_filter)) {
		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
			if (hal_rx_mpdu_get_fr_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
			if (hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
			if (!hal_rx_mpdu_get_fr_ds(rx_tlv_hdr)
			    && !hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
			if (hal_rx_mpdu_get_addr1(rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 QDF_MAC_ADDR_SIZE))
				return QDF_STATUS_SUCCESS;
		}

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
			if (hal_rx_mpdu_get_addr2(rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 QDF_MAC_ADDR_SIZE))
				return QDF_STATUS_SUCCESS;
		}
	}

	return QDF_STATUS_E_FAILURE;
}

#else
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
}

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	return QDF_STATUS_E_FAILURE;
}

#endif

#ifdef FEATURE_NAC_RSSI
/**
 * dp_rx_nac_filter(): Function to perform filtering of non-associated
 * clients
 * @pdev: DP pdev handle
 * @rx_pkt_hdr: Rx packet Header
 *
 * return: dp_vdev*
 */
static
struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
				 uint8_t *rx_pkt_hdr)
{
	struct ieee80211_frame *wh;
	struct dp_neighbour_peer *peer = NULL;

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
		return NULL;

	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
		      neighbour_peer_list_elem) {
		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) {
			QDF_TRACE(
				QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				FL("NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x"),
				peer->neighbour_peers_macaddr.raw[0],
				peer->neighbour_peers_macaddr.raw[1],
				peer->neighbour_peers_macaddr.raw[2],
				peer->neighbour_peers_macaddr.raw[3],
				peer->neighbour_peers_macaddr.raw[4],
				peer->neighbour_peers_macaddr.raw[5]);

			qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

			return pdev->monitor_vdev;
		}
	}
	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

	return NULL;
}

/**
 * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 *
 * return: integer type
 */
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
{
	struct dp_invalid_peer_msg msg;
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ieee80211_frame *wh;
	uint8_t i;
	qdf_nbuf_t curr_nbuf, next_nbuf;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);

	if (!HAL_IS_DECAP_FORMAT_RAW(rx_tlv_hdr)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "Drop decapped frames");
		goto free;
	}

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "NAWDS valid only for data frames");
		goto free;
	}

	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid nbuf length");
		goto free;
	}

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "PDEV not found");
			continue;
		}

		if (pdev->filter_neighbour_peers) {
			/* Next Hop scenario not yet handled */
			vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
			if (vdev) {
				dp_rx_mon_deliver(soc, i,
						  pdev->invalid_peer_head_msdu,
						  pdev->invalid_peer_tail_msdu);

				pdev->invalid_peer_head_msdu = NULL;
				pdev->invalid_peer_tail_msdu = NULL;

				return 0;
			}
		}

		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
					QDF_MAC_ADDR_SIZE) == 0) {
				goto out;
			}
		}
	}

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto free;
	}

out:
	msg.wh = wh;
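	/*
	 * strip the RX TLV header so the control path receives the bare
	 * 802.11 frame in msg.nbuf
	 */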
	qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN);
	msg.nbuf = mpdu;
	msg.vdev_id = vdev->vdev_id;
	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer)
		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(pdev->ctrl_pdev,
							   &msg);

free:
	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	return 0;
}

/**
 * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 * @mpdu_done: if an mpdu is completed
 *
 * Return: void
 */
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done)
{
	/* Only trigger the process when mpdu is completed */
	if (mpdu_done)
		dp_rx_process_invalid_peer(soc, mpdu);
}
#else
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
{
	qdf_nbuf_t curr_nbuf, next_nbuf;
	struct dp_pdev *pdev;
	uint8_t i;
	struct dp_vdev *vdev = NULL;
	struct ieee80211_frame *wh;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
				   "only for data frames");
		goto free;
	}

	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid nbuf length");
		goto free;
	}

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  "PDEV not found");
			continue;
		}

		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
					QDF_MAC_ADDR_SIZE) == 0) {
				qdf_spin_unlock_bh(&pdev->vdev_list_lock);
				goto out;
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto free;
	}

out:
	if (soc->cdp_soc.ol_ops->rx_invalid_peer)
		soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
free:
	/* reset the head and tail pointers */
	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  "PDEV not found");
			continue;
		}

		pdev->invalid_peer_head_msdu = NULL;
		pdev->invalid_peer_tail_msdu = NULL;
	}

	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	return 0;
}

void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done)
{
	/* Process the nbuf */
	dp_rx_process_invalid_peer(soc, mpdu);
}
#endif

#ifdef RECEIVE_OFFLOAD
/**
 * dp_rx_print_offload_info() - Print offload info from RX TLV
 * @rx_tlv: RX TLV for which offload information is to be printed
 *
 * Return: None
 */
static void dp_rx_print_offload_info(uint8_t *rx_tlv)
{
	dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------");
	dp_verbose_debug("lro_eligible 0x%x", HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv));
	dp_verbose_debug("pure_ack 0x%x", HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv));
	dp_verbose_debug("chksum 0x%x", HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv));
	dp_verbose_debug("TCP seq num 0x%x", HAL_RX_TLV_GET_TCP_SEQ(rx_tlv));
	dp_verbose_debug("TCP ack num 0x%x", HAL_RX_TLV_GET_TCP_ACK(rx_tlv));
	dp_verbose_debug("TCP window 0x%x", HAL_RX_TLV_GET_TCP_WIN(rx_tlv));
	dp_verbose_debug("TCP protocol 0x%x", HAL_RX_TLV_GET_TCP_PROTO(rx_tlv));
	dp_verbose_debug("TCP offset 0x%x", HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv));
	dp_verbose_debug("toeplitz 0x%x", HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv));
	dp_verbose_debug("---------------------------------------------------------");
}

/**
 * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
 * @soc: DP SOC handle
 * @rx_tlv: RX TLV received for the msdu
 * @msdu: msdu for which GRO info needs to be filled
 *
 * Return: None
 */
static
void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
			 qdf_nbuf_t msdu)
{
	if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
		return;

	/* Filling up RX offload info only for TCP packets */
	if (!HAL_RX_TLV_GET_TCP_PROTO(rx_tlv))
		return;

	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
		 HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv);
	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
		 HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv);
	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
		 HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv);
	QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
		 HAL_RX_TLV_GET_TCP_SEQ(rx_tlv);
	QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
		 HAL_RX_TLV_GET_TCP_ACK(rx_tlv);
	QDF_NBUF_CB_RX_TCP_WIN(msdu) =
		 HAL_RX_TLV_GET_TCP_WIN(rx_tlv);
	QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
		 HAL_RX_TLV_GET_TCP_PROTO(rx_tlv);
	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
		 HAL_RX_TLV_GET_IPV6(rx_tlv);
	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
		 HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv);
	QDF_NBUF_CB_RX_FLOW_ID(msdu) =
		 HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv);

	dp_rx_print_offload_info(rx_tlv);
}
#else
static void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
				qdf_nbuf_t msdu)
{
}
#endif /* RECEIVE_OFFLOAD */

/**
 * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
 *
 * @nbuf: pointer to msdu.
 * @mpdu_len: mpdu length
 *
 * Return: returns true if nbuf is last msdu of mpdu else returns false.
 */
static inline bool dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len)
{
	bool last_nbuf;

	if (*mpdu_len > (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN)) {
		qdf_nbuf_set_pktlen(nbuf, RX_BUFFER_SIZE);
		last_nbuf = false;
	} else {
		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN));
		last_nbuf = true;
	}

	*mpdu_len -= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN);

	return last_nbuf;
}

/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		       multiple nbufs.
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	qdf_nbuf_t parent, next, frag_list;
	uint16_t frag_list_len = 0;
	uint16_t mpdu_len;
	bool last_nbuf;

	mpdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	/*
	 * this is a case where the complete msdu fits in one single nbuf.
	 * in this case HW sets both start and end bit and we only need to
	 * reset these bits for RAW mode simulator to decap the pkt
	 */
	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
	    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
		qdf_nbuf_set_pktlen(nbuf, mpdu_len + RX_PKT_TLVS_LEN);
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		return nbuf;
	}

	/*
	 * This is a case where we have multiple msdus (A-MSDU) spread across
	 * multiple nbufs. here we create a fraglist out of these nbufs.
	 *
	 * the moment we encounter a nbuf with continuation bit set we
	 * know for sure we have an MSDU which is spread across multiple
	 * nbufs. We loop through and reap nbufs till we reach last nbuf.
	 */
	parent = nbuf;
	frag_list = nbuf->next;
	nbuf = nbuf->next;

	/*
	 * set the start bit in the first nbuf we encounter with continuation
	 * bit set. This has the proper mpdu length set as it is the first
	 * msdu of the mpdu. this becomes the parent nbuf and the subsequent
	 * nbufs will form the frag_list of the parent nbuf.
	 */
	qdf_nbuf_set_rx_chfrag_start(parent, 1);
	last_nbuf = dp_rx_adjust_nbuf_len(parent, &mpdu_len);

	/*
	 * this is where we set the length of the fragments which are
	 * associated to the parent nbuf. We iterate through the frag_list
	 * till we hit the last_nbuf of the list.
	 */
	do {
		last_nbuf = dp_rx_adjust_nbuf_len(nbuf, &mpdu_len);
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		frag_list_len += qdf_nbuf_len(nbuf);

		if (last_nbuf) {
			next = nbuf->next;
			nbuf->next = NULL;
			break;
		}

		nbuf = nbuf->next;
	} while (!last_nbuf);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
	qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
	parent->next = next;

	qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
	return parent;
}

/**
 * dp_rx_compute_delay() - Compute and fill in all timestamps
 *			   to pass in correct fields
 *
 * @vdev: vdev handle
 * @nbuf: rx packet for which delay is computed
 *
 * Return: none
 */
void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get());
	uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
	uint32_t interframe_delay =
		(uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp);

	dp_update_delay_stats(vdev->pdev, to_stack, tid,
			      CDP_DELAY_STATS_REAP_STACK);
	/*
	 * Update interframe delay stats calculated at deliver_data_ol point.
	 * Value of vdev->prev_rx_deliver_tstamp will be 0 for 1st frame, so
	 * interframe delay will not be calculated correctly for 1st frame.
	 * On the other side, this will help in avoiding extra per packet check
	 * of vdev->prev_rx_deliver_tstamp.
	 */
	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
			      CDP_DELAY_STATS_RX_INTERFRAME);
	vdev->prev_rx_deliver_tstamp = current_ts;
}

/**
 * dp_rx_drop_nbuf_list() - drop an nbuf list
 * @pdev: dp pdev reference
 * @buf_list: buffer list to be dropped
 *
 * Return: int (number of bufs dropped)
 */
static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev,
				       qdf_nbuf_t buf_list)
{
	struct cdp_tid_rx_stats *stats = NULL;
	uint8_t tid = 0;
	int num_dropped = 0;
	qdf_nbuf_t buf, next_buf;

	buf = buf_list;
	while (buf) {
		next_buf = qdf_nbuf_queue_next(buf);
		tid = qdf_nbuf_get_tid_val(buf);
		stats = &pdev->stats.tid_stats.tid_rx_stats[tid];
		stats->fail_cnt[INVALID_PEER_VDEV]++;
		stats->delivered_to_stack--;
		qdf_nbuf_free(buf);
		buf = next_buf;
		num_dropped++;
	}

	return num_dropped;
}

#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: flag to drop frames or forward to net stack
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
	struct dp_peer_cached_bufq *bufqi;
	struct dp_rx_cached_buf *cache_buf = NULL;
	ol_txrx_rx_fp data_rx = NULL;
	int num_buff_elem;
	QDF_STATUS status;

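	/* allow only one context at a time to flush the cached queue */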
	if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
		qdf_atomic_dec(&peer->flush_in_progress);
		return;
	}

	qdf_spin_lock_bh(&peer->peer_info_lock);
	if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx)
		data_rx = peer->vdev->osif_rx;
	else
		drop = true;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	bufqi = &peer->bufq_info;

	qdf_spin_lock_bh(&bufqi->bufq_lock);
	qdf_list_remove_front(&bufqi->cached_bufq,
			      (qdf_list_node_t **)&cache_buf);
	while (cache_buf) {
		num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(
								cache_buf->buf);
		bufqi->entries -= num_buff_elem;
		qdf_spin_unlock_bh(&bufqi->bufq_lock);
		if (drop) {
			bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
							      cache_buf->buf);
		} else {
			/* Flush the cached frames to OSIF DEV */
			status = data_rx(peer->vdev->osif_vdev, cache_buf->buf);
			if (status != QDF_STATUS_SUCCESS)
				bufqi->dropped = dp_rx_drop_nbuf_list(
							peer->vdev->pdev,
							cache_buf->buf);
		}
		qdf_mem_free(cache_buf);
		cache_buf = NULL;
		qdf_spin_lock_bh(&bufqi->bufq_lock);
		qdf_list_remove_front(&bufqi->cached_bufq,
				      (qdf_list_node_t **)&cache_buf);
	}
	qdf_spin_unlock_bh(&bufqi->bufq_lock);
	qdf_atomic_dec(&peer->flush_in_progress);
}


/**
 * dp_rx_enqueue_rx() - cache rx frames
 * @peer: peer
 * @rx_buf_list: cache buffer list
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
{
	struct dp_rx_cached_buf *cache_buf;
	struct dp_peer_cached_bufq *bufqi = &peer->bufq_info;
	int num_buff_elem;

	QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_TXRX, "bufq->curr %d bufq->drops %d",
			   bufqi->entries, bufqi->dropped);

	if (!peer->valid) {
		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
						      rx_buf_list);
		return QDF_STATUS_E_INVAL;
	}

	qdf_spin_lock_bh(&bufqi->bufq_lock);
	if (bufqi->entries >= bufqi->thresh) {
		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
						      rx_buf_list);
		qdf_spin_unlock_bh(&bufqi->bufq_lock);
		return QDF_STATUS_E_RESOURCES;
	}
	qdf_spin_unlock_bh(&bufqi->bufq_lock);

	num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list);

	cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf));
	if (!cache_buf) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Failed to allocate buf to cache rx frames");
		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
						      rx_buf_list);
		return QDF_STATUS_E_NOMEM;
	}

	cache_buf->buf = rx_buf_list;

	qdf_spin_lock_bh(&bufqi->bufq_lock);
	qdf_list_insert_back(&bufqi->cached_bufq,
			     &cache_buf->node);
	bufqi->entries += num_buff_elem;
	qdf_spin_unlock_bh(&bufqi->bufq_lock);

	return QDF_STATUS_SUCCESS;
}

static inline
bool dp_rx_is_peer_cache_bufq_supported(void)
{
	return true;
}
#else
static inline
bool dp_rx_is_peer_cache_bufq_supported(void)
{
	return false;
}

static inline QDF_STATUS
dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
{
	return QDF_STATUS_SUCCESS;
}
#endif
static inline void dp_rx_deliver_to_stack(struct dp_vdev *vdev,
					  struct dp_peer *peer,
					  qdf_nbuf_t nbuf_head,
					  qdf_nbuf_t nbuf_tail)
{
	/*
	 * highly unlikely to have a vdev without a registered rx
	 * callback function. if so let us free the nbuf_list.
	 */
	if (qdf_unlikely(!vdev->osif_rx)) {
		if (dp_rx_is_peer_cache_bufq_supported())
			dp_rx_enqueue_rx(peer, nbuf_head);
		else
			dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);

		return;
	}

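	/*
	 * for raw or native-wifi decap, run the frame list through the
	 * RAW mode simulation hook before handing it to the OS interface
	 */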
	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
	    (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
		vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
					 &nbuf_tail, (struct cdp_peer *)peer);
	}

	vdev->osif_rx(vdev->osif_vdev, nbuf_head);
}

/**
 * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
 * @pdev: dp pdev handle
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * The ipsumed field of the skb is set based on whether HW validated the
 * IP/TCP/UDP checksum.
 *
 * Return: void
 */
static inline void dp_rx_cksum_offload(struct dp_pdev *pdev,
				       qdf_nbuf_t nbuf,
				       uint8_t *rx_tlv_hdr)
{
	qdf_nbuf_rx_cksum_t cksum = {0};
	bool ip_csum_err = hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr);
	bool tcp_udp_csum_er = hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr);

	if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) {
		cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
		qdf_nbuf_set_rx_cksum(nbuf, &cksum);
	} else {
		DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
		DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er);
	}
}

1317/**
1318 * dp_rx_msdu_stats_update() - update per msdu stats.
1319 * @soc: core txrx main context
1320 * @nbuf: pointer to the first msdu of an amsdu.
1321 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
1322 * @peer: pointer to the peer object.
1323 * @ring_id: reo dest ring number on which pkt is reaped.
Varsha Mishra9d42f122019-05-03 12:47:40 +05301324 * @tid_stats: per tid rx stats.
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301325 *
1326 * update all the per msdu stats for that nbuf.
1327 * Return: void
1328 */
1329static void dp_rx_msdu_stats_update(struct dp_soc *soc,
1330 qdf_nbuf_t nbuf,
1331 uint8_t *rx_tlv_hdr,
1332 struct dp_peer *peer,
Varsha Mishra9d42f122019-05-03 12:47:40 +05301333 uint8_t ring_id,
1334 struct cdp_tid_rx_stats *tid_stats)
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301335{
1336 bool is_ampdu, is_not_amsdu;
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301337 uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
1338 struct dp_vdev *vdev = peer->vdev;
Srinivas Girigowda03bd4b62019-02-25 10:57:08 -08001339 qdf_ether_header_t *eh;
Ankit Kumarf90c9442019-05-02 18:55:20 +05301340 uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301341
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301342 is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
1343 qdf_nbuf_is_rx_chfrag_end(nbuf);
1344
Aditya Sathish6add3db2018-04-10 19:43:34 +05301345 DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len);
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301346 DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu);
1347 DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu);
1348
Varsha Mishra9d42f122019-05-03 12:47:40 +05301349 tid_stats->msdu_cnt++;
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05301350 if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) &&
Aditya Sathish6add3db2018-04-10 19:43:34 +05301351 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
Srinivas Girigowda03bd4b62019-02-25 10:57:08 -08001352 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
Amir Patel3217ade2018-09-07 12:21:35 +05301353 DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len);
Varsha Mishra9d42f122019-05-03 12:47:40 +05301354 tid_stats->mcast_msdu_cnt++;
Srinivas Girigowda79502972019-02-11 12:25:12 -08001355 if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
Aditya Sathish6add3db2018-04-10 19:43:34 +05301356 DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len);
Varsha Mishra9d42f122019-05-03 12:47:40 +05301357 tid_stats->bcast_msdu_cnt++;
Aditya Sathish6add3db2018-04-10 19:43:34 +05301358 }
1359 }
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301360
1361 /*
1362 * We can return from here when detailed rx status processing is
1363 * disabled, since similar stats are updated at the per-ppdu level.
1364 */
1365 if (!soc->process_rx_status)
1366 return;
1367
1368 is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(rx_tlv_hdr);
1369 DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu);
1370 DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu));
1371
1372 sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr);
1373 mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
Ankit Kumare2227752019-04-30 00:16:04 +05301374 tid = qdf_nbuf_get_tid_val(nbuf);
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301375 bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
Balamurugan Mahalingamd0159642018-07-11 15:02:29 +05301376 reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
1377 rx_tlv_hdr);
1378 nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301379 pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
1380
Venkata Sharath Chandra Manchalafaa0d8b2018-04-09 14:39:43 -07001381 DP_STATS_INC(peer, rx.bw[bw], 1);
Jinwei Chene6611272019-04-22 18:38:51 +08001382 /*
1383 * Increment the nss counter at index [nss - 1] only when nss > 0
1384 * and pkt_type is 11N/AC/AX.
1385 */
1386 if (nss > 0 && (pkt_type == DOT11_N ||
1387 pkt_type == DOT11_AC ||
1388 pkt_type == DOT11_AX))
1389 DP_STATS_INC(peer, rx.nss[nss - 1], 1);
1390
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301391 DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
1392 DP_STATS_INCC(peer, rx.err.mic_err, 1,
1393 hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr));
1394 DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
1395 hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr));
1396
1397 DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301398 DP_STATS_INC(peer, rx.reception_type[reception_type], 1);
1399
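 /*
  * MCS values outside the valid range for a given pkt_type are
  * accumulated in the last bucket (MAX_MCS - 1) of that pkt_type.
  */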
Venkata Sharath Chandra Manchalad18887e2018-10-02 18:18:52 -07001400 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301401 ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
1402 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1403 ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A)));
Venkata Sharath Chandra Manchalad18887e2018-10-02 18:18:52 -07001404 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301405 ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
1406 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1407 ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B)));
Venkata Sharath Chandra Manchalad18887e2018-10-02 18:18:52 -07001408 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301409 ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
1410 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1411 ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N)));
Venkata Sharath Chandra Manchalad18887e2018-10-02 18:18:52 -07001412 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301413 ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
1414 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1415 ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
Venkata Sharath Chandra Manchalad18887e2018-10-02 18:18:52 -07001416 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301417 ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX)));
1418 DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
phadiman6c3432b2019-01-09 12:45:28 +05301419 ((mcs < MAX_MCS) && (pkt_type == DOT11_AX)));
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301420
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301421 if ((soc->process_rx_status) &&
1422 hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) {
Amir Patel756d05e2018-10-10 12:35:30 +05301423#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
phadiman49757302018-12-18 16:13:59 +05301424 if (!vdev->pdev)
1425 return;
1426
Amir Patel756d05e2018-10-10 12:35:30 +05301427 dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
Tallapragada Kalyan9e4b36f2019-05-02 13:22:34 +05301428 &peer->stats, peer->peer_ids[0],
Amir Patel756d05e2018-10-10 12:35:30 +05301429 UPDATE_PEER_STATS,
1430 vdev->pdev->pdev_id);
1431#endif
1432
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301433 }
1434}
1435
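/**
 * is_sa_da_idx_valid() - check if the sa_idx and da_idx fetched from the
 *			  RX TLVs are within the maximum AST index configured
 *			  for this soc
 * @soc: core txrx main context
 * @rx_tlv_hdr: start of the RX TLV headers
 * @nbuf: msdu whose sa/da valid flags are set in nbuf->cb
 *
 * Return: false if a valid index is out of bounds, true otherwise
 */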
Chaithanya Garrepalliaf34aae2019-02-18 20:44:27 +05301436static inline bool is_sa_da_idx_valid(struct dp_soc *soc,
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05301437 void *rx_tlv_hdr,
1438 qdf_nbuf_t nbuf)
Chaithanya Garrepalliaf34aae2019-02-18 20:44:27 +05301439{
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05301440 if ((qdf_nbuf_is_sa_valid(nbuf) &&
Chaithanya Garrepalliaf34aae2019-02-18 20:44:27 +05301441 (hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr) >
1442 wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) ||
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05301443 (qdf_nbuf_is_da_valid(nbuf) &&
Chaithanya Garrepalliaf34aae2019-02-18 20:44:27 +05301444 (hal_rx_msdu_end_da_idx_get(soc->hal_soc,
1445 rx_tlv_hdr) >
1446 wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))))
1447 return false;
1448
1449 return true;
1450}
1451
Amir Patelcb990262019-05-28 15:12:48 +05301452#ifndef WDS_VENDOR_EXTENSION
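/*
 * Without the WDS vendor extension, the Rx policy check is a no-op and
 * every frame is accepted.
 */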
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05301453int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
1454 struct dp_vdev *vdev,
1455 struct dp_peer *peer)
Tallapragada Kalyan2a5fc622017-12-08 21:07:43 +05301456{
1457 return 1;
1458}
1459#endif
1460
Manjunathappa Prakash8f708622019-02-20 17:02:59 -08001461#ifdef RX_DESC_DEBUG_CHECK
1462/**
1463 * dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr
1464 * corruption
1465 *
1466 * @ring_desc: REO ring descriptor
1467 * @rx_desc: Rx descriptor
1468 *
1469 * Return: NONE
1470 */
1471static inline void dp_rx_desc_nbuf_sanity_check(void *ring_desc,
1472 struct dp_rx_desc *rx_desc)
1473{
1474 struct hal_buf_info hbi;
1475
1476 hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
1477 /* Sanity check for possible buffer paddr corruption */
1478 qdf_assert_always((&hbi)->paddr ==
1479 qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0));
1480}
1481#else
1482static inline void dp_rx_desc_nbuf_sanity_check(void *ring_desc,
1483 struct dp_rx_desc *rx_desc)
1484{
1485}
1486#endif
1487
Mohit Khannae5a6e942018-11-28 14:22:48 -08001488#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
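/*
 * dp_rx_reap_loop_pkt_limit_hit() - check whether the configured per-loop
 * reap limit has been reached, so that dp_rx_process() can break out of
 * the reap loop and avoid hogging the rx softirq context.
 */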
1489static inline
1490bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
1491{
1492 bool limit_hit = false;
1493 struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
1494
1495 limit_hit = (num_reaped >= cfg->rx_reap_loop_pkt_limit);
1497
1498 if (limit_hit)
1499 DP_STATS_INC(soc, rx.reap_loop_pkt_limit_hit, 1);
1500
1501 return limit_hit;
1502}
1503
Mohit Khannae5a6e942018-11-28 14:22:48 -08001504static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
1505{
1506 return soc->wlan_cfg_ctx->rx_enable_eol_data_check;
1507}
1508
1509#else
1510static inline
1511bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
1512{
1513 return false;
1514}
1515
Mohit Khannae5a6e942018-11-28 14:22:48 -08001516static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
1517{
1518 return false;
1519}
1520
1521#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
Dhanashri Atre0da31222017-03-23 12:30:58 -07001522/**
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001523 * dp_rx_process() - Brain of the Rx processing functionality
1524 * Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
1525 * @int_ctx: pointer to the DP interrupt context (provides the soc handle)
1526 * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
Mohit Khanna7ac554b2018-05-24 11:58:13 -07001527 * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001528 * @quota: No. of units (packets) that can be serviced in one shot.
1529 *
1530 * This function implements the core of Rx functionality. This is
1531 * expected to handle only non-error frames.
1532 *
1533 * Return: uint32_t: No. of elements processed
1534 */
Mohit Khanna7ac554b2018-05-24 11:58:13 -07001535uint32_t dp_rx_process(struct dp_intr *int_ctx, void *hal_ring,
1536 uint8_t reo_ring_num, uint32_t quota)
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001537{
1538 void *hal_soc;
1539 void *ring_desc;
Dhanashri Atre0da31222017-03-23 12:30:58 -07001540 struct dp_rx_desc *rx_desc = NULL;
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301541 qdf_nbuf_t nbuf, next;
Mohit Khannae5a6e942018-11-28 14:22:48 -08001542 union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT];
1543 union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT];
Chaithanya Garrepalli8aaf9b62018-05-17 15:53:21 +05301544 uint32_t rx_bufs_used = 0, rx_buf_cookie;
1545 uint32_t l2_hdr_offset = 0;
Chaithanya Garrepallia173a182018-05-18 21:33:10 +05301546 uint16_t msdu_len = 0;
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001547 uint16_t peer_id;
Mohit Khannae5a6e942018-11-28 14:22:48 -08001548 struct dp_peer *peer;
1549 struct dp_vdev *vdev;
Chaithanya Garrepallia173a182018-05-18 21:33:10 +05301550 uint32_t pkt_len = 0;
Mohit Khannae5a6e942018-11-28 14:22:48 -08001551 struct hal_rx_mpdu_desc_info mpdu_desc_info;
1552 struct hal_rx_msdu_desc_info msdu_desc_info;
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001553 enum hal_reo_error_status error;
Tallapragada Kalyanbb3bbcd2017-07-14 12:17:04 +05301554 uint32_t peer_mdata;
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05301555 uint8_t *rx_tlv_hdr;
Mohit Khannae5a6e942018-11-28 14:22:48 -08001556 uint32_t rx_bufs_reaped[MAX_PDEV_CNT];
Pamidipati, Vijay57a435a2017-10-17 11:03:39 +05301557 uint8_t mac_id = 0;
Kai Chen6eca1a62017-01-12 10:17:53 -08001558 struct dp_pdev *pdev;
Varsha Mishra18281792019-03-06 17:57:23 +05301559 struct dp_pdev *rx_pdev;
Kai Chen6eca1a62017-01-12 10:17:53 -08001560 struct dp_srng *dp_rxdma_srng;
1561 struct rx_desc_pool *rx_desc_pool;
Dhanashri Atre0da31222017-03-23 12:30:58 -07001562 struct dp_soc *soc = int_ctx->soc;
Pamidipati, Vijay57a435a2017-10-17 11:03:39 +05301563 uint8_t ring_id = 0;
1564 uint8_t core_id = 0;
Varsha Mishra18281792019-03-06 17:57:23 +05301565 struct cdp_tid_rx_stats *tid_stats;
Mohit Khannae5a6e942018-11-28 14:22:48 -08001566 qdf_nbuf_t nbuf_head;
1567 qdf_nbuf_t nbuf_tail;
1568 qdf_nbuf_t deliver_list_head;
1569 qdf_nbuf_t deliver_list_tail;
1570 uint32_t num_rx_bufs_reaped = 0;
1571 uint32_t intr_id;
1572 struct hif_opaque_softc *scn;
Mohit Khannae5a6e942018-11-28 14:22:48 -08001573 int32_t tid = 0;
Chaithanya Garrepallid3d99db2018-12-19 21:54:03 +05301574 bool is_prev_msdu_last = true;
1575 uint32_t num_entries_avail = 0;
1576
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08001577 DP_HIST_INIT();
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001578
Mohit Khannae5a6e942018-11-28 14:22:48 -08001579 qdf_assert_always(soc && hal_ring);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001580 hal_soc = soc->hal_soc;
Mohit Khannae5a6e942018-11-28 14:22:48 -08001581 qdf_assert_always(hal_soc);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001582
Yue Ma245b47b2017-02-21 16:35:31 -08001583 hif_pm_runtime_mark_last_busy(soc->osdev->dev);
Mohit Khannae5a6e942018-11-28 14:22:48 -08001584 scn = soc->hif_handle;
1585 intr_id = int_ctx->dp_intr_id;
1586
1587more_data:
1588 /* reset local variables here to be re-used in the function */
1589 nbuf_head = NULL;
1590 nbuf_tail = NULL;
1591 deliver_list_head = NULL;
1592 deliver_list_tail = NULL;
1593 peer = NULL;
1594 vdev = NULL;
1595 num_rx_bufs_reaped = 0;
1596
1597 qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
1598 qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
1599 qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
1600 qdf_mem_zero(head, sizeof(head));
1601 qdf_mem_zero(tail, sizeof(tail));
Yue Ma245b47b2017-02-21 16:35:31 -08001602
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001603 if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
1604
1605 /*
1606 * Need API to convert from hal_ring pointer to
1607 * Ring Type / Ring Id combo
1608 */
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08001609 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001610 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Jeff Johnson3f217e22017-09-18 10:13:35 -07001611 FL("HAL RING Access Failed -- %pK"), hal_ring);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001612 hal_srng_access_end(hal_soc, hal_ring);
1613 goto done;
1614 }
1615
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05301616 /*
1617 * start reaping the buffers from reo ring and queue
1618 * them in per vdev queue.
1619 * Process the received pkts in a different per vdev loop.
1620 */
Chaithanya Garrepallid3d99db2018-12-19 21:54:03 +05301621 while (qdf_likely(quota &&
1622 (ring_desc = hal_srng_dst_peek(hal_soc, hal_ring)))) {
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001623
1624 error = HAL_RX_ERROR_STATUS_GET(ring_desc);
Ishank Jain57c42a12017-04-12 10:42:22 +05301625 ring_id = hal_srng_ring_id_get(hal_ring);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001626
1627 if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
1628 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Jeff Johnson3f217e22017-09-18 10:13:35 -07001629 FL("HAL RING 0x%pK:error %d"), hal_ring, error);
Ishank Jain57c42a12017-04-12 10:42:22 +05301630 DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001631 /* Don't know how to deal with this -- assert */
1632 qdf_assert(0);
1633 }
1634
1635 rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
1636
Kai Chen6eca1a62017-01-12 10:17:53 -08001637 rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001638 qdf_assert(rx_desc);
Tallapragada Kalyaneff377a2019-01-09 19:13:19 +05301639
Manjunathappa Prakash8f708622019-02-20 17:02:59 -08001640 dp_rx_desc_nbuf_sanity_check(ring_desc, rx_desc);
Tallapragada Kalyaneff377a2019-01-09 19:13:19 +05301641 /*
1642 * This is an unlikely scenario where the host is reaping
1643 * a descriptor which it already reaped just a while ago
1644 * but is yet to replenish it back to HW.
1645 * In this case host will dump the last 128 descriptors
1646 * including the software descriptor rx_desc and assert.
1647 */
1648 if (qdf_unlikely(!rx_desc->in_use)) {
1649 DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
Mohit Khanna16cd1b22019-01-25 10:46:00 -08001650 dp_err("Reaping rx_desc not in use!");
1651 dp_rx_dump_info_and_assert(soc, hal_ring,
1652 ring_desc, rx_desc);
1653 }
1654
1655 if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
1656 dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
1657 DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
Tallapragada Kalyaneff377a2019-01-09 19:13:19 +05301658 dp_rx_dump_info_and_assert(soc, hal_ring,
1659 ring_desc, rx_desc);
1660 }
1661
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001662 /* TODO */
1663 /*
1664 * Need a separate API for unmapping based on
1665 * phyiscal address
1666 */
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05301667 qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
Ankit Kumar0ae4abc2019-05-02 15:08:42 +05301668 QDF_DMA_FROM_DEVICE);
Mohit Khanna16cd1b22019-01-25 10:46:00 -08001669 rx_desc->unmapped = 1;
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001670
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07001671 core_id = smp_processor_id();
1672 DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1);
1673
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05301674 /* Get MPDU DESC info */
1675 hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001676
Chaithanya Garrepallid3d99db2018-12-19 21:54:03 +05301677 /* Get MSDU DESC info */
1678 hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info);
1679
1680 if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
1681 HAL_MPDU_F_RAW_AMPDU)) {
1682 /* previous msdu had the end bit set, so the current one
1683 * starts a new MPDU
1684 */
1685 if (is_prev_msdu_last) {
1686 is_prev_msdu_last = false;
1687 /* Get number of entries available in HW ring */
1688 num_entries_avail =
1689 hal_srng_dst_num_valid(hal_soc, hal_ring, 1);
1690
1691 /* For new MPDU check if we can read complete
1692 * MPDU by comparing the number of buffers
1693 * available and number of buffers needed to
1694 * reap this MPDU
1695 */
1696 if (((msdu_desc_info.msdu_len /
1697 (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN) + 1)) >
1698 num_entries_avail)
1699 break;
1700 } else {
1701 if (msdu_desc_info.msdu_flags &
1702 HAL_MSDU_F_LAST_MSDU_IN_MPDU)
1703 is_prev_msdu_last = true;
1704 }
1705 qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);
1706 }
1707
1708 /* Pop out the descriptor*/
1709 hal_srng_dst_get_next(hal_soc, hal_ring);
1710
1711 rx_bufs_reaped[rx_desc->pool_id]++;
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05301712 peer_mdata = mpdu_desc_info.peer_meta_data;
1713 QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
1714 DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
Tallapragada Kalyanbb3bbcd2017-07-14 12:17:04 +05301715
Kalyan Tallapragada277f45e2017-01-30 14:25:27 +05301716 /*
1717 * Save the first, last and continuation msdu flags in
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05301718 * nbuf->cb, along with mcbc, is_da_valid, is_sa_valid and the
1719 * msdu length. This ensures the info required for per-pkt
1720 * processing is always in the same cache line, which helps
1721 * improve throughput for smaller pkt sizes.
Kalyan Tallapragada277f45e2017-01-30 14:25:27 +05301723 */
1724 if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
Vivekde90e592017-11-30 17:24:18 +05301725 qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);
Kalyan Tallapragada277f45e2017-01-30 14:25:27 +05301726
1727 if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
Vivekde90e592017-11-30 17:24:18 +05301728 qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);
Kalyan Tallapragada277f45e2017-01-30 14:25:27 +05301729
1730 if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
Vivekde90e592017-11-30 17:24:18 +05301731 qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);
Kalyan Tallapragada277f45e2017-01-30 14:25:27 +05301732
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05301733 if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
1734 qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);
1735
1736 if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
1737 qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);
1738
1739 if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
1740 qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);
1741
Ankit Kumare2227752019-04-30 00:16:04 +05301742 qdf_nbuf_set_tid_val(rx_desc->nbuf,
1743 HAL_RX_REO_QUEUE_NUMBER_GET(ring_desc));
1744
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05301745 QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;
1746
Mohit Khanna7ac554b2018-05-24 11:58:13 -07001747 QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05301748
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301749 DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301750
Tallapragada Kalyan52b45a12017-05-12 17:36:16 +05301751 /*
1752 * If the continuation bit is set, the MSDU is spread across
1753 * multiple buffers; do not decrement quota until all buffers
1754 * of that MSDU have been reaped.
1755 */
Vivekde90e592017-11-30 17:24:18 +05301756 if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
Tallapragada Kalyan52b45a12017-05-12 17:36:16 +05301757 quota -= 1;
1758
Tallapragada Kalyanaae8c412017-02-13 12:00:17 +05301759 dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
1760 &tail[rx_desc->pool_id],
1761 rx_desc);
Mohit Khannae5a6e942018-11-28 14:22:48 -08001762
1763 num_rx_bufs_reaped++;
1764 if (dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped))
1765 break;
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001766 }
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05301767done:
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001768 hal_srng_access_end(hal_soc, hal_ring);
1769
Mohit Khanna7ac554b2018-05-24 11:58:13 -07001770 if (nbuf_tail)
1771 QDF_NBUF_CB_RX_FLUSH_IND(nbuf_tail) = 1;
1772
Tallapragada Kalyanaae8c412017-02-13 12:00:17 +05301773 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
1774 /*
1775 * continue with next mac_id if no pkts were reaped
1776 * from that pool
1777 */
1778 if (!rx_bufs_reaped[mac_id])
1779 continue;
1780
Kai Chen6eca1a62017-01-12 10:17:53 -08001781 pdev = soc->pdev_list[mac_id];
1782 dp_rxdma_srng = &pdev->rx_refill_buf_ring;
1783 rx_desc_pool = &soc->rx_desc_buf[mac_id];
1784
1785 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
1786 rx_desc_pool, rx_bufs_reaped[mac_id],
Venkata Sharath Chandra Manchala16fcceb2018-01-03 11:27:15 -08001787 &head[mac_id], &tail[mac_id]);
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05301788 }
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001789
Mohit Khannae5a6e942018-11-28 14:22:48 -08001790 dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]);
psimha03f9a792017-10-17 10:42:58 -07001791 /* Peer can be NULL in case of LFR */
Jeff Johnsona8edf332019-03-18 09:51:52 -07001792 if (qdf_likely(peer))
psimha03f9a792017-10-17 10:42:58 -07001793 vdev = NULL;
1794
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301795 /*
1796 * BIG loop where each nbuf is dequeued from the global queue,
1797 * processed and queued back on a per-vdev basis. These nbufs
1798 * are sent to the stack when we run out of nbufs or when a
1799 * newly dequeued nbuf belongs to a different vdev than the
1800 * previous one.
1801 */
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301802 nbuf = nbuf_head;
1803 while (nbuf) {
1804 next = nbuf->next;
1805 rx_tlv_hdr = qdf_nbuf_data(nbuf);
Ankit Kumare2227752019-04-30 00:16:04 +05301806 /* Get TID from struct cb->tid_val, save to tid */
Varsha Mishra18281792019-03-06 17:57:23 +05301807 if (qdf_nbuf_is_rx_chfrag_start(nbuf))
Ankit Kumare2227752019-04-30 00:16:04 +05301808 tid = qdf_nbuf_get_tid_val(nbuf);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001809
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301810 /*
1811 * Check if DMA completed -- msdu_done is the last bit
1812 * to be written
1813 */
Varsha Mishra18281792019-03-06 17:57:23 +05301814 rx_pdev = soc->pdev_list[rx_desc->pool_id];
Varsha Mishra09a4c0e2019-05-22 12:09:24 +05301815 DP_RX_TID_SAVE(nbuf, tid);
1816 if (qdf_unlikely(rx_pdev->delay_stats_flag))
1817 qdf_nbuf_set_timestamp(nbuf);
1818
Varsha Mishra18281792019-03-06 17:57:23 +05301819 tid_stats = &rx_pdev->stats.tid_stats.tid_rx_stats[tid];
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301820 if (qdf_unlikely(!hal_rx_attn_msdu_done_get(rx_tlv_hdr))) {
Mohit Khanna16cd1b22019-01-25 10:46:00 -08001821 dp_err("MSDU DONE failure");
1822 DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
Balamurugan Mahalingamd0159642018-07-11 15:02:29 +05301823 hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
Varsha Mishra18281792019-03-06 17:57:23 +05301824 QDF_TRACE_LEVEL_INFO);
1825 tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
Mohit Khanna16cd1b22019-01-25 10:46:00 -08001826 qdf_nbuf_free(nbuf);
Tallapragada Kalyan12a68402019-04-10 14:10:09 +05301827 qdf_assert(0);
Mohit Khanna16cd1b22019-01-25 10:46:00 -08001828 nbuf = next;
1829 continue;
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301830 }
1831
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05301832 peer_mdata = QDF_NBUF_CB_RX_PEER_ID(nbuf);
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301833 peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
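 /*
  * dp_peer_find_by_id() takes a reference on the peer; every path
  * below that skips this nbuf with a valid peer must release it via
  * dp_peer_unref_del_find_by_id().
  */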
1834 peer = dp_peer_find_by_id(soc, peer_id);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001835
Mohit Khanna163c3172018-06-27 01:34:02 -07001836 if (peer) {
1837 QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
1838 qdf_dp_trace_set_track(nbuf, QDF_RX);
1839 QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
1840 QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
1841 QDF_NBUF_RX_PKT_DATA_TRACK;
1842 }
1843
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301844 rx_bufs_used++;
1845
psimha03f9a792017-10-17 10:42:58 -07001846 if (deliver_list_head && peer && (vdev != peer->vdev)) {
Chaithanya Garrepalli72dc9132018-02-21 18:37:34 +05301847 dp_rx_deliver_to_stack(vdev, peer, deliver_list_head,
1848 deliver_list_tail);
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301849 deliver_list_head = NULL;
1850 deliver_list_tail = NULL;
1851 }
psimha03f9a792017-10-17 10:42:58 -07001852
Jeff Johnsona8edf332019-03-18 09:51:52 -07001853 if (qdf_likely(peer)) {
psimha03f9a792017-10-17 10:42:58 -07001854 vdev = peer->vdev;
Chaithanya Garrepalli974da262018-02-22 20:32:19 +05301855 } else {
Mohit Khanna7ac554b2018-05-24 11:58:13 -07001856 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05301857 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
Varsha Mishra18281792019-03-06 17:57:23 +05301858 tid_stats->fail_cnt[INVALID_PEER_VDEV]++;
Chaithanya Garrepalli974da262018-02-22 20:32:19 +05301859 qdf_nbuf_free(nbuf);
1860 nbuf = next;
1861 continue;
1862 }
1863
Jeff Johnsona8edf332019-03-18 09:51:52 -07001864 if (qdf_unlikely(!vdev)) {
Varsha Mishra18281792019-03-06 17:57:23 +05301865 tid_stats->fail_cnt[INVALID_PEER_VDEV]++;
Chaithanya Garrepalli974da262018-02-22 20:32:19 +05301866 qdf_nbuf_free(nbuf);
1867 nbuf = next;
1868 DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05301869 dp_peer_unref_del_find_by_id(peer);
Chaithanya Garrepalli974da262018-02-22 20:32:19 +05301870 continue;
1871 }
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05301872
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301873 DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301874 /*
Chaithanya Garrepallia173a182018-05-18 21:33:10 +05301875 * First IF condition:
1876 * 802.11 Fragmented pkts are reinjected to REO
1877 * HW block as SG pkts and for these pkts we only
1878 * need to pull the RX TLVS header length.
1879 * Second IF condition:
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301880 * The below condition happens when an MSDU is spread
1881 * across multiple buffers. This can happen in two cases
1882 * 1. The nbuf size is smaller than the received msdu.
1883 * ex: we have set the nbuf size to 2048 during
1884 * nbuf_alloc. but we received an msdu which is
1885 * 2304 bytes in size then this msdu is spread
1886 * across 2 nbufs.
1887 *
1888 * 2. AMSDUs when RAW mode is enabled.
1889 * ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
1890 * across 1st nbuf and 2nd nbuf and last MSDU is
1891 * spread across 2nd nbuf and 3rd nbuf.
1892 *
1893 * for these scenarios let us create a skb frag_list and
1894 * append these buffers till the last MSDU of the AMSDU
Chaithanya Garrepallia173a182018-05-18 21:33:10 +05301895 * Third condition:
1896 * This is the most likely case, we receive 802.3 pkts
1897 * decapsulated by HW, here we need to set the pkt length.
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301898 */
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05301899 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
1900 bool is_mcbc, is_sa_vld, is_da_vld;
1901
1902 is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr);
1903 is_sa_vld = hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr);
1904 is_da_vld = hal_rx_msdu_end_da_is_valid_get(rx_tlv_hdr);
1905
1906 qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
1907 qdf_nbuf_set_da_valid(nbuf, is_da_vld);
1908 qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);
1909
Chaithanya Garrepallia173a182018-05-18 21:33:10 +05301910 qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
Chaithanya Garrepallid3d99db2018-12-19 21:54:03 +05301911 } else if (qdf_nbuf_is_raw_frame(nbuf)) {
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05301912 msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
Chaithanya Garrepallia173a182018-05-18 21:33:10 +05301913 nbuf = dp_rx_sg_create(nbuf, rx_tlv_hdr);
Tallapragada Kalyan52b45a12017-05-12 17:36:16 +05301914
Chaithanya Garrepalli72dc9132018-02-21 18:37:34 +05301915 DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05301916 DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len);
Tallapragada Kalyan52b45a12017-05-12 17:36:16 +05301917
Chaithanya Garrepalli72dc9132018-02-21 18:37:34 +05301918 next = nbuf->next;
Chaithanya Garrepallia173a182018-05-18 21:33:10 +05301919 } else {
1920 l2_hdr_offset =
1921 hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
1922
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05301923 msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
Chaithanya Garrepallia173a182018-05-18 21:33:10 +05301924 pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
1925
1926 qdf_nbuf_set_pktlen(nbuf, pkt_len);
1927 qdf_nbuf_pull_head(nbuf,
1928 RX_PKT_TLVS_LEN +
1929 l2_hdr_offset);
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05301930 }
1931
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05301932 if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
Tallapragada Kalyan2a5fc622017-12-08 21:07:43 +05301933 QDF_TRACE(QDF_MODULE_ID_DP,
1934 QDF_TRACE_LEVEL_ERROR,
1935 FL("Policy Check Drop pkt"));
Varsha Mishra18281792019-03-06 17:57:23 +05301936 tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
Tallapragada Kalyan2a5fc622017-12-08 21:07:43 +05301937 /* Drop & free packet */
1938 qdf_nbuf_free(nbuf);
1939 /* Statistics */
1940 nbuf = next;
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05301941 dp_peer_unref_del_find_by_id(peer);
Tallapragada Kalyan2a5fc622017-12-08 21:07:43 +05301942 continue;
1943 }
1944
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05301945 if (qdf_unlikely(peer && (peer->nawds_enabled) &&
1946 (qdf_nbuf_is_da_mcbc(nbuf)) &&
1947 (hal_rx_get_mpdu_mac_ad4_valid(rx_tlv_hdr) ==
1948 false))) {
Varsha Mishra18281792019-03-06 17:57:23 +05301949 tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
Ruchi, Agrawal27550482018-02-20 19:43:41 +05301950 DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05301951 qdf_nbuf_free(nbuf);
1952 nbuf = next;
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05301953 dp_peer_unref_del_find_by_id(peer);
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05301954 continue;
1955 }
1956
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05301957 if (soc->process_rx_status)
1958 dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301959
Karunakar Dasineni142f9ba2019-03-19 23:04:59 -07001960 /* Update the protocol tag in SKB based on CCE metadata */
Karunakar Dasinenid8c7ad22019-04-18 18:15:02 -07001961 dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
1962 reo_ring_num, false, true);
Karunakar Dasineni142f9ba2019-03-19 23:04:59 -07001963
Varsha Mishra9d42f122019-05-03 12:47:40 +05301964 dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer,
1965 ring_id, tid_stats);
Aditya Sathish6add3db2018-04-10 19:43:34 +05301966
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301967 if (qdf_unlikely(vdev->mesh_vdev)) {
Varsha Mishra18281792019-03-06 17:57:23 +05301968 if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301969 == QDF_STATUS_SUCCESS) {
1970 QDF_TRACE(QDF_MODULE_ID_DP,
Varsha Mishra18281792019-03-06 17:57:23 +05301971 QDF_TRACE_LEVEL_INFO_MED,
1972 FL("mesh pkt filtered"));
1973 tid_stats->fail_cnt[MESH_FILTER_DROP]++;
1974 DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
1975 1);
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301976
1977 qdf_nbuf_free(nbuf);
1978 nbuf = next;
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05301979 dp_peer_unref_del_find_by_id(peer);
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301980 continue;
1981 }
1982 dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
1983 }
1984
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301985 if (qdf_likely(vdev->rx_decap_type ==
Sravan Kumar Kairamd7d1d672018-09-04 14:56:33 +05301986 htt_cmn_pkt_type_ethernet) &&
1987 qdf_likely(!vdev->mesh_vdev)) {
phadiman4213e9c2018-10-29 12:50:02 +05301988 /* WDS Destination Address Learning */
Nandha Kishore Easwaranf9c44ce2019-01-18 15:31:18 +05301989 dp_rx_da_learn(soc, rx_tlv_hdr, peer, nbuf);
phadiman4213e9c2018-10-29 12:50:02 +05301990
Chaithanya Garrepalliaf34aae2019-02-18 20:44:27 +05301991 /* Due to HW issue, sometimes we see that the sa_idx
1992 * and da_idx are invalid with sa_valid and da_valid
1993 * bits set
1994 *
1995 * in this case we also see that value of
1996 * sa_sw_peer_id is set as 0
1997 *
1998 * Drop the packet if sa_idx and da_idx OOB or
1999 * sa_sw_peerid is 0
2000 */
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05302001 if (!is_sa_da_idx_valid(soc, rx_tlv_hdr, nbuf)) {
Chaithanya Garrepalliaf34aae2019-02-18 20:44:27 +05302002 qdf_nbuf_free(nbuf);
2003 nbuf = next;
2004 DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
Jinwei Chen5bcc30f2019-05-20 21:17:56 +08002005 dp_peer_unref_del_find_by_id(peer);
Chaithanya Garrepalliaf34aae2019-02-18 20:44:27 +05302006 continue;
2007 }
phadiman4213e9c2018-10-29 12:50:02 +05302008 /* WDS Source Port Learning */
Ankit Kumarf2526d42019-05-02 15:13:27 +05302009 if (qdf_likely(vdev->wds_enabled))
Sravan Kumar Kairamd7d1d672018-09-04 14:56:33 +05302010 dp_rx_wds_srcport_learn(soc, rx_tlv_hdr,
2011 peer, nbuf);
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302012
2013 /* Intrabss-fwd */
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05302014 if (dp_rx_check_ap_bridge(vdev))
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302015 if (dp_rx_intrabss_fwd(soc,
2016 peer,
2017 rx_tlv_hdr,
2018 nbuf)) {
2019 nbuf = next;
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05302020 dp_peer_unref_del_find_by_id(peer);
Varsha Mishra18281792019-03-06 17:57:23 +05302021 tid_stats->intrabss_cnt++;
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302022 continue; /* Get next desc */
2023 }
2024 }
2025
Mohit Khanna16816ae2018-10-30 14:12:03 -07002026 dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf);
Mohit Khanna7ac554b2018-05-24 11:58:13 -07002027 qdf_nbuf_cb_update_peer_local_id(nbuf, peer->local_id);
nobelj9d8154d2018-10-09 07:23:25 -07002028
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302029 DP_RX_LIST_APPEND(deliver_list_head,
Mohit Khanna7ac554b2018-05-24 11:58:13 -07002030 deliver_list_tail,
2031 nbuf);
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302032 DP_STATS_INC_PKT(peer, rx.to_stack, 1,
Tallapragada Kalyan7147b3c2019-03-27 18:40:27 +05302033 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302034
Varsha Mishra18281792019-03-06 17:57:23 +05302035 tid_stats->delivered_to_stack++;
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302036 nbuf = next;
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05302037 dp_peer_unref_del_find_by_id(peer);
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05302038 }
Dhanashri Atre0da31222017-03-23 12:30:58 -07002039
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302040 if (deliver_list_head)
Chaithanya Garrepalli72dc9132018-02-21 18:37:34 +05302041 dp_rx_deliver_to_stack(vdev, peer, deliver_list_head,
Mohit Khanna7ac554b2018-05-24 11:58:13 -07002042 deliver_list_tail);
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302043
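 /*
  * When end-of-loop data checking is enabled, loop back to reap more
  * if quota remains and the ring still holds entries, unless the
  * execution context is asked to yield.
  */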
Mohit Khannae5a6e942018-11-28 14:22:48 -08002044 if (dp_rx_enable_eol_data_check(soc)) {
2045 if (quota &&
2046 hal_srng_dst_peek_sync_locked(soc, hal_ring)) {
2047 DP_STATS_INC(soc, rx.hp_oos2, 1);
2048 if (!hif_exec_should_yield(scn, intr_id))
2049 goto more_data;
2050 }
2051 }
2052 /* Update histogram statistics by looping through pdev's */
2053 DP_RX_HIST_STATS_PER_PDEV();
2054
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002055 return rx_bufs_used; /* Assume no scale factor for now */
2056}
2057
2058/**
2059 * dp_rx_pdev_detach() - detach dp rx
Kai Chen6eca1a62017-01-12 10:17:53 -08002060 * @pdev: core txrx pdev context
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002061 *
2062 * This function will detach DP RX from the main device context and
2063 * free DP Rx resources.
2064 *
2065 * Return: void
2066 */
2067void
2068dp_rx_pdev_detach(struct dp_pdev *pdev)
2069{
2070 uint8_t pdev_id = pdev->pdev_id;
2071 struct dp_soc *soc = pdev->soc;
Kai Chen6eca1a62017-01-12 10:17:53 -08002072 struct rx_desc_pool *rx_desc_pool;
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002073
Kai Chen6eca1a62017-01-12 10:17:53 -08002074 rx_desc_pool = &soc->rx_desc_buf[pdev_id];
2075
psimhaeae1b412017-08-25 16:10:13 -07002076 if (rx_desc_pool->pool_size != 0) {
phadiman449a2682019-02-20 14:00:00 +05302077 if (!dp_is_soc_reinit(soc))
Varun Reddy Yeturua7c21dc2019-05-16 14:03:46 -07002078 dp_rx_desc_nbuf_and_pool_free(soc, pdev_id,
2079 rx_desc_pool);
phadiman449a2682019-02-20 14:00:00 +05302080 else
Varun Reddy Yeturua7c21dc2019-05-16 14:03:46 -07002081 dp_rx_desc_nbuf_free(soc, rx_desc_pool);
psimhaeae1b412017-08-25 16:10:13 -07002082 }
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05302083
2084 return;
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002085}
2086
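/**
 * dp_pdev_rx_buffers_attach() - pre-allocate, map and queue Rx buffers to
 *				 the RXDMA refill ring during driver attach
 * @dp_soc: core txrx main context
 * @mac_id: mac id owning the refill ring
 * @dp_rxdma_srng: rxdma refill ring
 * @rx_desc_pool: Rx descriptor pool for this mac
 * @num_req_buffers: number of Rx buffers requested
 * @desc_list: list of free Rx descriptors obtained from the pool
 * @tail: tail of the free Rx descriptor list
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF error code otherwise
 */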
jiad5679e392019-04-03 17:00:02 +08002087static QDF_STATUS
2088dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
2089 struct dp_srng *dp_rxdma_srng,
2090 struct rx_desc_pool *rx_desc_pool,
2091 uint32_t num_req_buffers,
2092 union dp_rx_desc_list_elem_t **desc_list,
2093 union dp_rx_desc_list_elem_t **tail)
2094{
2095 struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(dp_soc, mac_id);
2096 void *rxdma_srng = dp_rxdma_srng->hal_srng;
2097 union dp_rx_desc_list_elem_t *next;
2098 void *rxdma_ring_entry;
2099 qdf_dma_addr_t paddr;
2100 void **rx_nbuf_arr;
2101 uint32_t nr_descs;
2102 uint32_t nr_nbuf;
2103 qdf_nbuf_t nbuf;
2104 QDF_STATUS ret;
2105 int i;
2106
2107 if (qdf_unlikely(!rxdma_srng)) {
2108 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
2109 return QDF_STATUS_E_FAILURE;
2110 }
2111
2112 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2113 "requested %u RX buffers for driver attach", num_req_buffers);
2114
2115 nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool,
2116 num_req_buffers, desc_list, tail);
2117 if (!nr_descs) {
2118 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2119 "no free rx_descs in freelist");
2120 DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
2121 return QDF_STATUS_E_NOMEM;
2122 }
2123
2124 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2125 "got %u RX descs for driver attach", nr_descs);
2126
2127 rx_nbuf_arr = qdf_mem_malloc(nr_descs * sizeof(*rx_nbuf_arr));
2128 if (!rx_nbuf_arr) {
2129 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2130 "failed to allocate nbuf array");
2131 DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
2132 return QDF_STATUS_E_NOMEM;
2133 }
2134
2135 for (nr_nbuf = 0; nr_nbuf < nr_descs; nr_nbuf++) {
2136 nbuf = qdf_nbuf_alloc(dp_soc->osdev, RX_BUFFER_SIZE,
2137 RX_BUFFER_RESERVATION,
2138 RX_BUFFER_ALIGNMENT,
2139 FALSE);
2140 if (!nbuf) {
2141 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2142 "nbuf alloc failed");
2143 DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
2144 break;
2145 }
2146
2147 ret = qdf_nbuf_map_single(dp_soc->osdev, nbuf,
Ankit Kumar0ae4abc2019-05-02 15:08:42 +05302148 QDF_DMA_FROM_DEVICE);
jiad5679e392019-04-03 17:00:02 +08002149 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
2150 qdf_nbuf_free(nbuf);
2151 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2152 "nbuf map failed");
2153 DP_STATS_INC(dp_pdev, replenish.map_err, 1);
2154 break;
2155 }
2156
2157 paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
2158
2159 ret = check_x86_paddr(dp_soc, &nbuf, &paddr, dp_pdev);
2160 if (ret == QDF_STATUS_E_FAILURE) {
2161 qdf_nbuf_unmap_single(dp_soc->osdev, nbuf,
Ankit Kumar0ae4abc2019-05-02 15:08:42 +05302162 QDF_DMA_FROM_DEVICE);
jiad5679e392019-04-03 17:00:02 +08002163 qdf_nbuf_free(nbuf);
2164 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2165 "nbuf check x86 failed");
2166 DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
2167 break;
2168 }
2169
2170 rx_nbuf_arr[nr_nbuf] = (void *)nbuf;
2171 }
2172
2173 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2174 "allocated %u nbuf for driver attach", nr_nbuf);
2175
2176 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
2177
2178 for (i = 0; i < nr_nbuf; i++) {
2179 rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
2180 rxdma_srng);
2181 qdf_assert_always(rxdma_ring_entry);
2182
2183 next = (*desc_list)->next;
2184 nbuf = rx_nbuf_arr[i];
2185 paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
2186
2187 dp_rx_desc_prep(&((*desc_list)->rx_desc), nbuf);
2188 (*desc_list)->rx_desc.in_use = 1;
2189
2190 hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
2191 (*desc_list)->rx_desc.cookie,
2192 rx_desc_pool->owner);
2193
2194 dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, nbuf, true);
2195
2196 *desc_list = next;
2197 }
2198
2199 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
2200
2201 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2202 "filled %u RX buffers for driver attach", nr_nbuf);
2203 DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, RX_BUFFER_SIZE *
2204 nr_nbuf);
2205
2206 qdf_mem_free(rx_nbuf_arr);
2207
2208 return QDF_STATUS_SUCCESS;
2209}
2210
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002211/**
2212 * dp_rx_pdev_attach() - attach DP RX
Kai Chen6eca1a62017-01-12 10:17:53 -08002213 * @pdev: core txrx pdev context
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002214 *
2215 * This function will attach a DP RX instance into the main
2216 * device (SOC) context. Will allocate dp rx resource and
2217 * initialize resources.
2218 *
2219 * Return: QDF_STATUS_SUCCESS: success
2220 * QDF_STATUS_E_RESOURCES: Error return
2221 */
2222QDF_STATUS
2223dp_rx_pdev_attach(struct dp_pdev *pdev)
2224{
2225 uint8_t pdev_id = pdev->pdev_id;
2226 struct dp_soc *soc = pdev->soc;
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002227 uint32_t rxdma_entries;
2228 union dp_rx_desc_list_elem_t *desc_list = NULL;
2229 union dp_rx_desc_list_elem_t *tail = NULL;
Kai Chen6eca1a62017-01-12 10:17:53 -08002230 struct dp_srng *dp_rxdma_srng;
2231 struct rx_desc_pool *rx_desc_pool;
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002232
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05302233 if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
Aditya Sathishded018e2018-07-02 16:25:21 +05302234 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2235 "nss-wifi<4> skip Rx refil %d", pdev_id);
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05302236 return QDF_STATUS_SUCCESS;
2237 }
2238
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002239 pdev = soc->pdev_list[pdev_id];
Mohit Khanna70514992018-11-12 18:39:03 -08002240 dp_rxdma_srng = &pdev->rx_refill_buf_ring;
2241 rxdma_entries = dp_rxdma_srng->num_entries;
2242
chenguo9bece1a2017-12-19 18:49:41 +08002243 soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002244
Kai Chen6eca1a62017-01-12 10:17:53 -08002245 rx_desc_pool = &soc->rx_desc_buf[pdev_id];
Mohit Khanna70514992018-11-12 18:39:03 -08002246 dp_rx_desc_pool_alloc(soc, pdev_id,
2247 DP_RX_DESC_ALLOC_MULTIPLIER * rxdma_entries,
2248 rx_desc_pool);
Venkata Sharath Chandra Manchala16fcceb2018-01-03 11:27:15 -08002249
2250 rx_desc_pool->owner = DP_WBM2SW_RBM;
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002251 /* For Rx buffers, WBM release ring is SW RING 3,for all pdev's */
Mohit Khanna70514992018-11-12 18:39:03 -08002252
jiad5679e392019-04-03 17:00:02 +08002253 return dp_pdev_rx_buffers_attach(soc, pdev_id, dp_rxdma_srng,
2254 rx_desc_pool, rxdma_entries - 1,
2255 &desc_list, &tail);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07002256}
jinweic chenc3546322018-02-02 15:03:41 +08002257
2258/*
2259 * dp_rx_nbuf_prepare() - prepare RX nbuf
2260 * @soc: core txrx main context
2261 * @pdev: core txrx pdev context
2262 *
2263 * This function allocates and maps an nbuf for RX DMA usage, retrying on
2264 * failure until the retry count reaches the max threshold or it succeeds.
2265 *
2266 * Return: qdf_nbuf_t pointer if succeeded, NULL if failed.
2267 */
2268qdf_nbuf_t
2269dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev)
2270{
2271 uint8_t *buf;
2272 int32_t nbuf_retry_count;
2273 QDF_STATUS ret;
2274 qdf_nbuf_t nbuf = NULL;
2275
2276 for (nbuf_retry_count = 0; nbuf_retry_count <
2277 QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD;
2278 nbuf_retry_count++) {
2279 /* Allocate a new skb */
2280 nbuf = qdf_nbuf_alloc(soc->osdev,
2281 RX_BUFFER_SIZE,
2282 RX_BUFFER_RESERVATION,
2283 RX_BUFFER_ALIGNMENT,
2284 FALSE);
2285
Jeff Johnsona8edf332019-03-18 09:51:52 -07002286 if (!nbuf) {
jinweic chenc3546322018-02-02 15:03:41 +08002287 DP_STATS_INC(pdev,
2288 replenish.nbuf_alloc_fail, 1);
2289 continue;
2290 }
2291
2292 buf = qdf_nbuf_data(nbuf);
2293
2294 memset(buf, 0, RX_BUFFER_SIZE);
2295
2296 ret = qdf_nbuf_map_single(soc->osdev, nbuf,
Ankit Kumar0ae4abc2019-05-02 15:08:42 +05302297 QDF_DMA_FROM_DEVICE);
jinweic chenc3546322018-02-02 15:03:41 +08002298
2299 /* nbuf map failed */
2300 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
2301 qdf_nbuf_free(nbuf);
2302 DP_STATS_INC(pdev, replenish.map_err, 1);
2303 continue;
2304 }
2305 /* qdf_nbuf alloc and map succeeded */
2306 break;
2307 }
2308
2309 /* qdf_nbuf still alloc or map failed */
2310 if (qdf_unlikely(nbuf_retry_count >=
2311 QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD))
2312 return NULL;
2313
2314 return nbuf;
2315}