/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_RX_H
#define _DP_RX_H

#include "hal_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"
#include "dp_internal.h"

#ifdef RXDMA_OPTIMIZATION
#ifndef RX_DATA_BUFFER_ALIGNMENT
#define RX_DATA_BUFFER_ALIGNMENT	128
#endif
#ifndef RX_MONITOR_BUFFER_ALIGNMENT
#define RX_MONITOR_BUFFER_ALIGNMENT	128
#endif
#else /* RXDMA_OPTIMIZATION */
#define RX_DATA_BUFFER_ALIGNMENT	4
#define RX_MONITOR_BUFFER_ALIGNMENT	4
#endif /* RXDMA_OPTIMIZATION */

#ifdef QCA_HOST2FW_RXBUF_RING
#define DP_WBM2SW_RBM	HAL_RX_BUF_RBM_SW1_BM
/* RBM value used for re-injecting defragmented packets into REO */
#define DP_DEFRAG_RBM	HAL_RX_BUF_RBM_SW3_BM
#else
#define DP_WBM2SW_RBM	HAL_RX_BUF_RBM_SW3_BM
#define DP_DEFRAG_RBM	DP_WBM2SW_RBM
#endif /* QCA_HOST2FW_RXBUF_RING */

#define RX_BUFFER_RESERVATION	0

#define DP_PEER_METADATA_PEER_ID_MASK	0x0000ffff
#define DP_PEER_METADATA_PEER_ID_SHIFT	0
#define DP_PEER_METADATA_VDEV_ID_MASK	0x003f0000
#define DP_PEER_METADATA_VDEV_ID_SHIFT	16

#define DP_PEER_METADATA_PEER_ID_GET(_peer_metadata)		\
	(((_peer_metadata) & DP_PEER_METADATA_PEER_ID_MASK)	\
			>> DP_PEER_METADATA_PEER_ID_SHIFT)

#define DP_PEER_METADATA_VDEV_ID_GET(_peer_metadata)		\
	(((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK)	\
			>> DP_PEER_METADATA_VDEV_ID_SHIFT)

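/*
 * Illustrative decomposition of a peer metadata word using the
 * accessors above; the value is made up, not taken from any real
 * descriptor dump:
 *
 *	uint32_t peer_metadata = 0x002a0007;
 *	uint16_t peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_metadata);
 *	uint8_t vdev_id = DP_PEER_METADATA_VDEV_ID_GET(peer_metadata);
 *	// peer_id == 0x0007, vdev_id == 0x2a
 */
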
#define DP_RX_DESC_MAGIC 0xdec0de

/**
 * struct dp_rx_desc
 *
 * @nbuf:		VA of the "skb" posted
 * @rx_buf_start:	VA of the original Rx buffer, before
 *			movement of any skb->data pointer
 * @cookie:		index into the sw array which holds
 *			the sw Rx descriptors
 *			Cookie space is 21 bits:
 *			lower 18 bits -- index
 *			upper 3 bits -- pool_id
 * @pool_id:		pool Id from which this descriptor was allocated.
 *			Can only be used if there is no flow steering.
 * @in_use:		rx_desc is in use
 * @unmapped:		used to mark the rx_desc as unmapped once the
 *			corresponding nbuf has been unmapped
 */
struct dp_rx_desc {
	qdf_nbuf_t nbuf;
	uint8_t *rx_buf_start;
	uint32_t cookie;
	uint8_t	 pool_id;
#ifdef RX_DESC_DEBUG_CHECK
	uint32_t magic;
#endif
	uint8_t	in_use:1,
		unmapped:1;
};

/* RX Descriptor multi-page memory alloc related */
#define DP_RX_DESC_OFFSET_NUM_BITS 8
#define DP_RX_DESC_PAGE_ID_NUM_BITS 8
#define DP_RX_DESC_POOL_ID_NUM_BITS 4

#define DP_RX_DESC_PAGE_ID_SHIFT	DP_RX_DESC_OFFSET_NUM_BITS
#define DP_RX_DESC_POOL_ID_SHIFT	\
	(DP_RX_DESC_OFFSET_NUM_BITS + DP_RX_DESC_PAGE_ID_NUM_BITS)
#define RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK	\
	(((1 << DP_RX_DESC_POOL_ID_NUM_BITS) - 1) << DP_RX_DESC_POOL_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK	\
	(((1 << DP_RX_DESC_PAGE_ID_NUM_BITS) - 1) << \
			DP_RX_DESC_PAGE_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK	\
	((1 << DP_RX_DESC_OFFSET_NUM_BITS) - 1)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(_cookie)	\
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK) >> \
			DP_RX_DESC_POOL_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(_cookie)	\
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK) >> \
			DP_RX_DESC_PAGE_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(_cookie)	\
	((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK)

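/*
 * Sketch of the multi-page cookie layout implied by the masks above
 * (bit widths: pool_id 4 | page_id 8 | offset 8); the value below is
 * hypothetical:
 *
 *	uint32_t cookie = 0x00021a05;
 *	// DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie) == 0x2
 *	// DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie) == 0x1a
 *	// DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie)  == 0x05
 */
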
#define RX_DESC_COOKIE_INDEX_SHIFT		0
#define RX_DESC_COOKIE_INDEX_MASK		0x3ffff /* 18 bits */
#define RX_DESC_COOKIE_POOL_ID_SHIFT		18
#define RX_DESC_COOKIE_POOL_ID_MASK		0x1c0000

#define DP_RX_DESC_COOKIE_MAX	\
	(RX_DESC_COOKIE_INDEX_MASK | RX_DESC_COOKIE_POOL_ID_MASK)

#define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >>	\
			RX_DESC_COOKIE_POOL_ID_SHIFT)

#define DP_RX_DESC_COOKIE_INDEX_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
			RX_DESC_COOKIE_INDEX_SHIFT)
/* DOC: Offset to obtain LLC hdr
 *
 * In the case of a WiFi parse error, to reach the LLC header from the
 * beginning of the VLAN tag we need to skip 8 bytes:
 * VLAN tag (4) + length (2) + length added by HW (2) = 8 bytes.
 */
#define DP_SKIP_VLAN 8

/**
 * struct dp_rx_cached_buf - rx cached buffer
 * @node: linked list node
 * @buf: skb buffer
 */
struct dp_rx_cached_buf {
	qdf_list_node_t node;
	qdf_nbuf_t buf;
};

/*
 * dp_rx_xor_block() - xor block of data
 * @b: destination data block
 * @a: source data block
 * @len: length of the data to process
 *
 * Returns: None
 */
static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len)
{
	qdf_size_t i;

	for (i = 0; i < len; i++)
		b[i] ^= a[i];
}

/*
 * dp_rx_rotl() - rotate the bits left
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Returns: Integer rotated left by 'bits'
 */
static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
{
	return (val << bits) | (val >> (32 - bits));
}

/*
 * dp_rx_rotr() - rotate the bits right
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Returns: Integer rotated right by 'bits'
 */
static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
{
	return (val >> bits) | (val << (32 - bits));
}

/*
 * dp_set_rx_queue() - set queue_mapping in skb
 * @nbuf: skb
 * @queue_id: rx queue_id
 *
 * Return: void
 */
#ifdef QCA_OL_RX_MULTIQ_SUPPORT
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
	qdf_nbuf_record_rx_queue(nbuf, queue_id);
}
#else
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
}
#endif

/*
 * dp_rx_xswap() - swap the adjacent bytes in each 16-bit half of the value
 * @val: unsigned integer input value
 *
 * Returns: Integer with bytes swapped
 */
static inline uint32_t dp_rx_xswap(uint32_t val)
{
	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
}

/*
 * dp_rx_get_le32_split() - get little endian 32 bits from split bytes
 * @b0: byte 0
 * @b1: byte 1
 * @b2: byte 2
 * @b3: byte 3
 *
 * Returns: Integer assembled from the four bytes, little endian
 */
static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
					    uint8_t b3)
{
	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
}

/*
 * dp_rx_get_le32() - get little endian 32 bits
 * @p: source byte array
 *
 * Returns: Integer with little endian 32 bits
 */
static inline uint32_t dp_rx_get_le32(const uint8_t *p)
{
	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
}

/*
 * dp_rx_put_le32() - put little endian 32 bits
 * @p: destination char array
 * @v: source 32-bit integer
 *
 * Returns: None
 */
static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (v) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

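/*
 * Minimal round-trip sketch for the little-endian helpers above
 * (illustrative values only):
 *
 *	uint8_t buf[4];
 *
 *	dp_rx_put_le32(buf, 0x11223344);
 *	// buf[] == { 0x44, 0x33, 0x22, 0x11 }
 *	// dp_rx_get_le32(buf) == 0x11223344
 */
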
/* Extract Michael MIC block of data */
#define dp_rx_michael_block(l, r)	\
	do {				\
		r ^= dp_rx_rotl(l, 17);	\
		l += r;			\
		r ^= dp_rx_xswap(l);	\
		l += r;			\
		r ^= dp_rx_rotl(l, 3);	\
		l += r;			\
		r ^= dp_rx_rotr(l, 2);	\
		l += r;			\
	} while (0)

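/*
 * Sketch of how the Michael block is typically driven over a message:
 * the 64-bit MIC state (l, r) is seeded from the key and then mixed
 * with each little-endian 32-bit word. 'key', 'data' and 'nwords' are
 * hypothetical locals, not part of this file:
 *
 *	uint32_t l = dp_rx_get_le32(key);
 *	uint32_t r = dp_rx_get_le32(key + 4);
 *
 *	for (i = 0; i < nwords; i++) {
 *		l ^= dp_rx_get_le32(data + 4 * i);
 *		dp_rx_michael_block(l, r);
 *	}
 */
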
/**
 * union dp_rx_desc_list_elem_t
 *
 * @next : Next pointer to form free list
 * @rx_desc : DP Rx descriptor
 */
union dp_rx_desc_list_elem_t {
	union dp_rx_desc_list_elem_t *next;
	struct dp_rx_desc rx_desc;
};

#ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_rx_desc_find() - find dp rx descriptor from page ID and offset
 * @page_id: Page ID
 * @offset: Offset of the descriptor element
 *
 * Return: RX descriptor element
 */
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_pool);

static inline
struct dp_rx_desc *dp_get_rx_desc_from_cookie(struct dp_soc *soc,
					      struct rx_desc_pool *pool,
					      uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
	struct rx_desc_pool *rx_desc_pool;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
		return NULL;

	rx_desc_pool = &pool[pool_id];
	rx_desc_elem = (union dp_rx_desc_list_elem_t *)
		(rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset);

	return &rx_desc_elem->rx_desc;
}

/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *				   the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc,
					       uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_buf[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *				 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc,
					     uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_mon[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *				    the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc,
						uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_status[0], cookie);
}
#else
/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *				   the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	struct rx_desc_pool *rx_desc_pool;

	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
		return NULL;

	rx_desc_pool = &soc->rx_desc_buf[pool_id];

	if (qdf_unlikely(index >= rx_desc_pool->pool_size))
		return NULL;

	return &(soc->rx_desc_buf[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *				 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *				    the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */

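/*
 * Typical lookup flow in the ring processing path (a sketch; the
 * cookie extraction step depends on the HAL accessors for the ring
 * being serviced and is abbreviated here):
 *
 *	uint32_t rx_buf_cookie = ...cookie field of the ring entry...;
 *	struct dp_rx_desc *rx_desc =
 *		dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
 *
 *	if (qdf_unlikely(!rx_desc))
 *		// stale or corrupt cookie; treat as a ring error
 */
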
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool);

uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				  struct rx_desc_pool *rx_desc_pool,
				  uint16_t num_descs,
				  union dp_rx_desc_list_elem_t **desc_list,
				  union dp_rx_desc_list_elem_t **tail);

QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);

void dp_rx_pdev_detach(struct dp_pdev *pdev);

void dp_print_napi_stats(struct dp_soc *soc);

/**
 * dp_rx_vdev_detach() - detach vdev from dp rx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *	   QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev);

uint32_t
dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
	      uint8_t reo_ring_num,
	      uint32_t quota);

/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be
 *		  serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
			   hal_ring_handle_t hal_ring_hdl, uint32_t quota);

/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be
 *		  serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl, uint32_t quota);

/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		       multiple nbufs.
 * @nbuf: pointer to the first msdu of an amsdu.
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf);

/*
 * dp_rx_desc_pool_alloc() - create a pool of software rx_descs
 *			     at the time of dp rx initialization
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: number of Rx descriptors in the pool
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF status
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id,
				 uint32_t pool_size, struct rx_desc_pool *pool);

/*
 * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during
 *				     de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool);

/*
 * dp_rx_desc_nbuf_free() - free the sw rx desc nbufs called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);

/*
 * dp_rx_desc_pool_free() - free the sw rx desc array called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);

void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		       struct dp_peer *peer);

/**
 * dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list
 *
 * @head: pointer to the head of local free list
 * @tail: pointer to the tail of local free list
 * @new: new descriptor that is added to the free list
 *
 * Return: void
 */
static inline
void dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
				 union dp_rx_desc_list_elem_t **tail,
				 struct dp_rx_desc *new)
{
	qdf_assert(head && new);

	new->nbuf = NULL;
	new->in_use = 0;

	((union dp_rx_desc_list_elem_t *)new)->next = *head;
	*head = (union dp_rx_desc_list_elem_t *)new;
	/* reset tail if head->next is NULL */
	if (!*tail || !(*head)->next)
		*tail = *head;
}

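/*
 * Minimal usage sketch: descriptors reaped during processing are first
 * chained onto a local free list, then returned to the pool in one
 * batch ('pool_id' and 'rx_desc_pool' are hypothetical locals):
 *
 *	union dp_rx_desc_list_elem_t *head = NULL, *tail = NULL;
 *
 *	dp_rx_add_to_free_desc_list(&head, &tail, rx_desc);
 *	...
 *	dp_rx_add_desc_list_to_free_list(soc, &head, &tail, pool_id,
 *					 rx_desc_pool);
 */
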
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
				   uint8_t mac_id);
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done,
					uint8_t mac_id);
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer);
void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		       uint16_t peer_id, uint8_t tid);

#define DP_RX_LIST_APPEND(head, tail, elem) \
	do {                                                          \
		if (!(head)) {                                        \
			(head) = (elem);                              \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) = 1;\
		} else {                                              \
			qdf_nbuf_set_next((tail), (elem));            \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head)++;  \
		}                                                     \
		(tail) = (elem);                                      \
		qdf_nbuf_set_next((tail), NULL);                      \
	} while (0)

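/*
 * Illustrative use of DP_RX_LIST_APPEND while walking reaped buffers
 * ('nbuf' and the list head/tail are hypothetical locals):
 *
 *	qdf_nbuf_t nbuf_head = NULL, nbuf_tail = NULL;
 *
 *	while ((nbuf = ...next reaped buffer...) != NULL)
 *		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
 *	// nbuf_head now carries the chain; its element count is in
 *	// QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf_head)
 */
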
/* For qcn9000 emulation the PCIe is a complete PHY and there are no
 * address restrictions.
 */
#if !defined(BUILD_X86) || defined(QCA_WIFI_QCN9000)
static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
		qdf_dma_addr_t *paddr, struct rx_desc_pool *rx_desc_pool)
{
	return QDF_STATUS_SUCCESS;
}
#else
#define MAX_RETRY 100
static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
		qdf_dma_addr_t *paddr, struct rx_desc_pool *rx_desc_pool)
{
	uint32_t nbuf_retry = 0;
	int32_t ret;
	const uint32_t x86_phy_addr = 0x50000000;
	/*
	 * in M2M emulation platforms (x86) the memory below 0x50000000
	 * is reserved for target use, so any memory allocated in this
	 * region should not be used by host
	 */
	do {
		if (qdf_likely(*paddr > x86_phy_addr))
			return QDF_STATUS_SUCCESS;
		else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "phy addr %pK is below 0x50000000, trying again",
				  paddr);

			nbuf_retry++;
			if ((*rx_netbuf)) {
				qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
						      QDF_DMA_FROM_DEVICE);
				/* Not freeing buffer intentionally.
				 * Observed that same buffer is getting
				 * re-allocated resulting in longer load time
				 * WMI init timeout.
				 * This buffer is anyway not useful so skip it.
				 */
			}

			*rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
						    rx_desc_pool->buf_size,
						    RX_BUFFER_RESERVATION,
						    rx_desc_pool->buf_alignment,
						    FALSE);

			if (qdf_unlikely(!(*rx_netbuf)))
				return QDF_STATUS_E_FAILURE;

			ret = qdf_nbuf_map_single(dp_soc->osdev, *rx_netbuf,
						  QDF_DMA_FROM_DEVICE);

			if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
				qdf_nbuf_free(*rx_netbuf);
				*rx_netbuf = NULL;
				continue;
			}

			*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);
		}
	} while (nbuf_retry < MAX_RETRY);

	if ((*rx_netbuf)) {
		qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
				      QDF_DMA_FROM_DEVICE);
		qdf_nbuf_free(*rx_netbuf);
	}

	return QDF_STATUS_E_FAILURE;
}
#endif

/**
 * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
 *				   the MSDU Link Descriptor
 * @soc: core txrx main context
 * @buf_info: buf_info includes cookie that is used to lookup
 * virtual address of link descriptor after deriving the page id
 * and the offset or index of the desc on the associated page.
 *
 * This is the VA of the link descriptor, that HAL layer later uses to
 * retrieve the list of MSDU's for a given MPDU.
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
				  struct hal_buf_info *buf_info)
{
	void *link_desc_va;
	struct qdf_mem_multi_page_t *pages;
	uint16_t page_id = LINK_DESC_COOKIE_PAGE_ID(buf_info->sw_cookie);

	pages = &soc->link_desc_pages;
	if (!pages)
		return NULL;
	if (qdf_unlikely(page_id >= pages->num_pages))
		return NULL;
	link_desc_va = pages->dma_pages[page_id].page_v_addr_start +
		(buf_info->paddr - pages->dma_pages[page_id].page_p_addr);
	return link_desc_va;
}

/**
 * dp_rx_cookie_2_mon_link_desc_va() - Converts cookie to a virtual address of
 *				       the MSDU Link Descriptor
 * @pdev: core txrx pdev context
 * @buf_info: buf_info includes cookie that is used to lookup the virtual
 * address of the link descriptor. Normally this is just an index into a
 * per pdev array.
 *
 * This is the VA of the link descriptor in monitor mode destination ring,
 * that HAL layer later uses to retrieve the list of MSDU's for a given MPDU.
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_mon_link_desc_va(struct dp_pdev *pdev,
				      struct hal_buf_info *buf_info,
				      int mac_id)
{
	void *link_desc_va;

	/* TODO */
	/* Add sanity for cookie */

	link_desc_va =
	   pdev->soc->mon_link_desc_banks[mac_id][buf_info->sw_cookie]
			.base_vaddr +
	   (buf_info->paddr -
	    pdev->soc->mon_link_desc_banks[mac_id][buf_info->sw_cookie]
			.base_paddr);

	return link_desc_va;
}

/**
 * dp_rx_defrag_concat() - Concatenate the fragments
 *
 * @dst: destination pointer to the buffer
 * @src: source pointer from where the fragment payload is to be copied
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
{
	/*
	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
	 * to provide space for src, the headroom portion is copied from
	 * the original dst buffer to the larger new dst buffer.
	 * (This is needed, because the headroom of the dst buffer
	 * contains the rx desc.)
	 */
	if (!qdf_nbuf_cat(dst, src)) {
		/*
		 * qdf_nbuf_cat does not free the src memory.
		 * Free src nbuf before returning.
		 * For the failure case the caller takes care of freeing
		 * the nbuf.
		 */
		qdf_nbuf_free(src);
		return QDF_STATUS_SUCCESS;
	}

	return QDF_STATUS_E_DEFRAG_ERROR;
}

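/*
 * Illustrative call pattern during reassembly ('head' and 'frag' are
 * hypothetical locals): each later fragment is appended to the head
 * nbuf, and on success the source nbuf has been consumed:
 *
 *	if (dp_rx_defrag_concat(head, frag) != QDF_STATUS_SUCCESS) {
 *		// 'frag' is still owned by the caller on failure
 *	}
 */
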
#ifndef FEATURE_WDS
static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc,
					      uint16_t sa_idx, bool is_active)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_rx_wds_srcport_learn(struct dp_soc *soc,
			uint8_t *rx_tlv_hdr,
			struct dp_peer *ta_peer,
			qdf_nbuf_t nbuf,
			struct hal_rx_msdu_metadata msdu_metadata)
{
}
#endif

/*
 * dp_rx_desc_dump() - dump the sw rx descriptor
 *
 * @rx_desc: sw rx descriptor
 */
static inline void dp_rx_desc_dump(struct dp_rx_desc *rx_desc)
{
	dp_info("rx_desc->nbuf: %pK, rx_desc->cookie: %d, rx_desc->pool_id: %d, rx_desc->in_use: %d, rx_desc->unmapped: %d",
		rx_desc->nbuf, rx_desc->cookie, rx_desc->pool_id,
		rx_desc->in_use, rx_desc->unmapped);
}

/*
 * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet.
 *				      In qwrap mode, packets originated from
 *				      any vdev should not loopback and
 *				      should be dropped.
 * @vdev: vdev on which rx packet is received
 * @nbuf: rx pkt
 *
 */
#if ATH_SUPPORT_WRAP
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						  qdf_nbuf_t nbuf)
{
	struct dp_vdev *psta_vdev;
	struct dp_pdev *pdev = vdev->pdev;
	uint8_t *data = qdf_nbuf_data(nbuf);

	if (qdf_unlikely(vdev->proxysta_vdev)) {
		/* In qwrap isolation mode, allow loopback packets as all
		 * packets go to RootAP and Loopback on the mpsta.
		 */
		if (vdev->isolation_vdev)
			return false;
		TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_unlikely(psta_vdev->proxysta_vdev &&
					 !qdf_mem_cmp(psta_vdev->mac_addr.raw,
						      &data[QDF_MAC_ADDR_SIZE],
						      QDF_MAC_ADDR_SIZE))) {
				/* Drop packet if source address is equal to
				 * any of the vdev addresses.
				 */
				return true;
			}
		}
	}
	return false;
}
#else
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						  qdf_nbuf_t nbuf)
{
	return false;
}
#endif

#if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
	defined(WLAN_SUPPORT_RX_TAG_STATISTICS) ||\
	defined(WLAN_SUPPORT_RX_FLOW_TAG)
#include "dp_rx_tag.h"
#endif

#ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
/**
 * dp_rx_update_protocol_tag() - Reads CCE metadata from the RX MSDU end TLV
 *				 and sets the corresponding tag in the QDF packet
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the protocol tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 * @ring_index: REO ring number, not used for error & monitor ring
 * @is_reo_exception: flag to indicate if rx from REO ring or exception ring
 * @is_update_stats: flag to indicate whether to update stats or not
 *
 * Return: void
 */
static inline void
dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev,
			  qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
			  uint16_t ring_index,
			  bool is_reo_exception, bool is_update_stats)
{
}
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */

#ifndef WLAN_SUPPORT_RX_FLOW_TAG
/**
 * dp_rx_update_flow_tag() - Reads FSE metadata from the RX MSDU end TLV
 *			     and sets the corresponding tag in the QDF packet
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the flow tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 * @update_stats: flag to indicate whether to update stats or not
 *
 * Return: void
 */
static inline void
dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev,
		      qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, bool update_stats)
{
}
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */

#if !defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) &&\
	!defined(WLAN_SUPPORT_RX_FLOW_TAG)
/**
 * dp_rx_mon_update_protocol_flow_tag() - Performs necessary checks for monitor
 *					  mode and then tags appropriate packets
 * @soc: core txrx main context
 * @dp_pdev: pdev on which packet is received
 * @msdu: QDF packet buffer on which the protocol tag should be set
 * @rx_desc: base address where the RX TLVs start
 *
 * Return: void
 */
static inline
void dp_rx_mon_update_protocol_flow_tag(struct dp_soc *soc,
					struct dp_pdev *dp_pdev,
					qdf_nbuf_t msdu, void *rx_desc)
{
}
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG || WLAN_SUPPORT_RX_FLOW_TAG */

/*
 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 *
 * Return: return success or failure
 */
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				   struct dp_srng *dp_rxdma_srng,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t num_req_buffers,
				   union dp_rx_desc_list_elem_t **desc_list,
				   union dp_rx_desc_list_elem_t **tail);

/*
 * dp_pdev_rx_buffers_attach() - replenish rxdma ring with rx nbufs
 *				 called during dp rx initialization
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 *
 * Return: return success or failure
 */
QDF_STATUS
dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
			  struct dp_srng *dp_rxdma_srng,
			  struct rx_desc_pool *rx_desc_pool,
			  uint32_t num_req_buffers);

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		       uint8_t bm_action);

/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to
 *				      (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
			       hal_buff_addrinfo_t link_desc_addr,
			       uint8_t bm_action);

/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		     uint32_t mac_id, uint32_t quota);

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer);
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr);

int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
			   struct dp_peer *peer);

qdf_nbuf_t
dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev);

/*
 * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
 *
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @ring_desc: opaque pointer to the RX ring descriptor
 * @rx_desc: host rx descriptor
 *
 * Return: void
 */
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring_hdl,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc);

void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf);

#ifdef RX_DESC_DEBUG_CHECK
/**
 * dp_rx_desc_check_magic() - check the magic value in dp_rx_desc
 * @rx_desc: rx descriptor pointer
 *
 * Return: true, if magic is correct, else false.
 */
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
		return false;

	rx_desc->magic = 0;
	return true;
}

/**
 * dp_rx_desc_prep() - prepare rx desc
 * @rx_desc: rx descriptor pointer to be prepared
 * @nbuf: nbuf to be associated with rx_desc
 *
 * Note: assumption is that we are associating a nbuf which is mapped
 *
 * Return: none
 */
static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->nbuf = nbuf;
	rx_desc->unmapped = 0;
}

#else

static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	return true;
}

static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->nbuf = nbuf;
	rx_desc->unmapped = 0;
}
#endif /* RX_DESC_DEBUG_CHECK */

void dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer,
			     uint8_t err_code, uint8_t mac_id);

#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif

#ifndef QCA_MULTIPASS_SUPPORT
static inline
bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf, uint8_t tid)
{
	return false;
}
#else
bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf,
			     uint8_t tid);
#endif

#ifndef WLAN_RX_PKT_CAPTURE_ENH
static inline
void dp_peer_set_rx_capture_enabled(struct dp_peer *peer_handle, bool value)
{
}
#endif

/**
 * dp_rx_deliver_to_stack() - deliver pkts to network stack
 * Caller to hold peer refcount and check for valid peer
 * @soc: soc
 * @vdev: vdev
 * @peer: peer
 * @nbuf_head: skb list head
 * @nbuf_tail: skb list tail
 *
 * Return: None
 */
void dp_rx_deliver_to_stack(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_peer *peer,
			    qdf_nbuf_t nbuf_head,
			    qdf_nbuf_t nbuf_tail);

#endif /* _DP_RX_H */