/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_RX_H
#define _DP_RX_H

#include "hal_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"
#include "dp_internal.h"

#ifdef RXDMA_OPTIMIZATION
#ifdef NO_RX_PKT_HDR_TLV
#define RX_BUFFER_ALIGNMENT	0
#else
#define RX_BUFFER_ALIGNMENT	128
#endif /* NO_RX_PKT_HDR_TLV */
#else /* RXDMA_OPTIMIZATION */
#define RX_BUFFER_ALIGNMENT	4
#endif /* RXDMA_OPTIMIZATION */

#ifdef QCA_HOST2FW_RXBUF_RING
#define DP_WBM2SW_RBM	HAL_RX_BUF_RBM_SW1_BM
#else
#define DP_WBM2SW_RBM	HAL_RX_BUF_RBM_SW3_BM
#endif /* QCA_HOST2FW_RXBUF_RING */

#define RX_BUFFER_RESERVATION	0

#define DP_PEER_METADATA_PEER_ID_MASK	0x0000ffff
#define DP_PEER_METADATA_PEER_ID_SHIFT	0
#define DP_PEER_METADATA_VDEV_ID_MASK	0x00070000
#define DP_PEER_METADATA_VDEV_ID_SHIFT	16

#define DP_PEER_METADATA_PEER_ID_GET(_peer_metadata)		\
	(((_peer_metadata) & DP_PEER_METADATA_PEER_ID_MASK)	\
			>> DP_PEER_METADATA_PEER_ID_SHIFT)

#define DP_PEER_METADATA_ID_GET(_peer_metadata)			\
	(((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK)	\
			>> DP_PEER_METADATA_VDEV_ID_SHIFT)

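/*
 * Illustrative example (not part of the driver): for a hypothetical
 * peer_metadata value of 0x0002000a, the extractors above yield
 *
 *   DP_PEER_METADATA_PEER_ID_GET(0x0002000a) == 0x000a  (peer id 10)
 *   DP_PEER_METADATA_ID_GET(0x0002000a)      == 0x2     (vdev id 2)
 */
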
#define DP_RX_DESC_MAGIC 0xdec0de

/**
 * struct dp_rx_desc
 *
 * @nbuf		: VA of the "skb" posted
 * @rx_buf_start	: VA of the original Rx buffer, before
 *			  movement of any skb->data pointer
 * @cookie		: index into the sw array which holds
 *			  the sw Rx descriptors
 *			  Cookie space is 21 bits:
 *			  lower 18 bits -- index
 *			  upper 3 bits -- pool_id
 * @pool_id		: pool Id for which this is allocated.
 *			  Can only be used if there is no flow
 *			  steering
 * @in_use		: rx_desc is in use
 * @unmapped		: used to mark rx_desc as unmapped if the
 *			  corresponding nbuf is already unmapped
 */
struct dp_rx_desc {
	qdf_nbuf_t nbuf;
	uint8_t *rx_buf_start;
	uint32_t cookie;
	uint8_t	 pool_id;
#ifdef RX_DESC_DEBUG_CHECK
	uint32_t magic;
#endif
	uint8_t	in_use:1,
		unmapped:1;
};

/* RX Descriptor Multi Page memory alloc related */
#define DP_RX_DESC_OFFSET_NUM_BITS 8
#define DP_RX_DESC_PAGE_ID_NUM_BITS 8
#define DP_RX_DESC_POOL_ID_NUM_BITS 4

#define DP_RX_DESC_PAGE_ID_SHIFT DP_RX_DESC_OFFSET_NUM_BITS
#define DP_RX_DESC_POOL_ID_SHIFT \
	(DP_RX_DESC_OFFSET_NUM_BITS + DP_RX_DESC_PAGE_ID_NUM_BITS)
#define RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK	\
	(((1 << DP_RX_DESC_POOL_ID_NUM_BITS) - 1) << DP_RX_DESC_POOL_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK	\
	(((1 << DP_RX_DESC_PAGE_ID_NUM_BITS) - 1) << \
	 DP_RX_DESC_PAGE_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK \
	((1 << DP_RX_DESC_OFFSET_NUM_BITS) - 1)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(_cookie) \
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK) >> \
	 DP_RX_DESC_POOL_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(_cookie) \
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK) >> \
	 DP_RX_DESC_PAGE_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(_cookie) \
	((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK)

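/*
 * Illustrative example (not part of the driver): the multi page cookie is
 * laid out as [pool_id:4][page_id:8][offset:8], so a hypothetical cookie
 * of 0x00010307 decodes as
 *
 *   DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(0x00010307) == 1
 *   DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(0x00010307) == 3
 *   DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(0x00010307)  == 7
 */
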
#define RX_DESC_COOKIE_INDEX_SHIFT		0
#define RX_DESC_COOKIE_INDEX_MASK		0x3ffff /* 18 bits */
#define RX_DESC_COOKIE_POOL_ID_SHIFT		18
#define RX_DESC_COOKIE_POOL_ID_MASK		0x1c0000

#define DP_RX_DESC_COOKIE_MAX	\
	(RX_DESC_COOKIE_INDEX_MASK | RX_DESC_COOKIE_POOL_ID_MASK)

#define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >>	\
			RX_DESC_COOKIE_POOL_ID_SHIFT)

#define DP_RX_DESC_COOKIE_INDEX_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
			RX_DESC_COOKIE_INDEX_SHIFT)

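/*
 * Illustrative example (not part of the driver): with the 21-bit cookie
 * layout above ([pool_id:3][index:18]), a hypothetical cookie of
 * ((2 << RX_DESC_COOKIE_POOL_ID_SHIFT) | 5) == 0x80005 decodes as
 *
 *   DP_RX_DESC_COOKIE_POOL_ID_GET(0x80005) == 2
 *   DP_RX_DESC_COOKIE_INDEX_GET(0x80005)   == 5
 */
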
/* DOC: Offset to obtain LLC hdr
 *
 * In the case of a Wifi parse error, to reach the LLC header from the
 * beginning of the VLAN tag we need to skip 8 bytes:
 * VLAN tag (4) + length (2) + length added by HW (2) = 8 bytes.
 */
#define DP_SKIP_VLAN		8

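/*
 * Illustrative byte layout (derived from the comment above), starting at
 * the VLAN tag:
 *
 *   | VLAN tag (4) | length (2) | HW-added length (2) | LLC header ...
 *   |<------------ DP_SKIP_VLAN == 8 bytes ---------->|
 */
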
/**
 * struct dp_rx_cached_buf - rx cached buffer
 * @node: linked list node
 * @buf: skb buffer
 */
struct dp_rx_cached_buf {
	qdf_list_node_t node;
	qdf_nbuf_t buf;
};

/*
 * dp_rx_xor_block() - xor block of data
 * @b: destination data block
 * @a: source data block
 * @len: length of the data to process
 *
 * Returns: None
 */
static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len)
{
	qdf_size_t i;

	for (i = 0; i < len; i++)
		b[i] ^= a[i];
}

/*
 * dp_rx_rotl() - rotate the bits left
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Returns: Integer rotated left by 'bits'
 */
static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
{
	return (val << bits) | (val >> (32 - bits));
}

/*
 * dp_rx_rotr() - rotate the bits right
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Returns: Integer rotated right by 'bits'
 */
static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
{
	return (val >> bits) | (val << (32 - bits));
}

/*
 * dp_set_rx_queue() - set queue_mapping in skb
 * @nbuf: skb
 * @queue_id: rx queue_id
 *
 * Return: void
 */
#ifdef QCA_OL_RX_MULTIQ_SUPPORT
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
	qdf_nbuf_record_rx_queue(nbuf, queue_id);
}
#else
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
}
#endif

/*
 * dp_rx_xswap() - swap the adjacent bytes within each 16-bit half
 * @val: unsigned integer input value
 *
 * Returns: Integer with the bytes of each 16-bit half swapped
 */
static inline uint32_t dp_rx_xswap(uint32_t val)
{
	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
}

/*
 * dp_rx_get_le32_split() - get little endian 32 bits split
 * @b0: byte 0
 * @b1: byte 1
 * @b2: byte 2
 * @b3: byte 3
 *
 * Returns: Integer assembled from the four bytes in little endian order
 */
static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
					    uint8_t b3)
{
	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
}

/*
 * dp_rx_get_le32() - get little endian 32 bits
 * @p: source byte array
 *
 * Returns: Integer assembled from p[0..3] in little endian order
 */
static inline uint32_t dp_rx_get_le32(const uint8_t *p)
{
	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
}

/*
 * dp_rx_put_le32() - put little endian 32 bits
 * @p: destination char array
 * @v: source 32-bit integer
 *
 * Returns: None
 */
static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (v) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

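/*
 * Illustrative example (not part of the driver): for a byte array
 * p[] = {0x01, 0x02, 0x03, 0x04}, dp_rx_get_le32(p) returns 0x04030201,
 * and dp_rx_put_le32(p, 0x04030201) writes back those same four bytes.
 */
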
/* Extract Michael MIC block of data */
#define dp_rx_michael_block(l, r)		\
	do {					\
		r ^= dp_rx_rotl(l, 17);		\
		l += r;				\
		r ^= dp_rx_xswap(l);		\
		l += r;				\
		r ^= dp_rx_rotl(l, 3);		\
		l += r;				\
		r ^= dp_rx_rotr(l, 2);		\
		l += r;				\
	} while (0)

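/*
 * Illustrative sketch (not part of the driver) of how the helpers above
 * combine into the Michael MIC inner loop, assuming l and r hold the two
 * key halves and msg points at the 32-bit message words:
 *
 *   for (i = 0; i < nwords; i++) {
 *           l ^= dp_rx_get_le32(msg + 4 * i);
 *           dp_rx_michael_block(l, r);
 *   }
 */
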
/**
 * union dp_rx_desc_list_elem_t
 *
 * @next: Next pointer to form free list
 * @rx_desc: DP Rx descriptor
 */
union dp_rx_desc_list_elem_t {
	union dp_rx_desc_list_elem_t *next;
	struct dp_rx_desc rx_desc;
};

#ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_rx_desc_find() - find dp rx descriptor from page ID and offset
 * @page_id: Page ID
 * @offset: Offset of the descriptor element
 * @rx_pool: rx descriptor pool in which to look up the element
 *
 * Return: RX descriptor element
 */
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_pool);

static inline
struct dp_rx_desc *dp_get_rx_desc_from_cookie(struct dp_soc *soc,
					      struct rx_desc_pool *pool,
					      uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
	struct rx_desc_pool *rx_desc_pool;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
		return NULL;

	rx_desc_pool = &pool[pool_id];
	rx_desc_elem = (union dp_rx_desc_list_elem_t *)
		(rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		 rx_desc_pool->elem_size * offset);

	return &rx_desc_elem->rx_desc;
}

/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *				   the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc,
					       uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_buf[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *				 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc,
					     uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_mon[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *				    the Rx descriptor on monitor status ring
 *				    buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc,
						uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_status[0], cookie);
}
#else
/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *				   the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	struct rx_desc_pool *rx_desc_pool;

	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
		return NULL;

	rx_desc_pool = &soc->rx_desc_buf[pool_id];

	if (qdf_unlikely(index >= rx_desc_pool->pool_size))
		return NULL;

	return &(soc->rx_desc_buf[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *				 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *				    the Rx descriptor on monitor status ring
 *				    buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */

void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool);

uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				  struct rx_desc_pool *rx_desc_pool,
				  uint16_t num_descs,
				  union dp_rx_desc_list_elem_t **desc_list,
				  union dp_rx_desc_list_elem_t **tail);

QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);

void dp_rx_pdev_detach(struct dp_pdev *pdev);

void dp_print_napi_stats(struct dp_soc *soc);

/**
 * dp_rx_vdev_detach() - detach vdev from dp rx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *	   QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev);

uint32_t
dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
	      uint8_t reo_ring_num,
	      uint32_t quota);

/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be
 *		  serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
			   hal_ring_handle_t hal_ring_hdl, uint32_t quota);

/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be
 *		  serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl, uint32_t quota);

/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		       multiple nbufs.
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr);

/*
 * dp_rx_desc_pool_alloc() - create a pool of software rx_descs
 *			     at the time of dp rx initialization
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: number of Rx descriptors in the pool
 * @pool: rx descriptor pool pointer
 *
 * Return: QDF status
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id,
				 uint32_t pool_size, struct rx_desc_pool *pool);

/*
 * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during
 *				     de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool);

/*
 * dp_rx_desc_nbuf_free() - free the sw rx desc nbufs called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);

/*
 * dp_rx_desc_pool_free() - free the sw rx desc array called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);

void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		       struct dp_peer *peer);

/**
 * dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list
 *
 * @head: pointer to the head of local free list
 * @tail: pointer to the tail of local free list
 * @new: new descriptor that is added to the free list
 *
 * Return: void
 */
static inline
void dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
				 union dp_rx_desc_list_elem_t **tail,
				 struct dp_rx_desc *new)
{
	qdf_assert(head && new);

	new->nbuf = NULL;
	new->in_use = 0;

	((union dp_rx_desc_list_elem_t *)new)->next = *head;
	*head = (union dp_rx_desc_list_elem_t *)new;
	if (!*tail)
		*tail = *head;
}

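/*
 * Illustrative usage (not part of the driver): descriptors reaped from a
 * ring are typically collected on a local free list and later returned in
 * bulk through the replenish path, e.g.
 *
 *   union dp_rx_desc_list_elem_t *head = NULL, *tail = NULL;
 *
 *   dp_rx_add_to_free_desc_list(&head, &tail, rx_desc);
 *   ...
 *   dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
 *                           num_reaped, &head, &tail);
 */
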
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
				   uint8_t mac_id);
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done,
					uint8_t mac_id);
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer);
void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		       uint16_t peer_id, uint8_t tid);

#define DP_RX_LIST_APPEND(head, tail, elem)			\
	do {							\
		if (!(head)) {					\
			(head) = (elem);			\
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) = 1;\
		} else {					\
			qdf_nbuf_set_next((tail), (elem));	\
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head)++; \
		}						\
		(tail) = (elem);				\
		qdf_nbuf_set_next((tail), NULL);		\
	} while (0)

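/*
 * Illustrative usage (not part of the driver): building a delivery list of
 * nbufs while DP_RX_LIST_APPEND maintains the element count in the head
 * nbuf's control block:
 *
 *   qdf_nbuf_t deliver_list_head = NULL;
 *   qdf_nbuf_t deliver_list_tail = NULL;
 *
 *   DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
 */
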
/*
 * For qcn9000 emulation the PCIe is a complete PHY and there are no
 * address restrictions.
 */
#if !defined(BUILD_X86) || defined(QCA_WIFI_QCN9000)
static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
				  qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
#else
#define MAX_RETRY 100
static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
				  qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
{
	uint32_t nbuf_retry = 0;
	int32_t ret;
	const uint32_t x86_phy_addr = 0x50000000;
	/*
	 * In M2M emulation platforms (x86) the memory below 0x50000000
	 * is reserved for target use, so any memory allocated in this
	 * region should not be used by host
	 */
	do {
		if (qdf_likely(*paddr > x86_phy_addr))
			return QDF_STATUS_SUCCESS;
		else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "phy addr %pK exceeded 0x50000000 trying again",
				  paddr);

			nbuf_retry++;
			if ((*rx_netbuf)) {
				qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
						      QDF_DMA_FROM_DEVICE);
				/* Not freeing the buffer intentionally.
				 * Observed that the same buffer is getting
				 * re-allocated, resulting in longer load time
				 * and WMI init timeout.
				 * This buffer is anyway not useful, so skip it.
				 */
			}

			*rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
						    RX_BUFFER_SIZE,
						    RX_BUFFER_RESERVATION,
						    RX_BUFFER_ALIGNMENT,
						    FALSE);

			if (qdf_unlikely(!(*rx_netbuf)))
				return QDF_STATUS_E_FAILURE;

			ret = qdf_nbuf_map_single(dp_soc->osdev, *rx_netbuf,
						  QDF_DMA_FROM_DEVICE);

			if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
				qdf_nbuf_free(*rx_netbuf);
				*rx_netbuf = NULL;
				continue;
			}

			*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);
		}
	} while (nbuf_retry < MAX_RETRY);

	if ((*rx_netbuf)) {
		qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
				      QDF_DMA_FROM_DEVICE);
		qdf_nbuf_free(*rx_netbuf);
	}

	return QDF_STATUS_E_FAILURE;
}
#endif

/**
 * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
 *				   the MSDU Link Descriptor
 * @soc: core txrx main context
 * @buf_info: buf_info includes the cookie that is used to look up the
 *	      virtual address of the link descriptor. Normally this is just
 *	      an index into a per-SOC array.
 *
 * This is the VA of the link descriptor, that HAL layer later uses to
 * retrieve the list of MSDU's for a given MPDU.
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
				  struct hal_buf_info *buf_info)
{
	void *link_desc_va;
	uint32_t bank_id = LINK_DESC_COOKIE_BANK_ID(buf_info->sw_cookie);

	/* TODO */
	/* Add sanity for cookie */

	link_desc_va = soc->link_desc_banks[bank_id].base_vaddr +
		(buf_info->paddr -
		 soc->link_desc_banks[bank_id].base_paddr);

	return link_desc_va;
}

/**
 * dp_rx_cookie_2_mon_link_desc_va() - Converts cookie to a virtual address of
 *				       the MSDU Link Descriptor
 * @pdev: core txrx pdev context
 * @buf_info: buf_info includes the cookie that is used to look up the
 *	      virtual address of the link descriptor. Normally this is just
 *	      an index into a per-pdev array.
 * @mac_id: mac id for which the link descriptor bank is looked up
 *
 * This is the VA of the link descriptor in monitor mode destination ring,
 * that HAL layer later uses to retrieve the list of MSDU's for a given MPDU.
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_mon_link_desc_va(struct dp_pdev *pdev,
				      struct hal_buf_info *buf_info,
				      int mac_id)
{
	void *link_desc_va;
	int mac_for_pdev = dp_get_mac_id_for_mac(pdev->soc, mac_id);

	/* TODO */
	/* Add sanity for cookie */

	link_desc_va =
	   pdev->link_desc_banks[mac_for_pdev][buf_info->sw_cookie].base_vaddr +
	   (buf_info->paddr -
	    pdev->link_desc_banks[mac_for_pdev][buf_info->sw_cookie].base_paddr);

	return link_desc_va;
}

/**
 * dp_rx_defrag_concat() - Concatenate the fragments
 *
 * @dst: destination pointer to the buffer
 * @src: source pointer from where the fragment payload is to be copied
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
{
	/*
	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
	 * to provide space for src, the headroom portion is copied from
	 * the original dst buffer to the larger new dst buffer.
	 * (This is needed, because the headroom of the dst buffer
	 * contains the rx desc.)
	 */
	if (!qdf_nbuf_cat(dst, src)) {
		/*
		 * qdf_nbuf_cat does not free the src memory, so free
		 * the src nbuf before returning. In the failure case,
		 * the caller takes care of freeing the nbuf.
		 */
		qdf_nbuf_free(src);
		return QDF_STATUS_SUCCESS;
	}

	return QDF_STATUS_E_DEFRAG_ERROR;
}

#ifndef FEATURE_WDS
static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc,
					      uint16_t sa_idx, bool is_active)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_rx_wds_srcport_learn(struct dp_soc *soc,
			uint8_t *rx_tlv_hdr,
			struct dp_peer *ta_peer,
			qdf_nbuf_t nbuf)
{
}
#endif

/*
 * dp_rx_desc_dump() - dump the sw rx descriptor
 *
 * @rx_desc: sw rx descriptor
 */
static inline void dp_rx_desc_dump(struct dp_rx_desc *rx_desc)
{
	dp_info("rx_desc->nbuf: %pK, rx_desc->cookie: %d, rx_desc->pool_id: %d, rx_desc->in_use: %d, rx_desc->unmapped: %d",
		rx_desc->nbuf, rx_desc->cookie, rx_desc->pool_id,
		rx_desc->in_use, rx_desc->unmapped);
}

/*
 * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet.
 *				      In qwrap mode, packets originated from
 *				      any vdev should not loopback and
 *				      should be dropped.
 * @vdev: vdev on which rx packet is received
 * @nbuf: rx pkt
 *
 */
#if ATH_SUPPORT_WRAP
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						  qdf_nbuf_t nbuf)
{
	struct dp_vdev *psta_vdev;
	struct dp_pdev *pdev = vdev->pdev;
	uint8_t *data = qdf_nbuf_data(nbuf);

	if (qdf_unlikely(vdev->proxysta_vdev)) {
		/* In qwrap isolation mode, allow loopback packets as all
		 * packets go to RootAP and Loopback on the mpsta.
		 */
		if (vdev->isolation_vdev)
			return false;
		TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_unlikely(psta_vdev->proxysta_vdev &&
					 !qdf_mem_cmp(psta_vdev->mac_addr.raw,
						      &data[QDF_MAC_ADDR_SIZE],
						      QDF_MAC_ADDR_SIZE))) {
				/* Drop packet if source address is equal to
				 * any of the vdev addresses.
				 */
				return true;
			}
		}
	}
	return false;
}
#else
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						  qdf_nbuf_t nbuf)
{
	return false;
}
#endif

#if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
	defined(WLAN_SUPPORT_RX_TAG_STATISTICS) ||\
	defined(WLAN_SUPPORT_RX_FLOW_TAG)
#include "dp_rx_tag.h"
#endif

#ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
/**
 * dp_rx_update_protocol_tag() - Reads CCE metadata from the RX MSDU end TLV
 *				 and sets the corresponding tag in the QDF packet
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the protocol tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 * @ring_index: REO ring number, not used for error & monitor ring
 * @is_reo_exception: flag to indicate if rx from REO ring or exception ring
 * @is_update_stats: flag to indicate whether to update stats or not
 *
 * Return: void
 */
static inline void
dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev,
			  qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
			  uint16_t ring_index,
			  bool is_reo_exception, bool is_update_stats)
{
}
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */

#ifndef WLAN_SUPPORT_RX_FLOW_TAG
/**
 * dp_rx_update_flow_tag() - Reads FSE metadata from the RX MSDU end TLV
 *			     and sets the corresponding tag in the QDF packet
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the flow tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 * @update_stats: flag to indicate whether to update stats or not
 *
 * Return: void
 */
static inline void
dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev,
		      qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, bool update_stats)
{
}
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */

#if !defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) &&\
	!defined(WLAN_SUPPORT_RX_FLOW_TAG)
/**
 * dp_rx_mon_update_protocol_flow_tag() - Performs necessary checks for monitor
 *					  mode and then tags appropriate packets
 * @soc: core txrx main context
 * @dp_pdev: pdev on which the packet is received
 * @msdu: QDF packet buffer on which the protocol tag should be set
 * @rx_desc: base address where the RX TLVs start
 *
 * Return: void
 */
static inline
void dp_rx_mon_update_protocol_flow_tag(struct dp_soc *soc,
					struct dp_pdev *dp_pdev,
					qdf_nbuf_t msdu, void *rx_desc)
{
}
#endif /* !WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG && !WLAN_SUPPORT_RX_FLOW_TAG */

/*
 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 *
 * Return: return success or failure
 */
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				   struct dp_srng *dp_rxdma_srng,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t num_req_buffers,
				   union dp_rx_desc_list_elem_t **desc_list,
				   union dp_rx_desc_list_elem_t **tail);

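/*
 * Illustrative call pattern (not part of the driver): at init time the
 * caller passes empty desc_list/tail so descriptors are pulled from the
 * pool's free list; at the end of dp_rx_process the reaped descriptors
 * are handed back through the same two pointers:
 *
 *   union dp_rx_desc_list_elem_t *desc_list = NULL;
 *   union dp_rx_desc_list_elem_t *tail = NULL;
 *
 *   dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
 *                           num_req_buffers, &desc_list, &tail);
 */
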
/*
 * dp_pdev_rx_buffers_attach() - replenish rxdma ring with rx nbufs
 *				 called during dp rx initialization
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 *
 * Return: return success or failure
 */
QDF_STATUS
dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
			  struct dp_srng *dp_rxdma_srng,
			  struct rx_desc_pool *rx_desc_pool,
			  uint32_t num_req_buffers);

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		       uint8_t bm_action);

/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to
 *				      (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
			       hal_link_desc_t link_desc_addr,
			       uint8_t bm_action);

/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		     uint32_t mac_id, uint32_t quota);

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer);
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr);

int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
			   struct dp_peer *peer);

qdf_nbuf_t
dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev);

/*
 * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
 *
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @ring_desc: opaque pointer to the RX ring descriptor
 * @rx_desc: host rx descriptor
 *
 * Return: void
 */
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring_hdl,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc);

void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
#ifdef RX_DESC_DEBUG_CHECK
/**
 * dp_rx_desc_check_magic() - check the magic value in dp_rx_desc
 * @rx_desc: rx descriptor pointer
 *
 * Return: true, if magic is correct, else false.
 */
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
		return false;

	rx_desc->magic = 0;
	return true;
}

/**
 * dp_rx_desc_prep() - prepare rx desc
 * @rx_desc: rx descriptor pointer to be prepared
 * @nbuf: nbuf to be associated with rx_desc
 *
 * Note: assumption is that we are associating a nbuf which is mapped
 *
 * Return: none
 */
static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->nbuf = nbuf;
	rx_desc->unmapped = 0;
}

#else

static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	return true;
}

static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->nbuf = nbuf;
	rx_desc->unmapped = 0;
}
#endif /* RX_DESC_DEBUG_CHECK */
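
/*
 * Illustrative pairing (an assumption about usage, not a definitive
 * contract): dp_rx_desc_prep() is called when an nbuf is attached to a
 * descriptor at replenish time, and dp_rx_desc_check_magic() when the
 * descriptor is reaped back from the ring, e.g.
 *
 *   dp_rx_desc_prep(rx_desc, nbuf);
 *   ...
 *   if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc)))
 *           dp_rx_dump_info_and_assert(soc, hal_ring_hdl, ring_desc,
 *                                      rx_desc);
 */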

void dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer,
			     uint8_t err_code, uint8_t mac_id);

#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif

#ifndef QCA_MULTIPASS_SUPPORT
static inline
bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf, uint8_t tid)
{
	return false;
}
#else
bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf,
			     uint8_t tid);
#endif

#endif /* _DP_RX_H */