/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_RX_H
#define _DP_RX_H

#include "hal_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"
#include "dp_internal.h"

#ifdef RXDMA_OPTIMIZATION
#ifdef NO_RX_PKT_HDR_TLV
#define RX_BUFFER_ALIGNMENT	0
#else
#define RX_BUFFER_ALIGNMENT	128
#endif /* NO_RX_PKT_HDR_TLV */
#else /* RXDMA_OPTIMIZATION */
#define RX_BUFFER_ALIGNMENT	4
#endif /* RXDMA_OPTIMIZATION */

#ifdef QCA_HOST2FW_RXBUF_RING
#define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW1_BM

/**
 * For MCL cases, allocate as many RX descriptors as buffers in the SW2RXDMA
 * ring. This value may need to be tuned later.
 */
#define DP_RX_DESC_ALLOC_MULTIPLIER 1
#else
#define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW3_BM

48/**
49 * AP use cases need to allocate more RX Descriptors than the number of
50 * entries avaialable in the SW2RXDMA buffer replenish ring. This is to account
51 * for frames sitting in REO queues, HW-HW DMA rings etc. Hence using a
52 * multiplication factor of 3, to allocate three times as many RX descriptors
53 * as RX buffers.
54 */
55#define DP_RX_DESC_ALLOC_MULTIPLIER 3
56#endif /* QCA_HOST2FW_RXBUF_RING */
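
/*
 * Illustrative sketch (not part of the original header): per the comments
 * above, the SW RX descriptor pool is sized from the replenish ring depth.
 * dp_rxdma_ring_entries() is a hypothetical helper used only for this
 * example.
 *
 *	uint32_t ring_entries = dp_rxdma_ring_entries(pdev);
 *	uint32_t num_rx_descs = ring_entries * DP_RX_DESC_ALLOC_MULTIPLIER;
 */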

#define RX_BUFFER_RESERVATION	0

#define DP_PEER_METADATA_PEER_ID_MASK	0x0000ffff
#define DP_PEER_METADATA_PEER_ID_SHIFT	0
#define DP_PEER_METADATA_VDEV_ID_MASK	0x00070000
#define DP_PEER_METADATA_VDEV_ID_SHIFT	16

#define DP_PEER_METADATA_PEER_ID_GET(_peer_metadata)		\
	(((_peer_metadata) & DP_PEER_METADATA_PEER_ID_MASK)	\
			>> DP_PEER_METADATA_PEER_ID_SHIFT)

#define DP_PEER_METADATA_ID_GET(_peer_metadata)			\
	(((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK)	\
			>> DP_PEER_METADATA_VDEV_ID_SHIFT)

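/*
 * Worked example (illustrative, not part of the original header): with the
 * masks and shifts above, peer_metadata 0x0003000a decodes as
 *
 *	DP_PEER_METADATA_PEER_ID_GET(0x0003000a) == 0x000a  (peer_id 10)
 *	DP_PEER_METADATA_ID_GET(0x0003000a)      == 0x3     (vdev_id 3)
 */
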
#define DP_RX_DESC_MAGIC 0xdec0de

/**
 * struct dp_rx_desc
 *
 * @nbuf		: VA of the "skb" posted
 * @rx_buf_start	: VA of the original Rx buffer, before
 *			  movement of any skb->data pointer
 * @cookie		: index into the sw array which holds
 *			  the sw Rx descriptors
 *			  Cookie space is 21 bits:
 *			  lower 18 bits -- index
 *			  upper  3 bits -- pool_id
 * @pool_id		: pool ID for which this descriptor was allocated.
 *			  Can only be used if there is no flow
 *			  steering
 * @in_use		: rx_desc is in use
 * @unmapped		: used to mark rx_desc as unmapped if the corresponding
 *			  nbuf is already unmapped
 */
struct dp_rx_desc {
	qdf_nbuf_t nbuf;
	uint8_t *rx_buf_start;
	uint32_t cookie;
	uint8_t	 pool_id;
#ifdef RX_DESC_DEBUG_CHECK
	uint32_t magic;
#endif
	uint8_t	in_use:1,
		unmapped:1;
};

/* RX Descriptor Multi Page memory alloc related */
#define DP_RX_DESC_OFFSET_NUM_BITS 8
#define DP_RX_DESC_PAGE_ID_NUM_BITS 8
#define DP_RX_DESC_POOL_ID_NUM_BITS 4

#define DP_RX_DESC_PAGE_ID_SHIFT DP_RX_DESC_OFFSET_NUM_BITS
#define DP_RX_DESC_POOL_ID_SHIFT \
		(DP_RX_DESC_OFFSET_NUM_BITS + DP_RX_DESC_PAGE_ID_NUM_BITS)
#define RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK \
	(((1 << DP_RX_DESC_POOL_ID_NUM_BITS) - 1) << DP_RX_DESC_POOL_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK	\
			(((1 << DP_RX_DESC_PAGE_ID_NUM_BITS) - 1) << \
			 DP_RX_DESC_PAGE_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK \
			((1 << DP_RX_DESC_OFFSET_NUM_BITS) - 1)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(_cookie)	\
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK) >> \
			DP_RX_DESC_POOL_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(_cookie)	\
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK) >> \
			DP_RX_DESC_PAGE_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(_cookie)	\
	((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK)
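
/*
 * Worked example (illustrative, not part of the original header): a
 * multi-page cookie packs [pool_id(4) | page_id(8) | offset(8)], so
 * cookie 0x00010305 decodes as
 *
 *	DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(0x00010305) == 1
 *	DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(0x00010305) == 3
 *	DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(0x00010305)  == 5
 */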

#define RX_DESC_COOKIE_INDEX_SHIFT		0
#define RX_DESC_COOKIE_INDEX_MASK		0x3ffff /* 18 bits */
#define RX_DESC_COOKIE_POOL_ID_SHIFT		18
#define RX_DESC_COOKIE_POOL_ID_MASK		0x1c0000

#define DP_RX_DESC_COOKIE_MAX	\
	(RX_DESC_COOKIE_INDEX_MASK | RX_DESC_COOKIE_POOL_ID_MASK)

#define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >>	\
			RX_DESC_COOKIE_POOL_ID_SHIFT)

#define DP_RX_DESC_COOKIE_INDEX_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
			RX_DESC_COOKIE_INDEX_SHIFT)
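
/*
 * Worked example (illustrative, not part of the original header): a
 * single-page cookie packs [pool_id(3) | index(18)], so cookie 0x81234
 * decodes as
 *
 *	DP_RX_DESC_COOKIE_POOL_ID_GET(0x81234) == 2
 *	DP_RX_DESC_COOKIE_INDEX_GET(0x81234)   == 0x1234
 */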

/* DOC: Offset to obtain LLC hdr
 *
 * In the case of Wifi parse error
 * to reach LLC header from beginning
 * of VLAN tag we need to skip 8 bytes.
 * Vlan_tag(4)+length(2)+length added
 * by HW(2) = 8 bytes.
 */
#define DP_SKIP_VLAN		8

/**
 * struct dp_rx_cached_buf - rx cached buffer
 * @node: linked list node
 * @buf: skb buffer
 */
struct dp_rx_cached_buf {
	qdf_list_node_t node;
	qdf_nbuf_t buf;
};

/*
 * dp_rx_xor_block() - xor block of data
 * @b: destination data block
 * @a: source data block
 * @len: length of the data to process
 *
 * Returns: None
 */
static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len)
{
	qdf_size_t i;

	for (i = 0; i < len; i++)
		b[i] ^= a[i];
}

/*
 * dp_rx_rotl() - rotate the bits left
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Returns: Integer with left rotated by number of 'bits'
 */
static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
{
	return (val << bits) | (val >> (32 - bits));
}

/*
 * dp_rx_rotr() - rotate the bits right
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Returns: Integer with right rotated by number of 'bits'
 */
static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
{
	return (val >> bits) | (val << (32 - bits));
}

/*
 * dp_set_rx_queue() - set queue_mapping in skb
 * @nbuf: skb
 * @queue_id: rx queue_id
 *
 * Return: void
 */
#ifdef QCA_OL_RX_MULTIQ_SUPPORT
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
	qdf_nbuf_record_rx_queue(nbuf, queue_id);
	return;
}
#else
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
}
#endif

/*
 * dp_rx_xswap() - swap the adjacent bytes of a 32-bit word
 * @val: unsigned integer input value
 *
 * Returns: Integer with adjacent bytes swapped
 */
static inline uint32_t dp_rx_xswap(uint32_t val)
{
	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
}

/*
 * dp_rx_get_le32_split() - get little endian 32 bits split
 * @b0: byte 0
 * @b1: byte 1
 * @b2: byte 2
 * @b3: byte 3
 *
 * Returns: Integer with split little endian 32 bits
 */
static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
					    uint8_t b3)
{
	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
}

/*
 * dp_rx_get_le32() - get little endian 32 bits
 * @p: source byte array holding the 32-bit value in little endian order
 *
 * Returns: Integer with little endian 32 bits
 */
static inline uint32_t dp_rx_get_le32(const uint8_t *p)
{
	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
}

/*
 * dp_rx_put_le32() - put little endian 32 bits
 * @p: destination char array
 * @v: source 32-bit integer
 *
 * Returns: None
 */
static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (v) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

/* Extract michael mic block of data */
#define dp_rx_michael_block(l, r)		\
	do {					\
		r ^= dp_rx_rotl(l, 17);		\
		l += r;				\
		r ^= dp_rx_xswap(l);		\
		l += r;				\
		r ^= dp_rx_rotl(l, 3);		\
		l += r;				\
		r ^= dp_rx_rotr(l, 2);		\
		l += r;				\
	} while (0)
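
/*
 * Usage sketch (assumption, for illustration only): the Michael MIC folds
 * data into the (l, r) state one little-endian 32-bit word at a time,
 * roughly as below; "key" and "data" are hypothetical byte buffers.
 *
 *	l = dp_rx_get_le32(key);
 *	r = dp_rx_get_le32(key + 4);
 *	for (i = 0; i < data_len; i += 4) {
 *		l ^= dp_rx_get_le32(data + i);
 *		dp_rx_michael_block(l, r);
 *	}
 */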

/**
 * struct dp_rx_desc_list_elem_t
 *
 * @next	: Next pointer to form free list
 * @rx_desc	: DP Rx descriptor
 */
union dp_rx_desc_list_elem_t {
	union dp_rx_desc_list_elem_t *next;
	struct dp_rx_desc rx_desc;
};

#ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_rx_desc_find() - find dp rx descriptor from page ID and offset
 * @page_id: Page ID
 * @offset: Offset of the descriptor element
 * @rx_pool: RX descriptor pool from which the descriptor is looked up
 *
 * Return: RX descriptor element
 */
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_pool);

static inline
struct dp_rx_desc *dp_get_rx_desc_from_cookie(struct dp_soc *soc,
					      struct rx_desc_pool *pool,
					      uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
	struct rx_desc_pool *rx_desc_pool;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
		return NULL;

	rx_desc_pool = &pool[pool_id];
	rx_desc_elem = (union dp_rx_desc_list_elem_t *)
		(rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset);

	return &rx_desc_elem->rx_desc;
}

/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc,
					       uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_buf[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc,
					     uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_mon[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc,
						uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_status[0], cookie);
}
#else
/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	struct rx_desc_pool *rx_desc_pool;

	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
		return NULL;

	rx_desc_pool = &soc->rx_desc_buf[pool_id];

	if (qdf_unlikely(index >= rx_desc_pool->pool_size))
		return NULL;

	return &(soc->rx_desc_buf[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */

void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool);

uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				struct rx_desc_pool *rx_desc_pool,
				uint16_t num_descs,
				union dp_rx_desc_list_elem_t **desc_list,
				union dp_rx_desc_list_elem_t **tail);


QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);

void dp_rx_pdev_detach(struct dp_pdev *pdev);


uint32_t
dp_rx_process(struct dp_intr *int_ctx, void *hal_ring, uint8_t reo_ring_num,
	      uint32_t quota);

uint32_t dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota);

uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota);

/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		       multiple nbufs.
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr);

/*
 * dp_rx_desc_pool_alloc() - create a pool of software rx_descs
 *			     at the time of dp rx initialization
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: number of Rx descriptors in the pool
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF status
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id,
				 uint32_t pool_size, struct rx_desc_pool *pool);

/*
 * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during
 *				     de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool);

/*
 * dp_rx_desc_nbuf_free() - free the sw rx desc nbufs called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);

/*
 * dp_rx_desc_pool_free() - free the sw rx desc array called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);

void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		       struct dp_peer *peer);

/**
 * dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list
 *
 * @head: pointer to the head of local free list
 * @tail: pointer to the tail of local free list
 * @new: new descriptor that is added to the free list
 *
 * Return: void
 */
static inline
void dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
				 union dp_rx_desc_list_elem_t **tail,
				 struct dp_rx_desc *new)
{
	qdf_assert(head && new);

	new->nbuf = NULL;
	new->in_use = 0;

	((union dp_rx_desc_list_elem_t *)new)->next = *head;
	*head = (union dp_rx_desc_list_elem_t *)new;
	if (!*tail)
		*tail = *head;

}

uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf);
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
		qdf_nbuf_t mpdu, bool mpdu_done);
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer);
void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		       uint16_t peer_id, uint8_t tid);


#define DP_RX_LIST_APPEND(head, tail, elem)                            \
	do {                                                           \
		if (!(head)) {                                         \
			(head) = (elem);                               \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) = 1; \
		} else {                                               \
			qdf_nbuf_set_next((tail), (elem));             \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head)++;   \
		}                                                      \
		(tail) = (elem);                                       \
		qdf_nbuf_set_next((tail), NULL);                       \
	} while (0)
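
/*
 * Usage sketch (assumption, illustrative only): building an nbuf delivery
 * list while walking received MSDUs; deliver_list_head/deliver_list_tail
 * are hypothetical local variables of type qdf_nbuf_t, initialized to NULL.
 *
 *	DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
 */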

#ifndef BUILD_X86
static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
				  qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
#else
#define MAX_RETRY 100
static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
				  qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
{
	uint32_t nbuf_retry = 0;
	int32_t ret;
	const uint32_t x86_phy_addr = 0x50000000;
	/*
	 * in M2M emulation platforms (x86) the memory below 0x50000000
	 * is reserved for target use, so any memory allocated in this
	 * region should not be used by host
	 */
	do {
		if (qdf_likely(*paddr > x86_phy_addr))
			return QDF_STATUS_SUCCESS;
		else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "phy addr %pK is below 0x50000000, trying again",
				  paddr);

			nbuf_retry++;
			if ((*rx_netbuf)) {
				qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
						      QDF_DMA_FROM_DEVICE);
				/* Not freeing buffer intentionally.
				 * Observed that same buffer is getting
				 * re-allocated resulting in longer load time
				 * and WMI init timeout.
				 * This buffer is anyway not useful so skip it.
				 **/
			}

			*rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
						    RX_BUFFER_SIZE,
						    RX_BUFFER_RESERVATION,
						    RX_BUFFER_ALIGNMENT,
						    FALSE);

			if (qdf_unlikely(!(*rx_netbuf)))
				return QDF_STATUS_E_FAILURE;

			ret = qdf_nbuf_map_single(dp_soc->osdev, *rx_netbuf,
						  QDF_DMA_FROM_DEVICE);

			if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
				qdf_nbuf_free(*rx_netbuf);
				*rx_netbuf = NULL;
				continue;
			}

			*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);
		}
	} while (nbuf_retry < MAX_RETRY);

	if ((*rx_netbuf)) {
		qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
				      QDF_DMA_FROM_DEVICE);
		qdf_nbuf_free(*rx_netbuf);
	}

	return QDF_STATUS_E_FAILURE;
}
#endif

/**
 * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
 *				   the MSDU Link Descriptor
 * @soc: core txrx main context
 * @buf_info: buf_info includes the cookie that is used to look up the virtual
 *	      address of the link descriptor. Normally this is just an index
 *	      into a per SOC array.
 *
 * This is the VA of the link descriptor, that HAL layer later uses to
 * retrieve the list of MSDU's for a given MPDU.
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
				  struct hal_buf_info *buf_info)
{
	void *link_desc_va;
	uint32_t bank_id = LINK_DESC_COOKIE_BANK_ID(buf_info->sw_cookie);


	/* TODO */
	/* Add sanity for cookie */

	link_desc_va = soc->link_desc_banks[bank_id].base_vaddr +
		(buf_info->paddr -
			soc->link_desc_banks[bank_id].base_paddr);

	return link_desc_va;
}

/**
 * dp_rx_cookie_2_mon_link_desc_va() - Converts cookie to a virtual address of
 *				       the MSDU Link Descriptor
 * @pdev: core txrx pdev context
 * @buf_info: buf_info includes the cookie that is used to look up the virtual
 *	      address of the link descriptor. Normally this is just an index
 *	      into a per pdev array.
 *
 * This is the VA of the link descriptor in monitor mode destination ring,
 * that HAL layer later uses to retrieve the list of MSDU's for a given MPDU.
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_mon_link_desc_va(struct dp_pdev *pdev,
				      struct hal_buf_info *buf_info,
				      int mac_id)
{
	void *link_desc_va;
	int mac_for_pdev = dp_get_mac_id_for_mac(pdev->soc, mac_id);

	/* TODO */
	/* Add sanity for cookie */

	link_desc_va =
	   pdev->link_desc_banks[mac_for_pdev][buf_info->sw_cookie].base_vaddr +
	   (buf_info->paddr -
	   pdev->link_desc_banks[mac_for_pdev][buf_info->sw_cookie].base_paddr);

	return link_desc_va;
}

/**
 * dp_rx_defrag_concat() - Concatenate the fragments
 *
 * @dst: destination pointer to the buffer
 * @src: source pointer from where the fragment payload is to be copied
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
{
	/*
	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
	 * to provide space for src, the headroom portion is copied from
	 * the original dst buffer to the larger new dst buffer.
	 * (This is needed, because the headroom of the dst buffer
	 * contains the rx desc.)
	 */
	if (!qdf_nbuf_cat(dst, src)) {
		/*
		 * qdf_nbuf_cat does not free the src memory.
		 * Free src nbuf before returning.
		 * For the failure case the caller takes care of freeing
		 * the nbuf.
		 */
		qdf_nbuf_free(src);
		return QDF_STATUS_SUCCESS;
	}

	return QDF_STATUS_E_DEFRAG_ERROR;
}

#ifndef FEATURE_WDS
static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_idx, bool is_active)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_rx_wds_srcport_learn(struct dp_soc *soc,
			uint8_t *rx_tlv_hdr,
			struct dp_peer *ta_peer,
			qdf_nbuf_t nbuf)
{
}
#endif

/*
 * dp_rx_desc_dump() - dump the sw rx descriptor
 *
 * @rx_desc: sw rx descriptor
 */
static inline void dp_rx_desc_dump(struct dp_rx_desc *rx_desc)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
		  "rx_desc->nbuf: %pK, rx_desc->cookie: %d, rx_desc->pool_id: %d, rx_desc->in_use: %d, rx_desc->unmapped: %d",
		  rx_desc->nbuf, rx_desc->cookie, rx_desc->pool_id,
		  rx_desc->in_use, rx_desc->unmapped);
}

/*
 * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet.
 *				      In qwrap mode, packets originating from
 *				      any vdev should not loop back and
 *				      should be dropped.
 * @vdev: vdev on which rx packet is received
 * @nbuf: rx pkt
 *
 */
#if ATH_SUPPORT_WRAP
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						  qdf_nbuf_t nbuf)
{
	struct dp_vdev *psta_vdev;
	struct dp_pdev *pdev = vdev->pdev;
	uint8_t *data = qdf_nbuf_data(nbuf);

	if (qdf_unlikely(vdev->proxysta_vdev)) {
		/* In qwrap isolation mode, allow loopback packets as all
		 * packets go to RootAP and Loopback on the mpsta.
		 */
		if (vdev->isolation_vdev)
			return false;
		TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_unlikely(psta_vdev->proxysta_vdev &&
					 !qdf_mem_cmp(psta_vdev->mac_addr.raw,
						      &data[QDF_MAC_ADDR_SIZE],
						      QDF_MAC_ADDR_SIZE))) {
				/* Drop packet if source address is equal to
				 * any of the vdev addresses.
				 */
				return true;
			}
		}
	}
	return false;
}
#else
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						  qdf_nbuf_t nbuf)
{
	return false;
}
#endif


#if defined(WLAN_SUPPORT_RX_TAG_STATISTICS) && \
	defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG)
/**
 * dp_rx_update_rx_protocol_tag_stats() - Increments the protocol tag stats
 *                                        for the given protocol type
 * @pdev: TXRX pdev context for which stats should be incremented
 * @protocol_index: Protocol index for which the stats should be incremented
 * @ring_index: REO ring number from which this tag was received.
 *
 * Since HKv2 is a SMP, two or more cores may simultaneously receive packets
 * of same type, and hence attempt to increment counters for the same protocol
 * type at the same time. This creates the possibility of missing stats.
 *
 * For example, when two or more CPUs have each read the old tag value, V,
 * for protocol type, P and each increment the value to V+1. Instead, the
 * operations should have been sequenced to achieve a final value of V+2.
 *
 * In order to avoid this scenario, we can either use locks or store stats
 * on a per-CPU basis. Since tagging happens in the core data path, locks
 * are not preferred. Instead, we use a per-ring counter, since each CPU
 * operates on a REO ring.
 *
 * Return: void
 */
static inline void dp_rx_update_rx_protocol_tag_stats(struct dp_pdev *pdev,
						       uint16_t protocol_index,
						       uint16_t ring_index)
{
	if (ring_index >= MAX_REO_DEST_RINGS)
		return;

	pdev->reo_proto_tag_stats[ring_index][protocol_index].tag_ctr++;
}
#else
static inline void dp_rx_update_rx_protocol_tag_stats(struct dp_pdev *pdev,
						       uint16_t protocol_index,
						       uint16_t ring_index)
{
}
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */

#if defined(WLAN_SUPPORT_RX_TAG_STATISTICS) && \
	defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG)
/**
 * dp_rx_update_rx_err_protocol_tag_stats() - Increments the protocol tag stats
 *                                            for the given protocol type
 *                                            received from exception ring
 * @pdev: TXRX pdev context for which stats should be incremented
 * @protocol_index: Protocol index for which the stats should be incremented
 *
 * In HKv2, all exception packets are received on Ring-0 (along with normal
 * Rx). Hence tags are maintained separately for exception ring as well.
 *
 * Return: void
 */
static inline
void dp_rx_update_rx_err_protocol_tag_stats(struct dp_pdev *pdev,
					    uint16_t protocol_index)
{
	pdev->rx_err_proto_tag_stats[protocol_index].tag_ctr++;
}
#else
static inline
void dp_rx_update_rx_err_protocol_tag_stats(struct dp_pdev *pdev,
					    uint16_t protocol_index)
{
}
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
/**
 * dp_rx_update_protocol_tag() - Reads CCE metadata from the RX MSDU end TLV
 *				 and set the corresponding tag in QDF packet
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the protocol tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 * @ring_index: REO ring number, not used for error & monitor ring
 * @is_reo_exception: flag to indicate if rx from REO ring or exception ring
 * @is_update_stats: flag to indicate whether to update stats or not
 * Return: void
 */
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
static inline void
dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev,
			  qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
			  uint16_t ring_index,
			  bool is_reo_exception, bool is_update_stats)
{
	uint16_t cce_metadata = RX_PROTOCOL_TAG_START_OFFSET;
	bool cce_match = false;
	struct dp_pdev *pdev;
	uint16_t protocol_tag = 0;

	if (qdf_unlikely(!vdev))
		return;

	pdev = vdev->pdev;

	if (qdf_likely(!pdev->is_rx_protocol_tagging_enabled))
		return;

	/*
	 * In case of raw frames, rx_attention and rx_msdu_end tlv
	 * may be stale or invalid. Do not tag such frames.
	 * Default decap_type is set to ethernet for monitor vdev,
	 * therefore, cannot check decap_type for monitor mode.
	 * We will call this only for eth frames from dp_rx_mon_dest.c.
	 */
	if (qdf_likely(!(pdev->monitor_vdev && pdev->monitor_vdev == vdev) &&
		       (vdev->rx_decap_type != htt_cmn_pkt_type_ethernet)))
		return;

	/*
	 * Check whether HW has filled in the CCE metadata in
	 * this packet, if not filled, just return
	 */
	if (qdf_likely(!hal_rx_msdu_cce_match_get(rx_tlv_hdr)))
		return;

	cce_match = true;
	/* Get the cce_metadata from RX MSDU TLV */
	cce_metadata = (hal_rx_msdu_cce_metadata_get(rx_tlv_hdr) &
			RX_MSDU_END_16_CCE_METADATA_MASK);
	/*
	 * Received CCE metadata should be within the
	 * valid limits
	 */
	qdf_assert_always((cce_metadata >= RX_PROTOCOL_TAG_START_OFFSET) &&
			  (cce_metadata < (RX_PROTOCOL_TAG_START_OFFSET +
					   RX_PROTOCOL_TAG_MAX)));

	/*
	 * The CCE metadata received is just the
	 * packet_type + RX_PROTOCOL_TAG_START_OFFSET
	 */
	cce_metadata -= RX_PROTOCOL_TAG_START_OFFSET;

	/*
	 * Update the QDF packet with the user-specified
	 * tag/metadata by looking up tag value for
	 * received protocol type.
	 */
	protocol_tag = pdev->rx_proto_tag_map[cce_metadata].tag;
	qdf_nbuf_set_rx_protocol_tag(nbuf, protocol_tag);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
		  "Seq:%u decap:%u CCE Match:%d ProtoID:%u Tag:%u US:%d",
		  hal_rx_get_rx_sequence(rx_tlv_hdr),
		  vdev->rx_decap_type, cce_match, cce_metadata,
		  protocol_tag, is_update_stats);

	if (qdf_likely(!is_update_stats))
		return;

	if (qdf_unlikely(is_reo_exception)) {
		dp_rx_update_rx_err_protocol_tag_stats(pdev,
						       cce_metadata);
	} else {
		dp_rx_update_rx_protocol_tag_stats(pdev,
						   cce_metadata,
						   ring_index);
	}

}
#else
static inline void
dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev,
			  qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
			  uint16_t ring_index,
			  bool is_reo_exception, bool is_update_stats)
{
	/* Stub API */
}
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */

/**
 * dp_rx_mon_update_protocol_tag() - Performs necessary checks for monitor mode
 *				     and then tags appropriate packets
 * @soc: core txrx main context
 * @dp_pdev: pdev on which the packet is received
 * @msdu: QDF packet buffer on which the protocol tag should be set
 * @rx_desc: base address where the RX TLVs start
 * Return: void
 */
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
static inline
void dp_rx_mon_update_protocol_tag(struct dp_soc *soc, struct dp_pdev *dp_pdev,
				   qdf_nbuf_t msdu, void *rx_desc)
{
	uint32_t msdu_ppdu_id = 0;
	struct mon_rx_status *mon_recv_status;

	if (qdf_likely(!dp_pdev->is_rx_protocol_tagging_enabled))
		return;

	if (qdf_likely(!dp_pdev->monitor_vdev))
		return;

	if (qdf_likely(1 != dp_pdev->ppdu_info.rx_status.rxpcu_filter_pass))
		return;

	msdu_ppdu_id = HAL_RX_HW_DESC_GET_PPDUID_GET(rx_desc);

	if (msdu_ppdu_id != dp_pdev->ppdu_info.com_info.ppdu_id) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_ERROR,
			  "msdu_ppdu_id=%x,com_info.ppdu_id=%x",
			  msdu_ppdu_id,
			  dp_pdev->ppdu_info.com_info.ppdu_id);
		return;
	}

	/*
	 * Update the protocol tag in SKB for packets received on BSS.
	 * Do not update tag stats since it would double actual received count
	 */
	mon_recv_status = &dp_pdev->ppdu_info.rx_status;
	if (mon_recv_status->frame_control_info_valid &&
	    ((mon_recv_status->frame_control & IEEE80211_FC0_TYPE_MASK) ==
	      IEEE80211_FC0_TYPE_DATA)) {
		dp_rx_update_protocol_tag(soc,
					  dp_pdev->monitor_vdev,
					  msdu, rx_desc,
					  MAX_REO_DEST_RINGS,
					  false, false);
	}
}
#else
static inline
void dp_rx_mon_update_protocol_tag(struct dp_soc *soc, struct dp_pdev *dp_pdev,
				   qdf_nbuf_t msdu, void *rx_desc)
{
	/* Stub API */
}
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
/*
 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffer to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 * Return: return success or failure
 */
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				   struct dp_srng *dp_rxdma_srng,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t num_req_buffers,
				   union dp_rx_desc_list_elem_t **desc_list,
				   union dp_rx_desc_list_elem_t **tail);

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: put to idle_list or release to msdu_list
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action);

QDF_STATUS
dp_rx_link_desc_buf_return(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng,
				void *buf_addr_info, uint8_t bm_action);
/**
 * dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to
 *				    (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr,
			       uint8_t bm_action);

uint32_t
dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id,
		     uint32_t quota);

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer);
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr);

int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
			   struct dp_peer *peer);

qdf_nbuf_t
dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev);

void dp_rx_dump_info_and_assert(struct dp_soc *soc, void *hal_ring,
				void *ring_desc, struct dp_rx_desc *rx_desc);

void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
#ifdef RX_DESC_DEBUG_CHECK
/**
 * dp_rx_desc_check_magic() - check the magic value in dp_rx_desc
 * @rx_desc: rx descriptor pointer
 *
 * Return: true, if magic is correct, else false.
 */
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
		return false;

	rx_desc->magic = 0;
	return true;
}

/**
 * dp_rx_desc_prep() - prepare rx desc
 * @rx_desc: rx descriptor pointer to be prepared
 * @nbuf: nbuf to be associated with rx_desc
 *
 * Note: assumption is that we are associating a nbuf which is mapped
 *
 * Return: none
 */
static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->nbuf = nbuf;
	rx_desc->unmapped = 0;
}

#else

static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	return true;
}

static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->nbuf = nbuf;
	rx_desc->unmapped = 0;
}
#endif /* RX_DESC_DEBUG_CHECK */

void dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer,
			     uint8_t err_code);

#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif
#endif /* _DP_RX_H */