/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_RX_H
#define _DP_RX_H

#include "hal_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"

#ifdef RXDMA_OPTIMIZATION
#define RX_BUFFER_ALIGNMENT	128
#else /* RXDMA_OPTIMIZATION */
#define RX_BUFFER_ALIGNMENT	4
#endif /* RXDMA_OPTIMIZATION */

#define RX_BUFFER_SIZE		2048
#define RX_BUFFER_RESERVATION	0

#define DP_PEER_METADATA_PEER_ID_MASK	0x0000ffff
#define DP_PEER_METADATA_PEER_ID_SHIFT	0
#define DP_PEER_METADATA_VDEV_ID_MASK	0x00070000
#define DP_PEER_METADATA_VDEV_ID_SHIFT	16

#define DP_PEER_METADATA_PEER_ID_GET(_peer_metadata)		\
	(((_peer_metadata) & DP_PEER_METADATA_PEER_ID_MASK)	\
			>> DP_PEER_METADATA_PEER_ID_SHIFT)

#define DP_PEER_METADATA_ID_GET(_peer_metadata)			\
	(((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK)	\
			>> DP_PEER_METADATA_VDEV_ID_SHIFT)

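/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * peer_metadata packs the peer id in bits [15:0] and the vdev id in
 * bits [18:16], so the accessors above unpack it as follows, e.g.
 * 0x00050003 -> peer_id 3, vdev_id 5.
 */
static inline void dp_rx_peer_metadata_example(uint32_t peer_metadata,
					       uint16_t *peer_id,
					       uint8_t *vdev_id)
{
	*peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_metadata);
	*vdev_id = DP_PEER_METADATA_ID_GET(peer_metadata);
}
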
#define DP_RX_DESC_MAGIC 0xdec0de

/**
 * struct dp_rx_desc
 *
 * @nbuf		: VA of the "skb" posted
 * @rx_buf_start	: VA of the original Rx buffer, before
 *			  movement of any skb->data pointer
 * @cookie		: index into the sw array which holds
 *			  the sw Rx descriptors
 *			  Cookie space is 21 bits:
 *			  lower 18 bits -- index
 *			  upper 3 bits -- pool_id
 * @pool_id		: pool ID from which this descriptor was allocated.
 *			  Can only be used if there is no flow
 *			  steering
 * @in_use		: rx_desc is in use
 * @unmapped		: used to mark rx_desc as unmapped if the
 *			  corresponding nbuf is already unmapped
 */
struct dp_rx_desc {
	qdf_nbuf_t nbuf;
	uint8_t *rx_buf_start;
	uint32_t cookie;
	uint8_t	 pool_id;
#ifdef RX_DESC_DEBUG_CHECK
	uint32_t magic;
#endif
	uint8_t	in_use:1,
		unmapped:1;
};

#define RX_DESC_COOKIE_INDEX_SHIFT	0
#define RX_DESC_COOKIE_INDEX_MASK	0x3ffff /* 18 bits */
#define RX_DESC_COOKIE_POOL_ID_SHIFT	18
#define RX_DESC_COOKIE_POOL_ID_MASK	0x1c0000

#define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >>	\
			RX_DESC_COOKIE_POOL_ID_SHIFT)

#define DP_RX_DESC_COOKIE_INDEX_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
			RX_DESC_COOKIE_INDEX_SHIFT)

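/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * a cookie packs (pool_id << 18 | index), so e.g. 0x40005 names entry 5
 * of pool 1; the GET macros above recover the two fields. Assumes
 * pool_id < 8 and index fits in 18 bits.
 */
static inline uint32_t dp_rx_desc_cookie_example(uint8_t pool_id,
						 uint32_t index)
{
	uint32_t cookie =
		((uint32_t)pool_id << RX_DESC_COOKIE_POOL_ID_SHIFT) |
		((index << RX_DESC_COOKIE_INDEX_SHIFT) &
		 RX_DESC_COOKIE_INDEX_MASK);

	/* round trip: the GET macros recover the original fields */
	qdf_assert(DP_RX_DESC_COOKIE_POOL_ID_GET(cookie) == pool_id);
	qdf_assert(DP_RX_DESC_COOKIE_INDEX_GET(cookie) == index);

	return cookie;
}
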
/*
 * dp_rx_xor_block() - xor block of data
 * @b: destination data block
 * @a: source data block
 * @len: length of the data to process
 *
 * Returns: None
 */
static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len)
{
	qdf_size_t i;

	for (i = 0; i < len; i++)
		b[i] ^= a[i];
}

/*
 * dp_rx_rotl() - rotate the bits left
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Returns: Input value rotated left by 'bits'
 */
static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
{
	return (val << bits) | (val >> (32 - bits));
}

/*
 * dp_rx_rotr() - rotate the bits right
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Returns: Input value rotated right by 'bits'
 */
static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
{
	return (val >> bits) | (val << (32 - bits));
}

/*
 * dp_rx_xswap() - swap the adjacent bytes of a 32-bit word
 * @val: unsigned integer input value
 *
 * Returns: Integer with adjacent bytes swapped
 */
static inline uint32_t dp_rx_xswap(uint32_t val)
{
	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
}

/*
 * dp_rx_get_le32_split() - assemble 32 bits from four little endian bytes
 * @b0: byte 0
 * @b1: byte 1
 * @b2: byte 2
 * @b3: byte 3
 *
 * Returns: Integer assembled from the four bytes, little endian
 */
static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
					    uint8_t b3)
{
	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
}

/*
 * dp_rx_get_le32() - get little endian 32 bits
 * @p: source byte array
 *
 * Returns: Integer read from the first four bytes of 'p', little endian
 */
static inline uint32_t dp_rx_get_le32(const uint8_t *p)
{
	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
}

/*
 * dp_rx_put_le32() - put little endian 32 bits
 * @p: destination char array
 * @v: source 32-bit integer
 *
 * Returns: None
 */
static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (v) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

/* Michael MIC block function: mix one 32-bit word into the (l, r) state */
#define dp_rx_michael_block(l, r)		\
	do {					\
		r ^= dp_rx_rotl(l, 17);		\
		l += r;				\
		r ^= dp_rx_xswap(l);		\
		l += r;				\
		r ^= dp_rx_rotl(l, 3);		\
		l += r;				\
		r ^= dp_rx_rotr(l, 2);		\
		l += r;				\
	} while (0)

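/*
 * Illustrative sketch (hypothetical helper, not part of this header's API):
 * how the primitives above combine into the core of a Michael (TKIP MIC)
 * computation over whole 32-bit words. The real algorithm additionally
 * appends a 0x5a terminator and zero padding before producing the MIC;
 * that finalization is omitted here for brevity.
 */
static inline void dp_rx_michael_sketch(const uint8_t key[8],
					const uint8_t *data,
					qdf_size_t data_len,
					uint8_t mic[8])
{
	uint32_t l = dp_rx_get_le32(key);
	uint32_t r = dp_rx_get_le32(key + 4);
	qdf_size_t i;

	/* mix each little endian 32-bit word of the data into the state */
	for (i = 0; i + 4 <= data_len; i += 4) {
		l ^= dp_rx_get_le32(data + i);
		dp_rx_michael_block(l, r);
	}

	dp_rx_put_le32(mic, l);
	dp_rx_put_le32(mic + 4, r);
}
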
/**
 * union dp_rx_desc_list_elem_t
 *
 * @next : Next pointer to form free list
 * @rx_desc : DP Rx descriptor
 */
union dp_rx_desc_list_elem_t {
	union dp_rx_desc_list_elem_t *next;
	struct dp_rx_desc rx_desc;
};

/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_buf[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
}

void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool);

uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				struct rx_desc_pool *rx_desc_pool,
				uint16_t num_descs,
				union dp_rx_desc_list_elem_t **desc_list,
				union dp_rx_desc_list_elem_t **tail);

QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);

void dp_rx_pdev_detach(struct dp_pdev *pdev);

uint32_t
dp_rx_process(struct dp_intr *int_ctx, void *hal_ring, uint32_t quota);

uint32_t dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota);

uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota);

void
dp_rx_sg_create(qdf_nbuf_t nbuf,
		uint8_t *rx_tlv_hdr,
		uint16_t *mpdu_len,
		bool *is_first_frag,
		uint16_t *frag_list_len,
		qdf_nbuf_t *head_frag_nbuf,
		qdf_nbuf_t *frag_list_head,
		qdf_nbuf_t *frag_list_tail);

QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t pool_id,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool);

void dp_rx_desc_pool_free(struct dp_soc *soc,
			  uint32_t pool_id,
			  struct rx_desc_pool *rx_desc_pool);

void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		       struct dp_peer *peer);

/**
 * dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list
 *
 * @head: pointer to the head of local free list
 * @tail: pointer to the tail of local free list
 * @new: new descriptor that is added to the free list
 *
 * Return: void
 */
static inline
void dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
				 union dp_rx_desc_list_elem_t **tail,
				 struct dp_rx_desc *new)
{
	qdf_assert(head && new);

	new->nbuf = NULL;
	new->in_use = 0;
	new->unmapped = 0;

	((union dp_rx_desc_list_elem_t *)new)->next = *head;
	*head = (union dp_rx_desc_list_elem_t *)new;
	if (*tail == NULL)
		*tail = *head;
}

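/*
 * Illustrative usage (hypothetical helper, for documentation only):
 * descriptors reaped during processing are strung onto a local head/tail
 * pair with dp_rx_add_to_free_desc_list() and later handed back to the
 * per-pool free list in one call.
 */
static inline void dp_rx_free_list_example(struct dp_soc *soc,
					   struct dp_rx_desc *rx_desc,
					   struct rx_desc_pool *rx_desc_pool)
{
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	dp_rx_add_to_free_desc_list(&head, &tail, rx_desc);

	/* return the whole local list to the per-pool free list */
	dp_rx_add_desc_list_to_free_list(soc, &head, &tail,
					 rx_desc->pool_id, rx_desc_pool);
}
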
/**
 * dp_rx_wds_srcport_learn() - Add or update the STA PEER which
 *			       is behind the WDS repeater.
 *
 * @soc: core txrx main context
 * @rx_tlv_hdr: base address of RX TLV header
 * @ta_peer: WDS repeater peer
 * @nbuf: rx pkt
 *
 * Return: void
 */
#ifdef FEATURE_WDS
static inline void
dp_rx_wds_srcport_learn(struct dp_soc *soc,
			uint8_t *rx_tlv_hdr,
			struct dp_peer *ta_peer,
			qdf_nbuf_t nbuf)
{
	uint16_t sa_sw_peer_id = hal_rx_msdu_end_sa_sw_peer_id_get(rx_tlv_hdr);
	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
	uint32_t ret = 0;
	uint8_t wds_src_mac[IEEE80211_ADDR_LEN];

	/* Do wds source port learning only if it is a 4-address mpdu */
	if (!(qdf_nbuf_is_rx_chfrag_start(nbuf) &&
	      hal_rx_get_mpdu_mac_ad4_valid(rx_tlv_hdr)))
		return;

	memcpy(wds_src_mac, (qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN),
	       IEEE80211_ADDR_LEN);

	if (qdf_unlikely(!hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr))) {
		ret = dp_peer_add_ast(soc,
					ta_peer,
					wds_src_mac,
					CDP_TXRX_AST_TYPE_WDS,
					flags);
	} else {
		/*
		 * Get the AST entry from HW SA index and mark it as active
		 */
		struct dp_ast_entry *ast;
		uint16_t sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);
		ast = soc->ast_table[sa_idx];

		/*
		 * Ensure we are updating the right AST entry by
		 * validating ast_idx.
		 * There is a possibility we might arrive here without
		 * AST MAP event, so this check is mandatory
		 */
		if (ast && (ast->ast_idx == sa_idx))
			ast->is_active = TRUE;

		if (ast && sa_sw_peer_id != ta_peer->peer_ids[0])
			dp_peer_update_ast(soc, ta_peer, ast, flags);
	}
	return;
}
#else
static inline void
dp_rx_wds_srcport_learn(struct dp_soc *soc,
			uint8_t *rx_tlv_hdr,
			struct dp_peer *ta_peer,
			qdf_nbuf_t nbuf)
{
}
#endif

uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf);
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
		qdf_nbuf_t mpdu, bool mpdu_done);
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr);

#define DP_RX_LIST_APPEND(head, tail, elem) \
	do {                                               \
		if (!(head)) {                             \
			(head) = (elem);                   \
		} else {                                   \
			qdf_nbuf_set_next((tail), (elem)); \
		}                                          \
		(tail) = (elem);                           \
		qdf_nbuf_set_next((tail), NULL);           \
	} while (0)

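/*
 * Illustrative usage (hypothetical helper, for documentation only):
 * chain two nbufs into a delivery list with DP_RX_LIST_APPEND; head
 * points at the first frame and the tail's next pointer stays NULL.
 */
static inline qdf_nbuf_t dp_rx_list_append_example(qdf_nbuf_t first,
						   qdf_nbuf_t second)
{
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;

	DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, first);
	DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, second);

	/* deliver_list_head == first, first->next == second */
	return deliver_list_head;
}
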
#ifndef BUILD_X86
static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
				qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
#else
#define MAX_RETRY 100
static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
				qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
{
	uint32_t nbuf_retry = 0;
	int32_t ret;
	const uint32_t x86_phy_addr = 0x50000000;
	/*
	 * in M2M emulation platforms (x86) the memory below 0x50000000
	 * is reserved for target use, so any memory allocated in this
	 * region should not be used by host
	 */
	do {
		if (qdf_likely(*paddr > x86_phy_addr))
			return QDF_STATUS_SUCCESS;
		else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"phy addr %pK is below 0x50000000, trying again\n",
				paddr);

			nbuf_retry++;
			if ((*rx_netbuf)) {
				qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
						QDF_DMA_BIDIRECTIONAL);
				/* Not freeing buffer intentionally.
				 * Observed that same buffer is getting
				 * re-allocated resulting in longer load time
				 * WMI init timeout.
				 * This buffer is anyway not useful so skip it.
				 */
			}

			*rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
						RX_BUFFER_SIZE,
						RX_BUFFER_RESERVATION,
						RX_BUFFER_ALIGNMENT,
						FALSE);

			if (qdf_unlikely(!(*rx_netbuf)))
				return QDF_STATUS_E_FAILURE;

			ret = qdf_nbuf_map_single(dp_soc->osdev, *rx_netbuf,
						QDF_DMA_BIDIRECTIONAL);

			if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
				qdf_nbuf_free(*rx_netbuf);
				*rx_netbuf = NULL;
				continue;
			}

			*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);
		}
	} while (nbuf_retry < MAX_RETRY);

	if ((*rx_netbuf)) {
		qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
					QDF_DMA_BIDIRECTIONAL);
		qdf_nbuf_free(*rx_netbuf);
	}

	return QDF_STATUS_E_FAILURE;
}
#endif

/**
 * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
 *				   the MSDU Link Descriptor
 * @soc: core txrx main context
 * @buf_info: buf_info includes the cookie used to look up the virtual address
 * of the link descriptor. Normally this is just an index into a per-SOC array.
 *
 * This is the VA of the link descriptor, that HAL layer later uses to
 * retrieve the list of MSDU's for a given MPDU.
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
				  struct hal_buf_info *buf_info)
{
	void *link_desc_va;
	uint32_t bank_id = LINK_DESC_COOKIE_BANK_ID(buf_info->sw_cookie);

	/* TODO */
	/* Add sanity for cookie */

	link_desc_va = soc->link_desc_banks[bank_id].base_vaddr +
		(buf_info->paddr -
			soc->link_desc_banks[bank_id].base_paddr);

	return link_desc_va;
}

/**
 * dp_rx_cookie_2_mon_link_desc_va() - Converts cookie to a virtual address of
 *				       the MSDU Link Descriptor
 * @pdev: core txrx pdev context
 * @buf_info: buf_info includes the cookie used to look up the virtual address
 * of the link descriptor. Normally this is just an index into a per-pdev
 * array.
 *
 * This is the VA of the link descriptor in monitor mode destination ring,
 * that HAL layer later uses to retrieve the list of MSDU's for a given MPDU.
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_mon_link_desc_va(struct dp_pdev *pdev,
				      struct hal_buf_info *buf_info)
{
	void *link_desc_va;

	/* TODO */
	/* Add sanity for cookie */

	link_desc_va = pdev->link_desc_banks[buf_info->sw_cookie].base_vaddr +
		(buf_info->paddr -
			pdev->link_desc_banks[buf_info->sw_cookie].base_paddr);
	return link_desc_va;
}

/**
 * dp_rx_defrag_concat() - Concatenate the fragments
 *
 * @dst: destination pointer to the buffer
 * @src: source pointer from where the fragment payload is to be copied
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
{
	/*
	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
	 * to provide space for src, the headroom portion is copied from
	 * the original dst buffer to the larger new dst buffer.
	 * (This is needed, because the headroom of the dst buffer
	 * contains the rx desc.)
	 */
	if (qdf_nbuf_cat(dst, src))
		return QDF_STATUS_E_DEFRAG_ERROR;

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_ast_set_active() - set the active flag of the astentry
 *			    corresponding to a hw index.
 * @soc: core txrx main context
 * @sa_idx: hw idx
 * @is_active: active flag
 *
 * Return: QDF_STATUS
 */
#ifdef FEATURE_WDS
static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc,
					      uint16_t sa_idx, bool is_active)
{
	struct dp_ast_entry *ast;
	qdf_spin_lock_bh(&soc->ast_lock);
	ast = soc->ast_table[sa_idx];

	/*
	 * Ensure we are updating the right AST entry by
	 * validating ast_idx.
	 * There is a possibility we might arrive here without
	 * AST MAP event, so this check is mandatory
	 */
	if (ast && (ast->ast_idx == sa_idx)) {
		ast->is_active = is_active;
		qdf_spin_unlock_bh(&soc->ast_lock);
		return QDF_STATUS_SUCCESS;
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
	return QDF_STATUS_E_FAILURE;
}
#else
static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc,
					      uint16_t sa_idx, bool is_active)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/*
 * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet.
 *				      In qwrap mode, packets originated from
 *				      any vdev should not loopback and
 *				      should be dropped.
 * @vdev: vdev on which rx packet is received
 * @nbuf: rx pkt
 *
 */
#if ATH_SUPPORT_WRAP
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						qdf_nbuf_t nbuf)
{
	struct dp_vdev *psta_vdev;
	struct dp_pdev *pdev = vdev->pdev;
	uint8_t *data = qdf_nbuf_data(nbuf);

	if (qdf_unlikely(vdev->proxysta_vdev)) {
		/* In qwrap isolation mode, allow loopback packets as all
		 * packets go to RootAP and Loopback on the mpsta.
		 */
		if (vdev->isolation_vdev)
			return false;
		TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_unlikely(psta_vdev->proxysta_vdev &&
				!qdf_mem_cmp(psta_vdev->mac_addr.raw,
				&data[DP_MAC_ADDR_LEN], DP_MAC_ADDR_LEN))) {
				/* Drop packet if source address is equal to
				 * any of the vdev addresses.
				 */
				return true;
			}
		}
	}
	return false;
}
#else
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						qdf_nbuf_t nbuf)
{
	return false;
}
#endif

/*
 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 * @owner: who owns the nbuf (host, NSS etc...)
 *
 * Return: success or failure
 */
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				 struct dp_srng *dp_rxdma_srng,
				 struct rx_desc_pool *rx_desc_pool,
				 uint32_t num_req_buffers,
				 union dp_rx_desc_list_elem_t **desc_list,
				 union dp_rx_desc_list_elem_t **tail,
				 uint8_t owner);

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action);

QDF_STATUS
dp_rx_link_desc_buf_return(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng,
				void *buf_addr_info, uint8_t bm_action);

/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to
 *				      HW (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr,
					uint8_t bm_action);

uint32_t
dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id,
						uint32_t quota);

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				uint8_t *rx_tlv_hdr, struct dp_peer *peer);
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				uint8_t *rx_tlv_hdr);

int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
				struct dp_peer *peer, int rx_mcast);

#endif /* _DP_RX_H */