/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_rx_mon.h"
#ifdef RX_DESC_DEBUG_CHECK
static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->nbuf = nbuf;
}
#else
static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->nbuf = nbuf;
}
#endif

#ifdef CONFIG_WIN
static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	return vdev->ap_bridge_enabled;
}
#else
static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	if (vdev->opmode != wlan_op_mode_sta)
		return true;
	else
		return false;
}
#endif
/*
 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 * Return: success or failure
 */
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				   struct dp_srng *dp_rxdma_srng,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t num_req_buffers,
				   union dp_rx_desc_list_elem_t **desc_list,
				   union dp_rx_desc_list_elem_t **tail)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(dp_soc, mac_id);
	uint32_t num_entries_avail;
	uint32_t count;
	int sync_hw_ptr = 1;
	qdf_dma_addr_t paddr;
	qdf_nbuf_t rx_netbuf;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	QDF_STATUS ret;

	void *rxdma_srng;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (!rxdma_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "rxdma srng not initialized");
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "requested %d buffers for replenish", num_req_buffers);

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "no of available entries in rxdma ring: %d",
		  num_entries_avail);

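	/*
	 * If no descriptor list was passed in and more than 3/4 of the ring
	 * entries are empty, top the ring up completely; otherwise cap the
	 * request at the space available and remember how many descriptors
	 * have to be returned to the free list.
	 */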
	if (!(*desc_list) && (num_entries_avail >
		((dp_rxdma_srng->num_entries * 3) / 4))) {
		num_req_buffers = num_entries_avail;
	} else if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	}

	if (qdf_unlikely(!num_req_buffers)) {
		num_desc_to_free = num_req_buffers;
		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
		goto free_descs;
	}

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "no free rx_descs in freelist");
			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
				     num_req_buffers);
			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
			return QDF_STATUS_E_NOMEM;
		}

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%d rx desc allocated", num_alloc_desc);
		num_req_buffers = num_alloc_desc;
	}


	count = 0;

	while (count < num_req_buffers) {
		rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
					   RX_BUFFER_SIZE,
					   RX_BUFFER_RESERVATION,
					   RX_BUFFER_ALIGNMENT,
					   FALSE);

		if (rx_netbuf == NULL) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			continue;
		}

		ret = qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
					  QDF_DMA_BIDIRECTIONAL);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(rx_netbuf);
			DP_STATS_INC(dp_pdev, replenish.map_err, 1);
			continue;
		}

		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);

		/*
		 * check if the physical address of nbuf->data is
		 * less than 0x50000000; if so, free the nbuf and try
		 * allocating a new nbuf. We can try up to 100 times.
		 * This is a temporary WAR till we fix it properly.
		 */
		ret = check_x86_paddr(dp_soc, &rx_netbuf, &paddr, dp_pdev);
		if (ret == QDF_STATUS_E_FAILURE) {
			DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
			break;
		}

		count++;

		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
							 rxdma_srng);
		qdf_assert_always(rxdma_ring_entry);

		next = (*desc_list)->next;

		dp_rx_desc_prep(&((*desc_list)->rx_desc), rx_netbuf);
		(*desc_list)->rx_desc.in_use = 1;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "rx_netbuf=%pK, buf=%pK, paddr=0x%llx, cookie=%d",
			  rx_netbuf, qdf_nbuf_data(rx_netbuf),
			  (unsigned long long)paddr, (*desc_list)->rx_desc.cookie);

		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;
	}

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "successfully replenished %d buffers", num_req_buffers);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%d rx desc added back to free list", num_desc_to_free);

	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, num_req_buffers,
			 (RX_BUFFER_SIZE * num_req_buffers));

free_descs:
	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
 *			 pkts to RAW mode simulation to
 *			 decapsulate the pkt.
 *
 * @vdev: vdev on which RAW mode is enabled
 * @nbuf_list: list of RAW pkts to process
 * @peer: peer object from which the pkt is rx
 *
 * Return: void
 */
void
dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		  struct dp_peer *peer)
{
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	qdf_nbuf_t nbuf;

	nbuf = nbuf_list;
	while (nbuf) {
		qdf_nbuf_t next = qdf_nbuf_next(nbuf);

		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);

		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
		DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf));
		/*
		 * reset the chfrag_start and chfrag_end bits in nbuf cb
		 * as this is a non-amsdu pkt and RAW mode simulation expects
		 * these bits to be 0 for non-amsdu pkts.
		 */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
		    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
			qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
		}

		nbuf = next;
	}

	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
				 &deliver_list_tail, (struct cdp_peer *)peer);

	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
}


#ifdef DP_LFR
/*
 * In case of LFR, data of a new peer might be sent up
 * even before peer is added.
 */
static inline struct dp_vdev *
dp_get_vdev_from_peer(struct dp_soc *soc,
			uint16_t peer_id,
			struct dp_peer *peer,
			struct hal_rx_mpdu_desc_info mpdu_desc_info)
{
	struct dp_vdev *vdev;
	uint8_t vdev_id;

	if (unlikely(!peer)) {
		if (peer_id != HTT_INVALID_PEER) {
			vdev_id = DP_PEER_METADATA_ID_GET(
					mpdu_desc_info.peer_meta_data);
			QDF_TRACE(QDF_MODULE_ID_DP,
				QDF_TRACE_LEVEL_DEBUG,
				FL("PeerID %d not found use vdevID %d"),
				peer_id, vdev_id);
			vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
								  vdev_id);
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP,
				QDF_TRACE_LEVEL_DEBUG,
				FL("Invalid PeerID %d"),
				peer_id);
			return NULL;
		}
	} else {
		vdev = peer->vdev;
	}
	return vdev;
}
#else
static inline struct dp_vdev *
dp_get_vdev_from_peer(struct dp_soc *soc,
			uint16_t peer_id,
			struct dp_peer *peer,
			struct hal_rx_mpdu_desc_info mpdu_desc_info)
{
	if (unlikely(!peer)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			QDF_TRACE_LEVEL_DEBUG,
			FL("Peer not found for peerID %d"),
			peer_id);
		return NULL;
	} else {
		return peer->vdev;
	}
}
#endif

/**
 * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
 *
 * @soc: core txrx main context
 * @sa_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 *
 * Return: bool: true if it is forwarded else false
 */
static bool
dp_rx_intrabss_fwd(struct dp_soc *soc,
			struct dp_peer *sa_peer,
			uint8_t *rx_tlv_hdr,
			qdf_nbuf_t nbuf)
{
	uint16_t da_idx;
	uint16_t len;
	struct dp_peer *da_peer;
	struct dp_ast_entry *ast_entry;
	qdf_nbuf_t nbuf_copy;
	struct dp_vdev *vdev = sa_peer->vdev;

	/*
	 * intrabss forwarding is not applicable if
	 * vap is nawds enabled or ap_bridge is false.
	 */
	if (vdev->nawds_enabled)
		return false;


	/* check if the destination peer is available in peer table
	 * and also check if the source peer and destination peer
	 * belong to the same vap and destination peer is not bss peer.
	 */

	if ((hal_rx_msdu_end_da_is_valid_get(rx_tlv_hdr) &&
		!hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		da_idx = hal_rx_msdu_end_da_idx_get(rx_tlv_hdr);

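		/*
		 * The DA index reported in the msdu_end TLV is used to look
		 * up the SOC AST table; the matching AST entry points at the
		 * destination peer used for the intra-BSS check below.
		 */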
378 ast_entry = soc->ast_table[da_idx];
379 if (!ast_entry)
380 return false;
381
382 da_peer = ast_entry->peer;
383
384 if (!da_peer)
385 return false;
386
387 if (da_peer->vdev == sa_peer->vdev && !da_peer->bss_peer) {
388 memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
389 len = qdf_nbuf_len(nbuf);
Tallapragada Kalyan0cd17932017-06-23 11:16:42 +0530390
Tallapragada Kalyan32e74e62018-01-11 11:32:44 +0530391 /* linearize the nbuf just before we send to
392 * dp_tx_send()
393 */
394 if (qdf_unlikely(qdf_nbuf_get_ext_list(nbuf))) {
395 if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
396 return false;
397
398 nbuf = qdf_nbuf_unshare(nbuf);
Tallapragada Kalyanbc629892018-04-04 11:34:55 +0530399 if (!nbuf) {
400 DP_STATS_INC_PKT(sa_peer,
401 rx.intra_bss.fail,
402 1,
403 len);
404 /* return true even though the pkt is
405 * not forwarded. Basically skb_unshare
406 * failed and we want to continue with
407 * next nbuf.
408 */
409 return true;
410 }
Tallapragada Kalyan32e74e62018-01-11 11:32:44 +0530411 }
412
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +0530413 if (!dp_tx_send(sa_peer->vdev, nbuf)) {
Ishank Jain57c42a12017-04-12 10:42:22 +0530414 DP_STATS_INC_PKT(sa_peer, rx.intra_bss.pkts,
415 1, len);
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +0530416 return true;
Ishank Jain57c42a12017-04-12 10:42:22 +0530417 } else {
418 DP_STATS_INC_PKT(sa_peer, rx.intra_bss.fail, 1,
419 len);
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +0530420 return false;
Ishank Jain57c42a12017-04-12 10:42:22 +0530421 }
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +0530422 }
423 }
424 /* if it is a broadcast pkt (eg: ARP) and it is not its own
425 * source, then clone the pkt and send the cloned pkt for
426 * intra BSS forwarding and original pkt up the network stack
427 * Note: how do we handle multicast pkts. do we forward
428 * all multicast pkts as is or let a higher layer module
429 * like igmpsnoop decide whether to forward or not with
430 * Mcast enhancement.
431 */
432 else if (qdf_unlikely((hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
433 !sa_peer->bss_peer))) {
434 nbuf_copy = qdf_nbuf_copy(nbuf);
435 if (!nbuf_copy)
436 return false;
437 memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
438 len = qdf_nbuf_len(nbuf_copy);
Tallapragada Kalyan0cd17932017-06-23 11:16:42 +0530439
Tallapragada Kalyan274eb9e2017-05-16 18:59:10 +0530440 if (dp_tx_send(sa_peer->vdev, nbuf_copy)) {
441 DP_STATS_INC_PKT(sa_peer, rx.intra_bss.fail, 1, len);
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +0530442 qdf_nbuf_free(nbuf_copy);
Tallapragada Kalyan274eb9e2017-05-16 18:59:10 +0530443 } else
Ishank Jain57c42a12017-04-12 10:42:22 +0530444 DP_STATS_INC_PKT(sa_peer, rx.intra_bss.pkts, 1, len);
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +0530445 }
446 /* return false as we have to still send the original pkt
447 * up the stack
448 */
Debashis Duttc4c52dc2016-10-04 17:12:23 -0700449 return false;
450}
451
#ifdef MESH_MODE_SUPPORT

/**
 * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: pointer to peer
 *
 * This function allocates memory for the mesh receive stats and fills the
 * required stats. It stores the memory address in the skb cb.
 *
 * Return: void
 */

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	struct mesh_recv_hdr_s *rx_info = NULL;
	uint32_t pkt_type;
	uint32_t nss;
	uint32_t rate_mcs;
	uint32_t bw;

	/* fill recv mesh stats */
	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));

	/* upper layers are responsible for freeing this memory */

	if (rx_info == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Memory allocation failed for mesh rx stats");
		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
		return;
	}

	rx_info->rs_flags = MESH_RXHDR_VER1;
	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;

	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
		rx_info->rs_flags |= MESH_RX_LAST_MSDU;

	if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) {
		rx_info->rs_flags |= MESH_RX_DECRYPTED;
		rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr);
		if (vdev->osif_get_key)
			vdev->osif_get_key(vdev->osif_vdev,
					&rx_info->rs_decryptkey[0],
					&peer->mac_addr.raw[0],
					rx_info->rs_keyix);
	}

	rx_info->rs_rssi = hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
	rx_info->rs_channel = hal_rx_msdu_start_get_freq(rx_tlv_hdr);
	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
	rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(rx_tlv_hdr);
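	/* rs_ratephy1 packs rate_mcs in bits 0-7, nss in bits 8-15,
	 * pkt_type in bits 16-23 and bw in bits 24-31.
	 */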
	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
				(bw << 24);

	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
		FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x"),
						rx_info->rs_flags,
						rx_info->rs_rssi,
						rx_info->rs_channel,
						rx_info->rs_ratephy1,
						rx_info->rs_keyix);

}

/**
 * dp_rx_filter_mesh_packets() - Filters out unwanted mesh packets
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * This checks if the received packet matches any filter-out
 * category and drops the packet if it matches.
 *
 * Return: status (0 indicates drop, 1 indicates no drop)
 */

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					uint8_t *rx_tlv_hdr)
{
	union dp_align_mac_addr mac_addr;

	if (qdf_unlikely(vdev->mesh_rx_filter)) {
		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
			if (hal_rx_mpdu_get_fr_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
			if (hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
			if (!hal_rx_mpdu_get_fr_ds(rx_tlv_hdr)
				&& !hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
			if (hal_rx_mpdu_get_addr1(rx_tlv_hdr,
					&mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					&vdev->mac_addr.raw[0],
					DP_MAC_ADDR_LEN))
				return QDF_STATUS_SUCCESS;
		}

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
			if (hal_rx_mpdu_get_addr2(rx_tlv_hdr,
					&mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					&vdev->mac_addr.raw[0],
					DP_MAC_ADDR_LEN))
				return QDF_STATUS_SUCCESS;
		}
	}

	return QDF_STATUS_E_FAILURE;
}

#else
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
}

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					uint8_t *rx_tlv_hdr)
{
	return QDF_STATUS_E_FAILURE;
}

#endif

#ifdef CONFIG_WIN
/**
 * dp_rx_nac_filter(): Function to perform filtering of non-associated
 * clients
 * @pdev: DP pdev handle
 * @rx_pkt_hdr: Rx packet Header
 *
 * return: dp_vdev*
 */
static
struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
		uint8_t *rx_pkt_hdr)
{
	struct ieee80211_frame *wh;
	struct dp_neighbour_peer *peer = NULL;

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

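	/*
	 * Only to-DS data frames are considered here; the transmitter
	 * address (addr2) is then matched against the configured
	 * neighbour (NAC) list under the neighbour peer lock.
	 */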
	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
		return NULL;

	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
				neighbour_peer_list_elem) {
		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				wh->i_addr2, DP_MAC_ADDR_LEN) == 0) {
			QDF_TRACE(
				QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				FL("NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x"),
				peer->neighbour_peers_macaddr.raw[0],
				peer->neighbour_peers_macaddr.raw[1],
				peer->neighbour_peers_macaddr.raw[2],
				peer->neighbour_peers_macaddr.raw[3],
				peer->neighbour_peers_macaddr.raw[4],
				peer->neighbour_peers_macaddr.raw[5]);

			qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

			return pdev->monitor_vdev;
		}
	}
	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

	return NULL;
}

/**
 * dp_rx_process_nac_rssi_frames(): Store RSSI for configured NAC
 * @pdev: DP pdev handle
 * @rx_tlv_hdr: tlv hdr buf
 *
 * return: None
 */
#ifdef ATH_SUPPORT_NAC_RSSI
static void dp_rx_process_nac_rssi_frames(struct dp_pdev *pdev, uint8_t *rx_tlv_hdr)
{
	struct dp_vdev *vdev = NULL;
	struct dp_soc *soc = pdev->soc;
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (pdev->nac_rssi_filtering) {
		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			if (vdev->cdp_nac_rssi_enabled &&
				(qdf_mem_cmp(vdev->cdp_nac_rssi.client_mac,
					wh->i_addr1, DP_MAC_ADDR_LEN) == 0)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_DEBUG, "RSSI updated");
				vdev->cdp_nac_rssi.vdev_id = vdev->vdev_id;
				vdev->cdp_nac_rssi.client_rssi =
					hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
				dp_wdi_event_handler(WDI_EVENT_NAC_RSSI, soc,
					(void *)&vdev->cdp_nac_rssi,
					HTT_INVALID_PEER, WDI_NO_VAL,
					pdev->pdev_id);
			}
		}
	}
}
#else
static void dp_rx_process_nac_rssi_frames(struct dp_pdev *pdev, uint8_t *rx_tlv_hdr)
{
}
#endif

/**
 * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 *
 * return: integer type
 */
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
{
	struct dp_invalid_peer_msg msg;
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ieee80211_frame *wh;
	uint8_t i;
	qdf_nbuf_t curr_nbuf, next_nbuf;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				"NAWDS valid only for data frames");
		goto free;
	}

	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Invalid nbuf length");
		goto free;
	}


	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"PDEV not found");
			continue;
		}

		if (pdev->filter_neighbour_peers) {
			/* Next Hop scenario not yet handled */
			vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
			if (vdev) {
				dp_rx_mon_deliver(soc, i,
						pdev->invalid_peer_head_msdu,
						pdev->invalid_peer_tail_msdu);

				pdev->invalid_peer_head_msdu = NULL;
				pdev->invalid_peer_tail_msdu = NULL;

				return 0;
			}
		}


		dp_rx_process_nac_rssi_frames(pdev, rx_tlv_hdr);

		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

			if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
						DP_MAC_ADDR_LEN) == 0) {
				goto out;
			}
		}
	}

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"VDEV not found");
		goto free;
	}

out:
	msg.wh = wh;
	qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN);
	msg.nbuf = mpdu;
	msg.vdev_id = vdev->vdev_id;
	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer)
		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(pdev->ctrl_pdev,
							   &msg);

free:
	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	return 0;
}

/**
 * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 * @mpdu_done: if an mpdu is completed
 *
 * return: integer type
 */
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done)
{
	/* Only trigger the process when mpdu is completed */
	if (mpdu_done)
		dp_rx_process_invalid_peer(soc, mpdu);
}
#else
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
{
	qdf_nbuf_t curr_nbuf, next_nbuf;
	struct dp_pdev *pdev;
	uint8_t i;

	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		/* Drop and free packet */
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				qdf_nbuf_len(curr_nbuf));
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	/* reset the head and tail pointers */
	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				QDF_TRACE_LEVEL_ERROR,
				"PDEV not found");
			continue;
		}

		pdev->invalid_peer_head_msdu = NULL;
		pdev->invalid_peer_tail_msdu = NULL;
	}
	return 0;
}

void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done)
{
	/* To avoid compiler warning */
	mpdu_done = mpdu_done;

	/* Process the nbuf */
	dp_rx_process_invalid_peer(soc, mpdu);
}
#endif

#if defined(FEATURE_LRO)
static void dp_rx_print_lro_info(uint8_t *rx_tlv)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
	FL("----------------------RX DESC LRO----------------------\n"));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("lro_eligible 0x%x"), HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("pure_ack 0x%x"), HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("chksum 0x%x"), HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("TCP seq num 0x%x"), HAL_RX_TLV_GET_TCP_SEQ(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("TCP ack num 0x%x"), HAL_RX_TLV_GET_TCP_ACK(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("TCP window 0x%x"), HAL_RX_TLV_GET_TCP_WIN(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("TCP protocol 0x%x"), HAL_RX_TLV_GET_TCP_PROTO(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("TCP offset 0x%x"), HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("toeplitz 0x%x"), HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
	FL("---------------------------------------------------------\n"));
}

/**
 * dp_rx_lro() - LRO related processing
 * @rx_tlv: TLV data extracted from the rx packet
 * @peer: destination peer of the msdu
 * @msdu: network buffer
 * @ctx: LRO context
 *
 * This function performs the LRO related processing of the msdu
 *
 * Return: void
 */
static void dp_rx_lro(uint8_t *rx_tlv, struct dp_peer *peer,
	 qdf_nbuf_t msdu, qdf_lro_ctx_t ctx)
{
	if (!peer || !peer->vdev || !peer->vdev->lro_enable) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			 FL("no peer, no vdev or LRO disabled"));
		QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = 0;
		return;
	}
	qdf_assert(rx_tlv);
	dp_rx_print_lro_info(rx_tlv);

	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
		HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv);

	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
		HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv);

	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
		HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv);
	QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
		HAL_RX_TLV_GET_TCP_SEQ(rx_tlv);
	QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
		HAL_RX_TLV_GET_TCP_ACK(rx_tlv);
	QDF_NBUF_CB_RX_TCP_WIN(msdu) =
		HAL_RX_TLV_GET_TCP_WIN(rx_tlv);
	QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
		HAL_RX_TLV_GET_TCP_PROTO(rx_tlv);
	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
		HAL_RX_TLV_GET_IPV6(rx_tlv);
	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
		HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv);
	QDF_NBUF_CB_RX_FLOW_ID(msdu) =
		HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv);
	QDF_NBUF_CB_RX_LRO_CTX(msdu) = (unsigned char *)ctx;

}
#else
static void dp_rx_lro(uint8_t *rx_tlv, struct dp_peer *peer,
	 qdf_nbuf_t msdu, qdf_lro_ctx_t ctx)
{
}
#endif

/**
 * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
 *
 * @nbuf: pointer to msdu.
 * @mpdu_len: mpdu length
 *
 * Return: returns true if nbuf is last msdu of mpdu else returns false.
 */
static inline bool dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len)
{
	bool last_nbuf;

	if (*mpdu_len >= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN)) {
		qdf_nbuf_set_pktlen(nbuf, RX_BUFFER_SIZE);
		last_nbuf = false;
	} else {
		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN));
		last_nbuf = true;
	}

	*mpdu_len -= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN);

	return last_nbuf;
}

/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		       multiple nbufs.
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	qdf_nbuf_t parent, next, frag_list;
	uint16_t frag_list_len = 0;
	uint16_t mpdu_len;
	bool last_nbuf;

	mpdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	/*
	 * this is a case where the complete msdu fits in one single nbuf.
	 * in this case HW sets both start and end bit and we only need to
	 * reset these bits for RAW mode simulator to decap the pkt
	 */
	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
	    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
		qdf_nbuf_set_pktlen(nbuf, mpdu_len + RX_PKT_TLVS_LEN);
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		return nbuf;
	}

	/*
	 * This is a case where we have multiple msdus (A-MSDU) spread across
	 * multiple nbufs. here we create a fraglist out of these nbufs.
	 *
	 * the moment we encounter a nbuf with continuation bit set we
	 * know for sure we have an MSDU which is spread across multiple
	 * nbufs. We loop through and reap nbufs till we reach last nbuf.
	 */
	parent = nbuf;
	frag_list = nbuf->next;
	nbuf = nbuf->next;

	/*
	 * set the start bit in the first nbuf we encounter with continuation
	 * bit set. This has the proper mpdu length set as it is the first
	 * msdu of the mpdu. this becomes the parent nbuf and the subsequent
	 * nbufs will form the frag_list of the parent nbuf.
	 */
	qdf_nbuf_set_rx_chfrag_start(parent, 1);
	last_nbuf = dp_rx_adjust_nbuf_len(parent, &mpdu_len);

	/*
	 * this is where we set the length of the fragments which are
	 * associated to the parent nbuf. We iterate through the frag_list
	 * till we hit the last_nbuf of the list.
	 */
	do {
		last_nbuf = dp_rx_adjust_nbuf_len(nbuf, &mpdu_len);
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		frag_list_len += qdf_nbuf_len(nbuf);

		if (last_nbuf) {
			next = nbuf->next;
			nbuf->next = NULL;
			break;
		}

		nbuf = nbuf->next;
	} while (!last_nbuf);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
	qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
	parent->next = next;

	qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
	return parent;
}

static inline void dp_rx_deliver_to_stack(struct dp_vdev *vdev,
						struct dp_peer *peer,
						qdf_nbuf_t nbuf_head,
						qdf_nbuf_t nbuf_tail)
{
	/*
	 * highly unlikely to have a vdev without a registered rx
	 * callback function. if so let us free the nbuf_list.
	 */
	if (qdf_unlikely(!vdev->osif_rx)) {
		qdf_nbuf_t nbuf;
		do {
			nbuf = nbuf_head;
			nbuf_head = nbuf_head->next;
			qdf_nbuf_free(nbuf);
		} while (nbuf_head);

		return;
	}

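	/* For raw and native-wifi decap types, run the list through the
	 * registered raw-mode simulation decap handler before handing it
	 * to the regular OS-shim rx callback.
	 */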
	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
			(vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
		vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
				&nbuf_tail, (struct cdp_peer *)peer);
	}

	vdev->osif_rx(vdev->osif_vdev, nbuf_head);
}

/**
 * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * The ipsumed field of the skb is set based on whether HW validated the
 * IP/TCP/UDP checksum.
 *
 * Return: void
 */
static inline void dp_rx_cksum_offload(struct dp_pdev *pdev,
				       qdf_nbuf_t nbuf,
				       uint8_t *rx_tlv_hdr)
{
	qdf_nbuf_rx_cksum_t cksum = {0};
	bool ip_csum_err = hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr);
	bool tcp_udp_csum_er = hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr);

	if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) {
		cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
		qdf_nbuf_set_rx_cksum(nbuf, &cksum);
	} else {
		DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
		DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er);
	}
}

/**
 * dp_rx_msdu_stats_update() - update per msdu stats.
 * @soc: core txrx main context
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 * @peer: pointer to the peer object.
 * @ring_id: reo dest ring number on which pkt is reaped.
 *
 * update all the per msdu stats for that nbuf.
 * Return: void
 */
static void dp_rx_msdu_stats_update(struct dp_soc *soc,
				    qdf_nbuf_t nbuf,
				    uint8_t *rx_tlv_hdr,
				    struct dp_peer *peer,
				    uint8_t ring_id)
{
	bool is_ampdu, is_not_amsdu;
	uint16_t peer_id;
	uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
	struct dp_vdev *vdev = peer->vdev;
	struct ether_header *eh;
	uint16_t msdu_len = qdf_nbuf_len(nbuf);

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr));

	is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
			qdf_nbuf_is_rx_chfrag_end(nbuf);

	DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len);
	DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu);
	DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu);

	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
			 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
		if (IEEE80211_IS_BROADCAST(eh->ether_dhost)) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len);
		} else {
			DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len);
		}
	}

	/*
	 * currently we can return from here as we have similar stats
	 * updated at per ppdu level instead of msdu level
	 */
	if (!soc->process_rx_status)
		return;

	is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(rx_tlv_hdr);
	DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu);
	DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu));

	sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr);
	mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
	tid = hal_rx_mpdu_start_tid_get(rx_tlv_hdr);
	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
	reception_type = hal_rx_msdu_start_reception_type_get(rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(rx_tlv_hdr);
	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);

	/* Save tid to skb->priority */
	DP_RX_TID_SAVE(nbuf, tid);

	DP_STATS_INC(peer, rx.nss[nss], 1);
	DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
	DP_STATS_INCC(peer, rx.err.mic_err, 1,
		      hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr));
	DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
		      hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr));

	DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
	DP_STATS_INC(peer, rx.bw[bw], 1);
	DP_STATS_INC(peer, rx.reception_type[reception_type], 1);

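	/* Per-PHY-mode MCS histogram: values above the mode's max MCS are
	 * folded into the MAX_MCS bucket, in-range values count normally.
	 */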
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1,
		      ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1,
		      ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1,
		      ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS) && (pkt_type == DOT11_AX)));

	if ((soc->process_rx_status) &&
	    hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) {
		if (soc->cdp_soc.ol_ops->update_dp_stats) {
			soc->cdp_soc.ol_ops->update_dp_stats(
					vdev->pdev->ctrl_pdev,
					&peer->stats,
					peer_id,
					UPDATE_PEER_STATS);
		}
	}
}

#ifdef WDS_VENDOR_EXTENSION
int dp_wds_rx_policy_check(
		uint8_t *rx_tlv_hdr,
		struct dp_vdev *vdev,
		struct dp_peer *peer,
		int rx_mcast
		)
{
	struct dp_peer *bss_peer;
	int fr_ds, to_ds, rx_3addr, rx_4addr;
	int rx_policy_ucast, rx_policy_mcast;

	if (vdev->opmode == wlan_op_mode_ap) {
		TAILQ_FOREACH(bss_peer, &vdev->peer_list, peer_list_elem) {
			if (bss_peer->bss_peer) {
				/* if wds policy check is not enabled on this vdev, accept all frames */
				if (!bss_peer->wds_ecm.wds_rx_filter) {
					return 1;
				}
				break;
			}
		}
		rx_policy_ucast = bss_peer->wds_ecm.wds_rx_ucast_4addr;
		rx_policy_mcast = bss_peer->wds_ecm.wds_rx_mcast_4addr;
	} else {             /* sta mode */
		if (!peer->wds_ecm.wds_rx_filter) {
			return 1;
		}
		rx_policy_ucast = peer->wds_ecm.wds_rx_ucast_4addr;
		rx_policy_mcast = peer->wds_ecm.wds_rx_mcast_4addr;
	}

	/* ------------------------------------------------
	 *                  self
	 * peer-     rx  rx-
	 * wds  ucast mcast dir policy accept note
	 * ------------------------------------------------
	 * 1    1     0     11  x1     1      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint met; so, accept
	 * 1    1     0     01  x1     0      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1    1     0     10  x1     0      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1    1     0     00  x1     0      bad frame, won't see it
	 * 1    0     1     11  1x     1      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint met; so, accept
	 * 1    0     1     01  1x     0      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1    0     1     10  1x     0      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1    0     1     00  1x     0      bad frame, won't see it
	 * 1    1     0     11  x0     0      AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1    1     0     01  x0     0      AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1    1     0     10  x0     1      AP configured to accept from-ds Rx ucast from wds peers, constraint met; so, accept
	 * 1    1     0     00  x0     0      bad frame, won't see it
	 * 1    0     1     11  0x     0      AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1    0     1     01  0x     0      AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1    0     1     10  0x     1      AP configured to accept from-ds Rx mcast from wds peers, constraint met; so, accept
	 * 1    0     1     00  0x     0      bad frame, won't see it
	 *
	 * 0    x     x     11  xx     0      we only accept td-ds Rx frames from non-wds peers in mode.
	 * 0    x     x     01  xx     1
	 * 0    x     x     10  xx     0
	 * 0    x     x     00  xx     0      bad frame, won't see it
	 * ------------------------------------------------
	 */

	fr_ds = hal_rx_mpdu_get_fr_ds(rx_tlv_hdr);
	to_ds = hal_rx_mpdu_get_to_ds(rx_tlv_hdr);
	rx_3addr = fr_ds ^ to_ds;
	rx_4addr = fr_ds & to_ds;

	if (vdev->opmode == wlan_op_mode_ap) {
		if ((!peer->wds_enabled && rx_3addr && to_ds) ||
		    (peer->wds_enabled && !rx_mcast && (rx_4addr == rx_policy_ucast)) ||
		    (peer->wds_enabled && rx_mcast && (rx_4addr == rx_policy_mcast))) {
			return 1;
		}
	} else {           /* sta mode */
		if ((!rx_mcast && (rx_4addr == rx_policy_ucast)) ||
		    (rx_mcast && (rx_4addr == rx_policy_mcast))) {
			return 1;
		}
	}
	return 0;
}
#else
int dp_wds_rx_policy_check(
		uint8_t *rx_tlv_hdr,
		struct dp_vdev *vdev,
		struct dp_peer *peer,
		int rx_mcast
		)
{
	return 1;
}
#endif

/**
 * dp_rx_process() - Brain of the Rx processing functionality
 *		     Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the core of Rx functionality. This is
 * expected to handle only non-error frames.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_process(struct dp_intr *int_ctx, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc = NULL;
	qdf_nbuf_t nbuf, next;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0, rx_buf_cookie;
	uint32_t l2_hdr_offset = 0;
	uint16_t msdu_len = 0;
	uint16_t peer_id;
	struct dp_peer *peer = NULL;
	struct dp_vdev *vdev = NULL;
	uint32_t pkt_len = 0;
	struct hal_rx_mpdu_desc_info mpdu_desc_info = { 0 };
	struct hal_rx_msdu_desc_info msdu_desc_info = { 0 };
	enum hal_reo_error_status error;
	uint32_t peer_mdata;
	uint8_t *rx_tlv_hdr;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	struct dp_pdev *pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_soc *soc = int_ctx->soc;
	uint8_t ring_id = 0;
	uint8_t core_id = 0;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;

Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08001330 DP_HIST_INIT();
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001331 /* Debug -- Remove later */
1332 qdf_assert(soc && hal_ring);
1333
1334 hal_soc = soc->hal_soc;
1335
1336 /* Debug -- Remove later */
1337 qdf_assert(hal_soc);
1338
Yue Ma245b47b2017-02-21 16:35:31 -08001339 hif_pm_runtime_mark_last_busy(soc->osdev->dev);
1340
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001341 if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
1342
1343 /*
1344 * Need API to convert from hal_ring pointer to
1345 * Ring Type / Ring Id combo
1346 */
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08001347 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001348 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Jeff Johnson3f217e22017-09-18 10:13:35 -07001349 FL("HAL RING Access Failed -- %pK"), hal_ring);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001350 hal_srng_access_end(hal_soc, hal_ring);
1351 goto done;
1352 }
1353
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05301354 /*
1355 * start reaping the buffers from reo ring and queue
1356 * them in per vdev queue.
1357 * Process the received pkts in a different per vdev loop.
1358 */
Chaithanya Garrepallia173a182018-05-18 21:33:10 +05301359 while (qdf_likely(quota)) {
1360 ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring);
1361
1362 /*
 1363 * in case HW has updated hp after we cached the hp
 1364 * ring_desc can be NULL even though there are entries
 1365 * available in the ring. Update the cached_hp
 1366 * and reap the buffers available to read a complete
 1367 * mpdu in one reap.
 1368 *
 1369 * This is needed for RAW mode, where we have to read all
 1370 * msdus corresponding to an amsdu in one reap to create the
 1371 * SG list properly; but due to a mismatch between cached_hp
 1372 * and the actual hp we are sometimes unable to read the
 1373 * complete mpdu in one reap.
1374 */
1375 if (qdf_unlikely(!ring_desc)) {
1376 hal_srng_access_start_unlocked(hal_soc, hal_ring);
1377 ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring);
1378 if (!ring_desc)
1379 break;
1380 }
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001381
1382 error = HAL_RX_ERROR_STATUS_GET(ring_desc);
Ishank Jain57c42a12017-04-12 10:42:22 +05301383 ring_id = hal_srng_ring_id_get(hal_ring);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001384
1385 if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
1386 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Jeff Johnson3f217e22017-09-18 10:13:35 -07001387 FL("HAL RING 0x%pK:error %d"), hal_ring, error);
Ishank Jain57c42a12017-04-12 10:42:22 +05301388 DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001389 /* Don't know how to deal with this -- assert */
1390 qdf_assert(0);
1391 }
1392
1393 rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
1394
Kai Chen6eca1a62017-01-12 10:17:53 -08001395 rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001396
Pranita Solankea5a3ae72018-01-18 21:45:27 +05301397
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001398 qdf_assert(rx_desc);
Tallapragada Kalyanaae8c412017-02-13 12:00:17 +05301399 rx_bufs_reaped[rx_desc->pool_id]++;
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001400
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001401 /* TODO */
1402 /*
1403 * Need a separate API for unmapping based on
 1404 * physical address
1405 */
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05301406 qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001407 QDF_DMA_BIDIRECTIONAL);
1408
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07001409 core_id = smp_processor_id();
1410 DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1);
1411
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05301412 /* Get MPDU DESC info */
1413 hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001414
Tallapragada Kalyanbb3bbcd2017-07-14 12:17:04 +05301415 hal_rx_mpdu_peer_meta_data_set(qdf_nbuf_data(rx_desc->nbuf),
1416 mpdu_desc_info.peer_meta_data);
1417
Kalyan Tallapragada277f45e2017-01-30 14:25:27 +05301418 /* Get MSDU DESC info */
1419 hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info);
1420
1421 /*
1422 * save msdu flags first, last and continuation msdu in
1423 * nbuf->cb
1424 */
1425 if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
Vivekde90e592017-11-30 17:24:18 +05301426 qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);
Kalyan Tallapragada277f45e2017-01-30 14:25:27 +05301427
1428 if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
Vivekde90e592017-11-30 17:24:18 +05301429 qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);
Kalyan Tallapragada277f45e2017-01-30 14:25:27 +05301430
1431 if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
Vivekde90e592017-11-30 17:24:18 +05301432 qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);
Kalyan Tallapragada277f45e2017-01-30 14:25:27 +05301433
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301434 DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301435
Tallapragada Kalyan52b45a12017-05-12 17:36:16 +05301436 /*
1437 * if continuation bit is set then we have MSDU spread
1438 * across multiple buffers, let us not decrement quota
1439 * till we reap all buffers of that MSDU.
1440 */
Vivekde90e592017-11-30 17:24:18 +05301441 if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
Tallapragada Kalyan52b45a12017-05-12 17:36:16 +05301442 quota -= 1;
1443
1444
Tallapragada Kalyanaae8c412017-02-13 12:00:17 +05301445 dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
1446 &tail[rx_desc->pool_id],
1447 rx_desc);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001448 }
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05301449done:
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001450 hal_srng_access_end(hal_soc, hal_ring);
1451
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08001452 /* Update histogram statistics by looping through pdev's */
1453 DP_RX_HIST_STATS_PER_PDEV();
1454
Tallapragada Kalyanaae8c412017-02-13 12:00:17 +05301455 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
1456 /*
1457 * continue with next mac_id if no pkts were reaped
1458 * from that pool
1459 */
1460 if (!rx_bufs_reaped[mac_id])
1461 continue;
1462
Kai Chen6eca1a62017-01-12 10:17:53 -08001463 pdev = soc->pdev_list[mac_id];
1464 dp_rxdma_srng = &pdev->rx_refill_buf_ring;
1465 rx_desc_pool = &soc->rx_desc_buf[mac_id];
1466
1467 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
1468 rx_desc_pool, rx_bufs_reaped[mac_id],
Venkata Sharath Chandra Manchala16fcceb2018-01-03 11:27:15 -08001469 &head[mac_id], &tail[mac_id]);
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05301470 }
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001471
psimha03f9a792017-10-17 10:42:58 -07001472 /* Peer can be NULL in case of LFR */
1473 if (qdf_likely(peer != NULL))
1474 vdev = NULL;
1475
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301476 /*
1477 * BIG loop where each nbuf is dequeued from global queue,
1478 * processed and queued back on a per vdev basis. These nbufs
1479 * are sent to stack as and when we run out of nbufs
1480 * or a new nbuf dequeued from global queue has a different
1481 * vdev when compared to previous nbuf.
1482 */
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301483 nbuf = nbuf_head;
1484 while (nbuf) {
1485 next = nbuf->next;
1486 rx_tlv_hdr = qdf_nbuf_data(nbuf);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001487
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301488 /*
1489 * Check if DMA completed -- msdu_done is the last bit
1490 * to be written
1491 */
1492 if (qdf_unlikely(!hal_rx_attn_msdu_done_get(rx_tlv_hdr))) {
1493 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1494 FL("MSDU DONE failure"));
1495 hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_INFO);
1496 qdf_assert(0);
1497 }
1498
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301499 peer_mdata = hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr);
1500 peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
1501 peer = dp_peer_find_by_id(soc, peer_id);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001502
Mohit Khanna163c3172018-06-27 01:34:02 -07001503 if (peer) {
1504 QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
1505 qdf_dp_trace_set_track(nbuf, QDF_RX);
1506 QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
1507 QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
1508 QDF_NBUF_RX_PKT_DATA_TRACK;
1509 }
1510
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301511 rx_bufs_used++;
1512
psimha03f9a792017-10-17 10:42:58 -07001513 if (deliver_list_head && peer && (vdev != peer->vdev)) {
Chaithanya Garrepalli72dc9132018-02-21 18:37:34 +05301514 dp_rx_deliver_to_stack(vdev, peer, deliver_list_head,
1515 deliver_list_tail);
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301516 deliver_list_head = NULL;
1517 deliver_list_tail = NULL;
1518 }
psimha03f9a792017-10-17 10:42:58 -07001519
Chaithanya Garrepalli974da262018-02-22 20:32:19 +05301520 if (qdf_likely(peer != NULL)) {
psimha03f9a792017-10-17 10:42:58 -07001521 vdev = peer->vdev;
Chaithanya Garrepalli974da262018-02-22 20:32:19 +05301522 } else {
1523 qdf_nbuf_free(nbuf);
1524 nbuf = next;
1525 continue;
1526 }
1527
1528 if (qdf_unlikely(vdev == NULL)) {
1529 qdf_nbuf_free(nbuf);
1530 nbuf = next;
1531 DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1532 continue;
1533 }
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05301534
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301535 DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301536 /*
Chaithanya Garrepallia173a182018-05-18 21:33:10 +05301537 * First IF condition:
1538 * 802.11 Fragmented pkts are reinjected to REO
1539 * HW block as SG pkts and for these pkts we only
1540 * need to pull the RX TLVS header length.
1541 * Second IF condition:
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301542 * The below condition happens when an MSDU is spread
 1543 * across multiple buffers. This can happen in two cases:
 1544 * 1. The nbuf size is smaller than the received msdu.
 1545 *    ex: we have set the nbuf size to 2048 during
 1546 *    nbuf_alloc, but we received an msdu which is
 1547 *    2304 bytes in size; then this msdu is spread
 1548 *    across 2 nbufs.
1549 *
1550 * 2. AMSDUs when RAW mode is enabled.
1551 * ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
1552 * across 1st nbuf and 2nd nbuf and last MSDU is
1553 * spread across 2nd nbuf and 3rd nbuf.
1554 *
1555 * for these scenarios let us create a skb frag_list and
1556 * append these buffers till the last MSDU of the AMSDU
Chaithanya Garrepallia173a182018-05-18 21:33:10 +05301557 * Third condition:
 1558 * This is the most likely case: we receive 802.3 pkts
 1559 * decapsulated by HW; here we need to set the pkt length.
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301560 */
Chaithanya Garrepallia173a182018-05-18 21:33:10 +05301561 if (qdf_unlikely(qdf_nbuf_get_ext_list(nbuf)))
1562 qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
1563 else if (qdf_unlikely(vdev->rx_decap_type ==
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301564 htt_cmn_pkt_type_raw)) {
Chaithanya Garrepallia173a182018-05-18 21:33:10 +05301565 msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
1566 nbuf = dp_rx_sg_create(nbuf, rx_tlv_hdr);
Tallapragada Kalyan52b45a12017-05-12 17:36:16 +05301567
Chaithanya Garrepalli72dc9132018-02-21 18:37:34 +05301568 DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
Chaithanya Garrepallia173a182018-05-18 21:33:10 +05301569 DP_STATS_INC_PKT(peer, rx.raw, 1,
1570 msdu_len);
Tallapragada Kalyan52b45a12017-05-12 17:36:16 +05301571
Chaithanya Garrepalli72dc9132018-02-21 18:37:34 +05301572 next = nbuf->next;
Chaithanya Garrepallia173a182018-05-18 21:33:10 +05301573 } else {
1574 l2_hdr_offset =
1575 hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
1576
1577 msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
1578 pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
1579
1580 qdf_nbuf_set_pktlen(nbuf, pkt_len);
1581 qdf_nbuf_pull_head(nbuf,
1582 RX_PKT_TLVS_LEN +
1583 l2_hdr_offset);
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05301584 }
1585
Tallapragada Kalyan2a5fc622017-12-08 21:07:43 +05301586 if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer,
Tallapragada Kalyane33a5632018-02-22 20:33:15 +05301587 hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
Tallapragada Kalyan2a5fc622017-12-08 21:07:43 +05301588 QDF_TRACE(QDF_MODULE_ID_DP,
1589 QDF_TRACE_LEVEL_ERROR,
1590 FL("Policy Check Drop pkt"));
1591 /* Drop & free packet */
1592 qdf_nbuf_free(nbuf);
1593 /* Statistics */
1594 nbuf = next;
1595 continue;
1596 }
1597
psimha03f9a792017-10-17 10:42:58 -07001598 if (qdf_unlikely(peer && peer->bss_peer)) {
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301599 QDF_TRACE(QDF_MODULE_ID_DP,
1600 QDF_TRACE_LEVEL_ERROR,
1601 FL("received pkt with same src MAC"));
1602 DP_STATS_INC(vdev->pdev, dropped.mec, 1);
1603
1604 /* Drop & free packet */
1605 qdf_nbuf_free(nbuf);
1606 /* Statistics */
1607 nbuf = next;
1608 continue;
1609 }
1610
psimha03f9a792017-10-17 10:42:58 -07001611 if (qdf_unlikely(peer && (peer->nawds_enabled == true) &&
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05301612 (hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr)) &&
1613 (hal_rx_get_mpdu_mac_ad4_valid(rx_tlv_hdr) == false))) {
Ruchi, Agrawal27550482018-02-20 19:43:41 +05301614 DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05301615 qdf_nbuf_free(nbuf);
1616 nbuf = next;
1617 continue;
1618 }
1619
Tallapragada Kalyan51198fc2018-04-18 14:30:44 +05301620 dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301621
Prathyusha Guduri02ed9482018-04-17 19:06:30 +05301622 dp_set_rx_queue(nbuf, ring_id);
1623
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301624 /*
1625 * HW structures call this L3 header padding --
1626 * even though this is actually the offset from
1627 * the buffer beginning where the L2 header
1628 * begins.
1629 */
Houston Hoffmanae850c62017-08-11 16:47:50 -07001630 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301631 FL("rxhash: flow id toeplitz: 0x%x\n"),
1632 hal_rx_msdu_start_toeplitz_get(rx_tlv_hdr));
1633
Aditya Sathish6add3db2018-04-10 19:43:34 +05301634 dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer, ring_id);
1635
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301636 if (qdf_unlikely(vdev->mesh_vdev)) {
1637 if (dp_rx_filter_mesh_packets(vdev, nbuf,
1638 rx_tlv_hdr)
1639 == QDF_STATUS_SUCCESS) {
1640 QDF_TRACE(QDF_MODULE_ID_DP,
1641 QDF_TRACE_LEVEL_INFO_MED,
1642 FL("mesh pkt filtered"));
1643 DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
1644 1);
1645
1646 qdf_nbuf_free(nbuf);
1647 nbuf = next;
1648 continue;
1649 }
1650 dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
1651 }
1652
1653#ifdef QCA_WIFI_NAPIER_EMULATION_DBG /* Debug code, remove later */
1654 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1655 "p_id %d msdu_len %d hdr_off %d",
1656 peer_id, msdu_len, l2_hdr_offset);
1657
1658 print_hex_dump(KERN_ERR,
1659 "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
1660 qdf_nbuf_data(nbuf), 128, false);
1661#endif /* NAPIER_EMULATION */
1662
1663 if (qdf_likely(vdev->rx_decap_type ==
1664 htt_cmn_pkt_type_ethernet) &&
1665 (qdf_likely(!vdev->mesh_vdev))) {
1666 /* WDS Source Port Learning */
1667 dp_rx_wds_srcport_learn(soc,
1668 rx_tlv_hdr,
1669 peer,
1670 nbuf);
1671
1672 /* Intrabss-fwd */
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05301673 if (dp_rx_check_ap_bridge(vdev))
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301674 if (dp_rx_intrabss_fwd(soc,
1675 peer,
1676 rx_tlv_hdr,
1677 nbuf)) {
1678 nbuf = next;
1679 continue; /* Get next desc */
1680 }
1681 }
1682
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301683 dp_rx_lro(rx_tlv_hdr, peer, nbuf, int_ctx->lro_ctx);
1684
1685 DP_RX_LIST_APPEND(deliver_list_head,
1686 deliver_list_tail,
1687 nbuf);
1688
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301689 DP_STATS_INC_PKT(peer, rx.to_stack, 1,
Soumya Bhatd4ad6af2017-12-29 18:53:14 +05301690 qdf_nbuf_len(nbuf));
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301691
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301692 nbuf = next;
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05301693 }
Dhanashri Atre0da31222017-03-23 12:30:58 -07001694
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301695 if (deliver_list_head)
Chaithanya Garrepalli72dc9132018-02-21 18:37:34 +05301696 dp_rx_deliver_to_stack(vdev, peer, deliver_list_head,
1697 deliver_list_tail);
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301698
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001699 return rx_bufs_used; /* Assume no scale factor for now */
1700}
1701
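/*
 * Illustrative sketch (not part of the driver): dp_rx_process() chains
 * nbufs into two singly linked lists with DP_RX_LIST_APPEND -- a global
 * reap list (nbuf_head/nbuf_tail) built while the ring is being reaped,
 * and a delivery list (deliver_list_head/deliver_list_tail) flushed to
 * the stack whenever the owning vdev changes.  The exact macro lives in
 * dp_internal.h; the helper below only shows the head/tail append
 * pattern on a stand-in node type, which is an assumption made for the
 * example.
 */
struct dp_rx_example_node {
	struct dp_rx_example_node *next;
};

static inline void
dp_rx_example_list_append(struct dp_rx_example_node **head,
			  struct dp_rx_example_node **tail,
			  struct dp_rx_example_node *elem)
{
	elem->next = NULL;
	if (!*head)
		*head = elem;		/* first element becomes the head */
	else
		(*tail)->next = elem;	/* otherwise link after current tail */
	*tail = elem;			/* new element is always the tail */
}
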
1702/**
 1703 * dp_rx_pdev_detach() - detach dp rx
Kai Chen6eca1a62017-01-12 10:17:53 -08001704 * @pdev: core txrx pdev context
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001705 *
 1706 * This function will detach DP RX from the main device context
 1707 * and will free DP Rx resources.
1708 *
1709 * Return: void
1710 */
1711void
1712dp_rx_pdev_detach(struct dp_pdev *pdev)
1713{
1714 uint8_t pdev_id = pdev->pdev_id;
1715 struct dp_soc *soc = pdev->soc;
Kai Chen6eca1a62017-01-12 10:17:53 -08001716 struct rx_desc_pool *rx_desc_pool;
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001717
Kai Chen6eca1a62017-01-12 10:17:53 -08001718 rx_desc_pool = &soc->rx_desc_buf[pdev_id];
1719
psimhaeae1b412017-08-25 16:10:13 -07001720 if (rx_desc_pool->pool_size != 0) {
1721 dp_rx_desc_pool_free(soc, pdev_id, rx_desc_pool);
psimhaeae1b412017-08-25 16:10:13 -07001722 }
Tallapragada Kalyan603c5942016-12-07 21:30:44 +05301723
1724 return;
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001725}
1726
1727/**
 1728 * dp_rx_pdev_attach() - attach DP RX
Kai Chen6eca1a62017-01-12 10:17:53 -08001729 * @pdev: core txrx pdev context
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001730 *
1731 * This function will attach a DP RX instance into the main
 1732 * device (SOC) context. Will allocate DP Rx resources and
 1733 * initialize them.
1734 *
1735 * Return: QDF_STATUS_SUCCESS: success
1736 * QDF_STATUS_E_RESOURCES: Error return
1737 */
1738QDF_STATUS
1739dp_rx_pdev_attach(struct dp_pdev *pdev)
1740{
1741 uint8_t pdev_id = pdev->pdev_id;
1742 struct dp_soc *soc = pdev->soc;
1743 struct dp_srng rxdma_srng;
1744 uint32_t rxdma_entries;
1745 union dp_rx_desc_list_elem_t *desc_list = NULL;
1746 union dp_rx_desc_list_elem_t *tail = NULL;
Kai Chen6eca1a62017-01-12 10:17:53 -08001747 struct dp_srng *dp_rxdma_srng;
1748 struct rx_desc_pool *rx_desc_pool;
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001749
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05301750 if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
1751 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
 1752 "nss-wifi<4> skip Rx refill %d", pdev_id);
1753 return QDF_STATUS_SUCCESS;
1754 }
1755
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001756 pdev = soc->pdev_list[pdev_id];
Dhanashri Atre7351d172016-10-12 13:08:09 -07001757 rxdma_srng = pdev->rx_refill_buf_ring;
chenguo9bece1a2017-12-19 18:49:41 +08001758 soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001759 rxdma_entries = rxdma_srng.alloc_size/hal_srng_get_entrysize(
1760 soc->hal_soc, RXDMA_BUF);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001761
Kai Chen6eca1a62017-01-12 10:17:53 -08001762 rx_desc_pool = &soc->rx_desc_buf[pdev_id];
1763
1764 dp_rx_desc_pool_alloc(soc, pdev_id, rxdma_entries*3, rx_desc_pool);
Venkata Sharath Chandra Manchala16fcceb2018-01-03 11:27:15 -08001765
1766 rx_desc_pool->owner = DP_WBM2SW_RBM;
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001767 /* For Rx buffers, WBM release ring is SW RING 3,for all pdev's */
Kai Chen6eca1a62017-01-12 10:17:53 -08001768 dp_rxdma_srng = &pdev->rx_refill_buf_ring;
1769 dp_rx_buffers_replenish(soc, pdev_id, dp_rxdma_srng, rx_desc_pool,
Venkata Sharath Chandra Manchala16fcceb2018-01-03 11:27:15 -08001770 0, &desc_list, &tail);
Debashis Duttc4c52dc2016-10-04 17:12:23 -07001771
1772 return QDF_STATUS_SUCCESS;
1773}
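
/*
 * Illustrative sketch (not part of the driver): dp_rx_pdev_attach() sizes
 * the software Rx descriptor pool at three times the refill ring depth,
 * where the depth is the ring allocation size divided by the HAL entry
 * size.  The helper and its parameters below are assumptions used only
 * to show the arithmetic.
 */
static inline uint32_t dp_rx_example_pool_size(uint32_t ring_alloc_size,
					       uint32_t entry_size)
{
	uint32_t rxdma_entries = ring_alloc_size / entry_size;

	return rxdma_entries * 3;	/* mirrors rxdma_entries*3 above */
}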
jinweic chenc3546322018-02-02 15:03:41 +08001774
1775/*
1776 * dp_rx_nbuf_prepare() - prepare RX nbuf
1777 * @soc: core txrx main context
1778 * @pdev: core txrx pdev context
1779 *
 1780 * This function allocates & maps an nbuf for RX DMA usage, retrying on
 1781 * failure until the retry count reaches the max threshold or it succeeds.
1782 *
1783 * Return: qdf_nbuf_t pointer if succeeded, NULL if failed.
1784 */
1785qdf_nbuf_t
1786dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev)
1787{
1788 uint8_t *buf;
1789 int32_t nbuf_retry_count;
1790 QDF_STATUS ret;
1791 qdf_nbuf_t nbuf = NULL;
1792
1793 for (nbuf_retry_count = 0; nbuf_retry_count <
1794 QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD;
1795 nbuf_retry_count++) {
1796 /* Allocate a new skb */
1797 nbuf = qdf_nbuf_alloc(soc->osdev,
1798 RX_BUFFER_SIZE,
1799 RX_BUFFER_RESERVATION,
1800 RX_BUFFER_ALIGNMENT,
1801 FALSE);
1802
1803 if (nbuf == NULL) {
1804 DP_STATS_INC(pdev,
1805 replenish.nbuf_alloc_fail, 1);
1806 continue;
1807 }
1808
1809 buf = qdf_nbuf_data(nbuf);
1810
1811 memset(buf, 0, RX_BUFFER_SIZE);
1812
1813 ret = qdf_nbuf_map_single(soc->osdev, nbuf,
1814 QDF_DMA_BIDIRECTIONAL);
1815
1816 /* nbuf map failed */
1817 if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
1818 qdf_nbuf_free(nbuf);
1819 DP_STATS_INC(pdev, replenish.map_err, 1);
1820 continue;
1821 }
1822 /* qdf_nbuf alloc and map succeeded */
1823 break;
1824 }
1825
1826 /* qdf_nbuf still alloc or map failed */
1827 if (qdf_unlikely(nbuf_retry_count >=
1828 QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD))
1829 return NULL;
1830
1831 return nbuf;
1832}
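
/*
 * Illustrative sketch (not part of the driver): a hypothetical caller
 * that fills a small batch of Rx buffers with dp_rx_nbuf_prepare() and
 * releases them again.  The helper names and the batch-array interface
 * are assumptions for the example; only dp_rx_nbuf_prepare(),
 * qdf_nbuf_unmap_single() and qdf_nbuf_free() come from this file.
 */
static inline int dp_rx_example_fill_batch(struct dp_soc *soc,
					   struct dp_pdev *pdev,
					   qdf_nbuf_t *bufs, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		bufs[i] = dp_rx_nbuf_prepare(soc, pdev);
		if (!bufs[i])
			break;	/* alloc/map retries exhausted */
	}

	return i;	/* number of buffers actually prepared */
}

static inline void dp_rx_example_drop_batch(struct dp_soc *soc,
					    qdf_nbuf_t *bufs, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		qdf_nbuf_unmap_single(soc->osdev, bufs[i],
				      QDF_DMA_BIDIRECTIONAL);
		qdf_nbuf_free(bufs[i]);
	}
}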