/*
 * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/**
 * @file htt_rx.c
 * @brief Implement receive aspects of HTT.
 * @details
 *  This file contains three categories of HTT rx code:
 *  1.  An abstraction of the rx descriptor, to hide the
 *      differences between the HL vs. LL rx descriptor.
 *  2.  Functions for providing access to the (series of)
 *      rx descriptor(s) and rx frame(s) associated with
 *      an rx indication message.
 *  3.  Functions for setting up and using the MAC DMA
 *      rx ring (applies to LL only).
 */

#include <qdf_mem.h>         /* qdf_mem_malloc, free, etc. */
#include <qdf_types.h>       /* qdf_print, bool */
#include <qdf_nbuf.h>        /* qdf_nbuf_t, etc. */
#include <qdf_timer.h>       /* qdf_timer_free */

#include <htt.h>             /* HTT_HL_RX_DESC_SIZE */
#include <ol_cfg.h>
#include <ol_rx.h>
#include <ol_htt_rx_api.h>
#include <htt_internal.h>    /* HTT_ASSERT, htt_pdev_t, HTT_RX_BUF_SIZE */
#include "regtable.h"

#include <cds_ieee80211_common.h>   /* ieee80211_frame, ieee80211_qoscntl */
#include <cds_ieee80211_defines.h>  /* ieee80211_rx_status */
#include <cds_utils.h>
#include <wlan_policy_mgr_api.h>
#include "ol_txrx_types.h"
#ifdef DEBUG_DMA_DONE
#include <asm/barrier.h>
#include <wma_api.h>
#endif
#include <pktlog_ac_fmt.h>

#ifdef HTT_DEBUG_DATA
#define HTT_PKT_DUMP(x) x
#else
#define HTT_PKT_DUMP(x) /* no-op */
#endif

/* AR9888v1 WORKAROUND for EV#112367 */
/* FIX THIS - remove this WAR when the bug is fixed */
#define PEREGRINE_1_0_ZERO_LEN_PHY_ERR_WAR

/*--- setup / tear-down functions -------------------------------------------*/

#ifndef HTT_RX_RING_SIZE_MIN
#define HTT_RX_RING_SIZE_MIN 128   /* slightly more than one large A-MPDU */
#endif

#ifndef HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_SIZE_MAX 2048  /* ~20 ms @ 1 Gbps of 1500B MSDUs */
#endif

#ifndef HTT_RX_AVG_FRM_BYTES
#define HTT_RX_AVG_FRM_BYTES 1000
#endif

#ifndef HTT_RX_HOST_LATENCY_MAX_MS
#define HTT_RX_HOST_LATENCY_MAX_MS 20 /* ms */ /* very conservative */
#endif

/* very conservative to ensure enough buffers are allocated */
#ifndef HTT_RX_HOST_LATENCY_WORST_LIKELY_MS
#ifdef QCA_WIFI_3_0
#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 20
#else
#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10
#endif
#endif

#ifndef HTT_RX_RING_REFILL_RETRY_TIME_MS
#define HTT_RX_RING_REFILL_RETRY_TIME_MS 50
#endif

/*--- RX In Order Definitions ------------------------------------------------*/

/* Number of buckets in the hash table */
#define RX_NUM_HASH_BUCKETS 1024   /* This should always be a power of 2 */
#define RX_NUM_HASH_BUCKETS_MASK (RX_NUM_HASH_BUCKETS - 1)

/* Number of hash entries allocated per bucket */
#define RX_ENTRIES_SIZE 10

#define RX_HASH_FUNCTION(a) (((a >> 14) ^ (a >> 4)) & RX_NUM_HASH_BUCKETS_MASK)
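
/*
 * Note on the hash above (an interpretation, not from the original
 * comments): the key is the rx buffer's DMA physical address.  Buffer
 * addresses share alignment and allocator locality in their low-order
 * bits, so XORing the address shifted right by 4 and by 14 folds
 * higher-order bits into the bucket index before masking down to
 * RX_NUM_HASH_BUCKETS buckets.
 */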

#ifdef RX_HASH_DEBUG_LOG
#define RX_HASH_LOG(x) x
#else
#define RX_HASH_LOG(x) /* no-op */
#endif

#if HTT_PADDR64
#define NEXT_FIELD_OFFSET_IN32 2
#else /* ! HTT_PADDR64 */
#define NEXT_FIELD_OFFSET_IN32 1
#endif /* HTT_PADDR64 */

#ifndef CONFIG_HL_SUPPORT
/**
 * htt_get_first_packet_after_wow_wakeup() - get first packet after wow wakeup
 * @msg_word: pointer to rx indication message word
 * @buf: pointer to buffer
 *
 * Return: None
 */
static void
htt_get_first_packet_after_wow_wakeup(uint32_t *msg_word, qdf_nbuf_t buf)
{
        if (HTT_RX_IN_ORD_PADDR_IND_MSDU_INFO_GET(*msg_word) &
            FW_MSDU_INFO_FIRST_WAKEUP_M) {
                qdf_nbuf_mark_wakeup_frame(buf);
                QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
                          "%s: First packet after WOW Wakeup rcvd", __func__);
        }
}

/* De-initialization function for the rx buffer hash table. This function
 * frees up the hash table, which includes freeing all the pending rx buffers.
 */
static void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
{
        uint32_t i;
        struct htt_rx_hash_entry *hash_entry;
        struct htt_rx_hash_bucket **hash_table;
        struct htt_list_node *list_iter = NULL;

        if (NULL == pdev->rx_ring.hash_table)
                return;

        qdf_spin_lock_bh(&(pdev->rx_ring.rx_hash_lock));
        hash_table = pdev->rx_ring.hash_table;
        pdev->rx_ring.hash_table = NULL;
        qdf_spin_unlock_bh(&(pdev->rx_ring.rx_hash_lock));

        for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) {
                /* Free the hash entries in hash bucket i */
                list_iter = hash_table[i]->listhead.next;
                while (list_iter != &hash_table[i]->listhead) {
                        hash_entry =
                                (struct htt_rx_hash_entry *)((char *)list_iter -
                                                             pdev->rx_ring.
                                                             listnode_offset);
                        if (hash_entry->netbuf) {
#ifdef DEBUG_DMA_DONE
                                qdf_nbuf_unmap(pdev->osdev, hash_entry->netbuf,
                                               QDF_DMA_BIDIRECTIONAL);
#else
                                qdf_nbuf_unmap(pdev->osdev, hash_entry->netbuf,
                                               QDF_DMA_FROM_DEVICE);
#endif
                                qdf_nbuf_free(hash_entry->netbuf);
                                hash_entry->paddr = 0;
                        }
                        list_iter = list_iter->next;

                        if (!hash_entry->fromlist)
                                qdf_mem_free(hash_entry);
                }

                qdf_mem_free(hash_table[i]);
        }
        qdf_mem_free(hash_table);

        qdf_spinlock_destroy(&(pdev->rx_ring.rx_hash_lock));
}
#endif

/*
 * This function is used both below within this file (which the compiler
 * will hopefully inline), and out-of-line from other files via the
 * htt_rx_msdu_first_msdu_flag function pointer.
 */

static inline bool
htt_rx_msdu_first_msdu_flag_hl(htt_pdev_handle pdev, void *msdu_desc)
{
        return ((u_int8_t *)msdu_desc - sizeof(struct hl_htt_rx_ind_base))
                [HTT_ENDIAN_BYTE_IDX_SWAP(HTT_RX_IND_HL_FLAG_OFFSET)] &
                HTT_RX_IND_HL_FLAG_FIRST_MSDU ? true : false;
}

u_int16_t
htt_rx_msdu_rx_desc_size_hl(
        htt_pdev_handle pdev,
        void *msdu_desc
        )
{
        return ((u_int8_t *)(msdu_desc) - HTT_RX_IND_HL_BYTES)
                [HTT_ENDIAN_BYTE_IDX_SWAP(HTT_RX_IND_HL_RX_DESC_LEN_OFFSET)];
}

/**
 * htt_rx_mpdu_desc_retry_hl() - Returns the retry bit from the Rx descriptor
 *                               for the High Latency driver
 * @pdev:      Handle (pointer) to HTT pdev.
 * @mpdu_desc: Void pointer to the Rx descriptor for MPDU
 *             before the beginning of the payload.
 *
 * This function returns the retry bit of the 802.11 header for the
 * provided rx MPDU descriptor. For the high latency driver, this function
 * pretends as if the retry bit is never set so that the mcast duplicate
 * detection never fails.
 *
 * Return: boolean -- false always for HL
 */
static inline bool
htt_rx_mpdu_desc_retry_hl(htt_pdev_handle pdev, void *mpdu_desc)
{
        return false;
}

#ifdef CONFIG_HL_SUPPORT
static uint16_t
htt_rx_mpdu_desc_seq_num_hl(htt_pdev_handle pdev, void *mpdu_desc)
{
        if (pdev->rx_desc_size_hl) {
                return pdev->cur_seq_num_hl =
                        (u_int16_t)(HTT_WORD_GET(*(u_int32_t *)mpdu_desc,
                                                 HTT_HL_RX_DESC_MPDU_SEQ_NUM));
        } else {
                return (u_int16_t)(pdev->cur_seq_num_hl);
        }
}

static void
htt_rx_mpdu_desc_pn_hl(
        htt_pdev_handle pdev,
        void *mpdu_desc,
        union htt_rx_pn_t *pn,
        int pn_len_bits)
{
        if (htt_rx_msdu_first_msdu_flag_hl(pdev, mpdu_desc) == true) {
                /* Fix Me: only for little endian */
                struct hl_htt_rx_desc_base *rx_desc =
                        (struct hl_htt_rx_desc_base *)mpdu_desc;
                u_int32_t *word_ptr = (u_int32_t *)pn->pn128;

                /* TODO: handle big-endian hosts */
                switch (pn_len_bits) {
                case 128:
                        /* bits 127:96 */
                        *(word_ptr + 3) = rx_desc->pn_127_96;
                        /* bits 95:64 */
                        *(word_ptr + 2) = rx_desc->pn_95_64;
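                        /* fall through: also copy the lower PN words */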
                case 48:
                        /* bits 63:32 (the 48-bit PN occupies bits 47:0) */
                        *(word_ptr + 1) = rx_desc->u0.pn_63_32;
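                        /* fall through */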
                case 24:
                        /* bits 31:0 (the 24-bit PN occupies bits 23:0) */
                        *(word_ptr + 0) = rx_desc->pn_31_0;
                        break;
                default:
                        QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
                                  "Error: invalid length spec (%d bits) for PN",
                                  pn_len_bits);
                        qdf_assert(0);
                        break;
                };
        } else {
                /* not the first msdu, so no PN info is available */
                QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
                          "Error: cannot get the PN from a non-first msdu.");
                qdf_assert(0);
        }
}
#endif

/**
 * htt_rx_mpdu_desc_tid_hl() - Returns the TID value from the Rx descriptor
 *                             for the High Latency driver
 * @pdev:      Handle (pointer) to HTT pdev.
 * @mpdu_desc: Void pointer to the Rx descriptor for the MPDU
 *             before the beginning of the payload.
 *
 * This function returns the TID set in the 802.11 QoS Control for the MPDU
 * in the packet header, by looking at the mpdu_start of the Rx descriptor.
 * Rx descriptor gets a copy of the TID from the MAC.
 * For the HL driver, this is currently unimplemented and always returns
 * an invalid TID. It is the responsibility of the caller to make
 * sure that the return value is checked for a valid range.
 *
 * Return: Invalid TID value (0xff) for HL driver.
 */
static inline uint8_t
htt_rx_mpdu_desc_tid_hl(htt_pdev_handle pdev, void *mpdu_desc)
{
        return 0xff; /* Invalid TID */
}

static inline bool
htt_rx_msdu_desc_completes_mpdu_hl(htt_pdev_handle pdev, void *msdu_desc)
{
        return (
                ((u_int8_t *)(msdu_desc) - sizeof(struct hl_htt_rx_ind_base))
                [HTT_ENDIAN_BYTE_IDX_SWAP(HTT_RX_IND_HL_FLAG_OFFSET)]
                & HTT_RX_IND_HL_FLAG_LAST_MSDU)
                ? true : false;
}

static inline int
htt_rx_msdu_has_wlan_mcast_flag_hl(htt_pdev_handle pdev, void *msdu_desc)
{
        /* currently, only the first msdu has an hl rx_desc */
        return htt_rx_msdu_first_msdu_flag_hl(pdev, msdu_desc) == true;
}

static inline bool
htt_rx_msdu_is_wlan_mcast_hl(htt_pdev_handle pdev, void *msdu_desc)
{
        struct hl_htt_rx_desc_base *rx_desc =
                (struct hl_htt_rx_desc_base *)msdu_desc;

        return
                HTT_WORD_GET(*(u_int32_t *)rx_desc, HTT_HL_RX_DESC_MCAST_BCAST);
}

static inline int
htt_rx_msdu_is_frag_hl(htt_pdev_handle pdev, void *msdu_desc)
{
        struct hl_htt_rx_desc_base *rx_desc =
                (struct hl_htt_rx_desc_base *)msdu_desc;

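        /*
         * Note: this reads the same HTT_HL_RX_DESC_MCAST_BCAST word as
         * htt_rx_msdu_is_wlan_mcast_hl() above; the HL rx descriptor
         * apparently has no dedicated fragment field.
         */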
        return
                HTT_WORD_GET(*(u_int32_t *)rx_desc, HTT_HL_RX_DESC_MCAST_BCAST);
}

#define RX_PADDR_MAGIC_PATTERN 0xDEAD0000
static qdf_dma_addr_t
htt_rx_paddr_mark_high_bits(qdf_dma_addr_t paddr)
{
#ifdef ENABLE_DEBUG_ADDRESS_MARKING
        if (sizeof(qdf_dma_addr_t) > 4) {
                /* clear high bits, leave lower 37 bits (paddr) */
                paddr &= 0x01FFFFFFFFF;
                /* mark upper 16 bits of paddr */
                paddr |= (((uint64_t)RX_PADDR_MAGIC_PATTERN) << 32);
        }
#endif
        return paddr;
}
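
/*
 * Sketch of the marked 64-bit value produced above (assuming a 37-bit
 * physical address space, as the 0x01FFFFFFFFF mask implies):
 *
 *   bits 63:48 -- 0xDEAD (RX_PADDR_MAGIC_PATTERN >> 16)
 *   bits 47:37 -- zero
 *   bits 36:0  -- physical address
 *
 * htt_rx_paddr_unmark_high_bits() below checks the magic pattern and then
 * trims the value back to the plain 37-bit address.
 */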

#if HTT_PADDR64
static inline qdf_dma_addr_t htt_paddr_trim_to_37(qdf_dma_addr_t paddr)
{
        qdf_dma_addr_t ret = paddr;

        if (sizeof(paddr) > 4)
                ret &= 0x1fffffffff;
        return ret;
}
#else /* not 64 bits */
static inline qdf_dma_addr_t htt_paddr_trim_to_37(qdf_dma_addr_t paddr)
{
        return paddr;
}
#endif /* HTT_PADDR64 */

#ifndef CONFIG_HL_SUPPORT
static bool
htt_rx_msdu_first_msdu_flag_ll(htt_pdev_handle pdev, void *msdu_desc)
{
        struct htt_host_rx_desc_base *rx_desc =
                (struct htt_host_rx_desc_base *)msdu_desc;
        return (bool)
                (((*(((uint32_t *)&rx_desc->msdu_end) + 4)) &
                  RX_MSDU_END_4_FIRST_MSDU_MASK) >>
                 RX_MSDU_END_4_FIRST_MSDU_LSB);
}

#ifdef ENABLE_DEBUG_ADDRESS_MARKING
static qdf_dma_addr_t
htt_rx_paddr_unmark_high_bits(qdf_dma_addr_t paddr)
{
        uint32_t markings;

        if (sizeof(qdf_dma_addr_t) > 4) {
                markings = (uint32_t)((paddr >> 16) >> 16);
                /*
                 * Check that the address is marked correctly: see the
                 * mark_high_bits function above for the expected pattern.
                 * The MS 16 bits of "markings" must match the magic
                 * pattern; the LS 16 bits hold the upper 5 bits of the
                 * 37-bit physical address.
                 */
                if ((markings & 0xFFFF0000) != RX_PADDR_MAGIC_PATTERN) {
                        qdf_print("%s: paddr not marked correctly: 0x%pK!\n",
                                  __func__, (void *)paddr);
                        HTT_ASSERT_ALWAYS(0);
                }

                /* clear markings for further use */
                paddr = htt_paddr_trim_to_37(paddr);
        }
        return paddr;
}

static qdf_dma_addr_t
htt_rx_in_ord_paddr_get(uint32_t *u32p)
{
        qdf_dma_addr_t paddr = 0;

        paddr = (qdf_dma_addr_t)HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*u32p);
        if (sizeof(qdf_dma_addr_t) > 4) {
                u32p++;
                /* 32-bit architectures don't like <<32 */
                paddr |= (((qdf_dma_addr_t)
                          HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*u32p))
                          << 16 << 16);
        }
        paddr = htt_rx_paddr_unmark_high_bits(paddr);

        return paddr;
}
#else
#if HTT_PADDR64
static qdf_dma_addr_t
htt_rx_in_ord_paddr_get(uint32_t *u32p)
{
        qdf_dma_addr_t paddr = 0;

        paddr = (qdf_dma_addr_t)HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*u32p);
        if (sizeof(qdf_dma_addr_t) > 4) {
                u32p++;
                /* 32-bit architectures don't like <<32 */
                paddr |= (((qdf_dma_addr_t)
                          HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*u32p))
                          << 16 << 16);
        }
        return paddr;
}
#else
static inline qdf_dma_addr_t
htt_rx_in_ord_paddr_get(uint32_t *u32p)
{
        return HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*u32p);
}

#endif
#endif /* ENABLE_DEBUG_ADDRESS_MARKING */
#endif /* CONFIG_HL_SUPPORT */

/* full_reorder_offload case: this function is called with lock held */
static int htt_rx_ring_fill_n(struct htt_pdev_t *pdev, int num)
{
        int idx;
        QDF_STATUS status;
        struct htt_host_rx_desc_base *rx_desc;
        int filled = 0;
        int debt_served = 0;

        idx = *(pdev->rx_ring.alloc_idx.vaddr);

moretofill:
        while (num > 0) {
                qdf_dma_addr_t paddr;
                qdf_nbuf_t rx_netbuf;
                int headroom;

                rx_netbuf =
                        qdf_nbuf_alloc(pdev->osdev, HTT_RX_BUF_SIZE,
                                       0, 4, false);
                if (!rx_netbuf) {
                        qdf_timer_stop(&pdev->rx_ring.
                                       refill_retry_timer);
                        /*
                         * Failed to fill it to the desired level -
                         * we'll start a timer and try again next time.
                         * As long as enough buffers are left in the ring for
                         * another A-MPDU rx, no special recovery is needed.
                         */
#ifdef DEBUG_DMA_DONE
                        pdev->rx_ring.dbg_refill_cnt++;
#endif
                        pdev->refill_retry_timer_starts++;
                        qdf_timer_start(
                                &pdev->rx_ring.refill_retry_timer,
                                HTT_RX_RING_REFILL_RETRY_TIME_MS);
                        goto fail;
                }

                /* Clear rx_desc attention word before posting to Rx ring */
                rx_desc = htt_rx_desc(rx_netbuf);
                *(uint32_t *) &rx_desc->attention = 0;

#ifdef DEBUG_DMA_DONE
                *(uint32_t *) &rx_desc->msdu_end = 1;

#define MAGIC_PATTERN 0xDEADBEEF
                *(uint32_t *) &rx_desc->msdu_start = MAGIC_PATTERN;

                /*
                 * To ensure that attention bit is reset and msdu_end is set
                 * before calling dma_map
                 */
                smp_mb();
#endif
                /*
                 * Adjust qdf_nbuf_data to point to the location in the buffer
                 * where the rx descriptor will be filled in.
                 */
                headroom = qdf_nbuf_data(rx_netbuf) - (uint8_t *) rx_desc;
                qdf_nbuf_push_head(rx_netbuf, headroom);

#ifdef DEBUG_DMA_DONE
                status =
                        qdf_nbuf_map(pdev->osdev, rx_netbuf,
                                     QDF_DMA_BIDIRECTIONAL);
#else
                status =
                        qdf_nbuf_map(pdev->osdev, rx_netbuf,
                                     QDF_DMA_FROM_DEVICE);
#endif
                if (status != QDF_STATUS_SUCCESS) {
                        qdf_nbuf_free(rx_netbuf);
                        goto fail;
                }
                paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);
                paddr = htt_rx_paddr_mark_high_bits(paddr);
                if (pdev->cfg.is_full_reorder_offload) {
                        if (qdf_unlikely(htt_rx_hash_list_insert(
                                        pdev, paddr, rx_netbuf))) {
                                QDF_TRACE(QDF_MODULE_ID_HTT,
                                          QDF_TRACE_LEVEL_ERROR,
                                          "%s: hash insert failed!", __func__);
#ifdef DEBUG_DMA_DONE
                                qdf_nbuf_unmap(pdev->osdev, rx_netbuf,
                                               QDF_DMA_BIDIRECTIONAL);
#else
                                qdf_nbuf_unmap(pdev->osdev, rx_netbuf,
                                               QDF_DMA_FROM_DEVICE);
#endif
                                qdf_nbuf_free(rx_netbuf);
                                goto fail;
                        }
                        htt_rx_dbg_rxbuf_set(pdev, paddr, rx_netbuf);
                } else {
                        pdev->rx_ring.buf.netbufs_ring[idx] = rx_netbuf;
                }
                pdev->rx_ring.buf.paddrs_ring[idx] = paddr;
                pdev->rx_ring.fill_cnt++;

                num--;
                idx++;
                filled++;
                idx &= pdev->rx_ring.size_mask;
        }
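        /*
         * Serve any refill debt before returning: other contexts post to
         * rx_ring.refill_debt when they cannot perform the refill
         * themselves, and the debt may keep growing while this loop runs,
         * so it is re-read on each pass.
         */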
        if (debt_served < qdf_atomic_read(&pdev->rx_ring.refill_debt)) {
                num = qdf_atomic_read(&pdev->rx_ring.refill_debt);
                debt_served += num;
                goto moretofill;
        }

fail:
        /*
         * Make sure the alloc index write is reflected correctly before the
         * FW polls the remote ring write index, as the compiler can reorder
         * the instructions based on optimizations.
         */
        qdf_mb();
        *(pdev->rx_ring.alloc_idx.vaddr) = idx;
        htt_rx_dbg_rxbuf_indupd(pdev, idx);

        return filled;
}

#ifndef CONFIG_HL_SUPPORT
static int htt_rx_ring_size(struct htt_pdev_t *pdev)
{
        int size;

        /*
         * It is expected that the host CPU will typically be able to service
         * the rx indication from one A-MPDU before the rx indication from
         * the subsequent A-MPDU happens, roughly 1-2 ms later.
         * However, the rx ring should be sized very conservatively, to
         * accommodate the worst reasonable delay before the host CPU services
         * a rx indication interrupt.
         * The rx ring need not be kept full of empty buffers. In theory,
         * the htt host SW can dynamically track the low-water mark in the
         * rx ring, and dynamically adjust the level to which the rx ring
         * is filled with empty buffers, to dynamically meet the desired
         * low-water mark.
         * In contrast, it's difficult to resize the rx ring itself, once
         * it's in use.
         * Thus, the ring itself should be sized very conservatively, while
         * the degree to which the ring is filled with empty buffers should
         * be sized moderately conservatively.
         */
        size =
                ol_cfg_max_thruput_mbps(pdev->ctrl_pdev) *
                1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */  /
                (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;
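
        /*
         * Worked example with hypothetical numbers: for a configured peak
         * throughput of 800 Mbps and HTT_RX_AVG_FRM_BYTES = 1000,
         *   800 * 1000 / (8 * 1000) = 100 frames per millisecond,
         * so covering HTT_RX_HOST_LATENCY_MAX_MS = 20 ms requires about
         * 2000 buffers, which qdf_get_pwr2() below rounds up to 2048.
         */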

        if (size < HTT_RX_RING_SIZE_MIN)
                size = HTT_RX_RING_SIZE_MIN;
        else if (size > HTT_RX_RING_SIZE_MAX)
                size = HTT_RX_RING_SIZE_MAX;

        size = qdf_get_pwr2(size);
        return size;
}

static int htt_rx_ring_fill_level(struct htt_pdev_t *pdev)
{
        int size;

        size = ol_cfg_max_thruput_mbps(pdev->ctrl_pdev) *
                1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */  /
                (8 * HTT_RX_AVG_FRM_BYTES) *
                HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;

        size = qdf_get_pwr2(size);
        /*
         * Make sure the fill level is at least 1 less than the ring size.
         * Leaving 1 element empty allows the SW to easily distinguish
         * between a full ring vs. an empty ring.
         */
        if (size >= pdev->rx_ring.size)
                size = pdev->rx_ring.size - 1;

        return size;
}

static void htt_rx_ring_refill_retry(void *arg)
{
        htt_pdev_handle pdev = (htt_pdev_handle) arg;
        int filled = 0;
        int num;

        pdev->refill_retry_timer_calls++;
        qdf_spin_lock_bh(&(pdev->rx_ring.refill_lock));

        num = qdf_atomic_read(&pdev->rx_ring.refill_debt);
        qdf_atomic_sub(num, &pdev->rx_ring.refill_debt);
        filled = htt_rx_ring_fill_n(pdev, num);

        if (filled > num) {
                /* we served ourselves and some other debt */
                /* sub is safer than = 0 */
                qdf_atomic_sub(filled - num, &pdev->rx_ring.refill_debt);
        } else if (num == filled) { /* nothing to be done */
        } else {
                qdf_atomic_add(num - filled, &pdev->rx_ring.refill_debt);
                /* we could not fill all, timer must have been started */
                pdev->refill_retry_timer_doubles++;
        }
        qdf_spin_unlock_bh(&(pdev->rx_ring.refill_lock));
}
#endif

static inline unsigned int htt_rx_ring_elems(struct htt_pdev_t *pdev)
{
        return
                (*pdev->rx_ring.alloc_idx.vaddr -
                 pdev->rx_ring.sw_rd_idx.msdu_payld) & pdev->rx_ring.size_mask;
}
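
/*
 * Example of the modular index arithmetic above: with a 2048-entry ring,
 * alloc_idx = 5 and sw_rd_idx = 2040 give (5 - 2040) & 2047 = 13
 * outstanding entries, so wraparound is handled by the power-of-2 mask.
 */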

static inline unsigned int htt_rx_in_order_ring_elems(struct htt_pdev_t *pdev)
{
        return
                (*pdev->rx_ring.alloc_idx.vaddr -
                 *pdev->rx_ring.target_idx.vaddr) &
                pdev->rx_ring.size_mask;
}

#ifndef CONFIG_HL_SUPPORT

void htt_rx_detach(struct htt_pdev_t *pdev)
{
        qdf_timer_stop(&pdev->rx_ring.refill_retry_timer);
        qdf_timer_free(&pdev->rx_ring.refill_retry_timer);
        htt_rx_dbg_rxbuf_deinit(pdev);

        if (pdev->cfg.is_full_reorder_offload) {
                qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
                                        sizeof(uint32_t),
                                        pdev->rx_ring.target_idx.vaddr,
                                        pdev->rx_ring.target_idx.paddr,
                                        qdf_get_dma_mem_context((&pdev->
                                                                 rx_ring.
                                                                 target_idx),
                                                                memctx));
                htt_rx_hash_deinit(pdev);
        } else {
                int sw_rd_idx = pdev->rx_ring.sw_rd_idx.msdu_payld;

                while (sw_rd_idx != *(pdev->rx_ring.alloc_idx.vaddr)) {
#ifdef DEBUG_DMA_DONE
                        qdf_nbuf_unmap(pdev->osdev,
                                       pdev->rx_ring.buf.
                                       netbufs_ring[sw_rd_idx],
                                       QDF_DMA_BIDIRECTIONAL);
#else
                        qdf_nbuf_unmap(pdev->osdev,
                                       pdev->rx_ring.buf.
                                       netbufs_ring[sw_rd_idx],
                                       QDF_DMA_FROM_DEVICE);
#endif
                        qdf_nbuf_free(pdev->rx_ring.buf.
                                      netbufs_ring[sw_rd_idx]);
                        sw_rd_idx++;
                        sw_rd_idx &= pdev->rx_ring.size_mask;
                }
                qdf_mem_free(pdev->rx_ring.buf.netbufs_ring);
        }

        qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
                                sizeof(uint32_t),
                                pdev->rx_ring.alloc_idx.vaddr,
                                pdev->rx_ring.alloc_idx.paddr,
                                qdf_get_dma_mem_context((&pdev->rx_ring.
                                                         alloc_idx),
                                                        memctx));

        qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
                                pdev->rx_ring.size * sizeof(target_paddr_t),
                                pdev->rx_ring.buf.paddrs_ring,
                                pdev->rx_ring.base_paddr,
                                qdf_get_dma_mem_context((&pdev->rx_ring.buf),
                                                        memctx));

        /* destroy the rx-parallelization refill spinlock */
        qdf_spinlock_destroy(&(pdev->rx_ring.refill_lock));
}
#endif

/**
 * htt_rx_mpdu_wifi_hdr_retrieve() - retrieve 802.11 header
 * @pdev - pdev handle
 * @mpdu_desc - mpdu descriptor
 *
 * Return : pointer to 802.11 header
 */
char *htt_rx_mpdu_wifi_hdr_retrieve(htt_pdev_handle pdev, void *mpdu_desc)
{
        struct htt_host_rx_desc_base *rx_desc =
                (struct htt_host_rx_desc_base *)mpdu_desc;

        if (!rx_desc)
                return NULL;
        else
                return rx_desc->rx_hdr_status;
}

/**
 * htt_rx_mpdu_desc_tsf32() - Return the TSF timestamp indicating when
 *                            a MPDU was received.
 * @pdev - the HTT instance the rx data was received on
 * @mpdu_desc - the abstract descriptor for the MPDU in question
 *
 * return : 32 LSBs of TSF time at which the MPDU's PPDU was received
 */
uint32_t htt_rx_mpdu_desc_tsf32(htt_pdev_handle pdev, void *mpdu_desc)
{
        return 0;
}

/*--- rx descriptor field access functions ----------------------------------*/
/*
 * These functions need to use bit masks and shifts to extract fields
 * from the rx descriptors, rather than directly using the bitfields.
 * For example, use
 *     (desc & FIELD_MASK) >> FIELD_LSB
 * rather than
 *     desc.field
 * This allows the functions to work correctly on either little-endian
 * machines (no endianness conversion needed) or big-endian machines
 * (endianness conversion provided automatically by the HW DMA's
 * byte-swizzling).
 */
/* FIX THIS: APPLIES TO LL ONLY */

#ifndef CONFIG_HL_SUPPORT
/**
 * htt_rx_mpdu_desc_retry_ll() - Returns the retry bit from the Rx descriptor
 *                               for the Low Latency driver
 * @pdev:      Handle (pointer) to HTT pdev.
 * @mpdu_desc: Void pointer to the Rx descriptor for MPDU
 *             before the beginning of the payload.
 *
 * This function returns the retry bit of the 802.11 header for the
 * provided rx MPDU descriptor.
 *
 * Return: boolean -- true if retry is set, false otherwise
 */
static bool
htt_rx_mpdu_desc_retry_ll(htt_pdev_handle pdev, void *mpdu_desc)
{
        struct htt_host_rx_desc_base *rx_desc =
                (struct htt_host_rx_desc_base *) mpdu_desc;

        return
                (bool)(((*((uint32_t *) &rx_desc->mpdu_start)) &
                RX_MPDU_START_0_RETRY_MASK) >>
                RX_MPDU_START_0_RETRY_LSB);
}

static uint16_t htt_rx_mpdu_desc_seq_num_ll(htt_pdev_handle pdev,
                                            void *mpdu_desc)
{
        struct htt_host_rx_desc_base *rx_desc =
                (struct htt_host_rx_desc_base *)mpdu_desc;

        return
                (uint16_t) (((*((uint32_t *) &rx_desc->mpdu_start)) &
                             RX_MPDU_START_0_SEQ_NUM_MASK) >>
                            RX_MPDU_START_0_SEQ_NUM_LSB);
}

/* FIX THIS: APPLIES TO LL ONLY */
static void
htt_rx_mpdu_desc_pn_ll(htt_pdev_handle pdev,
                       void *mpdu_desc, union htt_rx_pn_t *pn, int pn_len_bits)
{
        struct htt_host_rx_desc_base *rx_desc =
                (struct htt_host_rx_desc_base *)mpdu_desc;

        switch (pn_len_bits) {
        case 24:
                /* bits 23:0 */
                pn->pn24 = rx_desc->mpdu_start.pn_31_0 & 0xffffff;
                break;
        case 48:
                /* bits 31:0 */
                pn->pn48 = rx_desc->mpdu_start.pn_31_0;
                /* bits 47:32 */
                pn->pn48 |= ((uint64_t)
                             ((*(((uint32_t *) &rx_desc->mpdu_start) + 2))
                              & RX_MPDU_START_2_PN_47_32_MASK))
                        << (32 - RX_MPDU_START_2_PN_47_32_LSB);
                break;
        case 128:
                /* bits 31:0 */
                pn->pn128[0] = rx_desc->mpdu_start.pn_31_0;
                /* bits 47:32 */
                pn->pn128[0] |=
                        ((uint64_t) ((*(((uint32_t *)&rx_desc->mpdu_start) + 2))
                                     & RX_MPDU_START_2_PN_47_32_MASK))
                        << (32 - RX_MPDU_START_2_PN_47_32_LSB);
                /* bits 63:48 */
                pn->pn128[0] |=
                        ((uint64_t) ((*(((uint32_t *) &rx_desc->msdu_end) + 2))
                                     & RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK))
                        << (48 - RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB);
                /* bits 95:64 */
                pn->pn128[1] = rx_desc->msdu_end.ext_wapi_pn_95_64;
                /* bits 127:96 */
                pn->pn128[1] |=
                        ((uint64_t) rx_desc->msdu_end.ext_wapi_pn_127_96) << 32;
                break;
        default:
                QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
                          "Error: invalid length spec (%d bits) for PN",
                          pn_len_bits);
        };
}

/**
 * htt_rx_mpdu_desc_tid_ll() - Returns the TID value from the Rx descriptor
 *                             for the Low Latency driver
 * @pdev:      Handle (pointer) to HTT pdev.
 * @mpdu_desc: Void pointer to the Rx descriptor for the MPDU
 *             before the beginning of the payload.
 *
 * This function returns the TID set in the 802.11 QoS Control for the MPDU
 * in the packet header, by looking at the mpdu_start of the Rx descriptor.
 * Rx descriptor gets a copy of the TID from the MAC.
 *
 * Return: Actual TID set in the packet header.
 */
static uint8_t
htt_rx_mpdu_desc_tid_ll(htt_pdev_handle pdev, void *mpdu_desc)
{
        struct htt_host_rx_desc_base *rx_desc =
                (struct htt_host_rx_desc_base *) mpdu_desc;

        return
                (uint8_t)(((*(((uint32_t *) &rx_desc->mpdu_start) + 2)) &
                           RX_MPDU_START_2_TID_MASK) >>
                          RX_MPDU_START_2_TID_LSB);
}

/* FIX THIS: APPLIES TO LL ONLY */
static bool htt_rx_msdu_desc_completes_mpdu_ll(htt_pdev_handle pdev,
                                               void *msdu_desc)
{
        struct htt_host_rx_desc_base *rx_desc =
                (struct htt_host_rx_desc_base *)msdu_desc;
        return (bool)
                (((*(((uint32_t *) &rx_desc->msdu_end) + 4)) &
                  RX_MSDU_END_4_LAST_MSDU_MASK) >> RX_MSDU_END_4_LAST_MSDU_LSB);
}

/* FIX THIS: APPLIES TO LL ONLY */
static int htt_rx_msdu_has_wlan_mcast_flag_ll(htt_pdev_handle pdev,
                                              void *msdu_desc)
{
        struct htt_host_rx_desc_base *rx_desc =
                (struct htt_host_rx_desc_base *)msdu_desc;
        /*
         * HW rx desc: the mcast_bcast flag is only valid
         * if first_msdu is set
         */
        return
                ((*(((uint32_t *) &rx_desc->msdu_end) + 4)) &
                 RX_MSDU_END_4_FIRST_MSDU_MASK) >> RX_MSDU_END_4_FIRST_MSDU_LSB;
}

/* FIX THIS: APPLIES TO LL ONLY */
static bool htt_rx_msdu_is_wlan_mcast_ll(htt_pdev_handle pdev, void *msdu_desc)
{
        struct htt_host_rx_desc_base *rx_desc =
                (struct htt_host_rx_desc_base *)msdu_desc;
        return
                ((*((uint32_t *) &rx_desc->attention)) &
                 RX_ATTENTION_0_MCAST_BCAST_MASK)
                >> RX_ATTENTION_0_MCAST_BCAST_LSB;
}

/* FIX THIS: APPLIES TO LL ONLY */
static int htt_rx_msdu_is_frag_ll(htt_pdev_handle pdev, void *msdu_desc)
{
        struct htt_host_rx_desc_base *rx_desc =
                (struct htt_host_rx_desc_base *)msdu_desc;
        return
                ((*((uint32_t *) &rx_desc->attention)) &
                 RX_ATTENTION_0_FRAGMENT_MASK) >> RX_ATTENTION_0_FRAGMENT_LSB;
}
#endif

static inline
uint8_t htt_rx_msdu_fw_desc_get(htt_pdev_handle pdev, void *msdu_desc)
{
        /*
         * HL and LL use the same format for FW rx desc, but have the FW rx desc
         * in different locations.
         * In LL, the FW rx descriptor has been copied into the same
         * htt_host_rx_desc_base struct that holds the HW rx desc.
         * In HL, the FW rx descriptor, along with the MSDU payload,
         * is in the same buffer as the rx indication message.
         *
         * Use the FW rx desc offset configured during startup to account for
         * this difference between HL vs. LL.
         *
         * An optimization would be to define the LL and HL msdu_desc pointer
         * in such a way that they both use the same offset to the FW rx desc.
         * Then the following functions could be converted to macros, without
         * needing to expose the htt_pdev_t definition outside HTT.
         */
        return *(((uint8_t *) msdu_desc) + pdev->rx_fw_desc_offset);
}

int htt_rx_msdu_discard(htt_pdev_handle pdev, void *msdu_desc)
{
        return htt_rx_msdu_fw_desc_get(pdev, msdu_desc) & FW_RX_DESC_DISCARD_M;
}

int htt_rx_msdu_forward(htt_pdev_handle pdev, void *msdu_desc)
{
        return htt_rx_msdu_fw_desc_get(pdev, msdu_desc) & FW_RX_DESC_FORWARD_M;
}

int htt_rx_msdu_inspect(htt_pdev_handle pdev, void *msdu_desc)
{
        return htt_rx_msdu_fw_desc_get(pdev, msdu_desc) & FW_RX_DESC_INSPECT_M;
}

void
htt_rx_msdu_actions(htt_pdev_handle pdev,
                    void *msdu_desc, int *discard, int *forward, int *inspect)
{
        uint8_t rx_msdu_fw_desc = htt_rx_msdu_fw_desc_get(pdev, msdu_desc);
#ifdef HTT_DEBUG_DATA
        HTT_PRINT("act:0x%x ", rx_msdu_fw_desc);
#endif
        *discard = rx_msdu_fw_desc & FW_RX_DESC_DISCARD_M;
        *forward = rx_msdu_fw_desc & FW_RX_DESC_FORWARD_M;
        *inspect = rx_msdu_fw_desc & FW_RX_DESC_INSPECT_M;
}
static inline qdf_nbuf_t htt_rx_netbuf_pop(htt_pdev_handle pdev)
{
        int idx;
        qdf_nbuf_t msdu;

        HTT_ASSERT1(htt_rx_ring_elems(pdev) != 0);

#ifdef DEBUG_DMA_DONE
        pdev->rx_ring.dbg_ring_idx++;
        pdev->rx_ring.dbg_ring_idx &= pdev->rx_ring.size_mask;
#endif

        idx = pdev->rx_ring.sw_rd_idx.msdu_payld;
        msdu = pdev->rx_ring.buf.netbufs_ring[idx];
        idx++;
        idx &= pdev->rx_ring.size_mask;
        pdev->rx_ring.sw_rd_idx.msdu_payld = idx;
        pdev->rx_ring.fill_cnt--;
        return msdu;
}

#ifndef CONFIG_HL_SUPPORT
static inline qdf_nbuf_t
htt_rx_in_order_netbuf_pop(htt_pdev_handle pdev, qdf_dma_addr_t paddr)
{
        HTT_ASSERT1(htt_rx_in_order_ring_elems(pdev) != 0);
        pdev->rx_ring.fill_cnt--;
        paddr = htt_paddr_trim_to_37(paddr);
        return htt_rx_hash_list_lookup(pdev, paddr);
}
#endif

/*
 * FIX ME: this function applies only to LL rx descs.
 * An equivalent for HL rx descs is needed.
 */
#ifdef CHECKSUM_OFFLOAD
static inline
void
htt_set_checksum_result_ll(htt_pdev_handle pdev, qdf_nbuf_t msdu,
                           struct htt_host_rx_desc_base *rx_desc)
{
#define MAX_IP_VER 2
#define MAX_PROTO_VAL 4
        struct rx_msdu_start *rx_msdu = &rx_desc->msdu_start;
        unsigned int proto = (rx_msdu->tcp_proto) | (rx_msdu->udp_proto << 1);

        /*
         * HW supports TCP & UDP checksum offload for ipv4 and ipv6
         */
        static const qdf_nbuf_l4_rx_cksum_type_t
                cksum_table[][MAX_PROTO_VAL][MAX_IP_VER] = {
                {
                        /* non-fragmented IP packet */
                        /* non TCP/UDP packet */
                        {QDF_NBUF_RX_CKSUM_ZERO, QDF_NBUF_RX_CKSUM_ZERO},
                        /* TCP packet */
                        {QDF_NBUF_RX_CKSUM_TCP, QDF_NBUF_RX_CKSUM_TCPIPV6},
                        /* UDP packet */
                        {QDF_NBUF_RX_CKSUM_UDP, QDF_NBUF_RX_CKSUM_UDPIPV6},
                        /* invalid packet type */
                        {QDF_NBUF_RX_CKSUM_ZERO, QDF_NBUF_RX_CKSUM_ZERO},
                },
                {
                        /* fragmented IP packet */
                        {QDF_NBUF_RX_CKSUM_ZERO, QDF_NBUF_RX_CKSUM_ZERO},
                        {QDF_NBUF_RX_CKSUM_ZERO, QDF_NBUF_RX_CKSUM_ZERO},
                        {QDF_NBUF_RX_CKSUM_ZERO, QDF_NBUF_RX_CKSUM_ZERO},
                        {QDF_NBUF_RX_CKSUM_ZERO, QDF_NBUF_RX_CKSUM_ZERO},
                }
        };
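
        /*
         * Example lookup: a non-fragmented IPv4 TCP frame has ip_frag = 0,
         * tcp_proto = 1, udp_proto = 0 and ipv6_proto = 0, so proto = 1
         * and cksum_table[0][1][0] = QDF_NBUF_RX_CKSUM_TCP.
         */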

        qdf_nbuf_rx_cksum_t cksum = {
                cksum_table[rx_msdu->ip_frag][proto][rx_msdu->ipv6_proto],
                QDF_NBUF_RX_CKSUM_NONE,
                0
        };

        if (cksum.l4_type !=
            (qdf_nbuf_l4_rx_cksum_type_t) QDF_NBUF_RX_CKSUM_NONE) {
                cksum.l4_result =
                        ((*(uint32_t *) &rx_desc->attention) &
                         RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK) ?
                        QDF_NBUF_RX_CKSUM_NONE :
                        QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
        }
        qdf_nbuf_set_rx_cksum(msdu, &cksum);
#undef MAX_IP_VER
#undef MAX_PROTO_VAL
}

#if defined(CONFIG_HL_SUPPORT)

static void
htt_set_checksum_result_hl(qdf_nbuf_t msdu,
                           struct htt_host_rx_desc_base *rx_desc)
{
        u_int8_t flag = ((u_int8_t *)rx_desc -
                         sizeof(struct hl_htt_rx_ind_base))[
                                HTT_ENDIAN_BYTE_IDX_SWAP(
                                        HTT_RX_IND_HL_FLAG_OFFSET)];

        int is_ipv6 = flag & HTT_RX_IND_HL_FLAG_IPV6 ? 1 : 0;
        int is_tcp = flag & HTT_RX_IND_HL_FLAG_TCP ? 1 : 0;
        int is_udp = flag & HTT_RX_IND_HL_FLAG_UDP ? 1 : 0;

        qdf_nbuf_rx_cksum_t cksum = {
                QDF_NBUF_RX_CKSUM_NONE,
                QDF_NBUF_RX_CKSUM_NONE,
                0
        };

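        /*
         * Switch key encoding: bit 2 = UDP, bit 1 = TCP, bit 0 = IPv6,
         * e.g. 0x4 = UDP/IPv4, 0x5 = UDP/IPv6, 0x2 = TCP/IPv4,
         * 0x3 = TCP/IPv6.
         */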
        switch ((is_udp << 2) | (is_tcp << 1) | (is_ipv6 << 0)) {
        case 0x4:
                cksum.l4_type = QDF_NBUF_RX_CKSUM_UDP;
                break;
        case 0x2:
                cksum.l4_type = QDF_NBUF_RX_CKSUM_TCP;
                break;
        case 0x5:
                cksum.l4_type = QDF_NBUF_RX_CKSUM_UDPIPV6;
                break;
        case 0x3:
                cksum.l4_type = QDF_NBUF_RX_CKSUM_TCPIPV6;
                break;
        default:
                cksum.l4_type = QDF_NBUF_RX_CKSUM_NONE;
                break;
        }
        if (cksum.l4_type != (qdf_nbuf_l4_rx_cksum_type_t)
                                QDF_NBUF_RX_CKSUM_NONE) {
                cksum.l4_result = flag & HTT_RX_IND_HL_FLAG_C4_FAILED ?
                        QDF_NBUF_RX_CKSUM_NONE :
                        QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
        }
        qdf_nbuf_set_rx_cksum(msdu, &cksum);
}
#endif

#else

static inline
void htt_set_checksum_result_ll(htt_pdev_handle pdev, qdf_nbuf_t msdu,
                                struct htt_host_rx_desc_base *rx_desc)
{
}

#if defined(CONFIG_HL_SUPPORT)

static inline
void htt_set_checksum_result_hl(qdf_nbuf_t msdu,
                                struct htt_host_rx_desc_base *rx_desc)
{
}
#endif

#endif

#ifdef DEBUG_DMA_DONE
#define MAX_DONE_BIT_CHECK_ITER 5
#endif

#ifndef CONFIG_HL_SUPPORT
static int
htt_rx_amsdu_pop_ll(htt_pdev_handle pdev,
                    qdf_nbuf_t rx_ind_msg,
                    qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu,
                    uint32_t *msdu_count)
{
        int msdu_len, msdu_chaining = 0;
        qdf_nbuf_t msdu;
        struct htt_host_rx_desc_base *rx_desc;
        uint8_t *rx_ind_data;
        uint32_t *msg_word, num_msdu_bytes;
        enum htt_t2h_msg_type msg_type;
        uint8_t pad_bytes = 0;

        HTT_ASSERT1(htt_rx_ring_elems(pdev) != 0);
        rx_ind_data = qdf_nbuf_data(rx_ind_msg);
        msg_word = (uint32_t *) rx_ind_data;

        msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);

        if (qdf_unlikely(HTT_T2H_MSG_TYPE_RX_FRAG_IND == msg_type)) {
                num_msdu_bytes = HTT_RX_FRAG_IND_FW_RX_DESC_BYTES_GET(
                        *(msg_word + HTT_RX_FRAG_IND_HDR_PREFIX_SIZE32));
        } else {
                num_msdu_bytes = HTT_RX_IND_FW_RX_DESC_BYTES_GET(
                        *(msg_word
                          + HTT_RX_IND_HDR_PREFIX_SIZE32
                          + HTT_RX_PPDU_DESC_SIZE32));
        }
        msdu = *head_msdu = htt_rx_netbuf_pop(pdev);
        while (1) {
                int last_msdu, msdu_len_invalid, msdu_chained;
                int byte_offset;
                qdf_nbuf_t next;

                /*
                 * Set the netbuf length to be the entire buffer length
                 * initially, so the unmap will unmap the entire buffer.
                 */
                qdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
#ifdef DEBUG_DMA_DONE
                qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_BIDIRECTIONAL);
#else
                qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_FROM_DEVICE);
#endif

                /* cache consistency has been taken care of by qdf_nbuf_unmap */

                /*
                 * Now read the rx descriptor.
                 * Set the length to the appropriate value.
                 * Check if this MSDU completes a MPDU.
                 */
                rx_desc = htt_rx_desc(msdu);
#if defined(HELIUMPLUS)
                if (HTT_WIFI_IP(pdev, 2, 0))
                        pad_bytes = rx_desc->msdu_end.l3_header_padding;
#endif /* defined(HELIUMPLUS) */
                /*
                 * Make the netbuf's data pointer point to the payload rather
                 * than the descriptor.
                 */

                qdf_nbuf_pull_head(msdu,
                                   HTT_RX_STD_DESC_RESERVATION + pad_bytes);

                /*
                 * Sanity check - confirm the HW is finished filling in
                 * the rx data.
                 * If the HW and SW are working correctly, then it's guaranteed
                 * that the HW's MAC DMA is done before this point in the SW.
                 * To prevent the case that we handle a stale Rx descriptor,
                 * just assert for now until we have a way to recover.
                 */

#ifdef DEBUG_DMA_DONE
                if (qdf_unlikely(!((*(uint32_t *) &rx_desc->attention)
                                   & RX_ATTENTION_0_MSDU_DONE_MASK))) {

                        int dbg_iter = MAX_DONE_BIT_CHECK_ITER;

                        QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
                                  "malformed frame");

                        while (dbg_iter &&
                               (!((*(uint32_t *) &rx_desc->attention) &
                                  RX_ATTENTION_0_MSDU_DONE_MASK))) {
                                qdf_mdelay(1);

                                qdf_invalidate_range((void *)rx_desc,
                                                     (void *)((char *)rx_desc +
                                                     HTT_RX_STD_DESC_RESERVATION));

                                QDF_TRACE(QDF_MODULE_ID_HTT,
                                          QDF_TRACE_LEVEL_INFO,
                                          "debug iter %d success %d", dbg_iter,
                                          pdev->rx_ring.dbg_sync_success);

                                dbg_iter--;
                        }

                        if (qdf_unlikely(!((*(uint32_t *) &rx_desc->attention)
                                           & RX_ATTENTION_0_MSDU_DONE_MASK))) {

#ifdef HTT_RX_RESTORE
                                QDF_TRACE(QDF_MODULE_ID_HTT,
                                          QDF_TRACE_LEVEL_ERROR,
                                          "RX done bit error detected!");

                                qdf_nbuf_set_next(msdu, NULL);
                                *tail_msdu = msdu;
                                pdev->rx_ring.rx_reset = 1;
                                return msdu_chaining;
#else
                                wma_cli_set_command(0, GEN_PARAM_CRASH_INJECT,
                                                    0, GEN_CMD);
                                HTT_ASSERT_ALWAYS(0);
#endif
                        }
                        pdev->rx_ring.dbg_sync_success++;
                        QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
                                  "debug iter %d success %d", dbg_iter,
                                  pdev->rx_ring.dbg_sync_success);
                }
#else
                HTT_ASSERT_ALWAYS((*(uint32_t *) &rx_desc->attention) &
                                  RX_ATTENTION_0_MSDU_DONE_MASK);
#endif
                /*
                 * Copy the FW rx descriptor for this MSDU from the rx
                 * indication message into the MSDU's netbuf.
                 * HL uses the same rx indication message definition as LL, and
                 * simply appends new info (fields from the HW rx desc, and the
                 * MSDU payload itself).
                 * So, the offset into the rx indication message only has to
                 * account for the standard offset of the per-MSDU FW rx
                 * desc info within the message, and how many bytes of the
                 * per-MSDU FW rx desc info have already been consumed.
                 * (And the endianness of the host,
                 * since for a big-endian host, the rx ind message contents,
                 * including the per-MSDU rx desc bytes, were byteswapped during
                 * upload.)
                 */
                if (pdev->rx_ind_msdu_byte_idx < num_msdu_bytes) {
                        if (qdf_unlikely
                                    (HTT_T2H_MSG_TYPE_RX_FRAG_IND == msg_type))
                                byte_offset =
                                        HTT_ENDIAN_BYTE_IDX_SWAP
                                        (HTT_RX_FRAG_IND_FW_DESC_BYTE_OFFSET);
                        else
                                byte_offset =
                                        HTT_ENDIAN_BYTE_IDX_SWAP
                                        (HTT_RX_IND_FW_RX_DESC_BYTE_OFFSET +
                                         pdev->rx_ind_msdu_byte_idx);

                        *((uint8_t *) &rx_desc->fw_desc.u.val) =
                                rx_ind_data[byte_offset];
                        /*
                         * The target is expected to only provide the basic
                         * per-MSDU rx descriptors. Just to be sure,
                         * verify that the target has not attached
                         * extension data (e.g. LRO flow ID).
                         */
                        /*
                         * The assertion below currently doesn't work for
                         * RX_FRAG_IND messages, since their format differs
                         * from the RX_IND format (no FW rx PPDU desc in
                         * the current RX_FRAG_IND message).
                         * If the RX_FRAG_IND message format is updated to match
                         * the RX_IND message format, then the following
                         * assertion can be restored.
                         */
                        /*
                         * qdf_assert((rx_ind_data[byte_offset] &
                         * FW_RX_DESC_EXT_M) == 0);
                         */
                        pdev->rx_ind_msdu_byte_idx += 1;
                        /* or more, if there's ext data */
                } else {
                        /*
                         * When an oversized A-MSDU happens, the FW will lose
                         * some of the MSDU status - in this case, fewer FW
                         * descriptors are provided than there are MSDUs in
                         * the MPDU.
                         * Mark the FW descriptors so that the MSDUs are still
                         * delivered to the upper stack, if the MPDU has no
                         * CRC error.
                         *
                         * FIX THIS - the FW descriptors are actually for the
                         * MSDUs at the end of this A-MSDU, not the beginning.
                         */
1372 *((uint8_t *) &rx_desc->fw_desc.u.val) = 0;
1373 }
1374
1375 /*
1376 * TCP/UDP checksum offload support
1377 */
1378 htt_set_checksum_result_ll(pdev, msdu, rx_desc);
1379
1380 msdu_len_invalid = (*(uint32_t *) &rx_desc->attention) &
1381 RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK;
1382 msdu_chained = (((*(uint32_t *) &rx_desc->frag_info) &
1383 RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK) >>
1384 RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB);
1385 msdu_len =
1386 ((*((uint32_t *) &rx_desc->msdu_start)) &
1387 RX_MSDU_START_0_MSDU_LENGTH_MASK) >>
1388 RX_MSDU_START_0_MSDU_LENGTH_LSB;
1389
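		/*
		 * For an unchained MSDU with a valid length, the netbuf
		 * still covers the entire HTT_RX_BUF_SIZE rx buffer; trim
		 * the tail so that only the rx descriptor plus msdu_len
		 * bytes of payload remain.
		 */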
1390 do {
1391 if (!msdu_len_invalid && !msdu_chained) {
1392#if defined(PEREGRINE_1_0_ZERO_LEN_PHY_ERR_WAR)
1393 if (msdu_len > 0x3000)
1394 break;
1395#endif
Nirav Shahcbc6d722016-03-01 16:24:53 +05301396 qdf_nbuf_trim_tail(msdu,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001397 HTT_RX_BUF_SIZE -
1398 (RX_STD_DESC_SIZE +
1399 msdu_len));
1400 }
1401 } while (0);
1402
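		/*
		 * A chained MSDU spills over into further rx ring buffers:
		 * ring2_more_count gave the number of additional buffers,
		 * each used in full except the last, which is trimmed to
		 * the remaining msdu_len.
		 */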
1403 while (msdu_chained--) {
Yun Park16cf32a2017-04-03 10:49:06 -07001404 next = htt_rx_netbuf_pop(pdev);
Nirav Shahcbc6d722016-03-01 16:24:53 +05301405 qdf_nbuf_set_pktlen(next, HTT_RX_BUF_SIZE);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001406 msdu_len -= HTT_RX_BUF_SIZE;
Nirav Shahcbc6d722016-03-01 16:24:53 +05301407 qdf_nbuf_set_next(msdu, next);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001408 msdu = next;
1409 msdu_chaining = 1;
1410
1411 if (msdu_chained == 0) {
1412 /* Trim the last one to the correct size -
1413 * accounting for inconsistent HW lengths
1414 * causing length overflows and underflows
1415 */
Yun Park16cf32a2017-04-03 10:49:06 -07001416 if (((unsigned int)msdu_len) >
1417 ((unsigned int)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001418 (HTT_RX_BUF_SIZE - RX_STD_DESC_SIZE))) {
1419 msdu_len =
1420 (HTT_RX_BUF_SIZE -
1421 RX_STD_DESC_SIZE);
1422 }
1423
Nirav Shahcbc6d722016-03-01 16:24:53 +05301424 qdf_nbuf_trim_tail(next,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001425 HTT_RX_BUF_SIZE -
1426 (RX_STD_DESC_SIZE +
1427 msdu_len));
1428 }
1429 }
1430
1431 last_msdu =
1432 ((*(((uint32_t *) &rx_desc->msdu_end) + 4)) &
1433 RX_MSDU_END_4_LAST_MSDU_MASK) >>
1434 RX_MSDU_END_4_LAST_MSDU_LSB;
1435
1436 if (last_msdu) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05301437 qdf_nbuf_set_next(msdu, NULL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001438 break;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001439 }
Yun Park16cf32a2017-04-03 10:49:06 -07001440
1441 next = htt_rx_netbuf_pop(pdev);
1442 qdf_nbuf_set_next(msdu, next);
1443 msdu = next;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001444 }
1445 *tail_msdu = msdu;
1446
1447 /*
1448 * Don't refill the ring yet.
1449 * First, the elements popped here are still in use - it is
1450 * not safe to overwrite them until the matching call to
1451 * mpdu_desc_list_next.
1452 * Second, for efficiency it is preferable to refill the rx ring
1453 * with 1 PPDU's worth of rx buffers (something like 32 x 3 buffers),
	 * rather than one MPDU's worth of rx buffers (something like 3).
1455 * Consequently, we'll rely on the txrx SW to tell us when it is done
1456 * pulling all the PPDU's rx buffers out of the rx ring, and then
1457 * refill it just once.
1458 */
1459 return msdu_chaining;
1460}
Nirav Shaheb017be2018-02-15 11:20:58 +05301461#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001462
Siddarth Poddar1df1cd82016-04-27 17:32:21 +05301463#if defined(CONFIG_HL_SUPPORT)
1464
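/**
 * htt_rx_amsdu_pop_hl() - pop an MSDU from an HL rx indication message
 * @pdev: handle to the HTT pdev
 * @rx_ind_msg: HL rx indication message, which also carries the payload
 * @head_msdu: return variable pointing to the head MSDU
 * @tail_msdu: return variable pointing to the tail MSDU
 * @msdu_count: unused here; present to match the LL pop prototype
 *
 * For HL the rx indication message itself carries the rx descriptor and
 * the MSDU payload, so the message buffer is reused as the single-element
 * MSDU list instead of popping netbufs from a DMA rx ring.
 *
 * Return: 0 (no MSDU chaining)
 */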
1465static int
1466htt_rx_amsdu_pop_hl(
1467 htt_pdev_handle pdev,
1468 qdf_nbuf_t rx_ind_msg,
1469 qdf_nbuf_t *head_msdu,
Himanshu Agarwal5f5e1662017-05-24 12:37:09 +05301470 qdf_nbuf_t *tail_msdu,
1471 uint32_t *msdu_count)
Siddarth Poddar1df1cd82016-04-27 17:32:21 +05301472{
1473 pdev->rx_desc_size_hl =
1474 (qdf_nbuf_data(rx_ind_msg))
1475 [HTT_ENDIAN_BYTE_IDX_SWAP(
1476 HTT_RX_IND_HL_RX_DESC_LEN_OFFSET)];
1477
1478 /* point to the rx desc */
1479 qdf_nbuf_pull_head(rx_ind_msg,
1480 sizeof(struct hl_htt_rx_ind_base));
1481 *head_msdu = *tail_msdu = rx_ind_msg;
1482
1483 htt_set_checksum_result_hl(rx_ind_msg,
1484 (struct htt_host_rx_desc_base *)
1485 (qdf_nbuf_data(rx_ind_msg)));
1486
1487 qdf_nbuf_set_next(*tail_msdu, NULL);
1488 return 0;
1489}
1490
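/**
 * htt_rx_frag_pop_hl() - pop a fragment from an HL rx fragment indication
 * @pdev: handle to the HTT pdev
 * @frag_msg: HL rx fragment indication message, which also carries the
 *	payload
 * @head_msdu: return variable pointing to the head MSDU
 * @tail_msdu: return variable pointing to the tail MSDU
 * @msdu_count: unused here; present to match the LL pop prototype
 *
 * As in htt_rx_amsdu_pop_hl(), the indication message itself is reused as
 * the single-element MSDU list once the HTT headers are stripped.
 *
 * Return: 0
 */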
1491static int
1492htt_rx_frag_pop_hl(
1493 htt_pdev_handle pdev,
1494 qdf_nbuf_t frag_msg,
1495 qdf_nbuf_t *head_msdu,
Himanshu Agarwal5f5e1662017-05-24 12:37:09 +05301496 qdf_nbuf_t *tail_msdu,
1497 uint32_t *msdu_count)
Siddarth Poddar1df1cd82016-04-27 17:32:21 +05301498{
1499 qdf_nbuf_pull_head(frag_msg, HTT_RX_FRAG_IND_BYTES);
1500 pdev->rx_desc_size_hl =
1501 (qdf_nbuf_data(frag_msg))
1502 [HTT_ENDIAN_BYTE_IDX_SWAP(
1503 HTT_RX_IND_HL_RX_DESC_LEN_OFFSET)];
1504
1505 /* point to the rx desc */
1506 qdf_nbuf_pull_head(frag_msg,
1507 sizeof(struct hl_htt_rx_ind_base));
1508 *head_msdu = *tail_msdu = frag_msg;
1509
1510 qdf_nbuf_set_next(*tail_msdu, NULL);
1511 return 0;
1512}
1513
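/*
 * For HL, offloaded MSDUs are popped straight out of the deliver message,
 * one per indication, so the available count is always 1.
 */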
1514static inline int
Tiger Yu6211cd72017-12-08 15:48:46 +08001515htt_rx_offload_msdu_cnt_hl(
1516 htt_pdev_handle pdev)
1517{
1518 return 1;
1519}
1520
1521static inline int
Siddarth Poddar1df1cd82016-04-27 17:32:21 +05301522htt_rx_offload_msdu_pop_hl(htt_pdev_handle pdev,
1523 qdf_nbuf_t offload_deliver_msg,
1524 int *vdev_id,
1525 int *peer_id,
1526 int *tid,
1527 u_int8_t *fw_desc,
1528 qdf_nbuf_t *head_buf,
1529 qdf_nbuf_t *tail_buf)
1530{
Yu Wang053d3e72017-02-08 18:48:24 +08001531 qdf_nbuf_t buf;
gbian2259cb32016-09-30 15:56:56 +08001532 u_int32_t *msdu_hdr, msdu_len;
1533 int ret = 0;
Siddarth Poddar1df1cd82016-04-27 17:32:21 +05301534
gbian2259cb32016-09-30 15:56:56 +08001535 *head_buf = *tail_buf = buf = offload_deliver_msg;
Yu Wang053d3e72017-02-08 18:48:24 +08001536 msdu_hdr = (u_int32_t *)qdf_nbuf_data(buf);
	/* First dword holds the offload deliver indication header; skip it */
1538
1539 /* Second dword */
1540 msdu_hdr++;
1541 msdu_len = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_LEN_GET(*msdu_hdr);
1542 *peer_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_PEER_ID_GET(*msdu_hdr);
1543
1544 /* Third dword */
1545 msdu_hdr++;
1546 *vdev_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_VDEV_ID_GET(*msdu_hdr);
1547 *tid = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_TID_GET(*msdu_hdr);
1548 *fw_desc = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_DESC_GET(*msdu_hdr);
1549
Yun Park16cf32a2017-04-03 10:49:06 -07001550 qdf_nbuf_pull_head(buf, HTT_RX_OFFLOAD_DELIVER_IND_MSDU_HDR_BYTES
gbian2259cb32016-09-30 15:56:56 +08001551 + HTT_RX_OFFLOAD_DELIVER_IND_HDR_BYTES);
1552
Yu Wang053d3e72017-02-08 18:48:24 +08001553 if (msdu_len <= qdf_nbuf_len(buf)) {
1554 qdf_nbuf_set_pktlen(buf, msdu_len);
gbian2259cb32016-09-30 15:56:56 +08001555 } else {
Poddar, Siddarth16264472017-03-14 19:39:43 +05301556 QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
1557 "%s: drop frame with invalid msdu len %d %d",
1558 __func__, msdu_len, (int)qdf_nbuf_len(buf));
Yu Wang053d3e72017-02-08 18:48:24 +08001559 qdf_nbuf_free(offload_deliver_msg);
gbian2259cb32016-09-30 15:56:56 +08001560 ret = -1;
1561 }
1562
1563 return ret;
1564}
Siddarth Poddar1df1cd82016-04-27 17:32:21 +05301565#endif
1566
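/*
 * For LL, offloaded MSDUs occupy rx ring buffers, so the available count
 * is the number of elements currently held in the rx ring.
 */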
Tiger Yu6211cd72017-12-08 15:48:46 +08001567static inline int
1568htt_rx_offload_msdu_cnt_ll(
1569 htt_pdev_handle pdev)
1570{
1571 return htt_rx_ring_elems(pdev);
1572}
1573
Nirav Shaheb017be2018-02-15 11:20:58 +05301574#ifndef CONFIG_HL_SUPPORT
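/**
 * htt_rx_offload_msdu_pop_ll() - pop an offload-delivered MSDU from the
 *	rx ring
 * @pdev: handle to the HTT pdev
 * @offload_deliver_msg: HTT offload deliver indication message
 * @vdev_id: return variable for the vdev ID parsed from the MSDU header
 * @peer_id: return variable for the peer ID
 * @tid: return variable for the traffic ID
 * @fw_desc: return variable for the FW rx descriptor byte
 * @head_buf: return variable pointing to the head of the popped buffers
 * @tail_buf: return variable pointing to the tail of the popped buffers
 *
 * Return: 0 on success, 1 if the netbuf pop fails
 */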
Jeff Johnson0e60ce52016-10-07 12:29:43 -07001575static int
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001576htt_rx_offload_msdu_pop_ll(htt_pdev_handle pdev,
Nirav Shahcbc6d722016-03-01 16:24:53 +05301577 qdf_nbuf_t offload_deliver_msg,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001578 int *vdev_id,
1579 int *peer_id,
1580 int *tid,
1581 uint8_t *fw_desc,
Nirav Shahcbc6d722016-03-01 16:24:53 +05301582 qdf_nbuf_t *head_buf, qdf_nbuf_t *tail_buf)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001583{
Nirav Shahcbc6d722016-03-01 16:24:53 +05301584 qdf_nbuf_t buf;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001585 uint32_t *msdu_hdr, msdu_len;
1586
1587 *head_buf = *tail_buf = buf = htt_rx_netbuf_pop(pdev);
Yun Parkd1c76472017-08-02 10:51:56 -07001588
1589 if (qdf_unlikely(NULL == buf)) {
1590 qdf_print("%s: netbuf pop failed!\n", __func__);
1591 return 1;
1592 }
1593
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001594 /* Fake read mpdu_desc to keep desc ptr in sync */
1595 htt_rx_mpdu_desc_list_next(pdev, NULL);
Nirav Shahcbc6d722016-03-01 16:24:53 +05301596 qdf_nbuf_set_pktlen(buf, HTT_RX_BUF_SIZE);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001597#ifdef DEBUG_DMA_DONE
Nirav Shahcbc6d722016-03-01 16:24:53 +05301598 qdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_BIDIRECTIONAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001599#else
Nirav Shahcbc6d722016-03-01 16:24:53 +05301600 qdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_FROM_DEVICE);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001601#endif
Nirav Shahcbc6d722016-03-01 16:24:53 +05301602 msdu_hdr = (uint32_t *) qdf_nbuf_data(buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001603
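	/*
	 * Per-MSDU offload deliver header layout (two dwords):
	 * dword 0: MSDU length | peer ID
	 * dword 1: vdev ID | TID | FW rx descriptor
	 */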
1604 /* First dword */
1605 msdu_len = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_LEN_GET(*msdu_hdr);
1606 *peer_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_PEER_ID_GET(*msdu_hdr);
1607
1608 /* Second dword */
1609 msdu_hdr++;
1610 *vdev_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_VDEV_ID_GET(*msdu_hdr);
1611 *tid = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_TID_GET(*msdu_hdr);
1612 *fw_desc = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_DESC_GET(*msdu_hdr);
1613
Nirav Shahcbc6d722016-03-01 16:24:53 +05301614 qdf_nbuf_pull_head(buf, HTT_RX_OFFLOAD_DELIVER_IND_MSDU_HDR_BYTES);
1615 qdf_nbuf_set_pktlen(buf, msdu_len);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001616 return 0;
1617}
1618
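/**
 * htt_rx_offload_paddr_msdu_pop_ll() - pop an offload-delivered MSDU
 *	referenced by physical address
 * @pdev: handle to the HTT pdev
 * @msg_word: first word of the in-order indication message payload
 * @msdu_iter: index of this MSDU within the indication message
 * @vdev_id: return variable for the vdev ID parsed from the MSDU header
 * @peer_id: return variable for the peer ID
 * @tid: return variable for the traffic ID
 * @fw_desc: return variable for the FW rx descriptor byte
 * @head_buf: return variable pointing to the head of the popped buffers
 * @tail_buf: return variable pointing to the tail of the popped buffers
 *
 * Return: 0 on success, 1 if the netbuf pop fails
 */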
1619int
1620htt_rx_offload_paddr_msdu_pop_ll(htt_pdev_handle pdev,
1621 uint32_t *msg_word,
1622 int msdu_iter,
1623 int *vdev_id,
1624 int *peer_id,
1625 int *tid,
1626 uint8_t *fw_desc,
Nirav Shahcbc6d722016-03-01 16:24:53 +05301627 qdf_nbuf_t *head_buf, qdf_nbuf_t *tail_buf)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001628{
Nirav Shahcbc6d722016-03-01 16:24:53 +05301629 qdf_nbuf_t buf;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001630 uint32_t *msdu_hdr, msdu_len;
1631 uint32_t *curr_msdu;
Orhan K AKYILDIZc8c27be2017-01-06 18:06:25 -08001632 qdf_dma_addr_t paddr;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001633
1634 curr_msdu =
1635 msg_word + (msdu_iter * HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS);
Orhan K AKYILDIZc8c27be2017-01-06 18:06:25 -08001636 paddr = htt_rx_in_ord_paddr_get(curr_msdu);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001637 *head_buf = *tail_buf = buf = htt_rx_in_order_netbuf_pop(pdev, paddr);
1638
Anurag Chouhanc5548422016-02-24 18:33:27 +05301639 if (qdf_unlikely(NULL == buf)) {
Anurag Chouhan6d760662016-02-20 16:05:43 +05301640 qdf_print("%s: netbuf pop failed!\n", __func__);
Yun Parkd1c76472017-08-02 10:51:56 -07001641 return 1;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001642 }
Nirav Shahcbc6d722016-03-01 16:24:53 +05301643 qdf_nbuf_set_pktlen(buf, HTT_RX_BUF_SIZE);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001644#ifdef DEBUG_DMA_DONE
Nirav Shahcbc6d722016-03-01 16:24:53 +05301645 qdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_BIDIRECTIONAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001646#else
Nirav Shahcbc6d722016-03-01 16:24:53 +05301647 qdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_FROM_DEVICE);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001648#endif
Himanshu Agarwal19141bb2016-07-20 20:15:48 +05301649
Himanshu Agarwal8d4cf472017-09-15 13:55:44 +05301650 if (pdev->cfg.is_first_wakeup_packet)
1651 htt_get_first_packet_after_wow_wakeup(
1652 msg_word + NEXT_FIELD_OFFSET_IN32, buf);
Himanshu Agarwal19141bb2016-07-20 20:15:48 +05301653
Nirav Shahcbc6d722016-03-01 16:24:53 +05301654 msdu_hdr = (uint32_t *) qdf_nbuf_data(buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001655
1656 /* First dword */
1657 msdu_len = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_LEN_GET(*msdu_hdr);
1658 *peer_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_PEER_ID_GET(*msdu_hdr);
1659
1660 /* Second dword */
1661 msdu_hdr++;
1662 *vdev_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_VDEV_ID_GET(*msdu_hdr);
1663 *tid = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_TID_GET(*msdu_hdr);
1664 *fw_desc = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_DESC_GET(*msdu_hdr);
1665
Nirav Shahcbc6d722016-03-01 16:24:53 +05301666 qdf_nbuf_pull_head(buf, HTT_RX_OFFLOAD_DELIVER_IND_MSDU_HDR_BYTES);
1667 qdf_nbuf_set_pktlen(buf, msdu_len);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001668 return 0;
1669}
Nirav Shaheb017be2018-02-15 11:20:58 +05301670#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001671
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001672#define MIN(a, b) (((a) < (b)) ? (a) : (b))
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001673
Nirav Shaheb017be2018-02-15 11:20:58 +05301674#ifndef CONFIG_HL_SUPPORT
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001675/**
1676 * htt_mon_rx_handle_amsdu_packet() - Handle consecutive fragments of amsdu
1677 * @msdu: pointer to first msdu of amsdu
1678 * @pdev: Handle to htt_pdev_handle
 * @msg_word: input and output variable; pointer to the HTT msg pointer,
 *	advanced past the fragments that are consumed
 * @amsdu_len: remaining length of all N-1 MSDUs
 * @frag_cnt: number of frags handled
 *
 * This function handles the (N-1) MSDUs of an A-MSDU; the Nth MSDU has
 * already been handled by the calling function. The N-1 MSDUs are tied
 * together using the frag list. The msdu_info field updated by the FW
 * indicates whether this is the last MSDU. All MSDUs before the last one
 * carry the maximum payload.
1687 *
1688 * Return: 1 on success and 0 on failure.
1689 */
Jeff Johnson0e60ce52016-10-07 12:29:43 -07001690static
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001691int htt_mon_rx_handle_amsdu_packet(qdf_nbuf_t msdu, htt_pdev_handle pdev,
Himanshu Agarwal5f5e1662017-05-24 12:37:09 +05301692 uint32_t **msg_word, uint32_t amsdu_len,
1693 uint32_t *frag_cnt)
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001694{
1695 qdf_nbuf_t frag_nbuf;
1696 qdf_nbuf_t prev_frag_nbuf;
1697 uint32_t len;
1698 uint32_t last_frag;
Orhan K AKYILDIZc8c27be2017-01-06 18:06:25 -08001699 qdf_dma_addr_t paddr;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001700
1701 *msg_word += HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS;
Orhan K AKYILDIZc8c27be2017-01-06 18:06:25 -08001702 paddr = htt_rx_in_ord_paddr_get(*msg_word);
1703 frag_nbuf = htt_rx_in_order_netbuf_pop(pdev, paddr);
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001704 if (qdf_unlikely(NULL == frag_nbuf)) {
1705 qdf_print("%s: netbuf pop failed!\n", __func__);
1706 return 0;
1707 }
Himanshu Agarwal5f5e1662017-05-24 12:37:09 +05301708 *frag_cnt = *frag_cnt + 1;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001709 last_frag = ((struct htt_rx_in_ord_paddr_ind_msdu_t *)*msg_word)->
1710 msdu_info;
1711 qdf_nbuf_append_ext_list(msdu, frag_nbuf, amsdu_len);
1712 qdf_nbuf_set_pktlen(frag_nbuf, HTT_RX_BUF_SIZE);
1713 qdf_nbuf_unmap(pdev->osdev, frag_nbuf, QDF_DMA_FROM_DEVICE);
	/* MSDUs other than the parent do not have an htt_host_rx_desc_base */
1715 len = MIN(amsdu_len, HTT_RX_BUF_SIZE);
1716 amsdu_len -= len;
1717 qdf_nbuf_trim_tail(frag_nbuf, HTT_RX_BUF_SIZE - len);
1718
1719 HTT_PKT_DUMP(qdf_trace_hex_dump(QDF_MODULE_ID_TXRX,
Poddar, Siddarth16264472017-03-14 19:39:43 +05301720 QDF_TRACE_LEVEL_INFO_HIGH,
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001721 qdf_nbuf_data(frag_nbuf),
1722 qdf_nbuf_len(frag_nbuf)));
1723 prev_frag_nbuf = frag_nbuf;
1724 while (!last_frag) {
1725 *msg_word += HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS;
Orhan K AKYILDIZc8c27be2017-01-06 18:06:25 -08001726 paddr = htt_rx_in_ord_paddr_get(*msg_word);
1727 frag_nbuf = htt_rx_in_order_netbuf_pop(pdev, paddr);
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001728 last_frag = ((struct htt_rx_in_ord_paddr_ind_msdu_t *)
1729 *msg_word)->msdu_info;
1730
1731 if (qdf_unlikely(NULL == frag_nbuf)) {
1732 qdf_print("%s: netbuf pop failed!\n", __func__);
1733 prev_frag_nbuf->next = NULL;
1734 return 0;
1735 }
Himanshu Agarwal5f5e1662017-05-24 12:37:09 +05301736 *frag_cnt = *frag_cnt + 1;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001737 qdf_nbuf_set_pktlen(frag_nbuf, HTT_RX_BUF_SIZE);
1738 qdf_nbuf_unmap(pdev->osdev, frag_nbuf, QDF_DMA_FROM_DEVICE);
1739
1740 len = MIN(amsdu_len, HTT_RX_BUF_SIZE);
1741 amsdu_len -= len;
1742 qdf_nbuf_trim_tail(frag_nbuf, HTT_RX_BUF_SIZE - len);
1743 HTT_PKT_DUMP(qdf_trace_hex_dump(QDF_MODULE_ID_TXRX,
Poddar, Siddarth16264472017-03-14 19:39:43 +05301744 QDF_TRACE_LEVEL_INFO_HIGH,
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001745 qdf_nbuf_data(frag_nbuf),
1746 qdf_nbuf_len(frag_nbuf)));
1747
1748 qdf_nbuf_set_next(prev_frag_nbuf, frag_nbuf);
1749 prev_frag_nbuf = frag_nbuf;
1750 }
1751 qdf_nbuf_set_next(prev_frag_nbuf, NULL);
1752 return 1;
1753}
1754
Mohit Khanna31776f82017-08-03 15:40:07 -07001755#define SHORT_PREAMBLE 1
1756#define LONG_PREAMBLE 0
1757#ifdef HELIUMPLUS
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001758/**
Mohit Khanna31776f82017-08-03 15:40:07 -07001759 * htt_rx_get_rate() - get rate info in terms of 500Kbps from htt_rx_desc
 * @l_sig_rate_select: selects OFDM (0) or CCK/DSSS (1) rate encoding
 * @l_sig_rate: L-SIG rate field from the PPDU start descriptor
 * @preamble: return variable set to SHORT_PREAMBLE or LONG_PREAMBLE
1762 *
1763 * If l_sig_rate_select is 0:
1764 * 0x8: OFDM 48 Mbps
1765 * 0x9: OFDM 24 Mbps
1766 * 0xA: OFDM 12 Mbps
1767 * 0xB: OFDM 6 Mbps
1768 * 0xC: OFDM 54 Mbps
1769 * 0xD: OFDM 36 Mbps
1770 * 0xE: OFDM 18 Mbps
1771 * 0xF: OFDM 9 Mbps
1772 * If l_sig_rate_select is 1:
1773 * 0x1: DSSS 1 Mbps long preamble
1774 * 0x2: DSSS 2 Mbps long preamble
1775 * 0x3: CCK 5.5 Mbps long preamble
1776 * 0x4: CCK 11 Mbps long preamble
1777 * 0x5: DSSS 2 Mbps short preamble
1778 * 0x6: CCK 5.5 Mbps
1779 * 0x7: CCK 11 Mbps short preamble
1780 *
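 * For example, OFDM 48 Mbps maps to 48 / 0.5 = 96 = 0x60 units, and
 * CCK 5.5 Mbps maps to 11 = 0xB units.
 *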
 * Return: rate in units of 500 kbps.
1782 */
1783static unsigned char htt_rx_get_rate(uint32_t l_sig_rate_select,
1784 uint32_t l_sig_rate, uint8_t *preamble)
1785{
1786 char ret = 0x0;
Himanshu Agarwalb956c032017-12-14 15:16:37 +05301787 *preamble = SHORT_PREAMBLE;
Mohit Khanna31776f82017-08-03 15:40:07 -07001788 if (l_sig_rate_select == 0) {
1789 switch (l_sig_rate) {
1790 case 0x8:
1791 ret = 0x60;
1792 break;
1793 case 0x9:
1794 ret = 0x30;
1795 break;
1796 case 0xA:
1797 ret = 0x18;
1798 break;
1799 case 0xB:
1800 ret = 0x0c;
1801 break;
1802 case 0xC:
1803 ret = 0x6c;
1804 break;
1805 case 0xD:
1806 ret = 0x48;
1807 break;
1808 case 0xE:
1809 ret = 0x24;
1810 break;
1811 case 0xF:
1812 ret = 0x12;
1813 break;
1814 default:
1815 break;
1816 }
1817 } else if (l_sig_rate_select == 1) {
1818 switch (l_sig_rate) {
1819 case 0x1:
1820 ret = 0x2;
1821 *preamble = LONG_PREAMBLE;
1822 break;
1823 case 0x2:
1824 ret = 0x4;
1825 *preamble = LONG_PREAMBLE;
1826 break;
1827 case 0x3:
1828 ret = 0xB;
1829 *preamble = LONG_PREAMBLE;
1830 break;
1831 case 0x4:
1832 ret = 0x16;
1833 *preamble = LONG_PREAMBLE;
1834 break;
1835 case 0x5:
1836 ret = 0x4;
Mohit Khanna31776f82017-08-03 15:40:07 -07001837 break;
1838 case 0x6:
1839 ret = 0xB;
1840 break;
1841 case 0x7:
1842 ret = 0x16;
Mohit Khanna31776f82017-08-03 15:40:07 -07001843 break;
1844 default:
1845 break;
1846 }
1847 } else {
1848 qdf_print("Invalid rate info\n");
1849 }
1850 return ret;
1851}
1852#else
1853/**
1854 * htt_rx_get_rate() - get rate info in terms of 500Kbps from htt_rx_desc
 * @l_sig_rate_select: selects OFDM (0) or CCK/DSSS (1) rate encoding
 * @l_sig_rate: L-SIG rate field from the PPDU start descriptor
 * @preamble: return variable set to SHORT_PREAMBLE or LONG_PREAMBLE
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001857 *
1858 * If l_sig_rate_select is 0:
1859 * 0x8: OFDM 48 Mbps
1860 * 0x9: OFDM 24 Mbps
1861 * 0xA: OFDM 12 Mbps
1862 * 0xB: OFDM 6 Mbps
1863 * 0xC: OFDM 54 Mbps
1864 * 0xD: OFDM 36 Mbps
1865 * 0xE: OFDM 18 Mbps
1866 * 0xF: OFDM 9 Mbps
1867 * If l_sig_rate_select is 1:
1868 * 0x8: CCK 11 Mbps long preamble
Mohit Khanna31776f82017-08-03 15:40:07 -07001869 * 0x9: CCK 5.5 Mbps long preamble
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001870 * 0xA: CCK 2 Mbps long preamble
1871 * 0xB: CCK 1 Mbps long preamble
1872 * 0xC: CCK 11 Mbps short preamble
1873 * 0xD: CCK 5.5 Mbps short preamble
1874 * 0xE: CCK 2 Mbps short preamble
1875 *
 * Return: rate in units of 500 kbps.
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001877 */
Mohit Khanna31776f82017-08-03 15:40:07 -07001878static unsigned char htt_rx_get_rate(uint32_t l_sig_rate_select,
1879 uint32_t l_sig_rate, uint8_t *preamble)
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001880{
Mohit Khanna31776f82017-08-03 15:40:07 -07001881 char ret = 0x0;
1882 *preamble = SHORT_PREAMBLE;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001883 if (l_sig_rate_select == 0) {
1884 switch (l_sig_rate) {
1885 case 0x8:
Mohit Khanna31776f82017-08-03 15:40:07 -07001886 ret = 0x60;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001887 break;
1888 case 0x9:
Mohit Khanna31776f82017-08-03 15:40:07 -07001889 ret = 0x30;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001890 break;
1891 case 0xA:
Mohit Khanna31776f82017-08-03 15:40:07 -07001892 ret = 0x18;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001893 break;
1894 case 0xB:
Mohit Khanna31776f82017-08-03 15:40:07 -07001895 ret = 0x0c;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001896 break;
1897 case 0xC:
Mohit Khanna31776f82017-08-03 15:40:07 -07001898 ret = 0x6c;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001899 break;
1900 case 0xD:
Mohit Khanna31776f82017-08-03 15:40:07 -07001901 ret = 0x48;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001902 break;
1903 case 0xE:
Mohit Khanna31776f82017-08-03 15:40:07 -07001904 ret = 0x24;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001905 break;
1906 case 0xF:
Mohit Khanna31776f82017-08-03 15:40:07 -07001907 ret = 0x12;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001908 break;
1909 default:
1910 break;
1911 }
1912 } else if (l_sig_rate_select == 1) {
1913 switch (l_sig_rate) {
1914 case 0x8:
Mohit Khanna31776f82017-08-03 15:40:07 -07001915 ret = 0x16;
1916 *preamble = LONG_PREAMBLE;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001917 break;
1918 case 0x9:
Mohit Khanna31776f82017-08-03 15:40:07 -07001919 ret = 0x0B;
1920 *preamble = LONG_PREAMBLE;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001921 break;
1922 case 0xA:
Mohit Khanna31776f82017-08-03 15:40:07 -07001923 ret = 0x4;
1924 *preamble = LONG_PREAMBLE;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001925 break;
1926 case 0xB:
Mohit Khanna31776f82017-08-03 15:40:07 -07001927 ret = 0x02;
1928 *preamble = LONG_PREAMBLE;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001929 break;
1930 case 0xC:
Mohit Khanna31776f82017-08-03 15:40:07 -07001931 ret = 0x16;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001932 break;
1933 case 0xD:
Mohit Khanna31776f82017-08-03 15:40:07 -07001934 ret = 0x0B;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001935 break;
1936 case 0xE:
Mohit Khanna31776f82017-08-03 15:40:07 -07001937 ret = 0x04;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001938 break;
1939 default:
1940 break;
1941 }
1942 } else {
1943 qdf_print("Invalid rate info\n");
1944 }
Mohit Khanna31776f82017-08-03 15:40:07 -07001945 return ret;
1946}
1947#endif /* HELIUMPLUS */
1948/**
1949 * htt_mon_rx_get_phy_info() - Get phy info
1950 * @rx_desc: Pointer to struct htt_host_rx_desc_base
1951 * @rx_status: Return variable updated with phy_info in rx_status
1952 *
1953 * Return: None
1954 */
1955static void htt_mon_rx_get_phy_info(struct htt_host_rx_desc_base *rx_desc,
1956 struct mon_rx_status *rx_status)
1957{
1958 uint8_t preamble = 0;
1959 uint8_t preamble_type = rx_desc->ppdu_start.preamble_type;
1960 uint8_t mcs = 0, nss = 0, sgi = 0, bw = 0, beamformed = 0;
1961 uint16_t vht_flags = 0, ht_flags = 0;
1962 uint32_t l_sig_rate_select = rx_desc->ppdu_start.l_sig_rate_select;
1963 uint32_t l_sig_rate = rx_desc->ppdu_start.l_sig_rate;
1964 bool is_stbc = 0, ldpc = 0;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001965
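	/*
	 * preamble_type from the ppdu_start descriptor selects the PHY
	 * mode: 4 = legacy OFDM/CCK, 8/9 = HT, 0x0c/0x0d = VHT.
	 */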
1966 switch (preamble_type) {
Mohit Khanna31776f82017-08-03 15:40:07 -07001967 case 4:
1968 /* legacy */
1969 rx_status->rate = htt_rx_get_rate(l_sig_rate_select, l_sig_rate,
1970 &preamble);
1971 break;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001972 case 8:
1973 is_stbc = ((VHT_SIG_A_2(rx_desc) >> 4) & 3);
1974 /* fallthrough */
1975 case 9:
Himanshu Agarwal7c8ab882017-04-24 13:47:20 +05301976 ht_flags = 1;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001977 sgi = (VHT_SIG_A_2(rx_desc) >> 7) & 0x01;
1978 bw = (VHT_SIG_A_1(rx_desc) >> 7) & 0x01;
1979 mcs = (VHT_SIG_A_1(rx_desc) & 0x7f);
1980 nss = mcs>>3;
1981 beamformed =
1982 (VHT_SIG_A_2(rx_desc) >> 8) & 0x1;
1983 break;
1984 case 0x0c:
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07001985 is_stbc = (VHT_SIG_A_2(rx_desc) >> 3) & 1;
1986 ldpc = (VHT_SIG_A_2(rx_desc) >> 2) & 1;
1987 /* fallthrough */
1988 case 0x0d:
1989 {
1990 uint8_t gid_in_sig = ((VHT_SIG_A_1(rx_desc) >> 4) & 0x3f);
1991
1992 vht_flags = 1;
1993 sgi = VHT_SIG_A_2(rx_desc) & 0x01;
1994 bw = (VHT_SIG_A_1(rx_desc) & 0x03);
1995 if (gid_in_sig == 0 || gid_in_sig == 63) {
1996 /* SU case */
1997 mcs = (VHT_SIG_A_2(rx_desc) >> 4) &
1998 0xf;
1999 nss = (VHT_SIG_A_1(rx_desc) >> 10) &
2000 0x7;
2001 } else {
Himanshu Agarwal7c8ab882017-04-24 13:47:20 +05302002 /* MU case */
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002003 uint8_t sta_user_pos =
2004 (uint8_t)((rx_desc->ppdu_start.reserved_4a >> 8)
2005 & 0x3);
2006 mcs = (rx_desc->ppdu_start.vht_sig_b >> 16);
2007 if (bw >= 2)
2008 mcs >>= 3;
2009 else if (bw > 0)
2010 mcs >>= 1;
2011 mcs &= 0xf;
2012 nss = (((VHT_SIG_A_1(rx_desc) >> 10) +
2013 sta_user_pos * 3) & 0x7);
2014 }
2015 beamformed = (VHT_SIG_A_2(rx_desc) >> 8) & 0x1;
2016 }
2017 /* fallthrough */
2018 default:
2019 break;
2020 }
2021
2022 rx_status->mcs = mcs;
Himanshu Agarwal7c8ab882017-04-24 13:47:20 +05302023 rx_status->bw = bw;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002024 rx_status->nr_ant = nss;
2025 rx_status->is_stbc = is_stbc;
2026 rx_status->sgi = sgi;
2027 rx_status->ldpc = ldpc;
2028 rx_status->beamformed = beamformed;
Himanshu Agarwal7c8ab882017-04-24 13:47:20 +05302029 rx_status->vht_flag_values3[0] = mcs << 0x4 | (nss + 1);
Himanshu Agarwal7c8ab882017-04-24 13:47:20 +05302030 rx_status->ht_flags = ht_flags;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002031 rx_status->vht_flags = vht_flags;
2032 rx_status->rtap_flags |= ((preamble == SHORT_PREAMBLE) ? BIT(1) : 0);
Himanshu Agarwal7c8ab882017-04-24 13:47:20 +05302033 if (bw == 0)
2034 rx_status->vht_flag_values2 = 0;
2035 else if (bw == 1)
2036 rx_status->vht_flag_values2 = 1;
2037 else if (bw == 2)
2038 rx_status->vht_flag_values2 = 4;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002039}
2040
2041/**
2042 * htt_mon_rx_get_rtap_flags() - Get radiotap flags
2043 * @rx_desc: Pointer to struct htt_host_rx_desc_base
2044 *
2045 * Return: Bitmapped radiotap flags.
2046 */
2047static uint8_t htt_mon_rx_get_rtap_flags(struct htt_host_rx_desc_base *rx_desc)
2048{
2049 uint8_t rtap_flags = 0;
2050
2051 /* WEP40 || WEP104 || WEP128 */
2052 if (rx_desc->mpdu_start.encrypt_type == 0 ||
2053 rx_desc->mpdu_start.encrypt_type == 1 ||
2054 rx_desc->mpdu_start.encrypt_type == 3)
2055 rtap_flags |= BIT(2);
2056
2057 /* IEEE80211_RADIOTAP_F_FRAG */
2058 if (rx_desc->attention.fragment)
2059 rtap_flags |= BIT(3);
2060
2061 /* IEEE80211_RADIOTAP_F_FCS */
2062 rtap_flags |= BIT(4);
2063
2064 /* IEEE80211_RADIOTAP_F_BADFCS */
2065 if (rx_desc->mpdu_end.fcs_err)
2066 rtap_flags |= BIT(6);
2067
2068 return rtap_flags;
2069}
2070
2071/**
2072 * htt_rx_mon_get_rx_status() - Update information about the rx status,
 * which is used later to update the radiotap header
 * @pdev: handle to the HTT pdev
 * @rx_desc: Pointer to struct htt_host_rx_desc_base
 * @rx_status: Return variable updated with rx_status
2076 *
2077 * Return: None
2078 */
Jeff Johnson0e60ce52016-10-07 12:29:43 -07002079static void htt_rx_mon_get_rx_status(htt_pdev_handle pdev,
2080 struct htt_host_rx_desc_base *rx_desc,
2081 struct mon_rx_status *rx_status)
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002082{
2083 uint16_t channel_flags = 0;
2084 struct mon_channel *ch_info = &pdev->mon_ch_info;
2085
2086 rx_status->tsft = (u_int64_t)TSF_TIMESTAMP(rx_desc);
2087 rx_status->chan_freq = ch_info->ch_freq;
2088 rx_status->chan_num = ch_info->ch_num;
2089 htt_mon_rx_get_phy_info(rx_desc, rx_status);
2090 rx_status->rtap_flags |= htt_mon_rx_get_rtap_flags(rx_desc);
2091 channel_flags |= rx_desc->ppdu_start.l_sig_rate_select ?
2092 IEEE80211_CHAN_CCK : IEEE80211_CHAN_OFDM;
2093 channel_flags |=
2094 (cds_chan_to_band(ch_info->ch_num) == CDS_BAND_2GHZ ?
2095 IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ);
2096
2097 rx_status->chan_flags = channel_flags;
2098 rx_status->ant_signal_db = rx_desc->ppdu_start.rssi_comb;
2099}
Nirav Shaheb017be2018-02-15 11:20:58 +05302100#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002101
2102#ifdef RX_HASH_DEBUG
2103#define HTT_RX_CHECK_MSDU_COUNT(msdu_count) HTT_ASSERT_ALWAYS(msdu_count)
2104#else
2105#define HTT_RX_CHECK_MSDU_COUNT(msdu_count) /* no-op */
2106#endif
2107
Nirav Shaheb017be2018-02-15 11:20:58 +05302108#ifndef CONFIG_HL_SUPPORT
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002109/**
2110 * htt_rx_mon_amsdu_rx_in_order_pop_ll() - Monitor mode HTT Rx in order pop
2111 * function
2112 * @pdev: Handle to htt_pdev_handle
2113 * @rx_ind_msg: In order indication message.
2114 * @head_msdu: Return variable pointing to head msdu.
 * @tail_msdu: Return variable pointing to tail msdu.
 * @replenish_cnt: Return variable counting the rx ring buffers consumed,
 *	i.e. how many the caller should replenish
2116 *
 * This function pops the MSDUs using the paddr:length entries of the
 * in-order indication message.
2119 *
2120 * Return: 1 for success, 0 on failure.
2121 */
Jeff Johnson0e60ce52016-10-07 12:29:43 -07002122static int htt_rx_mon_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
2123 qdf_nbuf_t rx_ind_msg,
2124 qdf_nbuf_t *head_msdu,
Himanshu Agarwal5f5e1662017-05-24 12:37:09 +05302125 qdf_nbuf_t *tail_msdu,
2126 uint32_t *replenish_cnt)
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002127{
Himanshu Agarwal5f5e1662017-05-24 12:37:09 +05302128 qdf_nbuf_t msdu, next, prev = NULL;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002129 uint8_t *rx_ind_data;
2130 uint32_t *msg_word;
2131 uint32_t msdu_count;
2132 struct htt_host_rx_desc_base *rx_desc;
2133 struct mon_rx_status rx_status = {0};
2134 uint32_t amsdu_len;
2135 uint32_t len;
2136 uint32_t last_frag;
Orhan K AKYILDIZc8c27be2017-01-06 18:06:25 -08002137 qdf_dma_addr_t paddr;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002138
2139 HTT_ASSERT1(htt_rx_in_order_ring_elems(pdev) != 0);
2140
2141 rx_ind_data = qdf_nbuf_data(rx_ind_msg);
2142 msg_word = (uint32_t *)rx_ind_data;
2143
Himanshu Agarwal5f5e1662017-05-24 12:37:09 +05302144 *replenish_cnt = 0;
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002145 HTT_PKT_DUMP(qdf_trace_hex_dump(QDF_MODULE_ID_TXRX,
Poddar, Siddarth16264472017-03-14 19:39:43 +05302146 QDF_TRACE_LEVEL_INFO_HIGH,
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002147 (void *)rx_ind_data,
2148 (int)qdf_nbuf_len(rx_ind_msg)));
2149
2150 /* Get the total number of MSDUs */
2151 msdu_count = HTT_RX_IN_ORD_PADDR_IND_MSDU_CNT_GET(*(msg_word + 1));
2152 HTT_RX_CHECK_MSDU_COUNT(msdu_count);
2153
2154 msg_word = (uint32_t *)(rx_ind_data +
2155 HTT_RX_IN_ORD_PADDR_IND_HDR_BYTES);
Orhan K AKYILDIZc8c27be2017-01-06 18:06:25 -08002156 paddr = htt_rx_in_ord_paddr_get(msg_word);
Himanshu Agarwal5f5e1662017-05-24 12:37:09 +05302157 msdu = htt_rx_in_order_netbuf_pop(pdev, paddr);
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002158
2159 if (qdf_unlikely(NULL == msdu)) {
2160 qdf_print("%s: netbuf pop failed!\n", __func__);
2161 *tail_msdu = NULL;
2162 return 0;
2163 }
Himanshu Agarwal5f5e1662017-05-24 12:37:09 +05302164 *replenish_cnt = *replenish_cnt + 1;
2165
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002166 while (msdu_count > 0) {
2167
2168 msdu_count--;
2169 /*
2170 * Set the netbuf length to be the entire buffer length
2171 * initially, so the unmap will unmap the entire buffer.
2172 */
2173 qdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
2174 qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_FROM_DEVICE);
2175
2176 /*
2177 * cache consistency has been taken care of by the
2178 * qdf_nbuf_unmap
2179 */
2180 rx_desc = htt_rx_desc(msdu);
Himanshu Agarwal5f5e1662017-05-24 12:37:09 +05302181 if ((unsigned int)(*(uint32_t *)&rx_desc->attention) &
2182 RX_DESC_ATTN_MPDU_LEN_ERR_BIT) {
2183 qdf_nbuf_free(msdu);
2184 last_frag = ((struct htt_rx_in_ord_paddr_ind_msdu_t *)
2185 msg_word)->msdu_info;
2186 while (!last_frag) {
2187 msg_word += HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS;
2188 paddr = htt_rx_in_ord_paddr_get(msg_word);
2189 msdu = htt_rx_in_order_netbuf_pop(pdev, paddr);
2190 last_frag = ((struct
2191 htt_rx_in_ord_paddr_ind_msdu_t *)
2192 msg_word)->msdu_info;
2193 if (qdf_unlikely(!msdu)) {
2194 qdf_print("%s: netbuf pop failed!\n",
2195 __func__);
2196 return 0;
2197 }
2198 *replenish_cnt = *replenish_cnt + 1;
2199 qdf_nbuf_unmap(pdev->osdev, msdu,
2200 QDF_DMA_FROM_DEVICE);
2201 qdf_nbuf_free(msdu);
2202 }
2203 msdu = prev;
2204 goto next_pop;
2205 }
2206
2207 if (!prev)
2208 (*head_msdu) = msdu;
2209 prev = msdu;
2210
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002211 HTT_PKT_DUMP(htt_print_rx_desc(rx_desc));
2212 /*
2213 * Make the netbuf's data pointer point to the payload rather
2214 * than the descriptor.
2215 */
2216 htt_rx_mon_get_rx_status(pdev, rx_desc, &rx_status);
2217 /*
2218 * 350 bytes of RX_STD_DESC size should be sufficient for
2219 * radiotap.
2220 */
2221 qdf_nbuf_update_radiotap(&rx_status, msdu,
2222 HTT_RX_STD_DESC_RESERVATION);
2223 amsdu_len = HTT_RX_IN_ORD_PADDR_IND_MSDU_LEN_GET(*(msg_word +
2224 NEXT_FIELD_OFFSET_IN32));
2225
2226 /*
		 * For an A-MSDU, cap the head buffer payload at
		 * MAX_RX_PAYLOAD_SZ; amsdu_len in that case is the total
		 * length of all the A-MSDU subframes together.
2229 */
2230 len = MIN(amsdu_len, MAX_RX_PAYLOAD_SZ);
2231 amsdu_len -= len;
2232 qdf_nbuf_trim_tail(msdu,
2233 HTT_RX_BUF_SIZE -
2234 (RX_STD_DESC_SIZE + len));
2235
2236
2237 HTT_PKT_DUMP(qdf_trace_hex_dump(QDF_MODULE_ID_TXRX,
Poddar, Siddarth16264472017-03-14 19:39:43 +05302238 QDF_TRACE_LEVEL_INFO_HIGH,
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002239 qdf_nbuf_data(msdu),
2240 qdf_nbuf_len(msdu)));
2241 last_frag = ((struct htt_rx_in_ord_paddr_ind_msdu_t *)
2242 msg_word)->msdu_info;
2243
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002244 /* Handle amsdu packet */
2245 if (!last_frag) {
2246 /*
			 * For an A-MSDU packet, msdu->len is the sum of all
			 * the MSDUs' lengths, and msdu->data_len is the sum
			 * of the lengths of the MSDUs other than the parent.
2250 */
2251 if (!htt_mon_rx_handle_amsdu_packet(msdu, pdev,
2252 &msg_word,
Himanshu Agarwal5f5e1662017-05-24 12:37:09 +05302253 amsdu_len,
2254 replenish_cnt)) {
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002255 qdf_print("%s: failed to handle amsdu packet\n",
2256 __func__);
2257 return 0;
2258 }
2259 }
Himanshu Agarwal5f5e1662017-05-24 12:37:09 +05302260
2261next_pop:
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002262 /* check if this is the last msdu */
2263 if (msdu_count) {
2264 msg_word += HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS;
Orhan K AKYILDIZc8c27be2017-01-06 18:06:25 -08002265 paddr = htt_rx_in_ord_paddr_get(msg_word);
2266 next = htt_rx_in_order_netbuf_pop(pdev, paddr);
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002267 if (qdf_unlikely(NULL == next)) {
2268 qdf_print("%s: netbuf pop failed!\n",
2269 __func__);
2270 *tail_msdu = NULL;
2271 return 0;
2272 }
Himanshu Agarwal5f5e1662017-05-24 12:37:09 +05302273 *replenish_cnt = *replenish_cnt + 1;
2274 if (msdu)
2275 qdf_nbuf_set_next(msdu, next);
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002276 msdu = next;
2277 } else {
2278 *tail_msdu = msdu;
Himanshu Agarwal5f5e1662017-05-24 12:37:09 +05302279 if (msdu)
2280 qdf_nbuf_set_next(msdu, NULL);
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002281 }
2282 }
2283
2284 return 1;
2285}
Nirav Shaheb017be2018-02-15 11:20:58 +05302286#endif
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002287
2288/**
2289 * htt_rx_mon_note_capture_channel() - Make note of channel to update in
2290 * radiotap
2291 * @pdev: handle to htt_pdev
2292 * @mon_ch: capture channel number.
2293 *
2294 * Return: None
2295 */
2296void htt_rx_mon_note_capture_channel(htt_pdev_handle pdev, int mon_ch)
2297{
2298 struct mon_channel *ch_info = &pdev->mon_ch_info;
2299
2300 ch_info->ch_num = mon_ch;
2301 ch_info->ch_freq = cds_chan_to_freq(mon_ch);
2302}
2303
Nirav Shahd59b3b22016-08-01 14:39:23 +05302304uint32_t htt_rx_amsdu_rx_in_order_get_pktlog(qdf_nbuf_t rx_ind_msg)
2305{
2306 uint32_t *msg_word;
2307
2308 msg_word = (uint32_t *) qdf_nbuf_data(rx_ind_msg);
2309 return HTT_RX_IN_ORD_PADDR_IND_PKTLOG_GET(*msg_word);
2310}
2311
Nirav Shaheb017be2018-02-15 11:20:58 +05302312#ifndef CONFIG_HL_SUPPORT
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002313/* Return values: 1 - success, 0 - failure */
Paul Zhang87251cc2017-08-16 18:23:48 +08002314#define RX_DESC_DISCARD_IS_SET ((*((u_int8_t *) &rx_desc->fw_desc.u.val)) & \
2315 FW_RX_DESC_DISCARD_M)
2316#define RX_DESC_MIC_ERR_IS_SET ((*((u_int8_t *) &rx_desc->fw_desc.u.val)) & \
2317 FW_RX_DESC_ANY_ERR_M)
2318
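/*
 * Note: despite its name, RX_DESC_MIC_ERR_IS_SET tests FW_RX_DESC_ANY_ERR_M,
 * i.e. it is set for any FW-flagged rx error; MIC failure is simply the
 * error of interest on this path.
 */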
Jeff Johnson0e60ce52016-10-07 12:29:43 -07002319static int
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002320htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
Nirav Shahcbc6d722016-03-01 16:24:53 +05302321 qdf_nbuf_t rx_ind_msg,
Himanshu Agarwal5f5e1662017-05-24 12:37:09 +05302322 qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu,
2323 uint32_t *replenish_cnt)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002324{
Nirav Shahcbc6d722016-03-01 16:24:53 +05302325 qdf_nbuf_t msdu, next, prev = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002326 uint8_t *rx_ind_data;
2327 uint32_t *msg_word;
Manjunathappa Prakash04f26442016-10-13 14:46:49 -07002328 uint32_t rx_ctx_id;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002329 unsigned int msdu_count = 0;
Nirav Shahda008342016-05-17 18:50:40 +05302330 uint8_t offload_ind, frag_ind;
Nirav Shah0d58a7e2016-04-26 22:54:12 +05302331 uint8_t peer_id;
Manjunathappa Prakash04f26442016-10-13 14:46:49 -07002332 struct htt_host_rx_desc_base *rx_desc;
Himanshu Agarwalf65bd4c2016-12-05 17:21:12 +05302333 enum rx_pkt_fate status = RX_PKT_FATE_SUCCESS;
Orhan K AKYILDIZc8c27be2017-01-06 18:06:25 -08002334 qdf_dma_addr_t paddr;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002335
2336 HTT_ASSERT1(htt_rx_in_order_ring_elems(pdev) != 0);
2337
Nirav Shahcbc6d722016-03-01 16:24:53 +05302338 rx_ind_data = qdf_nbuf_data(rx_ind_msg);
Manjunathappa Prakash04f26442016-10-13 14:46:49 -07002339 rx_ctx_id = QDF_NBUF_CB_RX_CTX_ID(rx_ind_msg);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002340 msg_word = (uint32_t *) rx_ind_data;
Nirav Shah0d58a7e2016-04-26 22:54:12 +05302341 peer_id = HTT_RX_IN_ORD_PADDR_IND_PEER_ID_GET(
2342 *(u_int32_t *)rx_ind_data);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002343
2344 offload_ind = HTT_RX_IN_ORD_PADDR_IND_OFFLOAD_GET(*msg_word);
Nirav Shahda008342016-05-17 18:50:40 +05302345 frag_ind = HTT_RX_IN_ORD_PADDR_IND_FRAG_GET(*msg_word);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002346
2347 /* Get the total number of MSDUs */
2348 msdu_count = HTT_RX_IN_ORD_PADDR_IND_MSDU_CNT_GET(*(msg_word + 1));
2349 HTT_RX_CHECK_MSDU_COUNT(msdu_count);
Nirav Shahda008342016-05-17 18:50:40 +05302350 ol_rx_update_histogram_stats(msdu_count, frag_ind, offload_ind);
Orhan K AKYILDIZfdd74de2016-12-15 12:08:04 -08002351 htt_rx_dbg_rxbuf_httrxind(pdev, msdu_count);
2352
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002353
2354 msg_word =
2355 (uint32_t *) (rx_ind_data + HTT_RX_IN_ORD_PADDR_IND_HDR_BYTES);
2356 if (offload_ind) {
2357 ol_rx_offload_paddr_deliver_ind_handler(pdev, msdu_count,
2358 msg_word);
2359 *head_msdu = *tail_msdu = NULL;
2360 return 0;
2361 }
2362
Orhan K AKYILDIZc8c27be2017-01-06 18:06:25 -08002363 paddr = htt_rx_in_ord_paddr_get(msg_word);
2364 (*head_msdu) = msdu = htt_rx_in_order_netbuf_pop(pdev, paddr);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002365
Anurag Chouhanc5548422016-02-24 18:33:27 +05302366 if (qdf_unlikely(NULL == msdu)) {
Anurag Chouhan6d760662016-02-20 16:05:43 +05302367 qdf_print("%s: netbuf pop failed!\n", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002368 *tail_msdu = NULL;
Sravan Kumar Kairam8ea5ee22017-09-08 16:28:09 +05302369 pdev->rx_ring.pop_fail_cnt++;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002370 return 0;
2371 }
2372
2373 while (msdu_count > 0) {
2374
2375 /*
2376 * Set the netbuf length to be the entire buffer length
2377 * initially, so the unmap will unmap the entire buffer.
2378 */
Nirav Shahcbc6d722016-03-01 16:24:53 +05302379 qdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002380#ifdef DEBUG_DMA_DONE
Nirav Shahcbc6d722016-03-01 16:24:53 +05302381 qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_BIDIRECTIONAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002382#else
Nirav Shahcbc6d722016-03-01 16:24:53 +05302383 qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_FROM_DEVICE);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002384#endif
2385
Nirav Shahcbc6d722016-03-01 16:24:53 +05302386 /* cache consistency has been taken care of by qdf_nbuf_unmap */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002387 rx_desc = htt_rx_desc(msdu);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002388 htt_rx_extract_lro_info(msdu, rx_desc);
2389
2390 /*
2391 * Make the netbuf's data pointer point to the payload rather
2392 * than the descriptor.
2393 */
Nirav Shahcbc6d722016-03-01 16:24:53 +05302394 qdf_nbuf_pull_head(msdu, HTT_RX_STD_DESC_RESERVATION);
Nirav Shah0d58a7e2016-04-26 22:54:12 +05302395
Himanshu Agarwal5c7e2f52016-10-27 15:16:54 +05302396 QDF_NBUF_CB_DP_TRACE_PRINT(msdu) = false;
Nirav Shah0d58a7e2016-04-26 22:54:12 +05302397 qdf_dp_trace_set_track(msdu, QDF_RX);
2398 QDF_NBUF_CB_TX_PACKET_TRACK(msdu) = QDF_NBUF_TX_PKT_DATA_TRACK;
Manjunathappa Prakash04f26442016-10-13 14:46:49 -07002399 QDF_NBUF_CB_RX_CTX_ID(msdu) = rx_ctx_id;
Nirav Shah0d58a7e2016-04-26 22:54:12 +05302400 DPTRACE(qdf_dp_trace(msdu,
2401 QDF_DP_TRACE_RX_HTT_PACKET_PTR_RECORD,
Venkata Sharath Chandra Manchala0b9fc632017-05-15 14:35:15 -07002402 QDF_TRACE_DEFAULT_PDEV_ID,
Nirav Shah0d58a7e2016-04-26 22:54:12 +05302403 qdf_nbuf_data_addr(msdu),
2404 sizeof(qdf_nbuf_data(msdu)), QDF_RX));
2405
Nirav Shahcbc6d722016-03-01 16:24:53 +05302406 qdf_nbuf_trim_tail(msdu,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002407 HTT_RX_BUF_SIZE -
2408 (RX_STD_DESC_SIZE +
2409 HTT_RX_IN_ORD_PADDR_IND_MSDU_LEN_GET(
Yun Park16cf32a2017-04-03 10:49:06 -07002410 *(msg_word + NEXT_FIELD_OFFSET_IN32))));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002411#if defined(HELIUMPLUS_DEBUG)
Poddar, Siddarthbd804202016-11-23 18:19:49 +05302412 ol_txrx_dump_pkt(msdu, 0, 64);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002413#endif
2414 *((uint8_t *) &rx_desc->fw_desc.u.val) =
Yun Park16cf32a2017-04-03 10:49:06 -07002415 HTT_RX_IN_ORD_PADDR_IND_FW_DESC_GET(*(msg_word +
2416 NEXT_FIELD_OFFSET_IN32));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002417
2418 msdu_count--;
2419
Himanshu Agarwalf65bd4c2016-12-05 17:21:12 +05302420 /* calling callback function for packet logging */
2421 if (pdev->rx_pkt_dump_cb) {
Paul Zhang87251cc2017-08-16 18:23:48 +08002422 if (qdf_unlikely(RX_DESC_MIC_ERR_IS_SET &&
2423 !RX_DESC_DISCARD_IS_SET))
Himanshu Agarwalf65bd4c2016-12-05 17:21:12 +05302424 status = RX_PKT_FATE_FW_DROP_INVALID;
2425 pdev->rx_pkt_dump_cb(msdu, peer_id, status);
2426 }
Himanshu Agarwal8d4cf472017-09-15 13:55:44 +05302427
2428 if (pdev->cfg.is_first_wakeup_packet)
2429 htt_get_first_packet_after_wow_wakeup(
2430 msg_word + NEXT_FIELD_OFFSET_IN32, msdu);
2431
Paul Zhang87251cc2017-08-16 18:23:48 +08002432 /* if discard flag is set (SA is self MAC), then
2433 * don't check mic failure.
2434 */
2435 if (qdf_unlikely(RX_DESC_MIC_ERR_IS_SET &&
2436 !RX_DESC_DISCARD_IS_SET)) {
Nirav Shah0d58a7e2016-04-26 22:54:12 +05302437 uint8_t tid =
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002438 HTT_RX_IN_ORD_PADDR_IND_EXT_TID_GET(
2439 *(u_int32_t *)rx_ind_data);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002440 ol_rx_mic_error_handler(pdev->txrx_pdev, tid, peer_id,
2441 rx_desc, msdu);
2442
2443 htt_rx_desc_frame_free(pdev, msdu);
2444 /* if this is the last msdu */
2445 if (!msdu_count) {
2446 /* if this is the only msdu */
2447 if (!prev) {
2448 *head_msdu = *tail_msdu = NULL;
2449 return 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002450 }
Yun Park16cf32a2017-04-03 10:49:06 -07002451 *tail_msdu = prev;
2452 qdf_nbuf_set_next(prev, NULL);
2453 return 1;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002454 } else { /* if this is not the last msdu */
2455 /* get the next msdu */
2456 msg_word += HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS;
Orhan K AKYILDIZc8c27be2017-01-06 18:06:25 -08002457 paddr = htt_rx_in_ord_paddr_get(msg_word);
2458 next = htt_rx_in_order_netbuf_pop(pdev, paddr);
Anurag Chouhanc5548422016-02-24 18:33:27 +05302459 if (qdf_unlikely(NULL == next)) {
Anurag Chouhan6d760662016-02-20 16:05:43 +05302460 qdf_print("%s: netbuf pop failed!\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002461 __func__);
2462 *tail_msdu = NULL;
Sravan Kumar Kairam8ea5ee22017-09-08 16:28:09 +05302463 pdev->rx_ring.pop_fail_cnt++;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002464 return 0;
2465 }
2466
2467 /* if this is not the first msdu, update the
2468 * next pointer of the preceding msdu
2469 */
2470 if (prev) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302471 qdf_nbuf_set_next(prev, next);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002472 } else {
Yun Park16cf32a2017-04-03 10:49:06 -07002473 /* if this is the first msdu, update the
2474 * head pointer
2475 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002476 *head_msdu = next;
2477 }
2478 msdu = next;
2479 continue;
2480 }
2481 }
2482
2483 /* Update checksum result */
2484 htt_set_checksum_result_ll(pdev, msdu, rx_desc);
2485
2486 /* check if this is the last msdu */
2487 if (msdu_count) {
2488 msg_word += HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS;
Orhan K AKYILDIZc8c27be2017-01-06 18:06:25 -08002489 paddr = htt_rx_in_ord_paddr_get(msg_word);
2490 next = htt_rx_in_order_netbuf_pop(pdev, paddr);
Anurag Chouhanc5548422016-02-24 18:33:27 +05302491 if (qdf_unlikely(NULL == next)) {
Anurag Chouhan6d760662016-02-20 16:05:43 +05302492 qdf_print("%s: netbuf pop failed!\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002493 __func__);
2494 *tail_msdu = NULL;
Sravan Kumar Kairam8ea5ee22017-09-08 16:28:09 +05302495 pdev->rx_ring.pop_fail_cnt++;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002496 return 0;
2497 }
Nirav Shahcbc6d722016-03-01 16:24:53 +05302498 qdf_nbuf_set_next(msdu, next);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002499 prev = msdu;
2500 msdu = next;
2501 } else {
2502 *tail_msdu = msdu;
Nirav Shahcbc6d722016-03-01 16:24:53 +05302503 qdf_nbuf_set_next(msdu, NULL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002504 }
2505 }
2506
2507 return 1;
2508}
Nirav Shaheb017be2018-02-15 11:20:58 +05302509#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002510
/*
 * FIXME: This is a HW definition not provided by the HW header files -
 * where does it belong?
 */
2512enum {
2513 HW_RX_DECAP_FORMAT_RAW = 0,
2514 HW_RX_DECAP_FORMAT_NWIFI,
2515 HW_RX_DECAP_FORMAT_8023,
2516 HW_RX_DECAP_FORMAT_ETH2,
2517};
2518
2519#define HTT_FCS_LEN (4)
2520
Dhanashri Atre06c7e362017-01-10 16:06:40 -08002521#if !defined(QCA6290_HEADERS_DEF)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002522static void
2523htt_rx_parse_ppdu_start_status(struct htt_host_rx_desc_base *rx_desc,
2524 struct ieee80211_rx_status *rs)
2525{
2526
2527 struct rx_ppdu_start *ppdu_start = &rx_desc->ppdu_start;
2528
2529 /* RSSI */
2530 rs->rs_rssi = ppdu_start->rssi_comb;
2531
2532 /* PHY rate */
Yun Park16cf32a2017-04-03 10:49:06 -07002533 /*
2534 * rs_ratephy coding
2535 * [b3 - b0]
2536 * 0 -> OFDM
2537 * 1 -> CCK
2538 * 2 -> HT
2539 * 3 -> VHT
2540 * OFDM / CCK
2541 * [b7 - b4 ] => LSIG rate
2542 * [b23 - b8 ] => service field
2543 * (b'12 static/dynamic,
2544 * b'14..b'13 BW for VHT)
2545 * [b31 - b24 ] => Reserved
2546 * HT / VHT
2547 * [b15 - b4 ] => SIG A_2 12 LSBs
2548 * [b31 - b16] => SIG A_1 16 LSBs
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002549 */
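	/*
	 * Example: a legacy OFDM 6 Mbps frame (preamble_type 0x4,
	 * l_sig_rate_select 0, l_sig_rate 0xB) yields
	 * rs_ratephy = 0x0 | (0xB << 4) | (service << 8).
	 */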
2550 if (ppdu_start->preamble_type == 0x4) {
2551 rs->rs_ratephy = ppdu_start->l_sig_rate_select;
2552 rs->rs_ratephy |= ppdu_start->l_sig_rate << 4;
2553 rs->rs_ratephy |= ppdu_start->service << 8;
2554 } else {
2555 rs->rs_ratephy = (ppdu_start->preamble_type & 0x4) ? 3 : 2;
2556#ifdef HELIUMPLUS
2557 rs->rs_ratephy |=
2558 (ppdu_start->ht_sig_vht_sig_ah_sig_a_2 & 0xFFF) << 4;
2559 rs->rs_ratephy |=
2560 (ppdu_start->ht_sig_vht_sig_ah_sig_a_1 & 0xFFFF) << 16;
2561#else
2562 rs->rs_ratephy |= (ppdu_start->ht_sig_vht_sig_a_2 & 0xFFF) << 4;
2563 rs->rs_ratephy |=
2564 (ppdu_start->ht_sig_vht_sig_a_1 & 0xFFFF) << 16;
2565#endif
2566 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002567}
2568
/* Utility stub with the same prototype as qdf_nbuf_clone() that just
 * returns the same nbuf unchanged
2571 */
2572static qdf_nbuf_t htt_rx_qdf_noclone_buf(qdf_nbuf_t buf)
2573{
2574 return buf;
2575}
Himanshu Agarwalcf9ecfa2017-01-23 15:56:26 +05302576
/* This function is used by monitor mode code to restitch an MSDU list
2578 * corresponding to an MPDU back into an MPDU by linking up the skbs.
2579 */
Nirav Shahcbc6d722016-03-01 16:24:53 +05302580qdf_nbuf_t
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002581htt_rx_restitch_mpdu_from_msdus(htt_pdev_handle pdev,
Nirav Shahcbc6d722016-03-01 16:24:53 +05302582 qdf_nbuf_t head_msdu,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002583 struct ieee80211_rx_status *rx_status,
Yun Park16cf32a2017-04-03 10:49:06 -07002584 unsigned int clone_not_reqd)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002585{
2586
Nirav Shahcbc6d722016-03-01 16:24:53 +05302587 qdf_nbuf_t msdu, mpdu_buf, prev_buf, msdu_orig, head_frag_list_cloned;
Yun Park16cf32a2017-04-03 10:49:06 -07002588 unsigned int decap_format, wifi_hdr_len, sec_hdr_len, msdu_llc_len,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002589 mpdu_buf_len, decap_hdr_pull_bytes, frag_list_sum_len, dir,
2590 is_amsdu, is_first_frag, amsdu_pad, msdu_len;
2591 struct htt_host_rx_desc_base *rx_desc;
2592 char *hdr_desc;
2593 unsigned char *dest;
2594 struct ieee80211_frame *wh;
2595 struct ieee80211_qoscntl *qos;
2596
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002597 /* The nbuf has been pulled just beyond the status and points to the
2598 * payload
2599 */
2600 msdu_orig = head_msdu;
2601 rx_desc = htt_rx_desc(msdu_orig);
2602
2603 /* Fill out the rx_status from the PPDU start and end fields */
2604 if (rx_desc->attention.first_mpdu) {
2605 htt_rx_parse_ppdu_start_status(rx_desc, rx_status);
2606
2607 /* The timestamp is no longer valid - It will be valid only for
2608 * the last MPDU
2609 */
2610 rx_status->rs_tstamp.tsf = ~0;
2611 }
2612
2613 decap_format =
2614 GET_FIELD(&rx_desc->msdu_start, RX_MSDU_START_2_DECAP_FORMAT);
2615
2616 head_frag_list_cloned = NULL;
2617
	/* Easy case - the MSDU status indicates that this is a non-decapped
	 * packet in RAW mode; just stitch the raw buffers together and
	 * return.
	 */
2622 if (decap_format == HW_RX_DECAP_FORMAT_RAW) {
		/* Note that this path might suffer from headroom
		 * unavailability,
2624 * but the RX status is usually enough
2625 */
Himanshu Agarwalcf9ecfa2017-01-23 15:56:26 +05302626 if (clone_not_reqd)
2627 mpdu_buf = htt_rx_qdf_noclone_buf(head_msdu);
2628 else
2629 mpdu_buf = qdf_nbuf_clone(head_msdu);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002630
Himanshu Agarwald4fe5f72017-02-20 11:28:44 +05302631 if (!mpdu_buf)
2632 goto mpdu_stitch_fail;
2633
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002634 prev_buf = mpdu_buf;
2635
2636 frag_list_sum_len = 0;
2637 is_first_frag = 1;
Nirav Shahcbc6d722016-03-01 16:24:53 +05302638 msdu_len = qdf_nbuf_len(mpdu_buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002639
2640 /* Drop the zero-length msdu */
2641 if (!msdu_len)
2642 goto mpdu_stitch_fail;
2643
Nirav Shahcbc6d722016-03-01 16:24:53 +05302644 msdu_orig = qdf_nbuf_next(head_msdu);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002645
2646 while (msdu_orig) {
2647
2648 /* TODO: intra AMSDU padding - do we need it ??? */
Himanshu Agarwalcf9ecfa2017-01-23 15:56:26 +05302649 if (clone_not_reqd)
2650 msdu = htt_rx_qdf_noclone_buf(msdu_orig);
2651 else
2652 msdu = qdf_nbuf_clone(msdu_orig);
2653
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002654 if (!msdu)
2655 goto mpdu_stitch_fail;
2656
2657 if (is_first_frag) {
2658 is_first_frag = 0;
2659 head_frag_list_cloned = msdu;
2660 }
2661
Nirav Shahcbc6d722016-03-01 16:24:53 +05302662 msdu_len = qdf_nbuf_len(msdu);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002663 /* Drop the zero-length msdu */
2664 if (!msdu_len)
2665 goto mpdu_stitch_fail;
2666
2667 frag_list_sum_len += msdu_len;
2668
2669 /* Maintain the linking of the cloned MSDUS */
Nirav Shahcbc6d722016-03-01 16:24:53 +05302670 qdf_nbuf_set_next_ext(prev_buf, msdu);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002671
2672 /* Move to the next */
2673 prev_buf = msdu;
Nirav Shahcbc6d722016-03-01 16:24:53 +05302674 msdu_orig = qdf_nbuf_next(msdu_orig);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002675 }
2676
		/* The last msdu length needs to be larger than HTT_FCS_LEN */
2678 if (msdu_len < HTT_FCS_LEN)
2679 goto mpdu_stitch_fail;
2680
Nirav Shahcbc6d722016-03-01 16:24:53 +05302681 qdf_nbuf_trim_tail(prev_buf, HTT_FCS_LEN);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002682
2683 /* If there were more fragments to this RAW frame */
2684 if (head_frag_list_cloned) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302685 qdf_nbuf_append_ext_list(mpdu_buf,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002686 head_frag_list_cloned,
2687 frag_list_sum_len);
2688 }
2689
2690 goto mpdu_stitch_done;
2691 }
2692
2693 /* Decap mode:
	 * Calculate the amount of header to strip from the decapped packet
	 * based on the decap type, and the corresponding number of raw
	 * header bytes to copy from the rx status
2697 */
2698
2699 hdr_desc = &rx_desc->rx_hdr_status[0];
2700
2701 /* Base size */
2702 wifi_hdr_len = sizeof(struct ieee80211_frame);
2703 wh = (struct ieee80211_frame *)hdr_desc;
2704
2705 dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
2706 if (dir == IEEE80211_FC1_DIR_DSTODS)
2707 wifi_hdr_len += 6;
2708
2709 is_amsdu = 0;
2710 if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
2711 qos = (struct ieee80211_qoscntl *)
2712 (hdr_desc + wifi_hdr_len);
2713 wifi_hdr_len += 2;
2714
2715 is_amsdu = (qos->i_qos[0] & IEEE80211_QOS_AMSDU);
2716 }
2717
2718 /* TODO: Any security headers associated with MPDU */
2719 sec_hdr_len = 0;
2720
2721 /* MSDU related stuff LLC - AMSDU subframe header etc */
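	/* 14 bytes of A-MSDU subframe header (DA + SA + length) plus an
	 * 8 byte LLC/SNAP header
	 */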
2722 msdu_llc_len = is_amsdu ? (14 + 8) : 8;
2723
2724 mpdu_buf_len = wifi_hdr_len + sec_hdr_len + msdu_llc_len;
2725
2726 /* "Decap" header to remove from MSDU buffer */
2727 decap_hdr_pull_bytes = 14;
2728
2729 /* Allocate a new nbuf for holding the 802.11 header retrieved from the
2730 * status of the now decapped first msdu. Leave enough headroom for
	 * accommodating any radiotap/prism-like PHY header
2732 */
2733#define HTT_MAX_MONITOR_HEADER (512)
Nirav Shahcbc6d722016-03-01 16:24:53 +05302734 mpdu_buf = qdf_nbuf_alloc(pdev->osdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002735 HTT_MAX_MONITOR_HEADER + mpdu_buf_len,
2736 HTT_MAX_MONITOR_HEADER, 4, false);
2737
2738 if (!mpdu_buf)
2739 goto mpdu_stitch_fail;
2740
2741 /* Copy the MPDU related header and enc headers into the first buffer
	 * - Note that there can be a 2 byte pad between header and enc header
2743 */
2744
2745 prev_buf = mpdu_buf;
Nirav Shahcbc6d722016-03-01 16:24:53 +05302746 dest = qdf_nbuf_put_tail(prev_buf, wifi_hdr_len);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002747 if (!dest)
2748 goto mpdu_stitch_fail;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302749 qdf_mem_copy(dest, hdr_desc, wifi_hdr_len);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002750 hdr_desc += wifi_hdr_len;
2751
2752 /* NOTE - This padding is present only in the RAW header status - not
2753 * when the MSDU data payload is in RAW format.
2754 */
2755 /* Skip the "IV pad" */
2756 if (wifi_hdr_len & 0x3)
2757 hdr_desc += 2;
2758
2759 /* The first LLC len is copied into the MPDU buffer */
2760 frag_list_sum_len = 0;
2761 frag_list_sum_len -= msdu_llc_len;
2762
2763 msdu_orig = head_msdu;
2764 is_first_frag = 1;
2765 amsdu_pad = 0;
2766
2767 while (msdu_orig) {
2768
2769 /* TODO: intra AMSDU padding - do we need it ??? */
2770
Himanshu Agarwalcf9ecfa2017-01-23 15:56:26 +05302771 if (clone_not_reqd)
2772 msdu = htt_rx_qdf_noclone_buf(msdu_orig);
2773 else
2774 msdu = qdf_nbuf_clone(msdu_orig);
2775
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002776 if (!msdu)
2777 goto mpdu_stitch_fail;
2778
2779 if (is_first_frag) {
2780 is_first_frag = 0;
2781 head_frag_list_cloned = msdu;
2782 } else {
2783
2784 /* Maintain the linking of the cloned MSDUS */
Nirav Shahcbc6d722016-03-01 16:24:53 +05302785 qdf_nbuf_set_next_ext(prev_buf, msdu);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002786
2787 /* Reload the hdr ptr only on non-first MSDUs */
2788 rx_desc = htt_rx_desc(msdu_orig);
2789 hdr_desc = &rx_desc->rx_hdr_status[0];
2790
2791 }
2792
2793 /* Copy this buffers MSDU related status into the prev buffer */
Nirav Shahcbc6d722016-03-01 16:24:53 +05302794 dest = qdf_nbuf_put_tail(prev_buf, msdu_llc_len + amsdu_pad);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002795 dest += amsdu_pad;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302796 qdf_mem_copy(dest, hdr_desc, msdu_llc_len);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002797
2798 /* Push the MSDU buffer beyond the decap header */
Nirav Shahcbc6d722016-03-01 16:24:53 +05302799 qdf_nbuf_pull_head(msdu, decap_hdr_pull_bytes);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002800 frag_list_sum_len +=
Nirav Shahcbc6d722016-03-01 16:24:53 +05302801 msdu_llc_len + qdf_nbuf_len(msdu) + amsdu_pad;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002802
Yun Park16cf32a2017-04-03 10:49:06 -07002803 /*
2804 * Set up intra-AMSDU pad to be added to start of next buffer -
2805 * AMSDU pad is 4 byte pad on AMSDU subframe
2806 */
Nirav Shahcbc6d722016-03-01 16:24:53 +05302807 amsdu_pad = (msdu_llc_len + qdf_nbuf_len(msdu)) & 0x3;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002808 amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;
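		/*
		 * Worked example: if msdu_llc_len + qdf_nbuf_len(msdu) is 46,
		 * then 46 & 0x3 = 2 and amsdu_pad = 4 - 2 = 2, i.e. two pad
		 * bytes will precede the next subframe's copied LLC header.
		 */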
2809
Yun Park16cf32a2017-04-03 10:49:06 -07002810 /*
		 * TODO FIXME How do we handle MSDUs that have a fraglist? We
		 * should probably iterate over all the frags, cloning them
		 * along the way, and also update the prev_buf pointer.
2814 */
2815
2816 /* Move to the next */
2817 prev_buf = msdu;
Nirav Shahcbc6d722016-03-01 16:24:53 +05302818 msdu_orig = qdf_nbuf_next(msdu_orig);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002819
2820 }
2821
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05302822 /* TODO: Convert this to suitable qdf routines */
Nirav Shahcbc6d722016-03-01 16:24:53 +05302823 qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list_cloned,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002824 frag_list_sum_len);
2825
2826mpdu_stitch_done:
2827 /* Check if this buffer contains the PPDU end status for TSF */
2828 if (rx_desc->attention.last_mpdu)
2829#ifdef HELIUMPLUS
2830 rx_status->rs_tstamp.tsf =
2831 rx_desc->ppdu_end.rx_pkt_end.phy_timestamp_1_lower_32;
2832#else
2833 rx_status->rs_tstamp.tsf = rx_desc->ppdu_end.tsf_timestamp;
2834#endif
	/* All the nbufs have been linked into the ext list;
	 * now unlink the original nbuf list */
2837 if (clone_not_reqd) {
2838 msdu = head_msdu;
2839 while (msdu) {
2840 msdu_orig = msdu;
Nirav Shahcbc6d722016-03-01 16:24:53 +05302841 msdu = qdf_nbuf_next(msdu);
2842 qdf_nbuf_set_next(msdu_orig, NULL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002843 }
2844 }
2845
2846 return mpdu_buf;
2847
2848mpdu_stitch_fail:
2849 /* Free these alloced buffers and the orig buffers in non-clone case */
2850 if (!clone_not_reqd) {
2851 /* Free the head buffer */
2852 if (mpdu_buf)
Nirav Shahcbc6d722016-03-01 16:24:53 +05302853 qdf_nbuf_free(mpdu_buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002854
2855 /* Free the partial list */
2856 while (head_frag_list_cloned) {
2857 msdu = head_frag_list_cloned;
2858 head_frag_list_cloned =
Nirav Shahcbc6d722016-03-01 16:24:53 +05302859 qdf_nbuf_next_ext(head_frag_list_cloned);
2860 qdf_nbuf_free(msdu);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002861 }
2862 } else {
2863 /* Free the alloced head buffer */
2864 if (decap_format != HW_RX_DECAP_FORMAT_RAW)
2865 if (mpdu_buf)
Nirav Shahcbc6d722016-03-01 16:24:53 +05302866 qdf_nbuf_free(mpdu_buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002867
2868 /* Free the orig buffers */
2869 msdu = head_msdu;
2870 while (msdu) {
2871 msdu_orig = msdu;
Nirav Shahcbc6d722016-03-01 16:24:53 +05302872 msdu = qdf_nbuf_next(msdu);
2873 qdf_nbuf_free(msdu_orig);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002874 }
2875 }
2876
2877 return NULL;
2878}
Dhanashri Atre06c7e362017-01-10 16:06:40 -08002879#else
2880qdf_nbuf_t
2881htt_rx_restitch_mpdu_from_msdus(htt_pdev_handle pdev,
2882 qdf_nbuf_t head_msdu,
2883 struct ieee80211_rx_status *rx_status,
2884 unsigned clone_not_reqd)
2885{
2886 return NULL;
2887}
2888#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002889int16_t htt_rx_mpdu_desc_rssi_dbm(htt_pdev_handle pdev, void *mpdu_desc)
2890{
2891 /*
2892 * Currently the RSSI is provided only as a field in the
2893 * HTT_T2H_RX_IND message, rather than in each rx descriptor.
2894 */
2895 return HTT_RSSI_INVALID;
2896}
2897
2898/*
2899 * htt_rx_amsdu_pop -
2900 * global function pointer that is programmed during attach to point
 * to htt_rx_amsdu_pop_ll, htt_rx_amsdu_rx_in_order_pop_ll, or
 * htt_rx_amsdu_pop_hl, depending on the configuration.
2902 */
2903int (*htt_rx_amsdu_pop)(htt_pdev_handle pdev,
Nirav Shahcbc6d722016-03-01 16:24:53 +05302904 qdf_nbuf_t rx_ind_msg,
Himanshu Agarwal5f5e1662017-05-24 12:37:09 +05302905 qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu,
2906 uint32_t *msdu_count);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002907
2908/*
2909 * htt_rx_frag_pop -
2910 * global function pointer that is programmed during attach to point
 * to htt_rx_amsdu_pop_ll, htt_rx_amsdu_rx_in_order_pop_ll, or
 * htt_rx_frag_pop_hl, depending on the configuration.
2912 */
2913int (*htt_rx_frag_pop)(htt_pdev_handle pdev,
Nirav Shahcbc6d722016-03-01 16:24:53 +05302914 qdf_nbuf_t rx_ind_msg,
Himanshu Agarwal5f5e1662017-05-24 12:37:09 +05302915 qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu,
2916 uint32_t *msdu_count);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002917
2918int
Tiger Yu6211cd72017-12-08 15:48:46 +08002919(*htt_rx_offload_msdu_cnt)(
2920 htt_pdev_handle pdev);
2921
2922int
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002923(*htt_rx_offload_msdu_pop)(htt_pdev_handle pdev,
Nirav Shahcbc6d722016-03-01 16:24:53 +05302924 qdf_nbuf_t offload_deliver_msg,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002925 int *vdev_id,
2926 int *peer_id,
2927 int *tid,
2928 uint8_t *fw_desc,
Nirav Shahcbc6d722016-03-01 16:24:53 +05302929 qdf_nbuf_t *head_buf, qdf_nbuf_t *tail_buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002930
2931void * (*htt_rx_mpdu_desc_list_next)(htt_pdev_handle pdev,
Nirav Shahcbc6d722016-03-01 16:24:53 +05302932 qdf_nbuf_t rx_ind_msg);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002933
Yun Park16cf32a2017-04-03 10:49:06 -07002934bool (*htt_rx_mpdu_desc_retry)(htt_pdev_handle pdev, void *mpdu_desc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002935
2936uint16_t (*htt_rx_mpdu_desc_seq_num)(htt_pdev_handle pdev, void *mpdu_desc);
2937
2938void (*htt_rx_mpdu_desc_pn)(htt_pdev_handle pdev,
2939 void *mpdu_desc,
2940 union htt_rx_pn_t *pn, int pn_len_bits);
2941
Yun Park16cf32a2017-04-03 10:49:06 -07002942uint8_t (*htt_rx_mpdu_desc_tid)(htt_pdev_handle pdev, void *mpdu_desc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002943
2944bool (*htt_rx_msdu_desc_completes_mpdu)(htt_pdev_handle pdev, void *msdu_desc);
2945
2946bool (*htt_rx_msdu_first_msdu_flag)(htt_pdev_handle pdev, void *msdu_desc);
2947
2948int (*htt_rx_msdu_has_wlan_mcast_flag)(htt_pdev_handle pdev, void *msdu_desc);
2949
2950bool (*htt_rx_msdu_is_wlan_mcast)(htt_pdev_handle pdev, void *msdu_desc);
2951
2952int (*htt_rx_msdu_is_frag)(htt_pdev_handle pdev, void *msdu_desc);
2953
Nirav Shahcbc6d722016-03-01 16:24:53 +05302954void * (*htt_rx_msdu_desc_retrieve)(htt_pdev_handle pdev, qdf_nbuf_t msdu);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002955
2956bool (*htt_rx_mpdu_is_encrypted)(htt_pdev_handle pdev, void *mpdu_desc);
2957
2958bool (*htt_rx_msdu_desc_key_id)(htt_pdev_handle pdev,
2959 void *mpdu_desc, uint8_t *key_id);
2960
Nirav Shaheb017be2018-02-15 11:20:58 +05302961#ifndef CONFIG_HL_SUPPORT
Jeff Johnson0e60ce52016-10-07 12:29:43 -07002962static
Nirav Shahcbc6d722016-03-01 16:24:53 +05302963void *htt_rx_mpdu_desc_list_next_ll(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002964{
2965 int idx = pdev->rx_ring.sw_rd_idx.msdu_desc;
Nirav Shahcbc6d722016-03-01 16:24:53 +05302966 qdf_nbuf_t netbuf = pdev->rx_ring.buf.netbufs_ring[idx];
Yun Park16cf32a2017-04-03 10:49:06 -07002967
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002968 pdev->rx_ring.sw_rd_idx.msdu_desc = pdev->rx_ring.sw_rd_idx.msdu_payld;
2969 return (void *)htt_rx_desc(netbuf);
2970}
Nirav Shaheb017be2018-02-15 11:20:58 +05302971#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002972
2973bool (*htt_rx_msdu_chan_info_present)(
2974 htt_pdev_handle pdev,
2975 void *mpdu_desc);
2976
2977bool (*htt_rx_msdu_center_freq)(
2978 htt_pdev_handle pdev,
2979 struct ol_txrx_peer_t *peer,
2980 void *mpdu_desc,
2981 uint16_t *primary_chan_center_freq_mhz,
2982 uint16_t *contig_chan1_center_freq_mhz,
2983 uint16_t *contig_chan2_center_freq_mhz,
2984 uint8_t *phy_mode);
2985
Nirav Shaheb017be2018-02-15 11:20:58 +05302986#ifndef CONFIG_HL_SUPPORT
Jeff Johnson0e60ce52016-10-07 12:29:43 -07002987static void *htt_rx_in_ord_mpdu_desc_list_next_ll(htt_pdev_handle pdev,
2988 qdf_nbuf_t netbuf)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002989{
2990 return (void *)htt_rx_desc(netbuf);
2991}
Nirav Shaheb017be2018-02-15 11:20:58 +05302992#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002993
Siddarth Poddar1df1cd82016-04-27 17:32:21 +05302994#if defined(CONFIG_HL_SUPPORT)
2995
2996/**
2997 * htt_rx_mpdu_desc_list_next_hl() - provides an abstract way to obtain
2998 * the next MPDU descriptor
2999 * @pdev: the HTT instance the rx data was received on
3000 * @rx_ind_msg: the netbuf containing the rx indication message
3001 *
 * For HL, the returned value is not an mpdu_desc; it is the translated
 * hl_rx_desc located just after the hl_ind_msg. For an HL AMSDU we
 * cannot point to the payload yet: the HL rx desc size is not fixed,
 * so the descriptor could not be recovered by subtracting rx_desc_size
 * at release time. For now, keep pointing at the HL rx desc.
3008 *
3009 * Return: next abstract rx descriptor from the series of MPDUs
3010 * referenced by an rx ind msg
3011 */
3012static inline void *
3013htt_rx_mpdu_desc_list_next_hl(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
3014{
3015 void *mpdu_desc = (void *)qdf_nbuf_data(rx_ind_msg);
3016 return mpdu_desc;
3017}
3018
3019/**
3020 * htt_rx_msdu_desc_retrieve_hl() - Retrieve a previously-stored rx descriptor
3021 * from a MSDU buffer
3022 * @pdev: the HTT instance the rx data was received on
 * @msdu: the buffer containing the MSDU payload
3024 *
 * Currently, for HL AMSDU, we do not point to the payload;
 * the shift to the payload happens later, in ol_rx_deliver.
3027 *
3028 * Return: the corresponding abstract rx MSDU descriptor
3029 */
3030static inline void *
3031htt_rx_msdu_desc_retrieve_hl(htt_pdev_handle pdev, qdf_nbuf_t msdu)
3032{
3033 return qdf_nbuf_data(msdu);
3034}
3035
3036static
3037bool htt_rx_mpdu_is_encrypted_hl(htt_pdev_handle pdev, void *mpdu_desc)
3038{
3039 if (htt_rx_msdu_first_msdu_flag_hl(pdev, mpdu_desc) == true) {
3040 /* Fix Me: only for little endian */
3041 struct hl_htt_rx_desc_base *rx_desc =
3042 (struct hl_htt_rx_desc_base *)mpdu_desc;
3043
3044 return HTT_WORD_GET(*(u_int32_t *)rx_desc,
3045 HTT_HL_RX_DESC_MPDU_ENC);
3046 } else {
		/* not the first msdu: no encryption info for HL */
		qdf_print(
			"Error: encryption info requested from a non-first msdu.\n");
3050 qdf_assert(0);
3051 return false;
3052 }
3053}
3054
3055static inline bool
3056htt_rx_msdu_chan_info_present_hl(htt_pdev_handle pdev, void *mpdu_desc)
3057{
3058 if (htt_rx_msdu_first_msdu_flag_hl(pdev, mpdu_desc) == true &&
3059 HTT_WORD_GET(*(u_int32_t *)mpdu_desc,
3060 HTT_HL_RX_DESC_CHAN_INFO_PRESENT))
3061 return true;
3062
3063 return false;
3064}
3065
3066static bool
3067htt_rx_msdu_center_freq_hl(htt_pdev_handle pdev,
3068 struct ol_txrx_peer_t *peer,
3069 void *mpdu_desc,
3070 uint16_t *primary_chan_center_freq_mhz,
3071 uint16_t *contig_chan1_center_freq_mhz,
3072 uint16_t *contig_chan2_center_freq_mhz,
3073 uint8_t *phy_mode)
3074{
3075 int pn_len, index;
3076 uint32_t *chan_info;
3077
3078 index = htt_rx_msdu_is_wlan_mcast(pdev, mpdu_desc) ?
3079 txrx_sec_mcast : txrx_sec_ucast;
3080
3081 pn_len = (peer ?
3082 pdev->txrx_pdev->rx_pn[peer->security[index].sec_type].
3083 len : 0);
3084 chan_info = (uint32_t *)((uint8_t *)mpdu_desc +
3085 HTT_HL_RX_DESC_PN_OFFSET + pn_len);
3086
3087 if (htt_rx_msdu_chan_info_present_hl(pdev, mpdu_desc)) {
3088 if (primary_chan_center_freq_mhz)
3089 *primary_chan_center_freq_mhz =
3090 HTT_WORD_GET(
3091 *chan_info,
3092 HTT_CHAN_INFO_PRIMARY_CHAN_CENTER_FREQ);
3093 if (contig_chan1_center_freq_mhz)
3094 *contig_chan1_center_freq_mhz =
3095 HTT_WORD_GET(
3096 *chan_info,
3097 HTT_CHAN_INFO_CONTIG_CHAN1_CENTER_FREQ);
3098 chan_info++;
3099 if (contig_chan2_center_freq_mhz)
3100 *contig_chan2_center_freq_mhz =
3101 HTT_WORD_GET(
3102 *chan_info,
3103 HTT_CHAN_INFO_CONTIG_CHAN2_CENTER_FREQ);
3104 if (phy_mode)
3105 *phy_mode =
3106 HTT_WORD_GET(*chan_info,
3107 HTT_CHAN_INFO_PHY_MODE);
3108 return true;
3109 }
3110
3111 if (primary_chan_center_freq_mhz)
3112 *primary_chan_center_freq_mhz = 0;
3113 if (contig_chan1_center_freq_mhz)
3114 *contig_chan1_center_freq_mhz = 0;
3115 if (contig_chan2_center_freq_mhz)
3116 *contig_chan2_center_freq_mhz = 0;
3117 if (phy_mode)
3118 *phy_mode = 0;
3119 return false;
3120}
3121
3122static bool
3123htt_rx_msdu_desc_key_id_hl(htt_pdev_handle htt_pdev,
3124 void *mpdu_desc, u_int8_t *key_id)
3125{
3126 if (htt_rx_msdu_first_msdu_flag_hl(htt_pdev, mpdu_desc) == true) {
3127 /* Fix Me: only for little endian */
3128 struct hl_htt_rx_desc_base *rx_desc =
3129 (struct hl_htt_rx_desc_base *)mpdu_desc;
3130
3131 *key_id = rx_desc->key_id_oct;
3132 return true;
3133 }
3134
3135 return false;
3136}
3137
3138#endif
3139
Nirav Shaheb017be2018-02-15 11:20:58 +05303140#ifndef CONFIG_HL_SUPPORT
Jeff Johnson0e60ce52016-10-07 12:29:43 -07003141static void *htt_rx_msdu_desc_retrieve_ll(htt_pdev_handle pdev, qdf_nbuf_t msdu)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003142{
3143 return htt_rx_desc(msdu);
3144}
3145
Jeff Johnson0e60ce52016-10-07 12:29:43 -07003146static bool htt_rx_mpdu_is_encrypted_ll(htt_pdev_handle pdev, void *mpdu_desc)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003147{
3148 struct htt_host_rx_desc_base *rx_desc =
3149 (struct htt_host_rx_desc_base *)mpdu_desc;
3150
3151 return (((*((uint32_t *) &rx_desc->mpdu_start)) &
3152 RX_MPDU_START_0_ENCRYPTED_MASK) >>
3153 RX_MPDU_START_0_ENCRYPTED_LSB) ? true : false;
3154}
3155
Jeff Johnson0e60ce52016-10-07 12:29:43 -07003156static
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003157bool htt_rx_msdu_chan_info_present_ll(htt_pdev_handle pdev, void *mpdu_desc)
3158{
3159 return false;
3160}
3161
Jeff Johnson0e60ce52016-10-07 12:29:43 -07003162static bool htt_rx_msdu_center_freq_ll(htt_pdev_handle pdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003163 struct ol_txrx_peer_t *peer,
3164 void *mpdu_desc,
3165 uint16_t *primary_chan_center_freq_mhz,
3166 uint16_t *contig_chan1_center_freq_mhz,
3167 uint16_t *contig_chan2_center_freq_mhz,
3168 uint8_t *phy_mode)
3169{
3170 if (primary_chan_center_freq_mhz)
3171 *primary_chan_center_freq_mhz = 0;
3172 if (contig_chan1_center_freq_mhz)
3173 *contig_chan1_center_freq_mhz = 0;
3174 if (contig_chan2_center_freq_mhz)
3175 *contig_chan2_center_freq_mhz = 0;
3176 if (phy_mode)
3177 *phy_mode = 0;
3178 return false;
3179}
3180
Jeff Johnson0e60ce52016-10-07 12:29:43 -07003181static bool
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003182htt_rx_msdu_desc_key_id_ll(htt_pdev_handle pdev, void *mpdu_desc,
3183 uint8_t *key_id)
3184{
3185 struct htt_host_rx_desc_base *rx_desc = (struct htt_host_rx_desc_base *)
3186 mpdu_desc;
3187
3188 if (!htt_rx_msdu_first_msdu_flag_ll(pdev, mpdu_desc))
3189 return false;
3190
3191 *key_id = ((*(((uint32_t *) &rx_desc->msdu_end) + 1)) &
3192 (RX_MSDU_END_1_KEY_ID_OCT_MASK >>
3193 RX_MSDU_END_1_KEY_ID_OCT_LSB));
3194
3195 return true;
3196}
Nirav Shaheb017be2018-02-15 11:20:58 +05303197#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003198
Nirav Shahcbc6d722016-03-01 16:24:53 +05303199void htt_rx_desc_frame_free(htt_pdev_handle htt_pdev, qdf_nbuf_t msdu)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003200{
Nirav Shahcbc6d722016-03-01 16:24:53 +05303201 qdf_nbuf_free(msdu);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003202}
3203
Nirav Shahcbc6d722016-03-01 16:24:53 +05303204void htt_rx_msdu_desc_free(htt_pdev_handle htt_pdev, qdf_nbuf_t msdu)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003205{
3206 /*
3207 * The rx descriptor is in the same buffer as the rx MSDU payload,
3208 * and does not need to be freed separately.
3209 */
3210}
3211
Siddarth Poddar1df1cd82016-04-27 17:32:21 +05303212#if defined(CONFIG_HL_SUPPORT)
3213
3214/**
3215 * htt_rx_fill_ring_count() - replenish rx msdu buffer
3216 * @pdev: Handle (pointer) to HTT pdev.
3217 *
 * This function replenishes the rx buffers up to the maximum number
 * that can be kept in the ring. For HL this is a no-op, since the
 * MAC DMA rx ring is not used.
3220 *
3221 * Return: None
3222 */
3223static inline void htt_rx_fill_ring_count(htt_pdev_handle pdev)
3224{
Siddarth Poddar1df1cd82016-04-27 17:32:21 +05303225}
3226#else
3227
3228static void htt_rx_fill_ring_count(htt_pdev_handle pdev)
3229{
3230 int num_to_fill;
Yun Park16cf32a2017-04-03 10:49:06 -07003231
Siddarth Poddar1df1cd82016-04-27 17:32:21 +05303232 num_to_fill = pdev->rx_ring.fill_level - pdev->rx_ring.fill_cnt;
3233 htt_rx_ring_fill_n(pdev, num_to_fill /* okay if <= 0 */);
3234}
3235#endif
3236
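/*
 * Reference-count walkthrough for htt_rx_msdu_buff_replenish() below
 * (illustrative): refill_ref_cnt starts at 1 (see htt_rx_attach), so
 * qdf_atomic_dec_and_test() only reaches zero for the single caller
 * that is allowed to refill; a concurrent caller decrements to a
 * negative value, skips the fill, and the trailing increment restores
 * the count for the next invocation.
 */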
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003237void htt_rx_msdu_buff_replenish(htt_pdev_handle pdev)
3238{
Siddarth Poddar1df1cd82016-04-27 17:32:21 +05303239 if (qdf_atomic_dec_and_test(&pdev->rx_ring.refill_ref_cnt))
3240 htt_rx_fill_ring_count(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003241
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303242 qdf_atomic_inc(&pdev->rx_ring.refill_ref_cnt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003243}
3244
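/*
 * Refill-debt walkthrough for htt_rx_msdu_buff_in_order_replenish()
 * below (illustrative): a caller that loses the trylock race records
 * its quota in refill_debt (bounded by RX_RING_REFILL_DEBT_MAX) and
 * returns 0; the lock holder's htt_rx_ring_fill_n() may then fill more
 * than its own quota, in which case the surplus is subtracted from the
 * debt, while any shortfall is added back for a later caller to serve.
 */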
Orhan K AKYILDIZadf06102016-12-19 20:51:47 -08003245#define RX_RING_REFILL_DEBT_MAX 128
3246int htt_rx_msdu_buff_in_order_replenish(htt_pdev_handle pdev, uint32_t num)
3247{
3248 int filled = 0;
3249
3250 if (!qdf_spin_trylock_bh(&(pdev->rx_ring.refill_lock))) {
3251 if (qdf_atomic_read(&pdev->rx_ring.refill_debt)
3252 < RX_RING_REFILL_DEBT_MAX) {
3253 qdf_atomic_add(num, &pdev->rx_ring.refill_debt);
Mohit Khannac68622e2017-01-31 21:07:12 -08003254 pdev->rx_buff_debt_invoked++;
Orhan K AKYILDIZadf06102016-12-19 20:51:47 -08003255 return filled; /* 0 */
3256 }
3257 /*
3258 * else:
3259 * If we have quite a debt, then it is better for the lock
3260 * holder to finish its work and then acquire the lock and
3261 * fill our own part.
3262 */
3263 qdf_spin_lock_bh(&(pdev->rx_ring.refill_lock));
3264 }
Mohit Khannac68622e2017-01-31 21:07:12 -08003265 pdev->rx_buff_fill_n_invoked++;
Sravan Kumar Kairam8ea5ee22017-09-08 16:28:09 +05303266
Orhan K AKYILDIZadf06102016-12-19 20:51:47 -08003267 filled = htt_rx_ring_fill_n(pdev, num);
Mohit Khannac68622e2017-01-31 21:07:12 -08003268
Mohit Khannac68622e2017-01-31 21:07:12 -08003269 if (filled > num) {
3270 /* we served ourselves and some other debt */
3271 /* sub is safer than = 0 */
3272 qdf_atomic_sub(filled - num, &pdev->rx_ring.refill_debt);
Govind Singh64228922017-09-12 22:22:09 +05303273 } else {
3274 qdf_atomic_add(num - filled, &pdev->rx_ring.refill_debt);
Mohit Khannac68622e2017-01-31 21:07:12 -08003275 }
Govind Singh64228922017-09-12 22:22:09 +05303276 qdf_spin_unlock_bh(&(pdev->rx_ring.refill_lock));
Mohit Khannac68622e2017-01-31 21:07:12 -08003277
Orhan K AKYILDIZadf06102016-12-19 20:51:47 -08003278 return filled;
3279}
3280
Nirav Shaheb017be2018-02-15 11:20:58 +05303281#ifndef CONFIG_HL_SUPPORT
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003282#define AR600P_ASSEMBLE_HW_RATECODE(_rate, _nss, _pream) \
3283 (((_pream) << 6) | ((_nss) << 4) | (_rate))
3284
3285enum AR600P_HW_RATECODE_PREAM_TYPE {
3286 AR600P_HW_RATECODE_PREAM_OFDM,
3287 AR600P_HW_RATECODE_PREAM_CCK,
3288 AR600P_HW_RATECODE_PREAM_HT,
3289 AR600P_HW_RATECODE_PREAM_VHT,
3290};
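/*
 * Example (illustrative): AR600P_ASSEMBLE_HW_RATECODE(5, 1,
 * AR600P_HW_RATECODE_PREAM_VHT) packs preamble type 3 into bits [7:6],
 * NSS 1 into bits [5:4] and rate index 5 into bits [3:0], yielding 0xd5.
 */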
3291
3292/*--- RX In Order Hash Code --------------------------------------------------*/
3293
3294/* Initializes the circular linked list */
3295static inline void htt_list_init(struct htt_list_node *head)
3296{
3297 head->prev = head;
3298 head->next = head;
3299}
3300
3301/* Adds entry to the end of the linked list */
3302static inline void htt_list_add_tail(struct htt_list_node *head,
3303 struct htt_list_node *node)
3304{
3305 head->prev->next = node;
3306 node->prev = head->prev;
3307 node->next = head;
3308 head->prev = node;
3309}
3310
3311/* Removes the entry corresponding to the input node from the linked list */
3312static inline void htt_list_remove(struct htt_list_node *node)
3313{
3314 node->prev->next = node->next;
3315 node->next->prev = node->prev;
3316}
3317
3318/* Helper macro to iterate through the linked list */
3319#define HTT_LIST_ITER_FWD(iter, head) for (iter = (head)->next; \
3320 (iter) != (head); \
3321 (iter) = (iter)->next) \
3322
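/*
 * Usage sketch (illustrative; mirrors htt_rx_hash_list_lookup() below):
 * the list nodes are embedded in struct htt_rx_hash_entry, so the entry
 * is recovered from the iterator via the stored listnode offset:
 *
 *	struct htt_list_node *iter;
 *
 *	HTT_LIST_ITER_FWD(iter, &pdev->rx_ring.hash_table[i]->listhead) {
 *		struct htt_rx_hash_entry *entry =
 *			(struct htt_rx_hash_entry *)((char *)iter -
 *				pdev->rx_ring.listnode_offset);
 *		...
 *	}
 */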
3323#ifdef RX_HASH_DEBUG
3324/* Hash cookie related macros */
3325#define HTT_RX_HASH_COOKIE 0xDEED
3326
3327#define HTT_RX_HASH_COOKIE_SET(hash_element) \
3328 ((hash_element)->cookie = HTT_RX_HASH_COOKIE)
3329
3330#define HTT_RX_HASH_COOKIE_CHECK(hash_element) \
3331 HTT_ASSERT_ALWAYS((hash_element)->cookie == HTT_RX_HASH_COOKIE)
3332
3333/* Hash count related macros */
3334#define HTT_RX_HASH_COUNT_INCR(hash_bucket) \
Nirav Shah95919702016-04-25 10:55:20 +05303335 ((hash_bucket)->count++)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003336
3337#define HTT_RX_HASH_COUNT_DECR(hash_bucket) \
Nirav Shah95919702016-04-25 10:55:20 +05303338 ((hash_bucket)->count--)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003339
Nirav Shah95919702016-04-25 10:55:20 +05303340#define HTT_RX_HASH_COUNT_RESET(hash_bucket) ((hash_bucket)->count = 0)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003341
3342#define HTT_RX_HASH_COUNT_PRINT(hash_bucket) \
Nirav Shah95919702016-04-25 10:55:20 +05303343 RX_HASH_LOG(qdf_print(" count %d\n", (hash_bucket)->count))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003344#else /* RX_HASH_DEBUG */
3345/* Hash cookie related macros */
3346#define HTT_RX_HASH_COOKIE_SET(hash_element) /* no-op */
3347#define HTT_RX_HASH_COOKIE_CHECK(hash_element) /* no-op */
3348/* Hash count related macros */
3349#define HTT_RX_HASH_COUNT_INCR(hash_bucket) /* no-op */
3350#define HTT_RX_HASH_COUNT_DECR(hash_bucket) /* no-op */
3351#define HTT_RX_HASH_COUNT_PRINT(hash_bucket) /* no-op */
3352#define HTT_RX_HASH_COUNT_RESET(hash_bucket) /* no-op */
3353#endif /* RX_HASH_DEBUG */
3354
Yun Park16cf32a2017-04-03 10:49:06 -07003355/*
3356 * Inserts the given "physical address - network buffer" pair into the
3357 * hash table for the given pdev. This function will do the following:
3358 * 1. Determine which bucket to insert the pair into
3359 * 2. First try to allocate the hash entry for this pair from the pre-allocated
3360 * entries list
3361 * 3. If there are no more entries in the pre-allocated entries list, allocate
3362 * the hash entry from the hash memory pool
 * Note: this function serializes access internally via rx_ring.rx_hash_lock
3364 * Returns 0 - success, 1 - failure
3365 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003366int
Orhan K AKYILDIZ4c878ed2017-03-23 13:12:46 -07003367htt_rx_hash_list_insert(struct htt_pdev_t *pdev,
3368 qdf_dma_addr_t paddr,
Nirav Shahcbc6d722016-03-01 16:24:53 +05303369 qdf_nbuf_t netbuf)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003370{
3371 int i;
Orhan K AKYILDIZb257bff2016-08-11 10:06:15 -07003372 int rc = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003373 struct htt_rx_hash_entry *hash_element = NULL;
3374
Orhan K AKYILDIZb257bff2016-08-11 10:06:15 -07003375 qdf_spin_lock_bh(&(pdev->rx_ring.rx_hash_lock));
3376
Orhan K AKYILDIZ4c878ed2017-03-23 13:12:46 -07003377 /* get rid of the marking bits if they are available */
3378 paddr = htt_paddr_trim_to_37(paddr);
3379
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003380 i = RX_HASH_FUNCTION(paddr);
3381
3382 /* Check if there are any entries in the pre-allocated free list */
Nirav Shah95919702016-04-25 10:55:20 +05303383 if (pdev->rx_ring.hash_table[i]->freepool.next !=
3384 &pdev->rx_ring.hash_table[i]->freepool) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003385
3386 hash_element =
3387 (struct htt_rx_hash_entry *)(
3388 (char *)
Nirav Shah95919702016-04-25 10:55:20 +05303389 pdev->rx_ring.hash_table[i]->freepool.next -
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003390 pdev->rx_ring.listnode_offset);
Anurag Chouhanc5548422016-02-24 18:33:27 +05303391 if (qdf_unlikely(NULL == hash_element)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003392 HTT_ASSERT_ALWAYS(0);
Orhan K AKYILDIZb257bff2016-08-11 10:06:15 -07003393 rc = 1;
3394 goto hli_end;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003395 }
3396
Nirav Shah95919702016-04-25 10:55:20 +05303397 htt_list_remove(pdev->rx_ring.hash_table[i]->freepool.next);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003398 } else {
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303399 hash_element = qdf_mem_malloc(sizeof(struct htt_rx_hash_entry));
Anurag Chouhanc5548422016-02-24 18:33:27 +05303400 if (qdf_unlikely(NULL == hash_element)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003401 HTT_ASSERT_ALWAYS(0);
Orhan K AKYILDIZb257bff2016-08-11 10:06:15 -07003402 rc = 1;
3403 goto hli_end;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003404 }
3405 hash_element->fromlist = 0;
3406 }
3407
3408 hash_element->netbuf = netbuf;
3409 hash_element->paddr = paddr;
3410 HTT_RX_HASH_COOKIE_SET(hash_element);
3411
Nirav Shah95919702016-04-25 10:55:20 +05303412 htt_list_add_tail(&pdev->rx_ring.hash_table[i]->listhead,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003413 &hash_element->listnode);
3414
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003415 RX_HASH_LOG(qdf_print("rx hash: %s: paddr 0x%x netbuf %pK bucket %d\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003416 __func__, paddr, netbuf, (int)i));
3417
3418 HTT_RX_HASH_COUNT_INCR(pdev->rx_ring.hash_table[i]);
3419 HTT_RX_HASH_COUNT_PRINT(pdev->rx_ring.hash_table[i]);
3420
Orhan K AKYILDIZb257bff2016-08-11 10:06:15 -07003421hli_end:
3422 qdf_spin_unlock_bh(&(pdev->rx_ring.rx_hash_lock));
3423 return rc;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003424}
Nirav Shaheb017be2018-02-15 11:20:58 +05303425#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003426
Nirav Shaheb017be2018-02-15 11:20:58 +05303427#ifndef CONFIG_HL_SUPPORT
Orhan K AKYILDIZ4c878ed2017-03-23 13:12:46 -07003428/*
3429 * Given a physical address this function will find the corresponding network
3430 * buffer from the hash table.
3431 * paddr is already stripped off of higher marking bits.
3432 */
3433qdf_nbuf_t htt_rx_hash_list_lookup(struct htt_pdev_t *pdev,
3434 qdf_dma_addr_t paddr)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003435{
3436 uint32_t i;
3437 struct htt_list_node *list_iter = NULL;
Nirav Shahcbc6d722016-03-01 16:24:53 +05303438 qdf_nbuf_t netbuf = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003439 struct htt_rx_hash_entry *hash_entry;
3440
Orhan K AKYILDIZb257bff2016-08-11 10:06:15 -07003441 qdf_spin_lock_bh(&(pdev->rx_ring.rx_hash_lock));
3442
Govind Singhd062a9e2017-09-05 13:04:17 +05303443 if (!pdev->rx_ring.hash_table) {
3444 qdf_spin_unlock_bh(&(pdev->rx_ring.rx_hash_lock));
Govind Singh9ad84142017-05-18 15:24:16 +05303445 return NULL;
Govind Singhd062a9e2017-09-05 13:04:17 +05303446 }
Govind Singh9ad84142017-05-18 15:24:16 +05303447
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003448 i = RX_HASH_FUNCTION(paddr);
3449
Nirav Shah95919702016-04-25 10:55:20 +05303450 HTT_LIST_ITER_FWD(list_iter, &pdev->rx_ring.hash_table[i]->listhead) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003451 hash_entry = (struct htt_rx_hash_entry *)
3452 ((char *)list_iter -
3453 pdev->rx_ring.listnode_offset);
3454
3455 HTT_RX_HASH_COOKIE_CHECK(hash_entry);
3456
3457 if (hash_entry->paddr == paddr) {
3458 /* Found the entry corresponding to paddr */
3459 netbuf = hash_entry->netbuf;
Govind Singh9ad84142017-05-18 15:24:16 +05303460 /* set netbuf to NULL to trace if freed entry
3461 * is getting unmapped in hash deinit.
3462 */
3463 hash_entry->netbuf = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003464 htt_list_remove(&hash_entry->listnode);
3465 HTT_RX_HASH_COUNT_DECR(pdev->rx_ring.hash_table[i]);
Yun Park16cf32a2017-04-03 10:49:06 -07003466 /*
3467 * if the rx entry is from the pre-allocated list,
3468 * return it
3469 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003470 if (hash_entry->fromlist)
Yun Park16cf32a2017-04-03 10:49:06 -07003471 htt_list_add_tail(
3472 &pdev->rx_ring.hash_table[i]->freepool,
3473 &hash_entry->listnode);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003474 else
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303475 qdf_mem_free(hash_entry);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003476
Govind Singhd79e1342015-11-03 16:20:02 +05303477 htt_rx_dbg_rxbuf_reset(pdev, netbuf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003478 break;
3479 }
3480 }
3481
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003482 RX_HASH_LOG(qdf_print("rx hash: %s: paddr 0x%x, netbuf %pK, bucket %d\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003483 __func__, paddr, netbuf, (int)i));
3484 HTT_RX_HASH_COUNT_PRINT(pdev->rx_ring.hash_table[i]);
3485
Orhan K AKYILDIZb257bff2016-08-11 10:06:15 -07003486 qdf_spin_unlock_bh(&(pdev->rx_ring.rx_hash_lock));
3487
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003488 if (netbuf == NULL) {
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003489 qdf_print("rx hash: %s: no entry found for %pK!\n",
Orhan K AKYILDIZ4c878ed2017-03-23 13:12:46 -07003490 __func__, (void *)paddr);
Orhan K AKYILDIZ57a78ea2017-07-28 14:45:10 -07003491 if (cds_is_self_recovery_enabled())
3492 cds_trigger_recovery(QDF_RX_HASH_NO_ENTRY_FOUND);
3493 else
3494 HTT_ASSERT_ALWAYS(0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003495 }
3496
3497 return netbuf;
3498}
3499
Yun Park16cf32a2017-04-03 10:49:06 -07003500/*
3501 * Initialization function of the rx buffer hash table. This function will
3502 * allocate a hash table of a certain pre-determined size and initialize all
3503 * the elements
3504 */
Jeff Johnson0e60ce52016-10-07 12:29:43 -07003505static int htt_rx_hash_init(struct htt_pdev_t *pdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003506{
3507 int i, j;
Orhan K AKYILDIZb257bff2016-08-11 10:06:15 -07003508 int rc = 0;
Houston Hoffmaneb2f80b2016-12-14 11:09:34 -08003509 void *allocation;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003510
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05303511 HTT_ASSERT2(QDF_IS_PWR2(RX_NUM_HASH_BUCKETS));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003512
Nirav Shah95919702016-04-25 10:55:20 +05303513 /* hash table is array of bucket pointers */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003514 pdev->rx_ring.hash_table =
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303515 qdf_mem_malloc(RX_NUM_HASH_BUCKETS *
Nirav Shah95919702016-04-25 10:55:20 +05303516 sizeof(struct htt_rx_hash_bucket *));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003517
3518 if (NULL == pdev->rx_ring.hash_table) {
Anurag Chouhan6d760662016-02-20 16:05:43 +05303519 qdf_print("rx hash table allocation failed!\n");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003520 return 1;
3521 }
3522
Orhan K AKYILDIZb257bff2016-08-11 10:06:15 -07003523 qdf_spinlock_create(&(pdev->rx_ring.rx_hash_lock));
3524 qdf_spin_lock_bh(&(pdev->rx_ring.rx_hash_lock));
3525
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003526 for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) {
Nirav Shah95919702016-04-25 10:55:20 +05303527
Houston Hoffmaneb2f80b2016-12-14 11:09:34 -08003528 qdf_spin_unlock_bh(&(pdev->rx_ring.rx_hash_lock));
Nirav Shah95919702016-04-25 10:55:20 +05303529 /* pre-allocate bucket and pool of entries for this bucket */
Houston Hoffmaneb2f80b2016-12-14 11:09:34 -08003530 allocation = qdf_mem_malloc((sizeof(struct htt_rx_hash_bucket) +
Nirav Shah95919702016-04-25 10:55:20 +05303531 (RX_ENTRIES_SIZE * sizeof(struct htt_rx_hash_entry))));
Houston Hoffmaneb2f80b2016-12-14 11:09:34 -08003532 qdf_spin_lock_bh(&(pdev->rx_ring.rx_hash_lock));
		pdev->rx_ring.hash_table[i] = allocation;
		if (qdf_unlikely(NULL == allocation)) {
			qdf_print("rx hash bucket %d alloc failed\n", (int)i);
			/* free the buckets allocated so far, then the table */
			while (i) {
				i--;
				qdf_mem_free(pdev->rx_ring.hash_table[i]);
			}
			qdf_mem_free(pdev->rx_ring.hash_table);
			pdev->rx_ring.hash_table = NULL;
			rc = 1;
			goto hi_end;
		}
3534
Nirav Shah95919702016-04-25 10:55:20 +05303535
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003536 HTT_RX_HASH_COUNT_RESET(pdev->rx_ring.hash_table[i]);
3537
3538 /* initialize the hash table buckets */
Nirav Shah95919702016-04-25 10:55:20 +05303539 htt_list_init(&pdev->rx_ring.hash_table[i]->listhead);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003540
3541 /* initialize the hash table free pool per bucket */
Nirav Shah95919702016-04-25 10:55:20 +05303542 htt_list_init(&pdev->rx_ring.hash_table[i]->freepool);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003543
3544 /* pre-allocate a pool of entries for this bucket */
Nirav Shah95919702016-04-25 10:55:20 +05303545 pdev->rx_ring.hash_table[i]->entries =
3546 (struct htt_rx_hash_entry *)
3547 ((uint8_t *)pdev->rx_ring.hash_table[i] +
3548 sizeof(struct htt_rx_hash_bucket));
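		/*
		 * Layout of the single allocation above (illustrative):
		 *
		 *   [struct htt_rx_hash_bucket][entries[0..RX_ENTRIES_SIZE-1]]
		 *
		 * so the pre-allocated entry pool sits immediately after its
		 * bucket and is freed together with it.
		 */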
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003549
Nirav Shah95919702016-04-25 10:55:20 +05303550 if (NULL == pdev->rx_ring.hash_table[i]->entries) {
Anurag Chouhan6d760662016-02-20 16:05:43 +05303551 qdf_print("rx hash bucket %d entries alloc failed\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003552 (int)i);
3553 while (i) {
3554 i--;
Nirav Shah95919702016-04-25 10:55:20 +05303555 qdf_mem_free(pdev->rx_ring.hash_table[i]);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003556 }
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303557 qdf_mem_free(pdev->rx_ring.hash_table);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003558 pdev->rx_ring.hash_table = NULL;
Orhan K AKYILDIZb257bff2016-08-11 10:06:15 -07003559 rc = 1;
3560 goto hi_end;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003561 }
3562
3563 /* initialize the free list with pre-allocated entries */
3564 for (j = 0; j < RX_ENTRIES_SIZE; j++) {
Nirav Shah95919702016-04-25 10:55:20 +05303565 pdev->rx_ring.hash_table[i]->entries[j].fromlist = 1;
Yun Park16cf32a2017-04-03 10:49:06 -07003566 htt_list_add_tail(
3567 &pdev->rx_ring.hash_table[i]->freepool,
3568 &pdev->rx_ring.hash_table[i]->entries[j].
3569 listnode);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003570 }
3571 }
3572
3573 pdev->rx_ring.listnode_offset =
Anurag Chouhan6d760662016-02-20 16:05:43 +05303574 qdf_offsetof(struct htt_rx_hash_entry, listnode);
Orhan K AKYILDIZb257bff2016-08-11 10:06:15 -07003575hi_end:
3576 qdf_spin_unlock_bh(&(pdev->rx_ring.rx_hash_lock));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003577
Orhan K AKYILDIZb257bff2016-08-11 10:06:15 -07003578 return rc;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003579}
Nirav Shaheb017be2018-02-15 11:20:58 +05303580#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003581
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003582/*--- RX In Order Hash Code --------------------------------------------------*/
3583
/* htt_rx_attach is placed at the end of the file
 * to avoid forward declarations of its LL/HL helpers
 */
Siddarth Poddar1df1cd82016-04-27 17:32:21 +05303587
3588#if defined(CONFIG_HL_SUPPORT)
3589
3590int htt_rx_attach(struct htt_pdev_t *pdev)
3591{
3592 pdev->rx_ring.size = HTT_RX_RING_SIZE_MIN;
3593 HTT_ASSERT2(IS_PWR2(pdev->rx_ring.size));
3594 pdev->rx_ring.size_mask = pdev->rx_ring.size - 1;
3595 /* host can force ring base address if it wish to do so */
3596 pdev->rx_ring.base_paddr = 0;
3597 htt_rx_amsdu_pop = htt_rx_amsdu_pop_hl;
3598 htt_rx_frag_pop = htt_rx_frag_pop_hl;
Tiger Yu6211cd72017-12-08 15:48:46 +08003599 htt_rx_offload_msdu_cnt = htt_rx_offload_msdu_cnt_hl;
Siddarth Poddar1df1cd82016-04-27 17:32:21 +05303600 htt_rx_offload_msdu_pop = htt_rx_offload_msdu_pop_hl;
3601 htt_rx_mpdu_desc_list_next = htt_rx_mpdu_desc_list_next_hl;
3602 htt_rx_mpdu_desc_retry = htt_rx_mpdu_desc_retry_hl;
3603 htt_rx_mpdu_desc_seq_num = htt_rx_mpdu_desc_seq_num_hl;
3604 htt_rx_mpdu_desc_pn = htt_rx_mpdu_desc_pn_hl;
3605 htt_rx_mpdu_desc_tid = htt_rx_mpdu_desc_tid_hl;
3606 htt_rx_msdu_desc_completes_mpdu = htt_rx_msdu_desc_completes_mpdu_hl;
3607 htt_rx_msdu_first_msdu_flag = htt_rx_msdu_first_msdu_flag_hl;
3608 htt_rx_msdu_has_wlan_mcast_flag = htt_rx_msdu_has_wlan_mcast_flag_hl;
3609 htt_rx_msdu_is_wlan_mcast = htt_rx_msdu_is_wlan_mcast_hl;
3610 htt_rx_msdu_is_frag = htt_rx_msdu_is_frag_hl;
3611 htt_rx_msdu_desc_retrieve = htt_rx_msdu_desc_retrieve_hl;
3612 htt_rx_mpdu_is_encrypted = htt_rx_mpdu_is_encrypted_hl;
3613 htt_rx_msdu_desc_key_id = htt_rx_msdu_desc_key_id_hl;
3614 htt_rx_msdu_chan_info_present = htt_rx_msdu_chan_info_present_hl;
3615 htt_rx_msdu_center_freq = htt_rx_msdu_center_freq_hl;
3616
3617 /*
3618 * HL case, the rx descriptor can be different sizes for
3619 * different sub-types of RX_IND messages, e.g. for the
3620 * initial vs. interior vs. final MSDUs within a PPDU.
3621 * The size of each RX_IND message's rx desc is read from
3622 * a field within the RX_IND message itself.
3623 * In the meantime, until the rx_desc_size_hl variable is
3624 * set to its real value based on the RX_IND message,
3625 * initialize it to a reasonable value (zero).
3626 */
3627 pdev->rx_desc_size_hl = 0;
3628 return 0; /* success */
3629}
3630
3631#else
3632
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003633int htt_rx_attach(struct htt_pdev_t *pdev)
3634{
Anurag Chouhan6d760662016-02-20 16:05:43 +05303635 qdf_dma_addr_t paddr;
Orhan K AKYILDIZa652a602017-01-29 13:07:42 -08003636 uint32_t ring_elem_size = sizeof(target_paddr_t);
Houston Hoffman43d47fa2016-02-24 16:34:30 -08003637
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003638 pdev->rx_ring.size = htt_rx_ring_size(pdev);
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05303639 HTT_ASSERT2(QDF_IS_PWR2(pdev->rx_ring.size));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003640 pdev->rx_ring.size_mask = pdev->rx_ring.size - 1;
3641
3642 /*
Yun Park16cf32a2017-04-03 10:49:06 -07003643 * Set the initial value for the level to which the rx ring
3644 * should be filled, based on the max throughput and the worst
3645 * likely latency for the host to fill the rx ring.
3646 * In theory, this fill level can be dynamically adjusted from
3647 * the initial value set here to reflect the actual host latency
3648 * rather than a conservative assumption.
3649 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003650 pdev->rx_ring.fill_level = htt_rx_ring_fill_level(pdev);
3651
3652 if (pdev->cfg.is_full_reorder_offload) {
3653 if (htt_rx_hash_init(pdev))
3654 goto fail1;
3655
3656 /* allocate the target index */
3657 pdev->rx_ring.target_idx.vaddr =
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303658 qdf_mem_alloc_consistent(pdev->osdev, pdev->osdev->dev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003659 sizeof(uint32_t),
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303660 &paddr);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003661
3662 if (!pdev->rx_ring.target_idx.vaddr)
Poddar, Siddarth76ac7282017-03-27 15:59:42 +05303663 goto fail2;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003664
3665 pdev->rx_ring.target_idx.paddr = paddr;
3666 *pdev->rx_ring.target_idx.vaddr = 0;
3667 } else {
3668 pdev->rx_ring.buf.netbufs_ring =
Nirav Shahcbc6d722016-03-01 16:24:53 +05303669 qdf_mem_malloc(pdev->rx_ring.size * sizeof(qdf_nbuf_t));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003670 if (!pdev->rx_ring.buf.netbufs_ring)
3671 goto fail1;
3672
3673 pdev->rx_ring.sw_rd_idx.msdu_payld = 0;
3674 pdev->rx_ring.sw_rd_idx.msdu_desc = 0;
3675 }
3676
3677 pdev->rx_ring.buf.paddrs_ring =
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303678 qdf_mem_alloc_consistent(
3679 pdev->osdev, pdev->osdev->dev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003680 pdev->rx_ring.size * ring_elem_size,
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303681 &paddr);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003682 if (!pdev->rx_ring.buf.paddrs_ring)
Poddar, Siddarth76ac7282017-03-27 15:59:42 +05303683 goto fail3;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003684
3685 pdev->rx_ring.base_paddr = paddr;
3686 pdev->rx_ring.alloc_idx.vaddr =
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303687 qdf_mem_alloc_consistent(
3688 pdev->osdev, pdev->osdev->dev,
3689 sizeof(uint32_t), &paddr);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003690
3691 if (!pdev->rx_ring.alloc_idx.vaddr)
Poddar, Siddarth76ac7282017-03-27 15:59:42 +05303692 goto fail4;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003693
3694 pdev->rx_ring.alloc_idx.paddr = paddr;
3695 *pdev->rx_ring.alloc_idx.vaddr = 0;
3696
3697 /*
Yun Park16cf32a2017-04-03 10:49:06 -07003698 * Initialize the Rx refill reference counter to be one so that
3699 * only one thread is allowed to refill the Rx ring.
3700 */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303701 qdf_atomic_init(&pdev->rx_ring.refill_ref_cnt);
3702 qdf_atomic_inc(&pdev->rx_ring.refill_ref_cnt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003703
Orhan K AKYILDIZadf06102016-12-19 20:51:47 -08003704 /* Initialize the refill_lock and debt (for rx-parallelization) */
3705 qdf_spinlock_create(&(pdev->rx_ring.refill_lock));
3706 qdf_atomic_init(&pdev->rx_ring.refill_debt);
3707
3708
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003709 /* Initialize the Rx refill retry timer */
Anurag Chouhan754fbd82016-02-19 17:00:08 +05303710 qdf_timer_init(pdev->osdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003711 &pdev->rx_ring.refill_retry_timer,
3712 htt_rx_ring_refill_retry, (void *)pdev,
Anurag Chouhan6d760662016-02-20 16:05:43 +05303713 QDF_TIMER_TYPE_SW);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003714
3715 pdev->rx_ring.fill_cnt = 0;
Sravan Kumar Kairam8ea5ee22017-09-08 16:28:09 +05303716 pdev->rx_ring.pop_fail_cnt = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003717#ifdef DEBUG_DMA_DONE
3718 pdev->rx_ring.dbg_ring_idx = 0;
3719 pdev->rx_ring.dbg_refill_cnt = 0;
3720 pdev->rx_ring.dbg_sync_success = 0;
3721#endif
3722#ifdef HTT_RX_RESTORE
3723 pdev->rx_ring.rx_reset = 0;
3724 pdev->rx_ring.htt_rx_restore = 0;
3725#endif
Govind Singhd79e1342015-11-03 16:20:02 +05303726 htt_rx_dbg_rxbuf_init(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003727 htt_rx_ring_fill_n(pdev, pdev->rx_ring.fill_level);
3728
3729 if (pdev->cfg.is_full_reorder_offload) {
Poddar, Siddarth16264472017-03-14 19:39:43 +05303730 QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
3731 "HTT: full reorder offload enabled");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003732 htt_rx_amsdu_pop = htt_rx_amsdu_rx_in_order_pop_ll;
3733 htt_rx_frag_pop = htt_rx_amsdu_rx_in_order_pop_ll;
3734 htt_rx_mpdu_desc_list_next =
3735 htt_rx_in_ord_mpdu_desc_list_next_ll;
3736 } else {
3737 htt_rx_amsdu_pop = htt_rx_amsdu_pop_ll;
3738 htt_rx_frag_pop = htt_rx_amsdu_pop_ll;
3739 htt_rx_mpdu_desc_list_next = htt_rx_mpdu_desc_list_next_ll;
3740 }
3741
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07003742 if (cds_get_conparam() == QDF_GLOBAL_MONITOR_MODE)
3743 htt_rx_amsdu_pop = htt_rx_mon_amsdu_rx_in_order_pop_ll;
3744
Tiger Yu6211cd72017-12-08 15:48:46 +08003745 htt_rx_offload_msdu_cnt = htt_rx_offload_msdu_cnt_ll;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003746 htt_rx_offload_msdu_pop = htt_rx_offload_msdu_pop_ll;
Houston Hoffman43d47fa2016-02-24 16:34:30 -08003747 htt_rx_mpdu_desc_retry = htt_rx_mpdu_desc_retry_ll;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003748 htt_rx_mpdu_desc_seq_num = htt_rx_mpdu_desc_seq_num_ll;
3749 htt_rx_mpdu_desc_pn = htt_rx_mpdu_desc_pn_ll;
Houston Hoffman43d47fa2016-02-24 16:34:30 -08003750 htt_rx_mpdu_desc_tid = htt_rx_mpdu_desc_tid_ll;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003751 htt_rx_msdu_desc_completes_mpdu = htt_rx_msdu_desc_completes_mpdu_ll;
3752 htt_rx_msdu_first_msdu_flag = htt_rx_msdu_first_msdu_flag_ll;
3753 htt_rx_msdu_has_wlan_mcast_flag = htt_rx_msdu_has_wlan_mcast_flag_ll;
3754 htt_rx_msdu_is_wlan_mcast = htt_rx_msdu_is_wlan_mcast_ll;
3755 htt_rx_msdu_is_frag = htt_rx_msdu_is_frag_ll;
3756 htt_rx_msdu_desc_retrieve = htt_rx_msdu_desc_retrieve_ll;
3757 htt_rx_mpdu_is_encrypted = htt_rx_mpdu_is_encrypted_ll;
3758 htt_rx_msdu_desc_key_id = htt_rx_msdu_desc_key_id_ll;
3759 htt_rx_msdu_chan_info_present = htt_rx_msdu_chan_info_present_ll;
3760 htt_rx_msdu_center_freq = htt_rx_msdu_center_freq_ll;
3761
3762 return 0; /* success */
3763
Poddar, Siddarth76ac7282017-03-27 15:59:42 +05303764fail4:
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05303765 qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
Orhan K AKYILDIZa652a602017-01-29 13:07:42 -08003766 pdev->rx_ring.size * sizeof(target_paddr_t),
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003767 pdev->rx_ring.buf.paddrs_ring,
3768 pdev->rx_ring.base_paddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05303769 qdf_get_dma_mem_context((&pdev->rx_ring.buf),
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003770 memctx));
3771
Poddar, Siddarth76ac7282017-03-27 15:59:42 +05303772fail3:
3773 if (pdev->cfg.is_full_reorder_offload)
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05303774 qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003775 sizeof(uint32_t),
3776 pdev->rx_ring.target_idx.vaddr,
3777 pdev->rx_ring.target_idx.paddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05303778 qdf_get_dma_mem_context((&pdev->
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003779 rx_ring.
3780 target_idx),
3781 memctx));
Poddar, Siddarth76ac7282017-03-27 15:59:42 +05303782 else
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303783 qdf_mem_free(pdev->rx_ring.buf.netbufs_ring);
Poddar, Siddarth76ac7282017-03-27 15:59:42 +05303784
3785fail2:
3786 if (pdev->cfg.is_full_reorder_offload)
3787 htt_rx_hash_deinit(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003788
3789fail1:
3790 return 1; /* failure */
3791}
Siddarth Poddar1df1cd82016-04-27 17:32:21 +05303792#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003793
3794#ifdef IPA_OFFLOAD
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08003795#ifdef QCA_WIFI_3_0
3796/**
3797 * htt_rx_ipa_uc_alloc_wdi2_rsc() - Allocate WDI2.0 resources
3798 * @pdev: htt context
3799 * @rx_ind_ring_elements: rx ring elements
3800 *
3801 * Return: 0 success
3802 */
Jeff Johnson0e60ce52016-10-07 12:29:43 -07003803static int htt_rx_ipa_uc_alloc_wdi2_rsc(struct htt_pdev_t *pdev,
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08003804 unsigned int rx_ind_ring_elements)
3805{
	/*
	 * Allocate the RX2 indication ring.
	 * Each RX2 IND ring element carries:
	 *   4 bytes: pointer
	 *   2 bytes: VDEV ID
	 *   2 bytes: length
	 *
	 * The ring size below is computed in bytes.
	 */
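	/*
	 * Sketch of one ring element under the 4+2+2 byte layout noted
	 * above (illustrative field names, not a definitive WDI2.0
	 * definition):
	 *
	 *	buf_paddr : 4 bytes  -- rx packet pointer
	 *	vdev_id   : 2 bytes
	 *	length    : 2 bytes
	 *
	 * i.e. 8 bytes per element, which matches the
	 * sizeof(target_paddr_t) used for the size computation below,
	 * assuming an 8-byte target_paddr_t on this target.
	 */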
Orhan K AKYILDIZa652a602017-01-29 13:07:42 -08003815 pdev->ipa_uc_rx_rsc.rx2_ind_ring_size =
3816 rx_ind_ring_elements * sizeof(target_paddr_t);
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08003817 pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr =
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303818 qdf_mem_alloc_consistent(
3819 pdev->osdev, pdev->osdev->dev,
Orhan K AKYILDIZa652a602017-01-29 13:07:42 -08003820 pdev->ipa_uc_rx_rsc.rx2_ind_ring_size,
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303821 &pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr);
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08003822 if (!pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr) {
Anurag Chouhan6d760662016-02-20 16:05:43 +05303823 qdf_print("%s: RX IND RING alloc fail", __func__);
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08003824 return -ENOBUFS;
3825 }
3826
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303827 qdf_mem_zero(pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
Orhan K AKYILDIZa652a602017-01-29 13:07:42 -08003828 pdev->ipa_uc_rx_rsc.rx2_ind_ring_size);
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08003829
3830 /* Allocate RX process done index */
3831 pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr =
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303832 qdf_mem_alloc_consistent(
3833 pdev->osdev, pdev->osdev->dev, 4,
3834 &pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr);
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08003835 if (!pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr) {
Anurag Chouhan6d760662016-02-20 16:05:43 +05303836 qdf_print("%s: RX PROC DONE IND alloc fail", __func__);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303837 qdf_mem_free_consistent(
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05303838 pdev->osdev, pdev->osdev->dev,
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08003839 pdev->ipa_uc_rx_rsc.rx2_ind_ring_size,
3840 pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
3841 pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05303842 qdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08003843 rx2_ind_ring_base),
3844 memctx));
3845 return -ENOBUFS;
3846 }
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303847 qdf_mem_zero(pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr, 4);
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08003848 return 0;
3849}
3850#else
Jeff Johnson0e60ce52016-10-07 12:29:43 -07003851static int htt_rx_ipa_uc_alloc_wdi2_rsc(struct htt_pdev_t *pdev,
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08003852 unsigned int rx_ind_ring_elements)
3853{
3854 return 0;
3855}
3856#endif
3857
Leo Chang8e073612015-11-13 10:55:34 -08003858/**
3859 * htt_rx_ipa_uc_attach() - attach htt ipa uc rx resource
3860 * @pdev: htt context
3861 * @rx_ind_ring_size: rx ring size
3862 *
3863 * Return: 0 success
3864 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003865int htt_rx_ipa_uc_attach(struct htt_pdev_t *pdev,
3866 unsigned int rx_ind_ring_elements)
3867{
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08003868 int ret = 0;
Orhan K AKYILDIZa652a602017-01-29 13:07:42 -08003869
	/*
	 * Allocate the RX indication ring.
	 * Each RX IND ring element carries:
	 *   4 bytes: pointer
	 *   2 bytes: VDEV ID
	 *   2 bytes: length
	 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003877 pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr =
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303878 qdf_mem_alloc_consistent(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003879 pdev->osdev,
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303880 pdev->osdev->dev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003881 rx_ind_ring_elements *
3882 sizeof(struct ipa_uc_rx_ring_elem_t),
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303883 &pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003884 if (!pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr) {
Anurag Chouhan6d760662016-02-20 16:05:43 +05303885 qdf_print("%s: RX IND RING alloc fail", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003886 return -ENOBUFS;
3887 }
3888
3889 /* RX indication ring size, by bytes */
3890 pdev->ipa_uc_rx_rsc.rx_ind_ring_size =
3891 rx_ind_ring_elements * sizeof(struct ipa_uc_rx_ring_elem_t);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303892 qdf_mem_zero(pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003893 pdev->ipa_uc_rx_rsc.rx_ind_ring_size);
3894
3895 /* Allocate RX process done index */
3896 pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr =
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303897 qdf_mem_alloc_consistent(
3898 pdev->osdev, pdev->osdev->dev, 4,
3899 &pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003900 if (!pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr) {
Anurag Chouhan6d760662016-02-20 16:05:43 +05303901 qdf_print("%s: RX PROC DONE IND alloc fail", __func__);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303902 qdf_mem_free_consistent(
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05303903 pdev->osdev, pdev->osdev->dev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003904 pdev->ipa_uc_rx_rsc.rx_ind_ring_size,
3905 pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr,
3906 pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05303907 qdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003908 rx_ind_ring_base),
3909 memctx));
3910 return -ENOBUFS;
3911 }
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303912 qdf_mem_zero(pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr, 4);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003913
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08003914 ret = htt_rx_ipa_uc_alloc_wdi2_rsc(pdev, rx_ind_ring_elements);
3915 return ret;
3916}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003917
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08003918#ifdef QCA_WIFI_3_0
3919/**
3920 * htt_rx_ipa_uc_free_wdi2_rsc() - Free WDI2.0 resources
3921 * @pdev: htt context
3922 *
3923 * Return: None
3924 */
Jeff Johnson0e60ce52016-10-07 12:29:43 -07003925static void htt_rx_ipa_uc_free_wdi2_rsc(struct htt_pdev_t *pdev)
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08003926{
3927 if (pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr) {
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303928 qdf_mem_free_consistent(
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05303929 pdev->osdev, pdev->osdev->dev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003930 pdev->ipa_uc_rx_rsc.rx2_ind_ring_size,
3931 pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
3932 pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05303933 qdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003934 rx2_ind_ring_base),
3935 memctx));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003936 }
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08003937
3938 if (pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr) {
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303939 qdf_mem_free_consistent(
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05303940 pdev->osdev, pdev->osdev->dev,
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08003941 4,
3942 pdev->ipa_uc_rx_rsc.
Leo Changbc24e612016-07-05 17:19:55 -07003943 rx2_ipa_prc_done_idx.vaddr,
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08003944 pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05303945 qdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
Leo Changbc24e612016-07-05 17:19:55 -07003946 rx2_ipa_prc_done_idx),
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08003947 memctx));
3948 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003949}
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08003950#else
Jeff Johnson0e60ce52016-10-07 12:29:43 -07003951static void htt_rx_ipa_uc_free_wdi2_rsc(struct htt_pdev_t *pdev)
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08003952{
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08003953}
3954#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003955
3956int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev)
3957{
3958 if (pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr) {
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303959 qdf_mem_free_consistent(
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05303960 pdev->osdev, pdev->osdev->dev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003961 pdev->ipa_uc_rx_rsc.rx_ind_ring_size,
3962 pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr,
3963 pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05303964 qdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003965 rx_ind_ring_base),
3966 memctx));
3967 }
3968
3969 if (pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr) {
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303970 qdf_mem_free_consistent(
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05303971 pdev->osdev, pdev->osdev->dev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003972 4,
3973 pdev->ipa_uc_rx_rsc.
3974 rx_ipa_prc_done_idx.vaddr,
3975 pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05303976 qdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
Leo Chang8e073612015-11-13 10:55:34 -08003977 rx2_ipa_prc_done_idx),
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003978 memctx));
3979 }
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08003980
3981 htt_rx_ipa_uc_free_wdi2_rsc(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003982 return 0;
3983}
3984#endif /* IPA_OFFLOAD */
Himanshu Agarwalf65bd4c2016-12-05 17:21:12 +05303985
3986/**
3987 * htt_register_rx_pkt_dump_callback() - registers callback to
3988 * get rx pkt status and call callback to do rx packet dump
3989 *
3990 * @pdev: htt pdev handle
3991 * @callback: callback to get rx pkt status and
3992 * call callback to do rx packet dump
3993 *
 * This function registers the callback used to retrieve rx packet
 * status and invoke the rx packet dump handler.
3996 *
3997 * Return: None
3998 *
3999 */
4000void htt_register_rx_pkt_dump_callback(struct htt_pdev_t *pdev,
4001 tp_rx_pkt_dump_cb callback)
4002{
4003 if (!pdev) {
Yun Park16cf32a2017-04-03 10:49:06 -07004004 qdf_print("%s: %s, %s",
4005 __func__,
4006 "htt pdev is NULL",
4007 "rx packet status callback register unsuccessful\n");
Himanshu Agarwalf65bd4c2016-12-05 17:21:12 +05304008 return;
4009 }
4010 pdev->rx_pkt_dump_cb = callback;
4011}
4012
4013/**
4014 * htt_deregister_rx_pkt_dump_callback() - deregisters callback to
4015 * get rx pkt status and call callback to do rx packet dump
4016 *
4017 * @pdev: htt pdev handle
4018 *
4019 * This function is used to deregister the callback to get
4020 * rx pkt status and call callback to do rx packet dump
4021 *
4022 * Return: None
4023 *
4024 */
4025void htt_deregister_rx_pkt_dump_callback(struct htt_pdev_t *pdev)
4026{
4027 if (!pdev) {
Yun Park16cf32a2017-04-03 10:49:06 -07004028 qdf_print("%s: %s, %s",
4029 __func__,
4030 "htt pdev is NULL",
4031 "rx packet status callback deregister unsuccessful\n");
Himanshu Agarwalf65bd4c2016-12-05 17:21:12 +05304032 return;
4033 }
4034 pdev->rx_pkt_dump_cb = NULL;
4035}
4036