/*
 * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/**
 * @file htt_rx.c
 * @brief Implement receive aspects of HTT.
 * @details
 *  This file contains three categories of HTT rx code:
 *  1.  An abstraction of the rx descriptor, to hide the
 *      differences between the HL vs. LL rx descriptor.
 *  2.  Functions for providing access to the (series of)
 *      rx descriptor(s) and rx frame(s) associated with
 *      an rx indication message.
 *  3.  Functions for setting up and using the MAC DMA
 *      rx ring (applies to LL only).
 */

#include <qdf_mem.h>            /* qdf_mem_malloc, free, etc. */
#include <qdf_types.h>          /* qdf_print, bool */
#include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
#include <qdf_timer.h>          /* qdf_timer_free */

#include <htt.h>                /* HTT_HL_RX_DESC_SIZE */
#include <ol_cfg.h>
#include <ol_rx.h>
#include <ol_htt_rx_api.h>
#include <htt_internal.h>       /* HTT_ASSERT, htt_pdev_t, HTT_RX_BUF_SIZE */
#include "regtable.h"

#include <cds_ieee80211_common.h>   /* ieee80211_frame, ieee80211_qoscntl */
#include <cds_ieee80211_defines.h>  /* ieee80211_rx_status */

#ifdef DEBUG_DMA_DONE
#include <asm/barrier.h>
#include <wma_api.h>
#endif

/* AR9888v1 WORKAROUND for EV#112367 */
/* FIX THIS - remove this WAR when the bug is fixed */
#define PEREGRINE_1_0_ZERO_LEN_PHY_ERR_WAR

/*--- setup / tear-down functions -------------------------------------------*/

#ifndef HTT_RX_RING_SIZE_MIN
#define HTT_RX_RING_SIZE_MIN 128    /* slightly more than one large A-MPDU */
#endif

#ifndef HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_SIZE_MAX 2048   /* ~20 ms @ 1 Gbps of 1500B MSDUs */
#endif

#ifndef HTT_RX_AVG_FRM_BYTES
#define HTT_RX_AVG_FRM_BYTES 1000
#endif

#ifndef HTT_RX_HOST_LATENCY_MAX_MS
#define HTT_RX_HOST_LATENCY_MAX_MS 20   /* ms */  /* very conservative */
#endif

#ifndef HTT_RX_HOST_LATENCY_WORST_LIKELY_MS
#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10  /* ms */  /* conservative */
#endif

#ifndef HTT_RX_RING_REFILL_RETRY_TIME_MS
#define HTT_RX_RING_REFILL_RETRY_TIME_MS 50
#endif

/*--- RX In Order Definitions ------------------------------------------------*/

/* Number of buckets in the hash table */
#define RX_NUM_HASH_BUCKETS 1024    /* This should always be a power of 2 */
#define RX_NUM_HASH_BUCKETS_MASK (RX_NUM_HASH_BUCKETS - 1)

/* Number of hash entries allocated per bucket */
#define RX_ENTRIES_SIZE 10

#define RX_HASH_FUNCTION(a) (((a >> 14) ^ (a >> 4)) & RX_NUM_HASH_BUCKETS_MASK)

#ifdef RX_HASH_DEBUG_LOG
#define RX_HASH_LOG(x) x
#else
#define RX_HASH_LOG(x)          /* no-op */
#endif
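
/*
 * Worked example of the hash function above (hypothetical address, for
 * illustration only): for a buffer physical address a = 0x12345678,
 *     (a >> 14) = 0x48d1, (a >> 4) = 0x1234567,
 *     (0x48d1 ^ 0x1234567) & 0x3ff = 0x1b6,
 * so the buffer lands in bucket 438. XORing two differently shifted
 * copies of the address mixes in bits above the low-order bits that
 * consecutive buffer allocations tend to share.
 */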

/*
 * De-initialization function of the rx buffer hash table. This function
 * frees up the hash table, which includes freeing all the pending rx buffers.
 */
void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
{
	uint32_t i;
	struct htt_rx_hash_entry *hash_entry;
	struct htt_list_node *list_iter = NULL;

	if (NULL == pdev->rx_ring.hash_table)
		return;
	for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) {
		/* Free the hash entries in hash bucket i */
		list_iter = pdev->rx_ring.hash_table[i].listhead.next;
		while (list_iter != &pdev->rx_ring.hash_table[i].listhead) {
			hash_entry =
				(struct htt_rx_hash_entry *)((char *)list_iter -
							     pdev->rx_ring.
							     listnode_offset);
			if (hash_entry->netbuf) {
#ifdef DEBUG_DMA_DONE
				qdf_nbuf_unmap(pdev->osdev, hash_entry->netbuf,
					       QDF_DMA_BIDIRECTIONAL);
#else
				qdf_nbuf_unmap(pdev->osdev, hash_entry->netbuf,
					       QDF_DMA_FROM_DEVICE);
#endif
				qdf_nbuf_free(hash_entry->netbuf);
				hash_entry->paddr = 0;
			}
			list_iter = list_iter->next;

			if (!hash_entry->fromlist)
				qdf_mem_free(hash_entry);
		}

		qdf_mem_free(pdev->rx_ring.hash_table[i].entries);
	}
	qdf_mem_free(pdev->rx_ring.hash_table);
	pdev->rx_ring.hash_table = NULL;
}

static bool
htt_rx_msdu_first_msdu_flag_ll(htt_pdev_handle pdev, void *msdu_desc)
{
	struct htt_host_rx_desc_base *rx_desc =
		(struct htt_host_rx_desc_base *)msdu_desc;
	return (bool)
		(((*(((uint32_t *) &rx_desc->msdu_end) + 4)) &
		  RX_MSDU_END_4_FIRST_MSDU_MASK) >>
		 RX_MSDU_END_4_FIRST_MSDU_LSB);
}

static int htt_rx_ring_size(struct htt_pdev_t *pdev)
{
	int size;

	/*
	 * It is expected that the host CPU will typically be able to service
	 * the rx indication from one A-MPDU before the rx indication from
	 * the subsequent A-MPDU happens, roughly 1-2 ms later.
	 * However, the rx ring should be sized very conservatively, to
	 * accommodate the worst reasonable delay before the host CPU services
	 * a rx indication interrupt.
	 * The rx ring need not be kept full of empty buffers. In theory,
	 * the htt host SW can dynamically track the low-water mark in the
	 * rx ring, and dynamically adjust the level to which the rx ring
	 * is filled with empty buffers, to dynamically meet the desired
	 * low-water mark.
	 * In contrast, it's difficult to resize the rx ring itself, once
	 * it's in use.
	 * Thus, the ring itself should be sized very conservatively, while
	 * the degree to which the ring is filled with empty buffers should
	 * be sized moderately conservatively.
	 */
	size =
		ol_cfg_max_thruput_mbps(pdev->ctrl_pdev) *
		1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */  /
		(8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;

	if (size < HTT_RX_RING_SIZE_MIN)
		size = HTT_RX_RING_SIZE_MIN;
	else if (size > HTT_RX_RING_SIZE_MAX)
		size = HTT_RX_RING_SIZE_MAX;

	size = qdf_get_pwr2(size);
	return size;
}
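
/*
 * Worked sizing example (hypothetical config values, for illustration
 * only): with ol_cfg_max_thruput_mbps() == 800, HTT_RX_AVG_FRM_BYTES
 * == 1000 and HTT_RX_HOST_LATENCY_MAX_MS == 20, the integer math above
 * gives 800 * 1000 / 8000 * 20 = 2000 buffers, which falls within
 * [HTT_RX_RING_SIZE_MIN, HTT_RX_RING_SIZE_MAX] and is then rounded up
 * by qdf_get_pwr2() to the 2048-entry power-of-two ring that the
 * size_mask index arithmetic relies on.
 */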

static int htt_rx_ring_fill_level(struct htt_pdev_t *pdev)
{
	int size;

	size = ol_cfg_max_thruput_mbps(pdev->ctrl_pdev) *
		1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */  /
		(8 * HTT_RX_AVG_FRM_BYTES) *
		HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;
	/*
	 * Make sure the fill level is at least 1 less than the ring size.
	 * Leaving 1 element empty allows the SW to easily distinguish
	 * between a full ring vs. an empty ring.
	 */
	if (size >= pdev->rx_ring.size)
		size = pdev->rx_ring.size - 1;

	return size;
}
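
/*
 * Under the same hypothetical 800 Mbps / 1000-byte-frame assumptions as
 * the sizing example above, the fill level works out to
 * 800 * 1000 / 8000 * 10 = 1000 buffers, roughly half the ring, so the
 * ring is provisioned for the worst case but only populated for the
 * likely case.
 */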

static void htt_rx_ring_refill_retry(void *arg)
{
	htt_pdev_handle pdev = (htt_pdev_handle) arg;
	htt_rx_msdu_buff_replenish(pdev);
}

void htt_rx_ring_fill_n(struct htt_pdev_t *pdev, int num)
{
	int idx;
	QDF_STATUS status;
	struct htt_host_rx_desc_base *rx_desc;

	idx = *(pdev->rx_ring.alloc_idx.vaddr);
	while (num > 0) {
		qdf_dma_addr_t paddr;
		qdf_nbuf_t rx_netbuf;
		int headroom;

		rx_netbuf =
			qdf_nbuf_alloc(pdev->osdev, HTT_RX_BUF_SIZE,
				       0, 4, false);
		if (!rx_netbuf) {
			qdf_timer_stop(&pdev->rx_ring.refill_retry_timer);
			/*
			 * Failed to fill it to the desired level -
			 * we'll start a timer and try again next time.
			 * As long as enough buffers are left in the ring for
			 * another A-MPDU rx, no special recovery is needed.
			 */
#ifdef DEBUG_DMA_DONE
			pdev->rx_ring.dbg_refill_cnt++;
#endif
			qdf_timer_start(
				&pdev->rx_ring.refill_retry_timer,
				HTT_RX_RING_REFILL_RETRY_TIME_MS);
			goto fail;
		}

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = htt_rx_desc(rx_netbuf);
		*(uint32_t *) &rx_desc->attention = 0;

#ifdef DEBUG_DMA_DONE
		*(uint32_t *) &rx_desc->msdu_end = 1;

#define MAGIC_PATTERN 0xDEADBEEF
		*(uint32_t *) &rx_desc->msdu_start = MAGIC_PATTERN;

		/*
		 * To ensure that the attention bit is reset and msdu_end is
		 * set before calling dma_map
		 */
		smp_mb();
#endif
		/*
		 * Adjust qdf_nbuf_data to point to the location in the buffer
		 * where the rx descriptor will be filled in.
		 */
		headroom = qdf_nbuf_data(rx_netbuf) - (uint8_t *) rx_desc;
		qdf_nbuf_push_head(rx_netbuf, headroom);

#ifdef DEBUG_DMA_DONE
		status =
			qdf_nbuf_map(pdev->osdev, rx_netbuf,
				     QDF_DMA_BIDIRECTIONAL);
#else
		status =
			qdf_nbuf_map(pdev->osdev, rx_netbuf,
				     QDF_DMA_FROM_DEVICE);
#endif
		if (status != QDF_STATUS_SUCCESS) {
			qdf_nbuf_free(rx_netbuf);
			goto fail;
		}
		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);
		if (pdev->cfg.is_full_reorder_offload) {
			if (qdf_unlikely
				    (htt_rx_hash_list_insert(pdev, paddr,
							     rx_netbuf))) {
				qdf_print("%s: hash insert failed!\n",
					  __func__);
#ifdef DEBUG_DMA_DONE
				qdf_nbuf_unmap(pdev->osdev, rx_netbuf,
					       QDF_DMA_BIDIRECTIONAL);
#else
				qdf_nbuf_unmap(pdev->osdev, rx_netbuf,
					       QDF_DMA_FROM_DEVICE);
#endif
				qdf_nbuf_free(rx_netbuf);
				goto fail;
			}
			htt_rx_dbg_rxbuf_set(pdev, paddr, rx_netbuf);
		} else {
			pdev->rx_ring.buf.netbufs_ring[idx] = rx_netbuf;
		}
#if HTT_PADDR64
		paddr &= 0x1fffffffff;  /* keep only the low 37 address bits */
#endif /* HTT_PADDR64 */
		pdev->rx_ring.buf.paddrs_ring[idx] = paddr;
		pdev->rx_ring.fill_cnt++;

		num--;
		idx++;
		idx &= pdev->rx_ring.size_mask;
	}

fail:
	*(pdev->rx_ring.alloc_idx.vaddr) = idx;
	return;
}

unsigned htt_rx_ring_elems(struct htt_pdev_t *pdev)
{
	return
		(*pdev->rx_ring.alloc_idx.vaddr -
		 pdev->rx_ring.sw_rd_idx.msdu_payld) & pdev->rx_ring.size_mask;
}

unsigned int htt_rx_in_order_ring_elems(struct htt_pdev_t *pdev)
{
	return
		(*pdev->rx_ring.alloc_idx.vaddr -
		 *pdev->rx_ring.target_idx.vaddr) &
		pdev->rx_ring.size_mask;
}
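
/*
 * Example of the ring-occupancy arithmetic above (hypothetical index
 * values): with a 2048-entry ring (size_mask == 0x7ff), alloc index 5
 * and read index 2040, (5 - 2040) & 0x7ff == 13, so wrap-around is
 * handled by the mask alone as long as the ring size is a power of 2.
 */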

void htt_rx_detach(struct htt_pdev_t *pdev)
{
	qdf_timer_stop(&pdev->rx_ring.refill_retry_timer);
	qdf_timer_free(&pdev->rx_ring.refill_retry_timer);

	if (pdev->cfg.is_full_reorder_offload) {
		qdf_mem_free_consistent(pdev->osdev,
					sizeof(uint32_t),
					pdev->rx_ring.target_idx.vaddr,
					pdev->rx_ring.target_idx.paddr,
					qdf_get_dma_mem_context((&pdev->
								 rx_ring.
								 target_idx),
								memctx));
		htt_rx_hash_deinit(pdev);
	} else {
		int sw_rd_idx = pdev->rx_ring.sw_rd_idx.msdu_payld;

		while (sw_rd_idx != *(pdev->rx_ring.alloc_idx.vaddr)) {
#ifdef DEBUG_DMA_DONE
			qdf_nbuf_unmap(pdev->osdev,
				       pdev->rx_ring.buf.
				       netbufs_ring[sw_rd_idx],
				       QDF_DMA_BIDIRECTIONAL);
#else
			qdf_nbuf_unmap(pdev->osdev,
				       pdev->rx_ring.buf.
				       netbufs_ring[sw_rd_idx],
				       QDF_DMA_FROM_DEVICE);
#endif
			qdf_nbuf_free(pdev->rx_ring.buf.
				      netbufs_ring[sw_rd_idx]);
			sw_rd_idx++;
			sw_rd_idx &= pdev->rx_ring.size_mask;
		}
		qdf_mem_free(pdev->rx_ring.buf.netbufs_ring);
	}

	qdf_mem_free_consistent(pdev->osdev,
				sizeof(uint32_t),
				pdev->rx_ring.alloc_idx.vaddr,
				pdev->rx_ring.alloc_idx.paddr,
				qdf_get_dma_mem_context((&pdev->rx_ring.
							 alloc_idx),
							memctx));

	qdf_mem_free_consistent(pdev->osdev,
				pdev->rx_ring.size * sizeof(qdf_dma_addr_t),
				pdev->rx_ring.buf.paddrs_ring,
				pdev->rx_ring.base_paddr,
				qdf_get_dma_mem_context((&pdev->rx_ring.buf),
							memctx));
}

/*--- rx descriptor field access functions ----------------------------------*/
/*
 * These functions need to use bit masks and shifts to extract fields
 * from the rx descriptors, rather than directly using the bitfields.
 * For example, use
 *     (desc & FIELD_MASK) >> FIELD_LSB
 * rather than
 *     desc.field
 * This allows the functions to work correctly on either little-endian
 * machines (no endianness conversion needed) or big-endian machines
 * (endianness conversion provided automatically by the HW DMA's
 * byte-swizzling).
 */
/* FIX THIS: APPLIES TO LL ONLY */

/**
 * htt_rx_mpdu_desc_retry_ll() - Returns the retry bit from the Rx descriptor
 *                               for the Low Latency driver
 * @pdev:       Handle (pointer) to HTT pdev.
 * @mpdu_desc:  Void pointer to the Rx descriptor for MPDU
 *              before the beginning of the payload.
 *
 * This function returns the retry bit of the 802.11 header for the
 * provided rx MPDU descriptor.
 *
 * Return: boolean -- true if retry is set, false otherwise
 */
bool
htt_rx_mpdu_desc_retry_ll(htt_pdev_handle pdev, void *mpdu_desc)
{
	struct htt_host_rx_desc_base *rx_desc =
		(struct htt_host_rx_desc_base *) mpdu_desc;

	return
		(bool)(((*((uint32_t *) &rx_desc->mpdu_start)) &
			RX_MPDU_START_0_RETRY_MASK) >>
		       RX_MPDU_START_0_RETRY_LSB);
}

uint16_t htt_rx_mpdu_desc_seq_num_ll(htt_pdev_handle pdev, void *mpdu_desc)
{
	struct htt_host_rx_desc_base *rx_desc =
		(struct htt_host_rx_desc_base *)mpdu_desc;

	return
		(uint16_t) (((*((uint32_t *) &rx_desc->mpdu_start)) &
			     RX_MPDU_START_0_SEQ_NUM_MASK) >>
			    RX_MPDU_START_0_SEQ_NUM_LSB);
}

/* FIX THIS: APPLIES TO LL ONLY */
void
htt_rx_mpdu_desc_pn_ll(htt_pdev_handle pdev,
		       void *mpdu_desc, union htt_rx_pn_t *pn, int pn_len_bits)
{
	struct htt_host_rx_desc_base *rx_desc =
		(struct htt_host_rx_desc_base *)mpdu_desc;

	switch (pn_len_bits) {
	case 24:
		/* bits 23:0 */
		pn->pn24 = rx_desc->mpdu_start.pn_31_0 & 0xffffff;
		break;
	case 48:
		/* bits 31:0 */
		pn->pn48 = rx_desc->mpdu_start.pn_31_0;
		/* bits 47:32 */
		pn->pn48 |= ((uint64_t)
			     ((*(((uint32_t *) &rx_desc->mpdu_start) + 2))
			      & RX_MPDU_START_2_PN_47_32_MASK))
			<< (32 - RX_MPDU_START_2_PN_47_32_LSB);
		break;
	case 128:
		/* bits 31:0 */
		pn->pn128[0] = rx_desc->mpdu_start.pn_31_0;
		/* bits 47:32 */
		pn->pn128[0] |=
			((uint64_t) ((*(((uint32_t *)&rx_desc->mpdu_start) + 2))
				     & RX_MPDU_START_2_PN_47_32_MASK))
			<< (32 - RX_MPDU_START_2_PN_47_32_LSB);
		/* bits 63:48 */
		pn->pn128[0] |=
			((uint64_t) ((*(((uint32_t *) &rx_desc->msdu_end) + 2))
				     & RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK))
			<< (48 - RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB);
		/* bits 95:64 */
		pn->pn128[1] = rx_desc->msdu_end.ext_wapi_pn_95_64;
		/* bits 127:96 */
		pn->pn128[1] |=
			((uint64_t) rx_desc->msdu_end.ext_wapi_pn_127_96) << 32;
		break;
	default:
		qdf_print("Error: invalid length spec (%d bits) for PN\n",
			  pn_len_bits);
	}
}
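
/*
 * Assembly example for the 48-bit case (hypothetical field values, for
 * illustration only): if pn_31_0 == 0x89abcdef and the masked PN_47_32
 * field contributes 0x0123 at bits 47:32, the code above yields
 * pn48 == 0x012389abcdef; the 128-bit WAPI case extends the same
 * pattern across the mpdu_start and msdu_end fields.
 */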

/**
 * htt_rx_mpdu_desc_tid_ll() - Returns the TID value from the Rx descriptor
 *                             for Low Latency driver
 * @pdev:       Handle (pointer) to HTT pdev.
 * @mpdu_desc:  Void pointer to the Rx descriptor for the MPDU
 *              before the beginning of the payload.
 *
 * This function returns the TID set in the 802.11 QoS Control for the MPDU
 * in the packet header, by looking at the mpdu_start of the Rx descriptor.
 * The Rx descriptor gets a copy of the TID from the MAC.
 *
 * Return: Actual TID set in the packet header.
 */
uint8_t
htt_rx_mpdu_desc_tid_ll(htt_pdev_handle pdev, void *mpdu_desc)
{
	struct htt_host_rx_desc_base *rx_desc =
		(struct htt_host_rx_desc_base *) mpdu_desc;

	return
		(uint8_t)(((*(((uint32_t *) &rx_desc->mpdu_start) + 2)) &
			   RX_MPDU_START_2_TID_MASK) >>
			  RX_MPDU_START_2_TID_LSB);
}

uint32_t htt_rx_mpdu_desc_tsf32(htt_pdev_handle pdev, void *mpdu_desc)
{
/* FIX THIS */
	return 0;
}

/* FIX THIS: APPLIES TO LL ONLY */
char *htt_rx_mpdu_wifi_hdr_retrieve(htt_pdev_handle pdev, void *mpdu_desc)
{
	struct htt_host_rx_desc_base *rx_desc =
		(struct htt_host_rx_desc_base *)mpdu_desc;
	return rx_desc->rx_hdr_status;
}

/* FIX THIS: APPLIES TO LL ONLY */
bool htt_rx_msdu_desc_completes_mpdu_ll(htt_pdev_handle pdev, void *msdu_desc)
{
	struct htt_host_rx_desc_base *rx_desc =
		(struct htt_host_rx_desc_base *)msdu_desc;
	return (bool)
		(((*(((uint32_t *) &rx_desc->msdu_end) + 4)) &
		  RX_MSDU_END_4_LAST_MSDU_MASK) >> RX_MSDU_END_4_LAST_MSDU_LSB);
}

/* FIX THIS: APPLIES TO LL ONLY */
int htt_rx_msdu_has_wlan_mcast_flag_ll(htt_pdev_handle pdev, void *msdu_desc)
{
	struct htt_host_rx_desc_base *rx_desc =
		(struct htt_host_rx_desc_base *)msdu_desc;
	/*
	 * HW rx desc: the mcast_bcast flag is only valid
	 * if first_msdu is set
	 */
	return
		((*(((uint32_t *) &rx_desc->msdu_end) + 4)) &
		 RX_MSDU_END_4_FIRST_MSDU_MASK) >> RX_MSDU_END_4_FIRST_MSDU_LSB;
}

/* FIX THIS: APPLIES TO LL ONLY */
bool htt_rx_msdu_is_wlan_mcast_ll(htt_pdev_handle pdev, void *msdu_desc)
{
	struct htt_host_rx_desc_base *rx_desc =
		(struct htt_host_rx_desc_base *)msdu_desc;
	return
		((*((uint32_t *) &rx_desc->attention)) &
		 RX_ATTENTION_0_MCAST_BCAST_MASK)
		>> RX_ATTENTION_0_MCAST_BCAST_LSB;
}

/* FIX THIS: APPLIES TO LL ONLY */
int htt_rx_msdu_is_frag_ll(htt_pdev_handle pdev, void *msdu_desc)
{
	struct htt_host_rx_desc_base *rx_desc =
		(struct htt_host_rx_desc_base *)msdu_desc;
	return
		((*((uint32_t *) &rx_desc->attention)) &
		 RX_ATTENTION_0_FRAGMENT_MASK) >> RX_ATTENTION_0_FRAGMENT_LSB;
}

static inline
uint8_t htt_rx_msdu_fw_desc_get(htt_pdev_handle pdev, void *msdu_desc)
{
	/*
	 * HL and LL use the same format for FW rx desc, but have the FW rx
	 * desc in different locations.
	 * In LL, the FW rx descriptor has been copied into the same
	 * htt_host_rx_desc_base struct that holds the HW rx desc.
	 * In HL, the FW rx descriptor, along with the MSDU payload,
	 * is in the same buffer as the rx indication message.
	 *
	 * Use the FW rx desc offset configured during startup to account for
	 * this difference between HL vs. LL.
	 *
	 * An optimization would be to define the LL and HL msdu_desc pointer
	 * in such a way that they both use the same offset to the FW rx desc.
	 * Then the following functions could be converted to macros, without
	 * needing to expose the htt_pdev_t definition outside HTT.
	 */
	return *(((uint8_t *) msdu_desc) + pdev->rx_fw_desc_offset);
}
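
/*
 * Illustrative note (assumed layout, for illustration only): for LL,
 * pdev->rx_fw_desc_offset effectively points at the fw_desc field
 * within struct htt_host_rx_desc_base, so the accessors below reduce
 * to one byte load plus a mask test, e.g.
 *     uint8_t b = *((uint8_t *) msdu_desc + pdev->rx_fw_desc_offset);
 *     int discard = b & FW_RX_DESC_DISCARD_M;
 */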

int htt_rx_msdu_discard(htt_pdev_handle pdev, void *msdu_desc)
{
	return htt_rx_msdu_fw_desc_get(pdev, msdu_desc) & FW_RX_DESC_DISCARD_M;
}

int htt_rx_msdu_forward(htt_pdev_handle pdev, void *msdu_desc)
{
	return htt_rx_msdu_fw_desc_get(pdev, msdu_desc) & FW_RX_DESC_FORWARD_M;
}

int htt_rx_msdu_inspect(htt_pdev_handle pdev, void *msdu_desc)
{
	return htt_rx_msdu_fw_desc_get(pdev, msdu_desc) & FW_RX_DESC_INSPECT_M;
}

void
htt_rx_msdu_actions(htt_pdev_handle pdev,
		    void *msdu_desc, int *discard, int *forward, int *inspect)
{
	uint8_t rx_msdu_fw_desc = htt_rx_msdu_fw_desc_get(pdev, msdu_desc);
#ifdef HTT_DEBUG_DATA
	HTT_PRINT("act:0x%x ", rx_msdu_fw_desc);
#endif
	*discard = rx_msdu_fw_desc & FW_RX_DESC_DISCARD_M;
	*forward = rx_msdu_fw_desc & FW_RX_DESC_FORWARD_M;
	*inspect = rx_msdu_fw_desc & FW_RX_DESC_INSPECT_M;
}

static inline qdf_nbuf_t htt_rx_netbuf_pop(htt_pdev_handle pdev)
{
	int idx;
	qdf_nbuf_t msdu;

	HTT_ASSERT1(htt_rx_ring_elems(pdev) != 0);

#ifdef DEBUG_DMA_DONE
	pdev->rx_ring.dbg_ring_idx++;
	pdev->rx_ring.dbg_ring_idx &= pdev->rx_ring.size_mask;
#endif

	idx = pdev->rx_ring.sw_rd_idx.msdu_payld;
	msdu = pdev->rx_ring.buf.netbufs_ring[idx];
	idx++;
	idx &= pdev->rx_ring.size_mask;
	pdev->rx_ring.sw_rd_idx.msdu_payld = idx;
	pdev->rx_ring.fill_cnt--;
	return msdu;
}

static inline qdf_nbuf_t
htt_rx_in_order_netbuf_pop(htt_pdev_handle pdev, uint32_t paddr)
{
	HTT_ASSERT1(htt_rx_in_order_ring_elems(pdev) != 0);
	pdev->rx_ring.fill_cnt--;
	return htt_rx_hash_list_lookup(pdev, paddr);
}

/*
 * FIX ME: this function applies only to LL rx descs.
 * An equivalent for HL rx descs is needed.
 */
#ifdef CHECKSUM_OFFLOAD
static inline
void
htt_set_checksum_result_ll(htt_pdev_handle pdev, qdf_nbuf_t msdu,
			   struct htt_host_rx_desc_base *rx_desc)
{
#define MAX_IP_VER 2
#define MAX_PROTO_VAL 4
	struct rx_msdu_start *rx_msdu = &rx_desc->msdu_start;
	unsigned int proto = (rx_msdu->tcp_proto) | (rx_msdu->udp_proto << 1);

	/*
	 * HW supports TCP & UDP checksum offload for ipv4 and ipv6
	 */
	static const qdf_nbuf_l4_rx_cksum_type_t
		cksum_table[][MAX_PROTO_VAL][MAX_IP_VER] = {
		{
			/* non-fragmented IP packet */
			/* non TCP/UDP packet */
			{QDF_NBUF_RX_CKSUM_NONE, QDF_NBUF_RX_CKSUM_NONE},
			/* TCP packet */
			{QDF_NBUF_RX_CKSUM_TCP, QDF_NBUF_RX_CKSUM_TCPIPV6},
			/* UDP packet */
			{QDF_NBUF_RX_CKSUM_UDP, QDF_NBUF_RX_CKSUM_UDPIPV6},
			/* invalid packet type */
			{QDF_NBUF_RX_CKSUM_NONE, QDF_NBUF_RX_CKSUM_NONE},
		},
		{
			/* fragmented IP packet */
			{QDF_NBUF_RX_CKSUM_NONE, QDF_NBUF_RX_CKSUM_NONE},
			{QDF_NBUF_RX_CKSUM_NONE, QDF_NBUF_RX_CKSUM_NONE},
			{QDF_NBUF_RX_CKSUM_NONE, QDF_NBUF_RX_CKSUM_NONE},
			{QDF_NBUF_RX_CKSUM_NONE, QDF_NBUF_RX_CKSUM_NONE},
		}
	};

	qdf_nbuf_rx_cksum_t cksum = {
		cksum_table[rx_msdu->ip_frag][proto][rx_msdu->ipv6_proto],
		QDF_NBUF_RX_CKSUM_NONE,
		0
	};

	if (cksum.l4_type !=
	    (qdf_nbuf_l4_rx_cksum_type_t) QDF_NBUF_RX_CKSUM_NONE) {
		cksum.l4_result =
			((*(uint32_t *) &rx_desc->attention) &
			 RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK) ?
			QDF_NBUF_RX_CKSUM_NONE :
			QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
	}
	qdf_nbuf_set_rx_cksum(msdu, &cksum);
#undef MAX_IP_VER
#undef MAX_PROTO_VAL
}
#else
#define htt_set_checksum_result_ll(pdev, msdu, rx_desc) /* no-op */
#endif
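
/*
 * Lookup example for the table above (illustrative): a non-fragmented
 * IPv4 UDP MSDU has ip_frag == 0, proto == (tcp_proto | udp_proto << 1)
 * == 2 and ipv6_proto == 0, so cksum_table[0][2][0] selects
 * QDF_NBUF_RX_CKSUM_UDP; l4_result is then derived from the HW's
 * RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL bit.
 */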

#ifdef DEBUG_DMA_DONE
void htt_rx_print_rx_indication(qdf_nbuf_t rx_ind_msg, htt_pdev_handle pdev)
{
	uint32_t *msg_word;
	int byte_offset;
	int mpdu_range, num_mpdu_range;

	msg_word = (uint32_t *) qdf_nbuf_data(rx_ind_msg);

	qdf_print
		("------------------HTT RX IND-----------------------------\n");
	qdf_print("alloc idx paddr %x (*vaddr) %d\n",
		  pdev->rx_ring.alloc_idx.paddr,
		  *pdev->rx_ring.alloc_idx.vaddr);

	qdf_print("sw_rd_idx msdu_payld %d msdu_desc %d\n",
		  pdev->rx_ring.sw_rd_idx.msdu_payld,
		  pdev->rx_ring.sw_rd_idx.msdu_desc);

	qdf_print("dbg_ring_idx %d\n", pdev->rx_ring.dbg_ring_idx);

	qdf_print("fill_level %d fill_cnt %d\n", pdev->rx_ring.fill_level,
		  pdev->rx_ring.fill_cnt);

	qdf_print("initial msdu_payld %d curr mpdu range %d curr mpdu cnt %d\n",
		  pdev->rx_ring.dbg_initial_msdu_payld,
		  pdev->rx_ring.dbg_mpdu_range, pdev->rx_ring.dbg_mpdu_count);

	/* Print the RX_IND contents */

	qdf_print("peer id %x RV %x FV %x ext_tid %x msg_type %x\n",
		  HTT_RX_IND_PEER_ID_GET(*msg_word),
		  HTT_RX_IND_REL_VALID_GET(*msg_word),
		  HTT_RX_IND_FLUSH_VALID_GET(*msg_word),
		  HTT_RX_IND_EXT_TID_GET(*msg_word),
		  HTT_T2H_MSG_TYPE_GET(*msg_word));

	qdf_print("num_mpdu_ranges %x rel_seq_num_end %x rel_seq_num_start %x\n"
		  " flush_seq_num_end %x flush_seq_num_start %x\n",
		  HTT_RX_IND_NUM_MPDU_RANGES_GET(*(msg_word + 1)),
		  HTT_RX_IND_REL_SEQ_NUM_END_GET(*(msg_word + 1)),
		  HTT_RX_IND_REL_SEQ_NUM_START_GET(*(msg_word + 1)),
		  HTT_RX_IND_FLUSH_SEQ_NUM_END_GET(*(msg_word + 1)),
		  HTT_RX_IND_FLUSH_SEQ_NUM_START_GET(*(msg_word + 1)));

	qdf_print("fw_rx_desc_bytes %x\n",
		  HTT_RX_IND_FW_RX_DESC_BYTES_GET(*
						  (msg_word + 2 +
						   HTT_RX_PPDU_DESC_SIZE32)));

	/* receive MSDU desc for current frame */
	byte_offset =
		HTT_ENDIAN_BYTE_IDX_SWAP(HTT_RX_IND_FW_RX_DESC_BYTE_OFFSET +
					 pdev->rx_ind_msdu_byte_idx);

	qdf_print("msdu byte idx %x msdu desc %x\n", pdev->rx_ind_msdu_byte_idx,
		  HTT_RX_IND_FW_RX_DESC_BYTES_GET(*
						  (msg_word + 2 +
						   HTT_RX_PPDU_DESC_SIZE32)));

	num_mpdu_range = HTT_RX_IND_NUM_MPDU_RANGES_GET(*(msg_word + 1));

	for (mpdu_range = 0; mpdu_range < num_mpdu_range; mpdu_range++) {
		enum htt_rx_status status;
		int num_mpdus;

		htt_rx_ind_mpdu_range_info(pdev, rx_ind_msg, mpdu_range,
					   &status, &num_mpdus);

		qdf_print("mpdu_range %x status %x num_mpdus %x\n",
			  pdev->rx_ind_msdu_byte_idx, status, num_mpdus);
	}
	qdf_print
		("---------------------------------------------------------\n");
}
#endif

#ifdef DEBUG_DMA_DONE
#define MAX_DONE_BIT_CHECK_ITER 5
#endif

int
htt_rx_amsdu_pop_ll(htt_pdev_handle pdev,
		    qdf_nbuf_t rx_ind_msg,
		    qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu)
{
	int msdu_len, msdu_chaining = 0;
	qdf_nbuf_t msdu;
	struct htt_host_rx_desc_base *rx_desc;
	uint8_t *rx_ind_data;
	uint32_t *msg_word, num_msdu_bytes;
	enum htt_t2h_msg_type msg_type;
	uint8_t pad_bytes = 0;

	HTT_ASSERT1(htt_rx_ring_elems(pdev) != 0);
	rx_ind_data = qdf_nbuf_data(rx_ind_msg);
	msg_word = (uint32_t *) rx_ind_data;

	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);

	if (qdf_unlikely(HTT_T2H_MSG_TYPE_RX_FRAG_IND == msg_type)) {
		num_msdu_bytes = HTT_RX_FRAG_IND_FW_RX_DESC_BYTES_GET(
			*(msg_word + HTT_RX_FRAG_IND_HDR_PREFIX_SIZE32));
	} else {
		num_msdu_bytes = HTT_RX_IND_FW_RX_DESC_BYTES_GET(
			*(msg_word
			  + HTT_RX_IND_HDR_PREFIX_SIZE32
			  + HTT_RX_PPDU_DESC_SIZE32));
	}
	msdu = *head_msdu = htt_rx_netbuf_pop(pdev);
	while (1) {
		int last_msdu, msdu_len_invalid, msdu_chained;
		int byte_offset;

		/*
		 * Set the netbuf length to be the entire buffer length
		 * initially, so the unmap will unmap the entire buffer.
		 */
		qdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
#ifdef DEBUG_DMA_DONE
		qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_BIDIRECTIONAL);
#else
		qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_FROM_DEVICE);
#endif

		/* cache consistency has been taken care of by qdf_nbuf_unmap */

		/*
		 * Now read the rx descriptor.
		 * Set the length to the appropriate value.
		 * Check if this MSDU completes a MPDU.
		 */
		rx_desc = htt_rx_desc(msdu);
#if defined(HELIUMPLUS_PADDR64)
		if (HTT_WIFI_IP(pdev, 2, 0))
			pad_bytes = rx_desc->msdu_end.l3_header_padding;
#endif /* defined(HELIUMPLUS_PADDR64) */
		/*
		 * Make the netbuf's data pointer point to the payload rather
		 * than the descriptor.
		 */
		qdf_nbuf_pull_head(msdu,
				   HTT_RX_STD_DESC_RESERVATION + pad_bytes);

		/*
		 * Sanity check - confirm the HW is finished filling in
		 * the rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */

#ifdef DEBUG_DMA_DONE
		if (qdf_unlikely(!((*(uint32_t *) &rx_desc->attention)
				   & RX_ATTENTION_0_MSDU_DONE_MASK))) {

			int dbg_iter = MAX_DONE_BIT_CHECK_ITER;

			qdf_print("malformed frame\n");

			while (dbg_iter &&
			       (!((*(uint32_t *) &rx_desc->attention) &
				  RX_ATTENTION_0_MSDU_DONE_MASK))) {
				qdf_mdelay(1);

				qdf_invalidate_range((void *)rx_desc,
						     (void *)((char *)rx_desc +
						     HTT_RX_STD_DESC_RESERVATION));

				qdf_print("debug iter %d success %d\n",
					  dbg_iter,
					  pdev->rx_ring.dbg_sync_success);

				dbg_iter--;
			}

			if (qdf_unlikely(!((*(uint32_t *) &rx_desc->attention)
					   & RX_ATTENTION_0_MSDU_DONE_MASK))) {

#ifdef HTT_RX_RESTORE
				qdf_print("RX done bit error detected!\n");
				qdf_nbuf_set_next(msdu, NULL);
				*tail_msdu = msdu;
				pdev->rx_ring.rx_reset = 1;
				return msdu_chaining;
#else
				wma_cli_set_command(0, GEN_PARAM_CRASH_INJECT,
						    0, GEN_CMD);
				HTT_ASSERT_ALWAYS(0);
#endif
			}
			pdev->rx_ring.dbg_sync_success++;
			qdf_print("debug iter %d success %d\n", dbg_iter,
				  pdev->rx_ring.dbg_sync_success);
		}
#else
		HTT_ASSERT_ALWAYS((*(uint32_t *) &rx_desc->attention) &
				  RX_ATTENTION_0_MSDU_DONE_MASK);
#endif
		/*
		 * Copy the FW rx descriptor for this MSDU from the rx
		 * indication message into the MSDU's netbuf.
		 * HL uses the same rx indication message definition as LL, and
		 * simply appends new info (fields from the HW rx desc, and the
		 * MSDU payload itself).
		 * So, the offset into the rx indication message only has to
		 * account for the standard offset of the per-MSDU FW rx
		 * desc info within the message, and how many bytes of the
		 * per-MSDU FW rx desc info have already been consumed.
		 * (And the endianness of the host,
		 * since for a big-endian host, the rx ind message contents,
		 * including the per-MSDU rx desc bytes, were byteswapped during
		 * upload.)
		 */
		if (pdev->rx_ind_msdu_byte_idx < num_msdu_bytes) {
			if (qdf_unlikely
				    (HTT_T2H_MSG_TYPE_RX_FRAG_IND == msg_type))
				byte_offset =
					HTT_ENDIAN_BYTE_IDX_SWAP
					(HTT_RX_FRAG_IND_FW_DESC_BYTE_OFFSET);
			else
				byte_offset =
					HTT_ENDIAN_BYTE_IDX_SWAP
					(HTT_RX_IND_FW_RX_DESC_BYTE_OFFSET +
					 pdev->rx_ind_msdu_byte_idx);

			*((uint8_t *) &rx_desc->fw_desc.u.val) =
				rx_ind_data[byte_offset];
			/*
			 * The target is expected to only provide the basic
			 * per-MSDU rx descriptors. Just to be sure,
			 * verify that the target has not attached
			 * extension data (e.g. LRO flow ID).
			 */
			/*
			 * The assertion below currently doesn't work for
			 * RX_FRAG_IND messages, since their format differs
			 * from the RX_IND format (no FW rx PPDU desc in
			 * the current RX_FRAG_IND message).
			 * If the RX_FRAG_IND message format is updated to match
			 * the RX_IND message format, then the following
			 * assertion can be restored.
			 */
			/*
			 * qdf_assert((rx_ind_data[byte_offset] &
			 *             FW_RX_DESC_EXT_M) == 0);
			 */
			pdev->rx_ind_msdu_byte_idx += 1;
			/* or more, if there's ext data */
		} else {
			/*
			 * When an oversized A-MSDU occurs, the FW will lose
			 * some of the MSDU status - in this case, the FW
			 * descriptors provided will be fewer than the actual
			 * MSDUs inside this MPDU.
			 * Mark the FW descriptors so that the MPDU is still
			 * delivered to the upper stack if it has no CRC error.
			 *
			 * FIX THIS - the FW descriptors are actually for MSDUs
			 * in the end of this A-MSDU instead of the beginning.
			 */
			*((uint8_t *) &rx_desc->fw_desc.u.val) = 0;
		}

		/*
		 * TCP/UDP checksum offload support
		 */
		htt_set_checksum_result_ll(pdev, msdu, rx_desc);

		msdu_len_invalid = (*(uint32_t *) &rx_desc->attention) &
				   RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK;
		msdu_chained = (((*(uint32_t *) &rx_desc->frag_info) &
				 RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK) >>
				RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB);
		msdu_len =
			((*((uint32_t *) &rx_desc->msdu_start)) &
			 RX_MSDU_START_0_MSDU_LENGTH_MASK) >>
			RX_MSDU_START_0_MSDU_LENGTH_LSB;

		do {
			if (!msdu_len_invalid && !msdu_chained) {
#if defined(PEREGRINE_1_0_ZERO_LEN_PHY_ERR_WAR)
				if (msdu_len > 0x3000)
					break;
#endif
				qdf_nbuf_trim_tail(msdu,
						   HTT_RX_BUF_SIZE -
						   (RX_STD_DESC_SIZE +
						    msdu_len));
			}
		} while (0);

		while (msdu_chained--) {
			qdf_nbuf_t next = htt_rx_netbuf_pop(pdev);
			qdf_nbuf_set_pktlen(next, HTT_RX_BUF_SIZE);
			msdu_len -= HTT_RX_BUF_SIZE;
			qdf_nbuf_set_next(msdu, next);
			msdu = next;
			msdu_chaining = 1;

			if (msdu_chained == 0) {
				/*
				 * Trim the last one to the correct size -
				 * accounting for inconsistent HW lengths
				 * causing length overflows and underflows.
				 */
				if (((unsigned)msdu_len) >
				    ((unsigned)
				     (HTT_RX_BUF_SIZE - RX_STD_DESC_SIZE))) {
					msdu_len =
						(HTT_RX_BUF_SIZE -
						 RX_STD_DESC_SIZE);
				}

				qdf_nbuf_trim_tail(next,
						   HTT_RX_BUF_SIZE -
						   (RX_STD_DESC_SIZE +
						    msdu_len));
			}
		}

		last_msdu =
			((*(((uint32_t *) &rx_desc->msdu_end) + 4)) &
			 RX_MSDU_END_4_LAST_MSDU_MASK) >>
			RX_MSDU_END_4_LAST_MSDU_LSB;

		if (last_msdu) {
			qdf_nbuf_set_next(msdu, NULL);
			break;
		} else {
			qdf_nbuf_t next = htt_rx_netbuf_pop(pdev);
			qdf_nbuf_set_next(msdu, next);
			msdu = next;
		}
	}
	*tail_msdu = msdu;

	/*
	 * Don't refill the ring yet.
	 * First, the elements popped here are still in use - it is
	 * not safe to overwrite them until the matching call to
	 * mpdu_desc_list_next.
	 * Second, for efficiency it is preferable to refill the rx ring
	 * with 1 PPDU's worth of rx buffers (something like 32 x 3 buffers),
	 * rather than one MPDU's worth of rx buffers (something like 3
	 * buffers).
	 * Consequently, we'll rely on the txrx SW to tell us when it is done
	 * pulling all the PPDU's rx buffers out of the rx ring, and then
	 * refill it just once.
	 */
	return msdu_chaining;
}

int
htt_rx_offload_msdu_pop_ll(htt_pdev_handle pdev,
			   qdf_nbuf_t offload_deliver_msg,
			   int *vdev_id,
			   int *peer_id,
			   int *tid,
			   uint8_t *fw_desc,
			   qdf_nbuf_t *head_buf, qdf_nbuf_t *tail_buf)
{
	qdf_nbuf_t buf;
	uint32_t *msdu_hdr, msdu_len;

	*head_buf = *tail_buf = buf = htt_rx_netbuf_pop(pdev);
	/* Fake read mpdu_desc to keep desc ptr in sync */
	htt_rx_mpdu_desc_list_next(pdev, NULL);
	qdf_nbuf_set_pktlen(buf, HTT_RX_BUF_SIZE);
#ifdef DEBUG_DMA_DONE
	qdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_BIDIRECTIONAL);
#else
	qdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_FROM_DEVICE);
#endif
	msdu_hdr = (uint32_t *) qdf_nbuf_data(buf);

	/* First dword */
	msdu_len = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_LEN_GET(*msdu_hdr);
	*peer_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_PEER_ID_GET(*msdu_hdr);

	/* Second dword */
	msdu_hdr++;
	*vdev_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_VDEV_ID_GET(*msdu_hdr);
	*tid = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_TID_GET(*msdu_hdr);
	*fw_desc = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_DESC_GET(*msdu_hdr);

	qdf_nbuf_pull_head(buf, HTT_RX_OFFLOAD_DELIVER_IND_MSDU_HDR_BYTES);
	qdf_nbuf_set_pktlen(buf, msdu_len);
	return 0;
}

int
htt_rx_offload_paddr_msdu_pop_ll(htt_pdev_handle pdev,
				 uint32_t *msg_word,
				 int msdu_iter,
				 int *vdev_id,
				 int *peer_id,
				 int *tid,
				 uint8_t *fw_desc,
				 qdf_nbuf_t *head_buf, qdf_nbuf_t *tail_buf)
{
	qdf_nbuf_t buf;
	uint32_t *msdu_hdr, msdu_len;
	uint32_t *curr_msdu;
	uint32_t paddr;

	curr_msdu =
		msg_word + (msdu_iter * HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS);
	paddr = HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*curr_msdu);
	*head_buf = *tail_buf = buf = htt_rx_in_order_netbuf_pop(pdev, paddr);

	if (qdf_unlikely(NULL == buf)) {
		qdf_print("%s: netbuf pop failed!\n", __func__);
		return 0;
	}
	qdf_nbuf_set_pktlen(buf, HTT_RX_BUF_SIZE);
#ifdef DEBUG_DMA_DONE
	qdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_BIDIRECTIONAL);
#else
	qdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_FROM_DEVICE);
#endif
	msdu_hdr = (uint32_t *) qdf_nbuf_data(buf);

	/* First dword */
	msdu_len = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_LEN_GET(*msdu_hdr);
	*peer_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_PEER_ID_GET(*msdu_hdr);

	/* Second dword */
	msdu_hdr++;
	*vdev_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_VDEV_ID_GET(*msdu_hdr);
	*tid = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_TID_GET(*msdu_hdr);
	*fw_desc = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_DESC_GET(*msdu_hdr);

	qdf_nbuf_pull_head(buf, HTT_RX_OFFLOAD_DELIVER_IND_MSDU_HDR_BYTES);
	qdf_nbuf_set_pktlen(buf, msdu_len);
	return 0;
}

extern void
dump_pkt(qdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len);

#ifdef RX_HASH_DEBUG
#define HTT_RX_CHECK_MSDU_COUNT(msdu_count) HTT_ASSERT_ALWAYS(msdu_count)
#else
#define HTT_RX_CHECK_MSDU_COUNT(msdu_count)     /* no-op */
#endif

/* Return values: 1 - success, 0 - failure */
int
htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
				qdf_nbuf_t rx_ind_msg,
				qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu)
{
	qdf_nbuf_t msdu, next, prev = NULL;
	uint8_t *rx_ind_data;
	uint32_t *msg_word;
	unsigned int msdu_count = 0;
	uint8_t offload_ind;
	struct htt_host_rx_desc_base *rx_desc;

	HTT_ASSERT1(htt_rx_in_order_ring_elems(pdev) != 0);

	rx_ind_data = qdf_nbuf_data(rx_ind_msg);
	msg_word = (uint32_t *) rx_ind_data;

	offload_ind = HTT_RX_IN_ORD_PADDR_IND_OFFLOAD_GET(*msg_word);

	/* Get the total number of MSDUs */
	msdu_count = HTT_RX_IN_ORD_PADDR_IND_MSDU_CNT_GET(*(msg_word + 1));
	HTT_RX_CHECK_MSDU_COUNT(msdu_count);

	msg_word =
		(uint32_t *) (rx_ind_data + HTT_RX_IN_ORD_PADDR_IND_HDR_BYTES);
	if (offload_ind) {
		ol_rx_offload_paddr_deliver_ind_handler(pdev, msdu_count,
							msg_word);
		*head_msdu = *tail_msdu = NULL;
		return 0;
	}

	(*head_msdu) = msdu = htt_rx_in_order_netbuf_pop(
		pdev,
		HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*msg_word));

	if (qdf_unlikely(NULL == msdu)) {
		qdf_print("%s: netbuf pop failed!\n", __func__);
		*tail_msdu = NULL;
		return 0;
	}

	while (msdu_count > 0) {

		/*
		 * Set the netbuf length to be the entire buffer length
		 * initially, so the unmap will unmap the entire buffer.
		 */
		qdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
#ifdef DEBUG_DMA_DONE
		qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_BIDIRECTIONAL);
#else
		qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_FROM_DEVICE);
#endif

		/* cache consistency has been taken care of by qdf_nbuf_unmap */
		rx_desc = htt_rx_desc(msdu);

		htt_rx_extract_lro_info(msdu, rx_desc);

		/*
		 * Make the netbuf's data pointer point to the payload rather
		 * than the descriptor.
		 */
		qdf_nbuf_pull_head(msdu, HTT_RX_STD_DESC_RESERVATION);
#if HTT_PADDR64
#define NEXT_FIELD_OFFSET_IN32 2
#else /* ! HTT_PADDR64 */
#define NEXT_FIELD_OFFSET_IN32 1
#endif /* HTT_PADDR64 */
		qdf_nbuf_trim_tail(msdu,
				   HTT_RX_BUF_SIZE -
				   (RX_STD_DESC_SIZE +
				    HTT_RX_IN_ORD_PADDR_IND_MSDU_LEN_GET(
					    *(msg_word +
					      NEXT_FIELD_OFFSET_IN32))));
#if defined(HELIUMPLUS_DEBUG)
		dump_pkt(msdu, 0, 64);
#endif
		*((uint8_t *) &rx_desc->fw_desc.u.val) =
			HTT_RX_IN_ORD_PADDR_IND_FW_DESC_GET(
				*(msg_word + NEXT_FIELD_OFFSET_IN32));
#undef NEXT_FIELD_OFFSET_IN32

		msdu_count--;

		if (qdf_unlikely((*((u_int8_t *) &rx_desc->fw_desc.u.val)) &
				 FW_RX_DESC_MIC_ERR_M)) {
			u_int8_t tid =
				HTT_RX_IN_ORD_PADDR_IND_EXT_TID_GET(
					*(u_int32_t *)rx_ind_data);
			u_int16_t peer_id =
				HTT_RX_IN_ORD_PADDR_IND_PEER_ID_GET(
					*(u_int32_t *)rx_ind_data);
			ol_rx_mic_error_handler(pdev->txrx_pdev, tid, peer_id,
						rx_desc, msdu);

			htt_rx_desc_frame_free(pdev, msdu);
			/* if this is the last msdu */
			if (!msdu_count) {
				/* if this is the only msdu */
				if (!prev) {
					*head_msdu = *tail_msdu = NULL;
					return 0;
				} else {
					*tail_msdu = prev;
					qdf_nbuf_set_next(prev, NULL);
					return 1;
				}
			} else {        /* if this is not the last msdu */
				/* get the next msdu */
				msg_word += HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS;
				next = htt_rx_in_order_netbuf_pop(
					pdev,
					HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(
						*msg_word));
				if (qdf_unlikely(NULL == next)) {
					qdf_print("%s: netbuf pop failed!\n",
						  __func__);
					*tail_msdu = NULL;
					return 0;
				}

				/*
				 * if this is not the first msdu, update the
				 * next pointer of the preceding msdu
				 */
				if (prev) {
					qdf_nbuf_set_next(prev, next);
				} else {
					/*
					 * if this is the first msdu, update
					 * the head pointer
					 */
					*head_msdu = next;
				}
				msdu = next;
				continue;
			}
		}

		/* Update checksum result */
		htt_set_checksum_result_ll(pdev, msdu, rx_desc);

		/* check if this is the last msdu */
		if (msdu_count) {
			msg_word += HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS;
			next = htt_rx_in_order_netbuf_pop(
				pdev,
				HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*msg_word));
			if (qdf_unlikely(NULL == next)) {
				qdf_print("%s: netbuf pop failed!\n",
					  __func__);
				*tail_msdu = NULL;
				return 0;
			}
			qdf_nbuf_set_next(msdu, next);
			prev = msdu;
			msdu = next;
		} else {
			*tail_msdu = msdu;
			qdf_nbuf_set_next(msdu, NULL);
		}
	}

	return 1;
}

/*
 * Utility fake function that has the same prototype as qdf_nbuf_clone
 * but just returns the same nbuf.
 */
qdf_nbuf_t htt_rx_cdf_noclone_buf(qdf_nbuf_t buf)
{
	return buf;
}

/* FIXME: This is a HW definition not provided by the HW; where does it go? */
enum {
	HW_RX_DECAP_FORMAT_RAW = 0,
	HW_RX_DECAP_FORMAT_NWIFI,
	HW_RX_DECAP_FORMAT_8023,
	HW_RX_DECAP_FORMAT_ETH2,
};

#define HTT_FCS_LEN (4)

static void
htt_rx_parse_ppdu_start_status(struct htt_host_rx_desc_base *rx_desc,
			       struct ieee80211_rx_status *rs)
{
	struct rx_ppdu_start *ppdu_start = &rx_desc->ppdu_start;

	/* RSSI */
	rs->rs_rssi = ppdu_start->rssi_comb;

	/*
	 * PHY rate
	 * rs_ratephy coding
	 * [b3 - b0]
	 *     0 -> OFDM
	 *     1 -> CCK
	 *     2 -> HT
	 *     3 -> VHT
	 * OFDM / CCK
	 *     [b7  - b4 ] => LSIG rate
	 *     [b23 - b8 ] => service field
	 *                    (b'12 static/dynamic,
	 *                     b'14..b'13 BW for VHT)
	 *     [b31 - b24] => Reserved
	 * HT / VHT
	 *     [b15 - b4 ] => SIG A_2 12 LSBs
	 *     [b31 - b16] => SIG A_1 16 LSBs
	 */
	if (ppdu_start->preamble_type == 0x4) {
		rs->rs_ratephy = ppdu_start->l_sig_rate_select;
		rs->rs_ratephy |= ppdu_start->l_sig_rate << 4;
		rs->rs_ratephy |= ppdu_start->service << 8;
	} else {
		rs->rs_ratephy = (ppdu_start->preamble_type & 0x4) ? 3 : 2;
#ifdef HELIUMPLUS
		rs->rs_ratephy |=
			(ppdu_start->ht_sig_vht_sig_ah_sig_a_2 & 0xFFF) << 4;
		rs->rs_ratephy |=
			(ppdu_start->ht_sig_vht_sig_ah_sig_a_1 & 0xFFFF) << 16;
#else
		rs->rs_ratephy |= (ppdu_start->ht_sig_vht_sig_a_2 & 0xFFF) << 4;
		rs->rs_ratephy |=
			(ppdu_start->ht_sig_vht_sig_a_1 & 0xFFFF) << 16;
#endif
	}
}
1392
1393/* This function is used by montior mode code to restitch an MSDU list
1394 * corresponding to an MPDU back into an MPDU by linking up the skbs.
1395 */
Nirav Shahcbc6d722016-03-01 16:24:53 +05301396qdf_nbuf_t
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001397htt_rx_restitch_mpdu_from_msdus(htt_pdev_handle pdev,
Nirav Shahcbc6d722016-03-01 16:24:53 +05301398 qdf_nbuf_t head_msdu,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001399 struct ieee80211_rx_status *rx_status,
1400 unsigned clone_not_reqd)
1401{
1402
Nirav Shahcbc6d722016-03-01 16:24:53 +05301403 qdf_nbuf_t msdu, mpdu_buf, prev_buf, msdu_orig, head_frag_list_cloned;
1404 qdf_nbuf_t (*clone_nbuf_fn)(qdf_nbuf_t buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001405 unsigned decap_format, wifi_hdr_len, sec_hdr_len, msdu_llc_len,
1406 mpdu_buf_len, decap_hdr_pull_bytes, frag_list_sum_len, dir,
1407 is_amsdu, is_first_frag, amsdu_pad, msdu_len;
1408 struct htt_host_rx_desc_base *rx_desc;
1409 char *hdr_desc;
1410 unsigned char *dest;
1411 struct ieee80211_frame *wh;
1412 struct ieee80211_qoscntl *qos;
1413
1414	/* If this packet does not go up the normal stack path, we don't need to
1415 * waste cycles cloning the packets
1416 */
1417 clone_nbuf_fn =
Nirav Shahcbc6d722016-03-01 16:24:53 +05301418 clone_not_reqd ? htt_rx_cdf_noclone_buf : qdf_nbuf_clone;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001419
1420 /* The nbuf has been pulled just beyond the status and points to the
1421 * payload
1422 */
1423 msdu_orig = head_msdu;
1424 rx_desc = htt_rx_desc(msdu_orig);
1425
1426 /* Fill out the rx_status from the PPDU start and end fields */
1427 if (rx_desc->attention.first_mpdu) {
1428 htt_rx_parse_ppdu_start_status(rx_desc, rx_status);
1429
1430 /* The timestamp is no longer valid - It will be valid only for
1431 * the last MPDU
1432 */
1433 rx_status->rs_tstamp.tsf = ~0;
1434 }
1435
1436 decap_format =
1437 GET_FIELD(&rx_desc->msdu_start, RX_MSDU_START_2_DECAP_FORMAT);
1438
1439 head_frag_list_cloned = NULL;
1440
1441	/* Easy case - the MSDU status indicates that this is a non-decapped
1442	 * packet in RAW mode: just clone the buffers, link them into a frag
1443	 * list, and return.
1444	 */
1445 if (decap_format == HW_RX_DECAP_FORMAT_RAW) {
1446		/* Note that this path might suffer from headroom unavailability,
1447 * but the RX status is usually enough
1448 */
1449 mpdu_buf = clone_nbuf_fn(head_msdu);
1450
1451 prev_buf = mpdu_buf;
1452
1453 frag_list_sum_len = 0;
1454 is_first_frag = 1;
Nirav Shahcbc6d722016-03-01 16:24:53 +05301455 msdu_len = qdf_nbuf_len(mpdu_buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001456
1457 /* Drop the zero-length msdu */
1458 if (!msdu_len)
1459 goto mpdu_stitch_fail;
1460
Nirav Shahcbc6d722016-03-01 16:24:53 +05301461 msdu_orig = qdf_nbuf_next(head_msdu);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001462
1463 while (msdu_orig) {
1464
1465 /* TODO: intra AMSDU padding - do we need it ??? */
1466 msdu = clone_nbuf_fn(msdu_orig);
1467 if (!msdu)
1468 goto mpdu_stitch_fail;
1469
1470 if (is_first_frag) {
1471 is_first_frag = 0;
1472 head_frag_list_cloned = msdu;
1473 }
1474
Nirav Shahcbc6d722016-03-01 16:24:53 +05301475 msdu_len = qdf_nbuf_len(msdu);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001476 /* Drop the zero-length msdu */
1477 if (!msdu_len)
1478 goto mpdu_stitch_fail;
1479
1480 frag_list_sum_len += msdu_len;
1481
1482 /* Maintain the linking of the cloned MSDUS */
Nirav Shahcbc6d722016-03-01 16:24:53 +05301483 qdf_nbuf_set_next_ext(prev_buf, msdu);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001484
1485 /* Move to the next */
1486 prev_buf = msdu;
Nirav Shahcbc6d722016-03-01 16:24:53 +05301487 msdu_orig = qdf_nbuf_next(msdu_orig);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001488 }
1489
1490		/* The last msdu's length must be at least HTT_FCS_LEN */
1491 if (msdu_len < HTT_FCS_LEN)
1492 goto mpdu_stitch_fail;
1493
Nirav Shahcbc6d722016-03-01 16:24:53 +05301494 qdf_nbuf_trim_tail(prev_buf, HTT_FCS_LEN);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001495
1496 /* If there were more fragments to this RAW frame */
1497 if (head_frag_list_cloned) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05301498 qdf_nbuf_append_ext_list(mpdu_buf,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001499 head_frag_list_cloned,
1500 frag_list_sum_len);
1501 }
1502
1503 goto mpdu_stitch_done;
1504 }
1505
1506 /* Decap mode:
1507 * Calculate the amount of header in decapped packet to knock off based
1508 * on the decap type and the corresponding number of raw bytes to copy
1509 * status header
1510 */
1511
1512 hdr_desc = &rx_desc->rx_hdr_status[0];
1513
1514 /* Base size */
1515 wifi_hdr_len = sizeof(struct ieee80211_frame);
1516 wh = (struct ieee80211_frame *)hdr_desc;
1517
1518 dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
1519 if (dir == IEEE80211_FC1_DIR_DSTODS)
1520 wifi_hdr_len += 6;
1521
1522 is_amsdu = 0;
1523 if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
1524 qos = (struct ieee80211_qoscntl *)
1525 (hdr_desc + wifi_hdr_len);
1526 wifi_hdr_len += 2;
1527
1528 is_amsdu = (qos->i_qos[0] & IEEE80211_QOS_AMSDU);
1529 }
1530
1531 /* TODO: Any security headers associated with MPDU */
1532 sec_hdr_len = 0;
1533
1534 /* MSDU related stuff LLC - AMSDU subframe header etc */
1535 msdu_llc_len = is_amsdu ? (14 + 8) : 8;
1536
1537 mpdu_buf_len = wifi_hdr_len + sec_hdr_len + msdu_llc_len;
1538
1539 /* "Decap" header to remove from MSDU buffer */
1540 decap_hdr_pull_bytes = 14;
1541
1542 /* Allocate a new nbuf for holding the 802.11 header retrieved from the
1543 * status of the now decapped first msdu. Leave enough headroom for
1544	 * accommodating any radiotap/prism-like PHY header
1545 */
1546#define HTT_MAX_MONITOR_HEADER (512)
Nirav Shahcbc6d722016-03-01 16:24:53 +05301547 mpdu_buf = qdf_nbuf_alloc(pdev->osdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001548 HTT_MAX_MONITOR_HEADER + mpdu_buf_len,
1549 HTT_MAX_MONITOR_HEADER, 4, false);
1550
1551 if (!mpdu_buf)
1552 goto mpdu_stitch_fail;
1553
1554 /* Copy the MPDU related header and enc headers into the first buffer
1555	 * - Note that there can be a 2 byte pad between the header and enc header
1556 */
1557
1558 prev_buf = mpdu_buf;
Nirav Shahcbc6d722016-03-01 16:24:53 +05301559 dest = qdf_nbuf_put_tail(prev_buf, wifi_hdr_len);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001560 if (!dest)
1561 goto mpdu_stitch_fail;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301562 qdf_mem_copy(dest, hdr_desc, wifi_hdr_len);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001563 hdr_desc += wifi_hdr_len;
1564
1565 /* NOTE - This padding is present only in the RAW header status - not
1566 * when the MSDU data payload is in RAW format.
1567 */
1568 /* Skip the "IV pad" */
1569 if (wifi_hdr_len & 0x3)
1570 hdr_desc += 2;
1571
1572 /* The first LLC len is copied into the MPDU buffer */
1573 frag_list_sum_len = 0;
1574 frag_list_sum_len -= msdu_llc_len;
1575
1576 msdu_orig = head_msdu;
1577 is_first_frag = 1;
1578 amsdu_pad = 0;
1579
1580 while (msdu_orig) {
1581
1582 /* TODO: intra AMSDU padding - do we need it ??? */
1583
1584 msdu = clone_nbuf_fn(msdu_orig);
1585 if (!msdu)
1586 goto mpdu_stitch_fail;
1587
1588 if (is_first_frag) {
1589 is_first_frag = 0;
1590 head_frag_list_cloned = msdu;
1591 } else {
1592
1593 /* Maintain the linking of the cloned MSDUS */
Nirav Shahcbc6d722016-03-01 16:24:53 +05301594 qdf_nbuf_set_next_ext(prev_buf, msdu);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001595
1596 /* Reload the hdr ptr only on non-first MSDUs */
1597 rx_desc = htt_rx_desc(msdu_orig);
1598 hdr_desc = &rx_desc->rx_hdr_status[0];
1599
1600 }
1601
1602 /* Copy this buffers MSDU related status into the prev buffer */
Nirav Shahcbc6d722016-03-01 16:24:53 +05301603 dest = qdf_nbuf_put_tail(prev_buf, msdu_llc_len + amsdu_pad);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001604 dest += amsdu_pad;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301605 qdf_mem_copy(dest, hdr_desc, msdu_llc_len);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001606
1607 /* Push the MSDU buffer beyond the decap header */
Nirav Shahcbc6d722016-03-01 16:24:53 +05301608 qdf_nbuf_pull_head(msdu, decap_hdr_pull_bytes);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001609 frag_list_sum_len +=
Nirav Shahcbc6d722016-03-01 16:24:53 +05301610 msdu_llc_len + qdf_nbuf_len(msdu) + amsdu_pad;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001611
1612		/* Set up the intra-AMSDU pad to be added to the start of the next
1613		 * buffer - A-MSDU subframes are padded out to a 4-byte boundary */
Nirav Shahcbc6d722016-03-01 16:24:53 +05301614 amsdu_pad = (msdu_llc_len + qdf_nbuf_len(msdu)) & 0x3;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001615 amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;
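		/* Worked example: if msdu_llc_len + qdf_nbuf_len(msdu) == 61,
		 * then 61 & 0x3 == 1 and amsdu_pad == 3, so the next
		 * subframe starts on a 4-byte boundary. */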
1616
1617 /* TODO FIXME How do we handle MSDUs that have fraglist - Should
1618 * probably iterate all the frags cloning them along the way and
1619 * and also updating the prev_buf pointer
1620 */
1621
1622 /* Move to the next */
1623 prev_buf = msdu;
Nirav Shahcbc6d722016-03-01 16:24:53 +05301624 msdu_orig = qdf_nbuf_next(msdu_orig);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001625
1626 }
1627
1628	/* TODO: Convert this to suitable qdf routines */
Nirav Shahcbc6d722016-03-01 16:24:53 +05301629 qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list_cloned,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001630 frag_list_sum_len);
1631
1632mpdu_stitch_done:
1633 /* Check if this buffer contains the PPDU end status for TSF */
1634 if (rx_desc->attention.last_mpdu)
1635#ifdef HELIUMPLUS
1636 rx_status->rs_tstamp.tsf =
1637 rx_desc->ppdu_end.rx_pkt_end.phy_timestamp_1_lower_32;
1638#else
1639 rx_status->rs_tstamp.tsf = rx_desc->ppdu_end.tsf_timestamp;
1640#endif
1641	/* All the nbufs have been linked into the ext list;
1642	   now unlink the original nbuf chain */
1643 if (clone_not_reqd) {
1644 msdu = head_msdu;
1645 while (msdu) {
1646 msdu_orig = msdu;
Nirav Shahcbc6d722016-03-01 16:24:53 +05301647 msdu = qdf_nbuf_next(msdu);
1648 qdf_nbuf_set_next(msdu_orig, NULL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001649 }
1650 }
1651
1652 return mpdu_buf;
1653
1654mpdu_stitch_fail:
1655	/* Free the allocated buffers and, in the non-clone case, the original buffers as well */
1656 if (!clone_not_reqd) {
1657 /* Free the head buffer */
1658 if (mpdu_buf)
Nirav Shahcbc6d722016-03-01 16:24:53 +05301659 qdf_nbuf_free(mpdu_buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001660
1661 /* Free the partial list */
1662 while (head_frag_list_cloned) {
1663 msdu = head_frag_list_cloned;
1664 head_frag_list_cloned =
Nirav Shahcbc6d722016-03-01 16:24:53 +05301665 qdf_nbuf_next_ext(head_frag_list_cloned);
1666 qdf_nbuf_free(msdu);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001667 }
1668 } else {
1669 /* Free the alloced head buffer */
1670 if (decap_format != HW_RX_DECAP_FORMAT_RAW)
1671 if (mpdu_buf)
Nirav Shahcbc6d722016-03-01 16:24:53 +05301672 qdf_nbuf_free(mpdu_buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001673
1674 /* Free the orig buffers */
1675 msdu = head_msdu;
1676 while (msdu) {
1677 msdu_orig = msdu;
Nirav Shahcbc6d722016-03-01 16:24:53 +05301678 msdu = qdf_nbuf_next(msdu);
1679 qdf_nbuf_free(msdu_orig);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001680 }
1681 }
1682
1683 return NULL;
1684}
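/*
 * Illustrative sketch (not compiled): a monitor-mode caller restitching a
 * popped MSDU chain back into a single MPDU. Assumes head_msdu came from
 * htt_rx_amsdu_pop and that the caller owns rx_status.
 */
#if 0
{
	struct ieee80211_rx_status rx_status;
	qdf_nbuf_t mpdu;

	mpdu = htt_rx_restitch_mpdu_from_msdus(pdev, head_msdu, &rx_status,
					       1 /* clone not required */);
	if (mpdu) {
		/* deliver mpdu on the monitor interface */
	}
}
#endif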
1685
1686int16_t htt_rx_mpdu_desc_rssi_dbm(htt_pdev_handle pdev, void *mpdu_desc)
1687{
1688 /*
1689 * Currently the RSSI is provided only as a field in the
1690 * HTT_T2H_RX_IND message, rather than in each rx descriptor.
1691 */
1692 return HTT_RSSI_INVALID;
1693}
1694
1695/*
1696 * htt_rx_amsdu_pop -
1697 * global function pointer that is programmed during attach to point
1698 * to either htt_rx_amsdu_pop_ll or htt_rx_amsdu_rx_in_order_pop_ll.
1699 */
1700int (*htt_rx_amsdu_pop)(htt_pdev_handle pdev,
Nirav Shahcbc6d722016-03-01 16:24:53 +05301701 qdf_nbuf_t rx_ind_msg,
1702 qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001703
1704/*
1705 * htt_rx_frag_pop -
1706 * global function pointer that is programmed during attach to point
1707 * to either htt_rx_amsdu_pop_ll or htt_rx_amsdu_rx_in_order_pop_ll.
1708 */
1709int (*htt_rx_frag_pop)(htt_pdev_handle pdev,
Nirav Shahcbc6d722016-03-01 16:24:53 +05301710 qdf_nbuf_t rx_ind_msg,
1711 qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001712
1713int
1714(*htt_rx_offload_msdu_pop)(htt_pdev_handle pdev,
Nirav Shahcbc6d722016-03-01 16:24:53 +05301715 qdf_nbuf_t offload_deliver_msg,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001716 int *vdev_id,
1717 int *peer_id,
1718 int *tid,
1719 uint8_t *fw_desc,
Nirav Shahcbc6d722016-03-01 16:24:53 +05301720 qdf_nbuf_t *head_buf, qdf_nbuf_t *tail_buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001721
1722void * (*htt_rx_mpdu_desc_list_next)(htt_pdev_handle pdev,
Nirav Shahcbc6d722016-03-01 16:24:53 +05301723 qdf_nbuf_t rx_ind_msg);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001724
1725bool (*htt_rx_mpdu_desc_retry)(
1726 htt_pdev_handle pdev, void *mpdu_desc);
1727
1728uint16_t (*htt_rx_mpdu_desc_seq_num)(htt_pdev_handle pdev, void *mpdu_desc);
1729
1730void (*htt_rx_mpdu_desc_pn)(htt_pdev_handle pdev,
1731 void *mpdu_desc,
1732 union htt_rx_pn_t *pn, int pn_len_bits);
1733
1734uint8_t (*htt_rx_mpdu_desc_tid)(
1735 htt_pdev_handle pdev, void *mpdu_desc);
1736
1737bool (*htt_rx_msdu_desc_completes_mpdu)(htt_pdev_handle pdev, void *msdu_desc);
1738
1739bool (*htt_rx_msdu_first_msdu_flag)(htt_pdev_handle pdev, void *msdu_desc);
1740
1741int (*htt_rx_msdu_has_wlan_mcast_flag)(htt_pdev_handle pdev, void *msdu_desc);
1742
1743bool (*htt_rx_msdu_is_wlan_mcast)(htt_pdev_handle pdev, void *msdu_desc);
1744
1745int (*htt_rx_msdu_is_frag)(htt_pdev_handle pdev, void *msdu_desc);
1746
Nirav Shahcbc6d722016-03-01 16:24:53 +05301747void * (*htt_rx_msdu_desc_retrieve)(htt_pdev_handle pdev, qdf_nbuf_t msdu);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001748
1749bool (*htt_rx_mpdu_is_encrypted)(htt_pdev_handle pdev, void *mpdu_desc);
1750
1751bool (*htt_rx_msdu_desc_key_id)(htt_pdev_handle pdev,
1752 void *mpdu_desc, uint8_t *key_id);
1753
Nirav Shahcbc6d722016-03-01 16:24:53 +05301754void *htt_rx_mpdu_desc_list_next_ll(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001755{
1756 int idx = pdev->rx_ring.sw_rd_idx.msdu_desc;
Nirav Shahcbc6d722016-03-01 16:24:53 +05301757 qdf_nbuf_t netbuf = pdev->rx_ring.buf.netbufs_ring[idx];
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001758 pdev->rx_ring.sw_rd_idx.msdu_desc = pdev->rx_ring.sw_rd_idx.msdu_payld;
1759 return (void *)htt_rx_desc(netbuf);
1760}
1761
1762bool (*htt_rx_msdu_chan_info_present)(
1763 htt_pdev_handle pdev,
1764 void *mpdu_desc);
1765
1766bool (*htt_rx_msdu_center_freq)(
1767 htt_pdev_handle pdev,
1768 struct ol_txrx_peer_t *peer,
1769 void *mpdu_desc,
1770 uint16_t *primary_chan_center_freq_mhz,
1771 uint16_t *contig_chan1_center_freq_mhz,
1772 uint16_t *contig_chan2_center_freq_mhz,
1773 uint8_t *phy_mode);
1774
1775void *htt_rx_in_ord_mpdu_desc_list_next_ll(htt_pdev_handle pdev,
Nirav Shahcbc6d722016-03-01 16:24:53 +05301776 qdf_nbuf_t netbuf)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001777{
1778 return (void *)htt_rx_desc(netbuf);
1779}
1780
Nirav Shahcbc6d722016-03-01 16:24:53 +05301781void *htt_rx_msdu_desc_retrieve_ll(htt_pdev_handle pdev, qdf_nbuf_t msdu)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001782{
1783 return htt_rx_desc(msdu);
1784}
1785
1786bool htt_rx_mpdu_is_encrypted_ll(htt_pdev_handle pdev, void *mpdu_desc)
1787{
1788 struct htt_host_rx_desc_base *rx_desc =
1789 (struct htt_host_rx_desc_base *)mpdu_desc;
1790
1791 return (((*((uint32_t *) &rx_desc->mpdu_start)) &
1792 RX_MPDU_START_0_ENCRYPTED_MASK) >>
1793 RX_MPDU_START_0_ENCRYPTED_LSB) ? true : false;
1794}
1795
1796bool htt_rx_msdu_chan_info_present_ll(htt_pdev_handle pdev, void *mpdu_desc)
1797{
1798 return false;
1799}
1800
1801bool htt_rx_msdu_center_freq_ll(htt_pdev_handle pdev,
1802 struct ol_txrx_peer_t *peer,
1803 void *mpdu_desc,
1804 uint16_t *primary_chan_center_freq_mhz,
1805 uint16_t *contig_chan1_center_freq_mhz,
1806 uint16_t *contig_chan2_center_freq_mhz,
1807 uint8_t *phy_mode)
1808{
1809 if (primary_chan_center_freq_mhz)
1810 *primary_chan_center_freq_mhz = 0;
1811 if (contig_chan1_center_freq_mhz)
1812 *contig_chan1_center_freq_mhz = 0;
1813 if (contig_chan2_center_freq_mhz)
1814 *contig_chan2_center_freq_mhz = 0;
1815 if (phy_mode)
1816 *phy_mode = 0;
1817 return false;
1818}
1819
1820bool
1821htt_rx_msdu_desc_key_id_ll(htt_pdev_handle pdev, void *mpdu_desc,
1822 uint8_t *key_id)
1823{
1824 struct htt_host_rx_desc_base *rx_desc = (struct htt_host_rx_desc_base *)
1825 mpdu_desc;
1826
1827 if (!htt_rx_msdu_first_msdu_flag_ll(pdev, mpdu_desc))
1828 return false;
1829
1830 *key_id = ((*(((uint32_t *) &rx_desc->msdu_end) + 1)) &
1831 (RX_MSDU_END_1_KEY_ID_OCT_MASK >>
1832 RX_MSDU_END_1_KEY_ID_OCT_LSB));
1833
1834 return true;
1835}
1836
Nirav Shahcbc6d722016-03-01 16:24:53 +05301837void htt_rx_desc_frame_free(htt_pdev_handle htt_pdev, qdf_nbuf_t msdu)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001838{
Nirav Shahcbc6d722016-03-01 16:24:53 +05301839 qdf_nbuf_free(msdu);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001840}
1841
Nirav Shahcbc6d722016-03-01 16:24:53 +05301842void htt_rx_msdu_desc_free(htt_pdev_handle htt_pdev, qdf_nbuf_t msdu)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001843{
1844 /*
1845 * The rx descriptor is in the same buffer as the rx MSDU payload,
1846 * and does not need to be freed separately.
1847 */
1848}
1849
1850void htt_rx_msdu_buff_replenish(htt_pdev_handle pdev)
1851{
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05301852 if (qdf_atomic_dec_and_test(&pdev->rx_ring.refill_ref_cnt)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001853 int num_to_fill;
1854 num_to_fill = pdev->rx_ring.fill_level -
1855 pdev->rx_ring.fill_cnt;
1856
1857 htt_rx_ring_fill_n(pdev,
1858 num_to_fill /* okay if <= 0 */);
1859 }
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05301860 qdf_atomic_inc(&pdev->rx_ring.refill_ref_cnt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001861}
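/*
 * Note on the refill guard above: refill_ref_cnt is initialized to 1 (see
 * htt_rx_attach below), so qdf_atomic_dec_and_test() reaches zero for only
 * one context at a time. A second context entering mid-refill decrements to
 * a negative value, skips the refill, and the paired increments restore the
 * counter to 1, leaving at most one refiller.
 */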
1862
1863#define AR600P_ASSEMBLE_HW_RATECODE(_rate, _nss, _pream) \
1864 (((_pream) << 6) | ((_nss) << 4) | (_rate))
1865
1866enum AR600P_HW_RATECODE_PREAM_TYPE {
1867 AR600P_HW_RATECODE_PREAM_OFDM,
1868 AR600P_HW_RATECODE_PREAM_CCK,
1869 AR600P_HW_RATECODE_PREAM_HT,
1870 AR600P_HW_RATECODE_PREAM_VHT,
1871};
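/*
 * Illustrative example: assuming the nss field is 0-based, HT MCS 7 on a
 * single spatial stream would be assembled as
 *
 *   AR600P_ASSEMBLE_HW_RATECODE(7, 0, AR600P_HW_RATECODE_PREAM_HT)
 *     == (2 << 6) | (0 << 4) | 7 == 0x87
 */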
1872
1873/*--- RX In Order Hash Code --------------------------------------------------*/
1874
1875/* Initializes the circular linked list */
1876static inline void htt_list_init(struct htt_list_node *head)
1877{
1878 head->prev = head;
1879 head->next = head;
1880}
1881
1882/* Adds entry to the end of the linked list */
1883static inline void htt_list_add_tail(struct htt_list_node *head,
1884 struct htt_list_node *node)
1885{
1886 head->prev->next = node;
1887 node->prev = head->prev;
1888 node->next = head;
1889 head->prev = node;
1890}
1891
1892/* Removes the entry corresponding to the input node from the linked list */
1893static inline void htt_list_remove(struct htt_list_node *node)
1894{
1895 node->prev->next = node->next;
1896 node->next->prev = node->prev;
1897}
1898
1899/* Helper macro to iterate through the linked list */
1900#define HTT_LIST_ITER_FWD(iter, head) for (iter = (head)->next; \
1901 (iter) != (head); \
1902					   (iter) = (iter)->next)
1903
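/*
 * Illustrative sketch (not compiled): typical use of the list helpers
 * above, with a hypothetical element carrying an embedded htt_list_node
 * (as htt_rx_hash_entry does below).
 */
#if 0
{
	struct htt_list_node head;
	struct htt_list_node *iter;

	htt_list_init(&head);
	htt_list_add_tail(&head, &elem->listnode);
	HTT_LIST_ITER_FWD(iter, &head) {
		/* recover the element from iter by subtracting the node's
		 * offset, as done with rx_ring.listnode_offset below */
	}
	htt_list_remove(&elem->listnode);
}
#endif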
1904#ifdef RX_HASH_DEBUG
1905/* Hash cookie related macros */
1906#define HTT_RX_HASH_COOKIE 0xDEED
1907
1908#define HTT_RX_HASH_COOKIE_SET(hash_element) \
1909 ((hash_element)->cookie = HTT_RX_HASH_COOKIE)
1910
1911#define HTT_RX_HASH_COOKIE_CHECK(hash_element) \
1912 HTT_ASSERT_ALWAYS((hash_element)->cookie == HTT_RX_HASH_COOKIE)
1913
1914/* Hash count related macros */
1915#define HTT_RX_HASH_COUNT_INCR(hash_bucket) \
1916 ((hash_bucket).count++)
1917
1918#define HTT_RX_HASH_COUNT_DECR(hash_bucket) \
1919 ((hash_bucket).count--)
1920
1921#define HTT_RX_HASH_COUNT_RESET(hash_bucket) ((hash_bucket).count = 0)
1922
1923#define HTT_RX_HASH_COUNT_PRINT(hash_bucket) \
Anurag Chouhan6d760662016-02-20 16:05:43 +05301924 RX_HASH_LOG(qdf_print(" count %d\n", (hash_bucket).count))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001925#else /* RX_HASH_DEBUG */
1926/* Hash cookie related macros */
1927#define HTT_RX_HASH_COOKIE_SET(hash_element) /* no-op */
1928#define HTT_RX_HASH_COOKIE_CHECK(hash_element) /* no-op */
1929/* Hash count related macros */
1930#define HTT_RX_HASH_COUNT_INCR(hash_bucket) /* no-op */
1931#define HTT_RX_HASH_COUNT_DECR(hash_bucket) /* no-op */
1932#define HTT_RX_HASH_COUNT_PRINT(hash_bucket) /* no-op */
1933#define HTT_RX_HASH_COUNT_RESET(hash_bucket) /* no-op */
1934#endif /* RX_HASH_DEBUG */
1935
1936/* Inserts the given "physical address - network buffer" pair into the
1937 hash table for the given pdev. This function will do the following:
1938 1. Determine which bucket to insert the pair into
1939 2. First try to allocate the hash entry for this pair from the pre-allocated
1940 entries list
1941 3. If there are no more entries in the pre-allocated entries list, allocate
1942 the hash entry from the hash memory pool
1943 Note: this function is not thread-safe
1944 Returns 0 - success, 1 - failure */
1945int
1946htt_rx_hash_list_insert(struct htt_pdev_t *pdev, uint32_t paddr,
Nirav Shahcbc6d722016-03-01 16:24:53 +05301947 qdf_nbuf_t netbuf)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001948{
1949 int i;
1950 struct htt_rx_hash_entry *hash_element = NULL;
1951
1952 i = RX_HASH_FUNCTION(paddr);
1953
1954 /* Check if there are any entries in the pre-allocated free list */
1955 if (pdev->rx_ring.hash_table[i].freepool.next !=
1956 &pdev->rx_ring.hash_table[i].freepool) {
1957
1958 hash_element =
1959 (struct htt_rx_hash_entry *)(
1960 (char *)
1961 pdev->rx_ring.hash_table[i].freepool.next -
1962 pdev->rx_ring.listnode_offset);
Anurag Chouhanc5548422016-02-24 18:33:27 +05301963 if (qdf_unlikely(NULL == hash_element)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001964 HTT_ASSERT_ALWAYS(0);
1965 return 1;
1966 }
1967
1968 htt_list_remove(pdev->rx_ring.hash_table[i].freepool.next);
1969 } else {
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301970 hash_element = qdf_mem_malloc(sizeof(struct htt_rx_hash_entry));
Anurag Chouhanc5548422016-02-24 18:33:27 +05301971 if (qdf_unlikely(NULL == hash_element)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001972 HTT_ASSERT_ALWAYS(0);
1973 return 1;
1974 }
1975 hash_element->fromlist = 0;
1976 }
1977
1978 hash_element->netbuf = netbuf;
1979 hash_element->paddr = paddr;
1980 HTT_RX_HASH_COOKIE_SET(hash_element);
1981
1982 htt_list_add_tail(&pdev->rx_ring.hash_table[i].listhead,
1983 &hash_element->listnode);
1984
Anurag Chouhan6d760662016-02-20 16:05:43 +05301985 RX_HASH_LOG(qdf_print("rx hash: %s: paddr 0x%x netbuf %p bucket %d\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001986 __func__, paddr, netbuf, (int)i));
1987
1988 HTT_RX_HASH_COUNT_INCR(pdev->rx_ring.hash_table[i]);
1989 HTT_RX_HASH_COUNT_PRINT(pdev->rx_ring.hash_table[i]);
1990
1991 return 0;
1992}
1993
1994/* Given a physical address this function will find the corresponding network
1995 buffer from the hash table.
1996 Note: this function is not thread-safe */
Nirav Shahcbc6d722016-03-01 16:24:53 +05301997qdf_nbuf_t htt_rx_hash_list_lookup(struct htt_pdev_t *pdev, uint32_t paddr)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001998{
1999 uint32_t i;
2000 struct htt_list_node *list_iter = NULL;
Nirav Shahcbc6d722016-03-01 16:24:53 +05302001 qdf_nbuf_t netbuf = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002002 struct htt_rx_hash_entry *hash_entry;
2003
2004 i = RX_HASH_FUNCTION(paddr);
2005
2006 HTT_LIST_ITER_FWD(list_iter, &pdev->rx_ring.hash_table[i].listhead) {
2007 hash_entry = (struct htt_rx_hash_entry *)
2008 ((char *)list_iter -
2009 pdev->rx_ring.listnode_offset);
2010
2011 HTT_RX_HASH_COOKIE_CHECK(hash_entry);
2012
2013 if (hash_entry->paddr == paddr) {
2014 /* Found the entry corresponding to paddr */
2015 netbuf = hash_entry->netbuf;
2016 htt_list_remove(&hash_entry->listnode);
2017 HTT_RX_HASH_COUNT_DECR(pdev->rx_ring.hash_table[i]);
2018			/* if the rx entry is from the pre-allocated list,
2019			   return it to the free pool */
2020 if (hash_entry->fromlist)
2021 htt_list_add_tail(&pdev->rx_ring.hash_table[i].
2022 freepool,
2023 &hash_entry->listnode);
2024 else
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302025 qdf_mem_free(hash_entry);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002026
Govind Singhd79e1342015-11-03 16:20:02 +05302027 htt_rx_dbg_rxbuf_reset(pdev, netbuf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002028 break;
2029 }
2030 }
2031
Anurag Chouhan6d760662016-02-20 16:05:43 +05302032 RX_HASH_LOG(qdf_print("rx hash: %s: paddr 0x%x, netbuf %p, bucket %d\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002033 __func__, paddr, netbuf, (int)i));
2034 HTT_RX_HASH_COUNT_PRINT(pdev->rx_ring.hash_table[i]);
2035
2036 if (netbuf == NULL) {
Anurag Chouhan6d760662016-02-20 16:05:43 +05302037 qdf_print("rx hash: %s: no entry found for 0x%x!!!\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002038 __func__, paddr);
2039 HTT_ASSERT_ALWAYS(0);
2040 }
2041
2042 return netbuf;
2043}
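/*
 * Illustrative sketch (not compiled): how the insert/lookup pair above is
 * used by the in-order rx path. The buffer's DMA address is the key: it is
 * inserted when the netbuf is posted to the rx ring, and looked up when the
 * target reports that address in an in-order indication.
 */
#if 0
{
	uint32_t paddr = /* DMA address the netbuf was mapped to */ 0;

	if (htt_rx_hash_list_insert(pdev, paddr, netbuf)) {
		/* insert failed - do not post this buffer to the ring */
	}

	/* ... later, when paddr arrives in an in-order indication ... */
	netbuf = htt_rx_hash_list_lookup(pdev, paddr);
}
#endif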
2044
2045/* Initialization function of the rx buffer hash table. This function
2046   allocates a hash table of RX_NUM_HASH_BUCKETS buckets and initializes
2047   each bucket's list head and pre-allocated entry pool */
2048int htt_rx_hash_init(struct htt_pdev_t *pdev)
2049{
2050 int i, j;
2051
Leo Chang376398b2015-10-23 14:19:02 -07002052 HTT_ASSERT2(CDF_IS_PWR2(RX_NUM_HASH_BUCKETS));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002053
2054 pdev->rx_ring.hash_table =
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302055 qdf_mem_malloc(RX_NUM_HASH_BUCKETS *
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002056 sizeof(struct htt_rx_hash_bucket));
2057
2058 if (NULL == pdev->rx_ring.hash_table) {
Anurag Chouhan6d760662016-02-20 16:05:43 +05302059 qdf_print("rx hash table allocation failed!\n");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002060 return 1;
2061 }
2062
2063 for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) {
2064 HTT_RX_HASH_COUNT_RESET(pdev->rx_ring.hash_table[i]);
2065
2066 /* initialize the hash table buckets */
2067 htt_list_init(&pdev->rx_ring.hash_table[i].listhead);
2068
2069 /* initialize the hash table free pool per bucket */
2070 htt_list_init(&pdev->rx_ring.hash_table[i].freepool);
2071
2072 /* pre-allocate a pool of entries for this bucket */
2073 pdev->rx_ring.hash_table[i].entries =
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302074 qdf_mem_malloc(RX_ENTRIES_SIZE *
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002075 sizeof(struct htt_rx_hash_entry));
2076
2077 if (NULL == pdev->rx_ring.hash_table[i].entries) {
Anurag Chouhan6d760662016-02-20 16:05:43 +05302078 qdf_print("rx hash bucket %d entries alloc failed\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002079 (int)i);
2080 while (i) {
2081 i--;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302082 qdf_mem_free(pdev->rx_ring.hash_table[i].
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002083 entries);
2084 }
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302085 qdf_mem_free(pdev->rx_ring.hash_table);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002086 pdev->rx_ring.hash_table = NULL;
2087 return 1;
2088 }
2089
2090 /* initialize the free list with pre-allocated entries */
2091 for (j = 0; j < RX_ENTRIES_SIZE; j++) {
2092 pdev->rx_ring.hash_table[i].entries[j].fromlist = 1;
2093 htt_list_add_tail(&pdev->rx_ring.hash_table[i].freepool,
2094 &pdev->rx_ring.hash_table[i].
2095 entries[j].listnode);
2096 }
2097 }
2098
2099 pdev->rx_ring.listnode_offset =
Anurag Chouhan6d760662016-02-20 16:05:43 +05302100 qdf_offsetof(struct htt_rx_hash_entry, listnode);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002101
2102 return 0;
2103}
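/*
 * Illustrative sketch (not compiled): the CDF_IS_PWR2 assertion above
 * suggests RX_HASH_FUNCTION reduces a physical address to a bucket index
 * by masking. The real macro is defined elsewhere; this hypothetical
 * version only shows the shape such a function could take:
 */
#if 0
#define EXAMPLE_RX_HASH_FUNCTION(paddr) \
	(((paddr) >> 4) & (RX_NUM_HASH_BUCKETS - 1))
#endif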
2104
2105void htt_rx_hash_dump_table(struct htt_pdev_t *pdev)
2106{
2107 uint32_t i;
2108 struct htt_rx_hash_entry *hash_entry;
2109 struct htt_list_node *list_iter = NULL;
2110
2111 for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) {
2112 HTT_LIST_ITER_FWD(list_iter,
2113 &pdev->rx_ring.hash_table[i].listhead) {
2114 hash_entry =
2115 (struct htt_rx_hash_entry *)((char *)list_iter -
2116 pdev->rx_ring.
2117 listnode_offset);
Anurag Chouhan6d760662016-02-20 16:05:43 +05302118 qdf_print("hash_table[%d]: netbuf %p paddr 0x%x\n", i,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002119 hash_entry->netbuf, hash_entry->paddr);
2120 }
2121 }
2122}
2123
2124/*--- RX In Order Hash Code --------------------------------------------------*/
2125
2126/* this function is kept at the end of the file
2127 * to avoid pre-declarations of the ll/hl variants
2128 */
2129int htt_rx_attach(struct htt_pdev_t *pdev)
2130{
Anurag Chouhan6d760662016-02-20 16:05:43 +05302131 qdf_dma_addr_t paddr;
2132 uint32_t ring_elem_size = sizeof(qdf_dma_addr_t);
Houston Hoffman43d47fa2016-02-24 16:34:30 -08002133
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002134 pdev->rx_ring.size = htt_rx_ring_size(pdev);
Leo Chang376398b2015-10-23 14:19:02 -07002135 HTT_ASSERT2(CDF_IS_PWR2(pdev->rx_ring.size));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002136 pdev->rx_ring.size_mask = pdev->rx_ring.size - 1;
2137
2138 /*
2139 * Set the initial value for the level to which the rx ring
2140 * should be filled, based on the max throughput and the worst
2141 * likely latency for the host to fill the rx ring.
2142 * In theory, this fill level can be dynamically adjusted from
2143 * the initial value set here to reflect the actual host latency
2144 * rather than a conservative assumption.
2145 */
2146 pdev->rx_ring.fill_level = htt_rx_ring_fill_level(pdev);
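	/*
	 * Worked example, assuming the default tuning macros: at ~1 Gbps of
	 * HTT_RX_AVG_FRM_BYTES (1000 B) frames, about 125 frames arrive per
	 * ms, so a worst likely host latency of 10 ms implies a fill level
	 * on the order of 1250 buffers, clipped to the ring size.
	 */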
2147
2148 if (pdev->cfg.is_full_reorder_offload) {
2149 if (htt_rx_hash_init(pdev))
2150 goto fail1;
2151
2152 /* allocate the target index */
2153 pdev->rx_ring.target_idx.vaddr =
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302154 qdf_mem_alloc_consistent(pdev->osdev, pdev->osdev->dev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002155 sizeof(uint32_t),
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302156 &paddr);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002157
2158 if (!pdev->rx_ring.target_idx.vaddr)
2159 goto fail1;
2160
2161 pdev->rx_ring.target_idx.paddr = paddr;
2162 *pdev->rx_ring.target_idx.vaddr = 0;
2163 } else {
2164 pdev->rx_ring.buf.netbufs_ring =
Nirav Shahcbc6d722016-03-01 16:24:53 +05302165 qdf_mem_malloc(pdev->rx_ring.size * sizeof(qdf_nbuf_t));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002166 if (!pdev->rx_ring.buf.netbufs_ring)
2167 goto fail1;
2168
2169 pdev->rx_ring.sw_rd_idx.msdu_payld = 0;
2170 pdev->rx_ring.sw_rd_idx.msdu_desc = 0;
2171 }
2172
2173 pdev->rx_ring.buf.paddrs_ring =
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302174 qdf_mem_alloc_consistent(
2175 pdev->osdev, pdev->osdev->dev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002176 pdev->rx_ring.size * ring_elem_size,
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302177 &paddr);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002178 if (!pdev->rx_ring.buf.paddrs_ring)
2179 goto fail2;
2180
2181 pdev->rx_ring.base_paddr = paddr;
2182 pdev->rx_ring.alloc_idx.vaddr =
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302183 qdf_mem_alloc_consistent(
2184 pdev->osdev, pdev->osdev->dev,
2185 sizeof(uint32_t), &paddr);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002186
2187 if (!pdev->rx_ring.alloc_idx.vaddr)
2188 goto fail3;
2189
2190 pdev->rx_ring.alloc_idx.paddr = paddr;
2191 *pdev->rx_ring.alloc_idx.vaddr = 0;
2192
2193 /*
2194 * Initialize the Rx refill reference counter to be one so that
2195 * only one thread is allowed to refill the Rx ring.
2196 */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302197 qdf_atomic_init(&pdev->rx_ring.refill_ref_cnt);
2198 qdf_atomic_inc(&pdev->rx_ring.refill_ref_cnt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002199
2200 /* Initialize the Rx refill retry timer */
Anurag Chouhan754fbd82016-02-19 17:00:08 +05302201 qdf_timer_init(pdev->osdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002202 &pdev->rx_ring.refill_retry_timer,
2203 htt_rx_ring_refill_retry, (void *)pdev,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302204 QDF_TIMER_TYPE_SW);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002205
2206 pdev->rx_ring.fill_cnt = 0;
2207#ifdef DEBUG_DMA_DONE
2208 pdev->rx_ring.dbg_ring_idx = 0;
2209 pdev->rx_ring.dbg_refill_cnt = 0;
2210 pdev->rx_ring.dbg_sync_success = 0;
2211#endif
2212#ifdef HTT_RX_RESTORE
2213 pdev->rx_ring.rx_reset = 0;
2214 pdev->rx_ring.htt_rx_restore = 0;
2215#endif
Govind Singhd79e1342015-11-03 16:20:02 +05302216 htt_rx_dbg_rxbuf_init(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002217 htt_rx_ring_fill_n(pdev, pdev->rx_ring.fill_level);
2218
2219 if (pdev->cfg.is_full_reorder_offload) {
Anurag Chouhan6d760662016-02-20 16:05:43 +05302220 qdf_print("HTT: full reorder offload enabled\n");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002221 htt_rx_amsdu_pop = htt_rx_amsdu_rx_in_order_pop_ll;
2222 htt_rx_frag_pop = htt_rx_amsdu_rx_in_order_pop_ll;
2223 htt_rx_mpdu_desc_list_next =
2224 htt_rx_in_ord_mpdu_desc_list_next_ll;
2225 } else {
2226 htt_rx_amsdu_pop = htt_rx_amsdu_pop_ll;
2227 htt_rx_frag_pop = htt_rx_amsdu_pop_ll;
2228 htt_rx_mpdu_desc_list_next = htt_rx_mpdu_desc_list_next_ll;
2229 }
2230
2231 htt_rx_offload_msdu_pop = htt_rx_offload_msdu_pop_ll;
Houston Hoffman43d47fa2016-02-24 16:34:30 -08002232 htt_rx_mpdu_desc_retry = htt_rx_mpdu_desc_retry_ll;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002233 htt_rx_mpdu_desc_seq_num = htt_rx_mpdu_desc_seq_num_ll;
2234 htt_rx_mpdu_desc_pn = htt_rx_mpdu_desc_pn_ll;
Houston Hoffman43d47fa2016-02-24 16:34:30 -08002235 htt_rx_mpdu_desc_tid = htt_rx_mpdu_desc_tid_ll;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002236 htt_rx_msdu_desc_completes_mpdu = htt_rx_msdu_desc_completes_mpdu_ll;
2237 htt_rx_msdu_first_msdu_flag = htt_rx_msdu_first_msdu_flag_ll;
2238 htt_rx_msdu_has_wlan_mcast_flag = htt_rx_msdu_has_wlan_mcast_flag_ll;
2239 htt_rx_msdu_is_wlan_mcast = htt_rx_msdu_is_wlan_mcast_ll;
2240 htt_rx_msdu_is_frag = htt_rx_msdu_is_frag_ll;
2241 htt_rx_msdu_desc_retrieve = htt_rx_msdu_desc_retrieve_ll;
2242 htt_rx_mpdu_is_encrypted = htt_rx_mpdu_is_encrypted_ll;
2243 htt_rx_msdu_desc_key_id = htt_rx_msdu_desc_key_id_ll;
2244 htt_rx_msdu_chan_info_present = htt_rx_msdu_chan_info_present_ll;
2245 htt_rx_msdu_center_freq = htt_rx_msdu_center_freq_ll;
2246
2247 return 0; /* success */
2248
2249fail3:
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302250 qdf_mem_free_consistent(pdev->osdev,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302251 pdev->rx_ring.size * sizeof(qdf_dma_addr_t),
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002252 pdev->rx_ring.buf.paddrs_ring,
2253 pdev->rx_ring.base_paddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302254 qdf_get_dma_mem_context((&pdev->rx_ring.buf),
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002255 memctx));
2256
2257fail2:
2258 if (pdev->cfg.is_full_reorder_offload) {
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302259 qdf_mem_free_consistent(pdev->osdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002260 sizeof(uint32_t),
2261 pdev->rx_ring.target_idx.vaddr,
2262 pdev->rx_ring.target_idx.paddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302263 qdf_get_dma_mem_context((&pdev->
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002264 rx_ring.
2265 target_idx),
2266 memctx));
2267 htt_rx_hash_deinit(pdev);
2268 } else {
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302269 qdf_mem_free(pdev->rx_ring.buf.netbufs_ring);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002270 }
2271
2272fail1:
2273 return 1; /* failure */
2274}
2275
2276#ifdef IPA_OFFLOAD
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08002277#ifdef QCA_WIFI_3_0
2278/**
2279 * htt_rx_ipa_uc_alloc_wdi2_rsc() - Allocate WDI2.0 resources
2280 * @pdev: htt context
2281 * @rx_ind_ring_elements: rx ring elements
2282 *
2283 * Return: 0 success
2284 */
2285int htt_rx_ipa_uc_alloc_wdi2_rsc(struct htt_pdev_t *pdev,
2286 unsigned int rx_ind_ring_elements)
2287{
2288 /* Allocate RX2 indication ring */
2289	/* RX2 IND ring element
2290	 * 4 bytes: pointer
2291	 * 2 bytes: VDEV ID
2292	 * 2 bytes: length */
2293 pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr =
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302294 qdf_mem_alloc_consistent(
2295 pdev->osdev, pdev->osdev->dev,
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08002296 rx_ind_ring_elements *
2297 sizeof(struct ipa_uc_rx_ring_elem_t),
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302298 &pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr);
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08002299 if (!pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr) {
Anurag Chouhan6d760662016-02-20 16:05:43 +05302300 qdf_print("%s: RX IND RING alloc fail", __func__);
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08002301 return -ENOBUFS;
2302 }
2303
2304 /* RX indication ring size, by bytes */
2305 pdev->ipa_uc_rx_rsc.rx2_ind_ring_size =
2306 rx_ind_ring_elements * sizeof(struct ipa_uc_rx_ring_elem_t);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302307 qdf_mem_zero(pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08002308 pdev->ipa_uc_rx_rsc.rx2_ind_ring_size);
2309
2310 /* Allocate RX process done index */
2311 pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr =
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302312 qdf_mem_alloc_consistent(
2313 pdev->osdev, pdev->osdev->dev, 4,
2314 &pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr);
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08002315 if (!pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr) {
Anurag Chouhan6d760662016-02-20 16:05:43 +05302316 qdf_print("%s: RX PROC DONE IND alloc fail", __func__);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302317 qdf_mem_free_consistent(
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08002318 pdev->osdev,
2319 pdev->ipa_uc_rx_rsc.rx2_ind_ring_size,
2320 pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
2321 pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302322 qdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08002323 rx2_ind_ring_base),
2324 memctx));
2325 return -ENOBUFS;
2326 }
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302327 qdf_mem_zero(pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr, 4);
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08002328 return 0;
2329}
2330#else
2331int htt_rx_ipa_uc_alloc_wdi2_rsc(struct htt_pdev_t *pdev,
2332 unsigned int rx_ind_ring_elements)
2333{
2334 return 0;
2335}
2336#endif
2337
Leo Chang8e073612015-11-13 10:55:34 -08002338/**
2339 * htt_rx_ipa_uc_attach() - attach htt ipa uc rx resource
2340 * @pdev: htt context
2341 * @rx_ind_ring_size: rx ring size
2342 *
2343 * Return: 0 success
2344 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002345int htt_rx_ipa_uc_attach(struct htt_pdev_t *pdev,
2346 unsigned int rx_ind_ring_elements)
2347{
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08002348 int ret = 0;
2349 /* Allocate RX indication ring */
2350	/* RX IND ring element
2351	 * 4 bytes: pointer
2352	 * 2 bytes: VDEV ID
2353	 * 2 bytes: length */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002354 pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr =
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302355 qdf_mem_alloc_consistent(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002356 pdev->osdev,
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302357 pdev->osdev->dev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002358 rx_ind_ring_elements *
2359 sizeof(struct ipa_uc_rx_ring_elem_t),
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302360 &pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002361 if (!pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr) {
Anurag Chouhan6d760662016-02-20 16:05:43 +05302362 qdf_print("%s: RX IND RING alloc fail", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002363 return -ENOBUFS;
2364 }
2365
2366 /* RX indication ring size, by bytes */
2367 pdev->ipa_uc_rx_rsc.rx_ind_ring_size =
2368 rx_ind_ring_elements * sizeof(struct ipa_uc_rx_ring_elem_t);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302369 qdf_mem_zero(pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002370 pdev->ipa_uc_rx_rsc.rx_ind_ring_size);
2371
2372 /* Allocate RX process done index */
2373 pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr =
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302374 qdf_mem_alloc_consistent(
2375 pdev->osdev, pdev->osdev->dev, 4,
2376 &pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002377 if (!pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr) {
Anurag Chouhan6d760662016-02-20 16:05:43 +05302378 qdf_print("%s: RX PROC DONE IND alloc fail", __func__);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302379 qdf_mem_free_consistent(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002380 pdev->osdev,
2381 pdev->ipa_uc_rx_rsc.rx_ind_ring_size,
2382 pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr,
2383 pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302384 qdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002385 rx_ind_ring_base),
2386 memctx));
2387 return -ENOBUFS;
2388 }
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302389 qdf_mem_zero(pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr, 4);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002390
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08002391 ret = htt_rx_ipa_uc_alloc_wdi2_rsc(pdev, rx_ind_ring_elements);
2392 return ret;
2393}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002394
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08002395#ifdef QCA_WIFI_3_0
2396/**
2397 * htt_rx_ipa_uc_free_wdi2_rsc() - Free WDI2.0 resources
2398 * @pdev: htt context
2399 *
2400 * Return: None
2401 */
2402void htt_rx_ipa_uc_free_wdi2_rsc(struct htt_pdev_t *pdev)
2403{
2404 if (pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr) {
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302405 qdf_mem_free_consistent(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002406 pdev->osdev,
2407 pdev->ipa_uc_rx_rsc.rx2_ind_ring_size,
2408 pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
2409 pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302410 qdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002411 rx2_ind_ring_base),
2412 memctx));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002413 }
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08002414
2415 if (pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr) {
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302416 qdf_mem_free_consistent(
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08002417 pdev->osdev,
2418 4,
2419 pdev->ipa_uc_rx_rsc.
2420			rx2_ipa_prc_done_idx.vaddr,
2421 pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302422 qdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08002423						 rx2_ipa_prc_done_idx),
2424 memctx));
2425 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002426}
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08002427#else
2428void htt_rx_ipa_uc_free_wdi2_rsc(struct htt_pdev_t *pdev)
2429{
2430 return;
2431}
2432#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002433
2434int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev)
2435{
2436 if (pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr) {
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302437 qdf_mem_free_consistent(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002438 pdev->osdev,
2439 pdev->ipa_uc_rx_rsc.rx_ind_ring_size,
2440 pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr,
2441 pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302442 qdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002443 rx_ind_ring_base),
2444 memctx));
2445 }
2446
2447 if (pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr) {
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302448 qdf_mem_free_consistent(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002449 pdev->osdev,
2450 4,
2451 pdev->ipa_uc_rx_rsc.
2452 rx_ipa_prc_done_idx.vaddr,
2453 pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302454 qdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
Leo Chang8e073612015-11-13 10:55:34 -08002455						 rx_ipa_prc_done_idx),
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002456 memctx));
2457 }
Manikandan Mohanfba8e0d2015-11-18 16:27:37 -08002458
2459 htt_rx_ipa_uc_free_wdi2_rsc(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002460 return 0;
2461}
2462#endif /* IPA_OFFLOAD */