blob: 8a14792771597e67c62c701e7a06162e0c247aeb [file] [log] [blame]
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001/*
Jeff Johnson40d289e2018-12-28 23:20:16 -08002 * Copyright (c) 2011, 2014-2019 The Linux Foundation. All rights reserved.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003 *
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16 * PERFORMANCE OF THIS SOFTWARE.
17 */
18
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080019#ifndef _HTT_INTERNAL__H_
20#define _HTT_INTERNAL__H_
21
22#include <athdefs.h> /* A_STATUS */
Nirav Shahcbc6d722016-03-01 16:24:53 +053023#include <qdf_nbuf.h> /* qdf_nbuf_t */
Anurag Chouhanc5548422016-02-24 18:33:27 +053024#include <qdf_util.h> /* qdf_assert */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080025#include <htc_api.h> /* HTC_PACKET */
26
27#include <htt_types.h>
28
Rakshith Suresh Patkardd7f36c2019-03-05 15:58:47 +053029/* htt_rx.c */
30#define RX_MSDU_END_4_FIRST_MSDU_MASK \
31 (pdev->targetdef->d_RX_MSDU_END_4_FIRST_MSDU_MASK)
32#define RX_MSDU_END_4_FIRST_MSDU_LSB \
33 (pdev->targetdef->d_RX_MSDU_END_4_FIRST_MSDU_LSB)
34#define RX_MPDU_START_0_RETRY_LSB \
35 (pdev->targetdef->d_RX_MPDU_START_0_RETRY_LSB)
36#define RX_MPDU_START_0_RETRY_MASK \
37 (pdev->targetdef->d_RX_MPDU_START_0_RETRY_MASK)
38#define RX_MPDU_START_0_SEQ_NUM_MASK \
39 (pdev->targetdef->d_RX_MPDU_START_0_SEQ_NUM_MASK)
40#define RX_MPDU_START_0_SEQ_NUM_LSB \
41 (pdev->targetdef->d_RX_MPDU_START_0_SEQ_NUM_LSB)
42#define RX_MPDU_START_2_PN_47_32_LSB \
43 (pdev->targetdef->d_RX_MPDU_START_2_PN_47_32_LSB)
44#define RX_MPDU_START_2_PN_47_32_MASK \
45 (pdev->targetdef->d_RX_MPDU_START_2_PN_47_32_MASK)
46#define RX_MPDU_START_2_TID_LSB \
47 (pdev->targetdef->d_RX_MPDU_START_2_TID_LSB)
48#define RX_MPDU_START_2_TID_MASK \
49 (pdev->targetdef->d_RX_MPDU_START_2_TID_MASK)
50#define RX_MSDU_END_1_KEY_ID_OCT_MASK \
51 (pdev->targetdef->d_RX_MSDU_END_1_KEY_ID_OCT_MASK)
52#define RX_MSDU_END_1_KEY_ID_OCT_LSB \
53 (pdev->targetdef->d_RX_MSDU_END_1_KEY_ID_OCT_LSB)
54#define RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK \
55 (pdev->targetdef->d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK)
56#define RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB \
57 (pdev->targetdef->d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB)
58#define RX_MSDU_END_4_LAST_MSDU_MASK \
59 (pdev->targetdef->d_RX_MSDU_END_4_LAST_MSDU_MASK)
60#define RX_MSDU_END_4_LAST_MSDU_LSB \
61 (pdev->targetdef->d_RX_MSDU_END_4_LAST_MSDU_LSB)
62#define RX_ATTENTION_0_MCAST_BCAST_MASK \
63 (pdev->targetdef->d_RX_ATTENTION_0_MCAST_BCAST_MASK)
64#define RX_ATTENTION_0_MCAST_BCAST_LSB \
65 (pdev->targetdef->d_RX_ATTENTION_0_MCAST_BCAST_LSB)
66#define RX_ATTENTION_0_FRAGMENT_MASK \
67 (pdev->targetdef->d_RX_ATTENTION_0_FRAGMENT_MASK)
68#define RX_ATTENTION_0_FRAGMENT_LSB \
69 (pdev->targetdef->d_RX_ATTENTION_0_FRAGMENT_LSB)
70#define RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK \
71 (pdev->targetdef->d_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK)
72#define RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK \
73 (pdev->targetdef->d_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK)
74#define RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB \
75 (pdev->targetdef->d_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB)
76#define RX_MSDU_START_0_MSDU_LENGTH_MASK \
77 (pdev->targetdef->d_RX_MSDU_START_0_MSDU_LENGTH_MASK)
78#define RX_MSDU_START_0_MSDU_LENGTH_LSB \
79 (pdev->targetdef->d_RX_MSDU_START_0_MSDU_LENGTH_LSB)
80#define RX_MPDU_START_0_ENCRYPTED_MASK \
81 (pdev->targetdef->d_RX_MPDU_START_0_ENCRYPTED_MASK)
82#define RX_MPDU_START_0_ENCRYPTED_LSB \
83 (pdev->targetdef->d_RX_MPDU_START_0_ENCRYPTED_LSB)
84#define RX_ATTENTION_0_MORE_DATA_MASK \
85 (pdev->targetdef->d_RX_ATTENTION_0_MORE_DATA_MASK)
86#define RX_ATTENTION_0_MSDU_DONE_MASK \
87 (pdev->targetdef->d_RX_ATTENTION_0_MSDU_DONE_MASK)
88#define RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK \
89 (pdev->targetdef->d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK)
90#define RX_MSDU_START_2_DECAP_FORMAT_OFFSET \
91 (pdev->targetdef->d_RX_MSDU_START_2_DECAP_FORMAT_OFFSET)
92#define RX_MSDU_START_2_DECAP_FORMAT_LSB \
93 (pdev->targetdef->d_RX_MSDU_START_2_DECAP_FORMAT_LSB)
94#define RX_MSDU_START_2_DECAP_FORMAT_MASK \
95 (pdev->targetdef->d_RX_MSDU_START_2_DECAP_FORMAT_MASK)
96/* end */
97
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080098#ifndef offsetof
99#define offsetof(type, field) ((size_t)(&((type *)0)->field))
100#endif
101
102#undef MS
103#define MS(_v, _f) (((_v) & _f ## _MASK) >> _f ## _LSB)
104#undef SM
105#define SM(_v, _f) (((_v) << _f ## _LSB) & _f ## _MASK)
106#undef WO
107#define WO(_f) ((_f ## _OFFSET) >> 2)
108
109#define GET_FIELD(_addr, _f) MS(*((A_UINT32 *)(_addr) + WO(_f)), _f)
110
111#include <rx_desc.h>
112#include <wal_rx_desc.h> /* struct rx_attention, etc */
113
/*
 * htt_host_fw_desc_base - host-resident copy of the per-MSDU FW rx descriptor.
 * The union with a 32-bit pad forces the struct to 4-byte size/alignment.
 */
struct htt_host_fw_desc_base {
	union {
		struct fw_rx_desc_base val;
		A_UINT32 dummy_pad;	/* make sure it is DWORD aligned */
	} u;
};
120
Himanshu Agarwal053d4552016-07-20 20:00:34 +0530121
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800122/*
123 * This struct defines the basic descriptor information used by host,
124 * which is written either by the 11ac HW MAC into the host Rx data
125 * buffer ring directly or generated by FW and copied from Rx indication
126 */
struct htt_host_rx_desc_base {
	struct htt_host_fw_desc_base fw_desc;
	struct rx_attention attention;
	struct rx_frag_info frag_info;
	struct rx_mpdu_start mpdu_start;
	struct rx_msdu_start msdu_start;
	struct rx_msdu_end msdu_end;
	struct rx_mpdu_end mpdu_end;
	struct rx_ppdu_start ppdu_start;
	struct rx_ppdu_end ppdu_end;
#ifdef QCA_WIFI_3_0_ADRASTEA
/* Increased to support some of offload features */
#define RX_HTT_HDR_STATUS_LEN 256
#else
#define RX_HTT_HDR_STATUS_LEN 64
#endif
	/* raw header/status bytes; length is target-dependent (see above) */
	char rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
};
145
Himanshu Agarwal5f5e1662017-05-24 12:37:09 +0530146#define RX_DESC_ATTN_MPDU_LEN_ERR_BIT 0x08000000
147
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800148#define RX_STD_DESC_ATTN_OFFSET \
149 (offsetof(struct htt_host_rx_desc_base, attention))
150#define RX_STD_DESC_FRAG_INFO_OFFSET \
151 (offsetof(struct htt_host_rx_desc_base, frag_info))
152#define RX_STD_DESC_MPDU_START_OFFSET \
153 (offsetof(struct htt_host_rx_desc_base, mpdu_start))
154#define RX_STD_DESC_MSDU_START_OFFSET \
155 (offsetof(struct htt_host_rx_desc_base, msdu_start))
156#define RX_STD_DESC_MSDU_END_OFFSET \
157 (offsetof(struct htt_host_rx_desc_base, msdu_end))
158#define RX_STD_DESC_MPDU_END_OFFSET \
159 (offsetof(struct htt_host_rx_desc_base, mpdu_end))
160#define RX_STD_DESC_PPDU_START_OFFSET \
161 (offsetof(struct htt_host_rx_desc_base, ppdu_start))
162#define RX_STD_DESC_PPDU_END_OFFSET \
163 (offsetof(struct htt_host_rx_desc_base, ppdu_end))
164#define RX_STD_DESC_HDR_STATUS_OFFSET \
165 (offsetof(struct htt_host_rx_desc_base, rx_hdr_status))
166
167#define RX_STD_DESC_FW_MSDU_OFFSET \
168 (offsetof(struct htt_host_rx_desc_base, fw_desc))
169
170#define RX_STD_DESC_SIZE (sizeof(struct htt_host_rx_desc_base))
171
172#define RX_DESC_ATTN_OFFSET32 (RX_STD_DESC_ATTN_OFFSET >> 2)
173#define RX_DESC_FRAG_INFO_OFFSET32 (RX_STD_DESC_FRAG_INFO_OFFSET >> 2)
174#define RX_DESC_MPDU_START_OFFSET32 (RX_STD_DESC_MPDU_START_OFFSET >> 2)
175#define RX_DESC_MSDU_START_OFFSET32 (RX_STD_DESC_MSDU_START_OFFSET >> 2)
176#define RX_DESC_MSDU_END_OFFSET32 (RX_STD_DESC_MSDU_END_OFFSET >> 2)
177#define RX_DESC_MPDU_END_OFFSET32 (RX_STD_DESC_MPDU_END_OFFSET >> 2)
178#define RX_DESC_PPDU_START_OFFSET32 (RX_STD_DESC_PPDU_START_OFFSET >> 2)
179#define RX_DESC_PPDU_END_OFFSET32 (RX_STD_DESC_PPDU_END_OFFSET >> 2)
180#define RX_DESC_HDR_STATUS_OFFSET32 (RX_STD_DESC_HDR_STATUS_OFFSET >> 2)
181
182#define RX_STD_DESC_SIZE_DWORD (RX_STD_DESC_SIZE >> 2)
183
184/*
185 * Make sure there is a minimum headroom provided in the rx netbufs
186 * for use by the OS shim and OS and rx data consumers.
187 */
188#define HTT_RX_BUF_OS_MIN_HEADROOM 32
189#define HTT_RX_STD_DESC_RESERVATION \
190 ((HTT_RX_BUF_OS_MIN_HEADROOM > RX_STD_DESC_SIZE) ? \
191 HTT_RX_BUF_OS_MIN_HEADROOM : RX_STD_DESC_SIZE)
192#define HTT_RX_DESC_RESERVATION32 \
193 (HTT_RX_STD_DESC_RESERVATION >> 2)
194
195#define HTT_RX_DESC_ALIGN_MASK 7 /* 8-byte alignment */
Manjunathappa Prakashc8e75642016-06-03 19:32:27 -0700196
Govind Singhd79e1342015-11-03 16:20:02 +0530197#ifdef DEBUG_RX_RING_BUFFER
Leo Chang98726762016-10-28 11:07:18 -0700198#ifdef MSM_PLATFORM
199#define HTT_ADDRESS_MASK 0xfffffffffffffffe
200#else
201#define HTT_ADDRESS_MASK 0xfffffffe
202#endif /* MSM_PLATFORM */
203
Orhan K AKYILDIZ0c1b6bf2016-11-28 18:47:24 -0800204/**
205 * rx_buf_debug: rx_ring history
206 *
207 * There are three types of entries in history:
208 * 1) rx-descriptors posted (and received)
209 * Both of these events are stored on the same entry
210 * @paddr : physical address posted on the ring
211 * @nbuf : virtual address of nbuf containing data
 * @ndata : virtual address of data (corresponds to physical address)
213 * @posted: time-stamp when the buffer is posted to the ring
214 * @recved: time-stamp when the buffer is received (rx_in_order_ind)
215 * : or 0, if the buffer has not been received yet
216 * 2) ring alloc-index (fill-index) updates
217 * @paddr : = 0
218 * @nbuf : = 0
219 * @ndata : = 0
220 * posted : time-stamp when alloc index was updated
221 * recved : value of alloc index
222 * 3) htt_rx_in_order_indication reception
223 * @paddr : = 0
224 * @nbuf : = 0
Orhan K AKYILDIZfdd74de2016-12-15 12:08:04 -0800225 * @ndata : msdu_cnt
 * @posted: time-stamp when HTT message is received
227 * @recvd : 0x48545452584D5367 ('HTTRXMSG')
Yun Park4afce842017-04-05 07:09:26 -0700228 */
Orhan K AKYILDIZfdd74de2016-12-15 12:08:04 -0800229#define HTT_RX_RING_BUFF_DBG_LIST (2 * 1024)
/* one entry of the rx-ring debug history (entry types documented above) */
struct rx_buf_debug {
	qdf_dma_addr_t paddr;
	qdf_nbuf_t nbuf;
	void *nbuf_data;
	uint64_t posted; /* timestamp */
	uint64_t recved; /* timestamp */
	int cpu;

};
239#endif
Manjunathappa Prakashc8e75642016-06-03 19:32:27 -0700240
Nirav Shahcbc6d722016-03-01 16:24:53 +0530241static inline struct htt_host_rx_desc_base *htt_rx_desc(qdf_nbuf_t msdu)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800242{
243 return (struct htt_host_rx_desc_base *)
Nirav Shahcbc6d722016-03-01 16:24:53 +0530244 (((size_t) (qdf_nbuf_head(msdu) + HTT_RX_DESC_ALIGN_MASK)) &
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800245 ~HTT_RX_DESC_ALIGN_MASK);
246}
247
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -0700248#if defined(HELIUMPLUS)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800249/**
250 * htt_print_rx_desc_lro() - print LRO information in the rx
251 * descriptor
252 * @rx_desc: HTT rx descriptor
253 *
254 * Prints the LRO related fields in the HTT rx descriptor
255 *
256 * Return: none
257 */
static inline void htt_print_rx_desc_lro(struct htt_host_rx_desc_base *rx_desc)
{
	/* dump every LRO-relevant field of the rx descriptor, one per line */
	qdf_nofl_info
		("----------------------RX DESC LRO----------------------\n");
	qdf_nofl_info("msdu_end.lro_eligible:0x%x\n",
		      rx_desc->msdu_end.lro_eligible);
	qdf_nofl_info("msdu_start.tcp_only_ack:0x%x\n",
		      rx_desc->msdu_start.tcp_only_ack);
	qdf_nofl_info("msdu_end.tcp_udp_chksum:0x%x\n",
		      rx_desc->msdu_end.tcp_udp_chksum);
	qdf_nofl_info("msdu_end.tcp_seq_number:0x%x\n",
		      rx_desc->msdu_end.tcp_seq_number);
	qdf_nofl_info("msdu_end.tcp_ack_number:0x%x\n",
		      rx_desc->msdu_end.tcp_ack_number);
	qdf_nofl_info("msdu_start.tcp_proto:0x%x\n",
		      rx_desc->msdu_start.tcp_proto);
	qdf_nofl_info("msdu_start.ipv6_proto:0x%x\n",
		      rx_desc->msdu_start.ipv6_proto);
	qdf_nofl_info("msdu_start.ipv4_proto:0x%x\n",
		      rx_desc->msdu_start.ipv4_proto);
	qdf_nofl_info("msdu_start.l3_offset:0x%x\n",
		      rx_desc->msdu_start.l3_offset);
	qdf_nofl_info("msdu_start.l4_offset:0x%x\n",
		      rx_desc->msdu_start.l4_offset);
	qdf_nofl_info("msdu_start.flow_id_toeplitz:0x%x\n",
		      rx_desc->msdu_start.flow_id_toeplitz);
	qdf_nofl_info
		("---------------------------------------------------------\n");
}
287
288/**
 * htt_rx_extract_lro_info() - extract LRO information from the rx
290 * descriptor
291 * @msdu: network buffer
292 * @rx_desc: HTT rx descriptor
293 *
294 * Extracts the LRO related fields from the HTT rx descriptor
295 * and stores them in the network buffer's control block
296 *
297 * Return: none
298 */
static inline void htt_rx_extract_lro_info(qdf_nbuf_t msdu,
					   struct htt_host_rx_desc_base *rx_desc)
{
	/* a failed TCP/UDP checksum disqualifies the MSDU from LRO */
	if (rx_desc->attention.tcp_udp_chksum_fail)
		QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = 0;
	else
		QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
			rx_desc->msdu_end.lro_eligible;

	/* copy the remaining LRO fields only for eligible frames */
	if (QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu)) {
		QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
			rx_desc->msdu_start.tcp_only_ack;
		QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
			rx_desc->msdu_end.tcp_udp_chksum;
		QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
			rx_desc->msdu_end.tcp_seq_number;
		QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
			rx_desc->msdu_end.tcp_ack_number;
		QDF_NBUF_CB_RX_TCP_WIN(msdu) =
			rx_desc->msdu_end.window_size;
		QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
			rx_desc->msdu_start.tcp_proto;
		QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
			rx_desc->msdu_start.ipv6_proto;
		QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
			rx_desc->msdu_start.l4_offset;
		QDF_NBUF_CB_RX_FLOW_ID(msdu) =
			rx_desc->msdu_start.flow_id_toeplitz;
	}
}
329#else
/* no LRO fields in the rx descriptor on non-HELIUMPLUS builds: no-op */
static inline void htt_print_rx_desc_lro(struct htt_host_rx_desc_base *rx_desc)
{}
/* nothing to extract on non-HELIUMPLUS builds: no-op */
static inline void htt_rx_extract_lro_info(qdf_nbuf_t msdu,
					   struct htt_host_rx_desc_base *rx_desc) {}
Manjunathappa Prakash7b0ad462018-04-15 00:37:16 -0700334#endif /* HELIUMPLUS */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800335
/*
 * htt_print_rx_desc() - dump every section of the HTT rx descriptor as raw
 * 32-bit words, for debugging.
 *
 * NOTE(review): sections are accessed through (uint32_t *) casts; this
 * presumably relies on the descriptor structs being 4-byte aligned words —
 * confirm against rx_desc.h layout (strict-aliasing concern).
 */
static inline void htt_print_rx_desc(struct htt_host_rx_desc_base *rx_desc)
{
	qdf_nofl_info
		("----------------------RX DESC----------------------------\n");
	qdf_nofl_info("attention: %#010x\n",
		      (unsigned int)(*(uint32_t *)&rx_desc->attention));
	qdf_nofl_info("frag_info: %#010x\n",
		      (unsigned int)(*(uint32_t *)&rx_desc->frag_info));
	qdf_nofl_info("mpdu_start: %#010x %#010x %#010x\n",
		      (unsigned int)(((uint32_t *)&rx_desc->mpdu_start)[0]),
		      (unsigned int)(((uint32_t *)&rx_desc->mpdu_start)[1]),
		      (unsigned int)(((uint32_t *)&rx_desc->mpdu_start)[2]));
	qdf_nofl_info("msdu_start: %#010x %#010x %#010x\n",
		      (unsigned int)(((uint32_t *)&rx_desc->msdu_start)[0]),
		      (unsigned int)(((uint32_t *)&rx_desc->msdu_start)[1]),
		      (unsigned int)(((uint32_t *)&rx_desc->msdu_start)[2]));
	qdf_nofl_info("msdu_end: %#010x %#010x %#010x %#010x %#010x\n",
		      (unsigned int)(((uint32_t *)&rx_desc->msdu_end)[0]),
		      (unsigned int)(((uint32_t *)&rx_desc->msdu_end)[1]),
		      (unsigned int)(((uint32_t *)&rx_desc->msdu_end)[2]),
		      (unsigned int)(((uint32_t *)&rx_desc->msdu_end)[3]),
		      (unsigned int)(((uint32_t *)&rx_desc->msdu_end)[4]));
	qdf_nofl_info("mpdu_end: %#010x\n",
		      (unsigned int)(*(uint32_t *)&rx_desc->mpdu_end));
	qdf_nofl_info("ppdu_start: %#010x %#010x %#010x %#010x %#010x\n"
		      "%#010x %#010x %#010x %#010x %#010x\n",
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_start)[0]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_start)[1]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_start)[2]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_start)[3]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_start)[4]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_start)[5]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_start)[6]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_start)[7]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_start)[8]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_start)[9]));
	qdf_nofl_info("ppdu_end: %#010x %#010x %#010x %#010x %#010x\n"
		      "%#010x %#010x %#010x %#010x %#010x\n"
		      "%#010x,%#010x %#010x %#010x %#010x\n"
		      "%#010x %#010x %#010x %#010x %#010x\n" "%#010x %#010x\n",
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[0]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[1]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[2]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[3]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[4]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[5]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[6]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[7]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[8]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[9]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[10]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[11]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[12]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[13]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[14]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[15]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[16]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[17]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[18]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[19]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[20]),
		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[21]));
	qdf_nofl_info
		("---------------------------------------------------------\n");
}
401
402#ifndef HTT_ASSERT_LEVEL
403#define HTT_ASSERT_LEVEL 3
404#endif
405
Anurag Chouhanc5548422016-02-24 18:33:27 +0530406#define HTT_ASSERT_ALWAYS(condition) qdf_assert_always((condition))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800407
Anurag Chouhanc5548422016-02-24 18:33:27 +0530408#define HTT_ASSERT0(condition) qdf_assert((condition))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800409#if HTT_ASSERT_LEVEL > 0
Anurag Chouhanc5548422016-02-24 18:33:27 +0530410#define HTT_ASSERT1(condition) qdf_assert((condition))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800411#else
412#define HTT_ASSERT1(condition)
413#endif
414
415#if HTT_ASSERT_LEVEL > 1
Anurag Chouhanc5548422016-02-24 18:33:27 +0530416#define HTT_ASSERT2(condition) qdf_assert((condition))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800417#else
418#define HTT_ASSERT2(condition)
419#endif
420
421#if HTT_ASSERT_LEVEL > 2
Anurag Chouhanc5548422016-02-24 18:33:27 +0530422#define HTT_ASSERT3(condition) qdf_assert((condition))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800423#else
424#define HTT_ASSERT3(condition)
425#endif
426
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800427/*
428 * HTT_MAX_SEND_QUEUE_DEPTH -
429 * How many packets HTC should allow to accumulate in a send queue
430 * before calling the EpSendFull callback to see whether to retain
431 * or drop packets.
432 * This is not relevant for LL, where tx descriptors should be immediately
433 * downloaded to the target.
434 * This is not very relevant for HL either, since it is anticipated that
435 * the HL tx download scheduler will not work this far in advance - rather,
436 * it will make its decisions just-in-time, so it can be responsive to
437 * changing conditions.
438 * Hence, this queue depth threshold spec is mostly just a formality.
439 */
440#define HTT_MAX_SEND_QUEUE_DEPTH 64
441
442#define IS_PWR2(value) (((value) ^ ((value)-1)) == ((value) << 1) - 1)
443
Alok Kumara13db782018-08-12 02:24:12 +0530444/*
445 * HTT_RX_PRE_ALLOC_POOL_SIZE -
446 * How many Rx Buffer should be there in pre-allocated pool of buffers.
447 * This is mainly for low memory condition where kernel fails to alloc
448 * SKB buffer to the Rx ring.
449 */
450#define HTT_RX_PRE_ALLOC_POOL_SIZE 64
Manjunathappa Prakash70ea7282016-06-17 15:42:45 -0700451/* Max rx MSDU size including L2 headers */
452#define MSDU_SIZE 1560
453/* Rounding up to a cache line size. */
454#define HTT_RX_BUF_SIZE roundup(MSDU_SIZE + \
455 sizeof(struct htt_host_rx_desc_base), \
456 QDF_CACHE_LINE_SZ)
Manjunathappa Prakashb7573722016-04-21 11:24:07 -0700457#define MAX_RX_PAYLOAD_SZ (HTT_RX_BUF_SIZE - RX_STD_DESC_SIZE)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800458/*
459 * DMA_MAP expects the buffer to be an integral number of cache lines.
460 * Rather than checking the actual cache line size, this code makes a
461 * conservative estimate of what the cache line size could be.
462 */
463#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
464#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
465
466#ifdef BIG_ENDIAN_HOST
467/*
468 * big-endian: bytes within a 4-byte "word" are swapped:
469 * pre-swap post-swap
470 * index index
471 * 0 3
472 * 1 2
473 * 2 1
474 * 3 0
475 * 4 7
476 * 5 6
477 * etc.
478 * To compute the post-swap index from the pre-swap index, compute
479 * the byte offset for the start of the word (index & ~0x3) and add
480 * the swapped byte offset within the word (3 - (index & 0x3)).
481 */
482#define HTT_ENDIAN_BYTE_IDX_SWAP(idx) (((idx) & ~0x3) + (3 - ((idx) & 0x3)))
483#else
484/* little-endian: no adjustment needed */
485#define HTT_ENDIAN_BYTE_IDX_SWAP(idx) idx
486#endif
487
488#define HTT_TX_MUTEX_INIT(_mutex) \
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530489 qdf_spinlock_create(_mutex)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800490
491#define HTT_TX_MUTEX_ACQUIRE(_mutex) \
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530492 qdf_spin_lock_bh(_mutex)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800493
494#define HTT_TX_MUTEX_RELEASE(_mutex) \
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530495 qdf_spin_unlock_bh(_mutex)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800496
497#define HTT_TX_MUTEX_DESTROY(_mutex) \
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530498 qdf_spinlock_destroy(_mutex)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800499
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800500#ifdef ATH_11AC_TXCOMPACT
501
502#define HTT_TX_NBUF_QUEUE_MUTEX_INIT(_pdev) \
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530503 qdf_spinlock_create(&_pdev->txnbufq_mutex)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800504
505#define HTT_TX_NBUF_QUEUE_MUTEX_DESTROY(_pdev) \
506 HTT_TX_MUTEX_DESTROY(&_pdev->txnbufq_mutex)
507
508#define HTT_TX_NBUF_QUEUE_REMOVE(_pdev, _msdu) do { \
509 HTT_TX_MUTEX_ACQUIRE(&_pdev->txnbufq_mutex); \
Nirav Shahcbc6d722016-03-01 16:24:53 +0530510 _msdu = qdf_nbuf_queue_remove(&_pdev->txnbufq);\
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800511 HTT_TX_MUTEX_RELEASE(&_pdev->txnbufq_mutex); \
512 } while (0)
513
514#define HTT_TX_NBUF_QUEUE_ADD(_pdev, _msdu) do { \
515 HTT_TX_MUTEX_ACQUIRE(&_pdev->txnbufq_mutex); \
Nirav Shahcbc6d722016-03-01 16:24:53 +0530516 qdf_nbuf_queue_add(&_pdev->txnbufq, _msdu); \
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800517 HTT_TX_MUTEX_RELEASE(&_pdev->txnbufq_mutex); \
518 } while (0)
519
520#define HTT_TX_NBUF_QUEUE_INSERT_HEAD(_pdev, _msdu) do { \
521 HTT_TX_MUTEX_ACQUIRE(&_pdev->txnbufq_mutex); \
Nirav Shahcbc6d722016-03-01 16:24:53 +0530522 qdf_nbuf_queue_insert_head(&_pdev->txnbufq, _msdu);\
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800523 HTT_TX_MUTEX_RELEASE(&_pdev->txnbufq_mutex); \
524 } while (0)
525#else
526
527#define HTT_TX_NBUF_QUEUE_MUTEX_INIT(_pdev)
528#define HTT_TX_NBUF_QUEUE_REMOVE(_pdev, _msdu)
529#define HTT_TX_NBUF_QUEUE_ADD(_pdev, _msdu)
530#define HTT_TX_NBUF_QUEUE_INSERT_HEAD(_pdev, _msdu)
531#define HTT_TX_NBUF_QUEUE_MUTEX_DESTROY(_pdev)
532
533#endif
534
Siddarth Poddar1df1cd82016-04-27 17:32:21 +0530535#ifdef CONFIG_HL_SUPPORT
536
/* no-op stub for CONFIG_HL_SUPPORT builds */
static inline void htt_tx_resume_handler(void *context)
{
}
540#else
541
Yun Park4afce842017-04-05 07:09:26 -0700542void htt_tx_resume_handler(void *context);
Siddarth Poddar1df1cd82016-04-27 17:32:21 +0530543#endif
544
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800545#ifdef ATH_11AC_TXCOMPACT
546#define HTT_TX_SCHED htt_tx_sched
547#else
548#define HTT_TX_SCHED(pdev) /* no-op */
549#endif
550
551int htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems);
552
553void htt_tx_detach(struct htt_pdev_t *pdev);
554
555int htt_rx_attach(struct htt_pdev_t *pdev);
556
Siddarth Poddar1df1cd82016-04-27 17:32:21 +0530557#if defined(CONFIG_HL_SUPPORT)
558
/* no-op stub for CONFIG_HL_SUPPORT builds */
static inline void htt_rx_detach(struct htt_pdev_t *pdev)
{
}
562#else
563
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800564void htt_rx_detach(struct htt_pdev_t *pdev);
Siddarth Poddar1df1cd82016-04-27 17:32:21 +0530565#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800566
Houston Hoffman23e76f92016-02-26 12:19:11 -0800567int htt_htc_attach(struct htt_pdev_t *pdev, uint16_t service_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800568
569void htt_t2h_msg_handler(void *context, HTC_PACKET *pkt);
Manjunathappa Prakash585178d2016-04-14 01:11:18 -0700570#ifdef WLAN_FEATURE_FASTPATH
571void htt_t2h_msg_handler_fast(void *htt_pdev, qdf_nbuf_t *cmpl_msdus,
572 uint32_t num_cmpls);
573#else
/* no-op stub when WLAN_FEATURE_FASTPATH is not enabled */
static inline void htt_t2h_msg_handler_fast(void *htt_pdev,
					    qdf_nbuf_t *cmpl_msdus,
					    uint32_t num_cmpls)
{
}
579#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800580
581void htt_h2t_send_complete(void *context, HTC_PACKET *pkt);
582
Rakesh Pillai7fb7a1f2017-06-23 14:46:36 +0530583QDF_STATUS htt_h2t_ver_req_msg(struct htt_pdev_t *pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800584
Visweswara Tanuku2e839e52019-06-11 10:16:30 +0530585int htt_tx_padding_credit_update_handler(void *context, int pad_credit);
586
Himanshu Agarwal18d6b8c2017-03-01 16:41:04 +0530587#if defined(HELIUMPLUS)
Rakesh Pillai7fb7a1f2017-06-23 14:46:36 +0530588QDF_STATUS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800589htt_h2t_frag_desc_bank_cfg_msg(struct htt_pdev_t *pdev);
Himanshu Agarwal18d6b8c2017-03-01 16:41:04 +0530590#endif /* defined(HELIUMPLUS) */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800591
Jeff Johnsoncdcc9102019-03-07 13:13:31 -0800592QDF_STATUS htt_h2t_rx_ring_cfg_msg_ll(struct htt_pdev_t *pdev);
Siddarth Poddar1df1cd82016-04-27 17:32:21 +0530593
Jeff Johnsoncdcc9102019-03-07 13:13:31 -0800594QDF_STATUS htt_h2t_rx_ring_rfs_cfg_msg_ll(struct htt_pdev_t *pdev);
Manjunathappa Prakashfff753c2016-09-01 19:34:56 -0700595
Jeff Johnsoncdcc9102019-03-07 13:13:31 -0800596QDF_STATUS htt_h2t_rx_ring_rfs_cfg_msg_hl(struct htt_pdev_t *pdev);
Manjunathappa Prakashfff753c2016-09-01 19:34:56 -0700597
Jeff Johnsoncdcc9102019-03-07 13:13:31 -0800598QDF_STATUS htt_h2t_rx_ring_cfg_msg_hl(struct htt_pdev_t *pdev);
Siddarth Poddar1df1cd82016-04-27 17:32:21 +0530599
600extern QDF_STATUS (*htt_h2t_rx_ring_cfg_msg)(struct htt_pdev_t *pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800601
Manikandan Mohan83c939c2017-04-13 20:23:07 -0700602enum htc_send_full_action htt_h2t_full(void *context, HTC_PACKET *pkt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800603
604struct htt_htc_pkt *htt_htc_pkt_alloc(struct htt_pdev_t *pdev);
605
606void htt_htc_pkt_free(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt);
607
608void htt_htc_pkt_pool_free(struct htt_pdev_t *pdev);
609
610#ifdef ATH_11AC_TXCOMPACT
Yun Parkeea1c9c2017-03-08 11:26:37 -0800611void htt_htc_misc_pkt_list_trim(struct htt_pdev_t *pdev, int level);
612
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800613void
614htt_htc_misc_pkt_list_add(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt);
615
616void htt_htc_misc_pkt_pool_free(struct htt_pdev_t *pdev);
617#endif
618
Nirav Shah6ebfe242018-05-09 15:56:12 +0530619#ifdef WLAN_FULL_REORDER_OFFLOAD
620int
621htt_rx_hash_list_insert(struct htt_pdev_t *pdev,
622 qdf_dma_addr_t paddr,
623 qdf_nbuf_t netbuf);
624#else
/* no rx hash tracking without WLAN_FULL_REORDER_OFFLOAD; report success */
static inline int
htt_rx_hash_list_insert(struct htt_pdev_t *pdev,
			qdf_dma_addr_t paddr,
			qdf_nbuf_t netbuf)
{
	return 0;
}
Nirav Shaheb017be2018-02-15 11:20:58 +0530632#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800633
Orhan K AKYILDIZ4c878ed2017-03-23 13:12:46 -0700634qdf_nbuf_t
635htt_rx_hash_list_lookup(struct htt_pdev_t *pdev, qdf_dma_addr_t paddr);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800636
637#ifdef IPA_OFFLOAD
638int
639htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
640 unsigned int uc_tx_buf_sz,
641 unsigned int uc_tx_buf_cnt,
642 unsigned int uc_tx_partition_base);
643
644int
645htt_rx_ipa_uc_attach(struct htt_pdev_t *pdev, unsigned int rx_ind_ring_size);
646
647int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev);
648
649int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev);
Sravan Kumar Kairamb664b6c2018-02-27 17:43:10 +0530650
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800651#else
Leo Chang8e073612015-11-13 10:55:34 -0800652/**
653 * htt_tx_ipa_uc_attach() - attach htt ipa uc tx resource
654 * @pdev: htt context
655 * @uc_tx_buf_sz: single tx buffer size
656 * @uc_tx_buf_cnt: total tx buffer count
657 * @uc_tx_partition_base: tx buffer partition start
658 *
659 * Return: 0 success
660 */
/* IPA offload disabled: nothing to attach, report success */
static inline int
htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
		     unsigned int uc_tx_buf_sz,
		     unsigned int uc_tx_buf_cnt,
		     unsigned int uc_tx_partition_base)
{
	return 0;
}
669
Leo Chang8e073612015-11-13 10:55:34 -0800670/**
671 * htt_rx_ipa_uc_attach() - attach htt ipa uc rx resource
672 * @pdev: htt context
673 * @rx_ind_ring_size: rx ring size
674 *
675 * Return: 0 success
676 */
/* IPA offload disabled: nothing to attach, report success */
static inline int
htt_rx_ipa_uc_attach(struct htt_pdev_t *pdev, unsigned int rx_ind_ring_size)
{
	return 0;
}
682
/* IPA offload disabled: nothing to detach, report success */
static inline int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev)
{
	return 0;
}
687
/* IPA offload disabled: nothing to detach, report success */
static inline int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev)
{
	return 0;
}
Sravan Kumar Kairamb664b6c2018-02-27 17:43:10 +0530692
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800693#endif /* IPA_OFFLOAD */
Manjunathappa Prakashc8e75642016-06-03 19:32:27 -0700694
Siddarth Poddar1df1cd82016-04-27 17:32:21 +0530695/* Maximum Outstanding Bus Download */
696#define HTT_MAX_BUS_CREDIT 33
697
698#ifdef CONFIG_HL_SUPPORT
699
700/**
701 * htt_tx_credit_update() - check for diff in bus delta and target delta
702 * @pdev: pointer to htt device.
703 *
704 * Return: min of bus delta and target delta
705 */
706int
707htt_tx_credit_update(struct htt_pdev_t *pdev);
708#else
709
/* no HL credit accounting in non-CONFIG_HL_SUPPORT builds: delta is 0 */
static inline int
htt_tx_credit_update(struct htt_pdev_t *pdev)
{
	return 0;
}
715#endif
716
717
718#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
719
720#define HTT_TX_GROUP_INDEX_OFFSET \
721(sizeof(struct htt_txq_group) / sizeof(u_int32_t))
722
723void htt_tx_group_credit_process(struct htt_pdev_t *pdev, u_int32_t *msg_word);
724#else

/**
 * htt_tx_group_credit_process() - stub used when
 *	FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL is not compiled in
 * @pdev: pointer to htt device
 * @msg_word: HTT message payload (unused in this stub)
 *
 * Return: none
 */
static inline
void htt_tx_group_credit_process(struct htt_pdev_t *pdev, u_int32_t *msg_word)
{
}
730#endif
731
Govind Singhd79e1342015-11-03 16:20:02 +0530732#ifdef DEBUG_RX_RING_BUFFER
/**
 * htt_rx_dbg_rxbuf_init() - init debug rx buff list
 * @pdev: pdev handle
 *
 * The history list is a statically allocated array (BSS segment), so the
 * memory cost is paid at load time rather than via a runtime allocation.
 *
 * NOTE(review): because this array is defined in a header, every translation
 * unit that includes it with DEBUG_RX_RING_BUFFER set gets its own copy of
 * rx_buff_list_bss — confirm this is intended.
 *
 * Return: none
 */
static struct rx_buf_debug rx_buff_list_bss[HTT_RX_RING_BUFF_DBG_LIST];
static inline
void htt_rx_dbg_rxbuf_init(struct htt_pdev_t *pdev)
{
	pdev->rx_buff_list = rx_buff_list_bss;
	qdf_spinlock_create(&(pdev->rx_buff_list_lock));
	/* reset ring cursor and all cumulative debug counters */
	pdev->rx_buff_index = 0;
	pdev->rx_buff_posted_cum = 0;
	pdev->rx_buff_recvd_cum = 0;
	pdev->rx_buff_recvd_err = 0;
	pdev->refill_retry_timer_starts = 0;
	pdev->refill_retry_timer_calls = 0;
	pdev->refill_retry_timer_doubles = 0;
}
Manjunathappa Prakashc8e75642016-06-03 19:32:27 -0700757
Mohit Khannac19888f2017-02-09 12:50:15 -0800758/**
759 * htt_display_rx_buf_debug() - display debug rx buff list and some counters
760 * @pdev: pdev handle
761 *
762 * Return: Success
763 */
Orhan K AKYILDIZfdd74de2016-12-15 12:08:04 -0800764static inline int htt_display_rx_buf_debug(struct htt_pdev_t *pdev)
765{
766 int i;
767 struct rx_buf_debug *buf;
768
Jeff Johnson6795c3a2019-03-18 13:43:04 -0700769 if ((pdev) &&
770 (pdev->rx_buff_list)) {
Orhan K AKYILDIZfdd74de2016-12-15 12:08:04 -0800771 buf = pdev->rx_buff_list;
772 for (i = 0; i < HTT_RX_RING_BUFF_DBG_LIST; i++) {
773 if (buf[i].posted != 0)
Nirav Shahe6194ac2018-07-13 11:04:41 +0530774 qdf_nofl_info("[%d][0x%x] %pK %lu %pK %llu %llu",
775 i, buf[i].cpu,
776 buf[i].nbuf_data,
777 (unsigned long)buf[i].paddr,
778 buf[i].nbuf,
779 buf[i].posted,
780 buf[i].recved);
Orhan K AKYILDIZfdd74de2016-12-15 12:08:04 -0800781 }
Mohit Khannac68622e2017-01-31 21:07:12 -0800782
Nirav Shahe6194ac2018-07-13 11:04:41 +0530783 qdf_nofl_info("rxbuf_idx %d all_posted: %d all_recvd: %d recv_err: %d",
784 pdev->rx_buff_index,
785 pdev->rx_buff_posted_cum,
786 pdev->rx_buff_recvd_cum,
787 pdev->rx_buff_recvd_err);
Mohit Khannac68622e2017-01-31 21:07:12 -0800788
Nirav Shahe6194ac2018-07-13 11:04:41 +0530789 qdf_nofl_info("timer kicks :%d actual :%d restarts:%d debtors: %d fill_n: %d",
790 pdev->refill_retry_timer_starts,
791 pdev->refill_retry_timer_calls,
792 pdev->refill_retry_timer_doubles,
793 pdev->rx_buff_debt_invoked,
794 pdev->rx_buff_fill_n_invoked);
Orhan K AKYILDIZfdd74de2016-12-15 12:08:04 -0800795 } else
796 return -EINVAL;
797 return 0;
798}
799
/**
 * htt_rx_dbg_rxbuf_set() - set element of rx buff list
 * @pdev: pdev handle
 * @paddr: physical address of netbuf
 * @rx_netbuf: received netbuf
 *
 * Records a newly posted rx buffer at the current ring cursor of the debug
 * history list, stamps the posting timestamp/CPU, and stores the slot index
 * in the netbuf control block so htt_rx_dbg_rxbuf_reset() can find it later.
 *
 * Return: none
 */
static inline
void htt_rx_dbg_rxbuf_set(struct htt_pdev_t *pdev, qdf_dma_addr_t paddr,
			  qdf_nbuf_t rx_netbuf)
{
	if (pdev->rx_buff_list) {
		qdf_spin_lock_bh(&(pdev->rx_buff_list_lock));
		pdev->rx_buff_list[pdev->rx_buff_index].paddr = paddr;
		pdev->rx_buff_list[pdev->rx_buff_index].nbuf = rx_netbuf;
		pdev->rx_buff_list[pdev->rx_buff_index].nbuf_data =
			rx_netbuf->data;
		pdev->rx_buff_list[pdev->rx_buff_index].posted =
			qdf_get_log_timestamp();
		pdev->rx_buff_posted_cum++;
		/* recved == 0 marks the buffer as posted but not yet received */
		pdev->rx_buff_list[pdev->rx_buff_index].recved = 0;
		pdev->rx_buff_list[pdev->rx_buff_index].cpu =
			(1 << qdf_get_cpu());
		/* remember which history slot this netbuf occupies */
		QDF_NBUF_CB_RX_MAP_IDX(rx_netbuf) = pdev->rx_buff_index;
		/* advance the circular cursor */
		if (++pdev->rx_buff_index >=
				HTT_RX_RING_BUFF_DBG_LIST)
			pdev->rx_buff_index = 0;
		qdf_spin_unlock_bh(&(pdev->rx_buff_list_lock));
	}
}
Manjunathappa Prakashc8e75642016-06-03 19:32:27 -0700831
Govind Singhd79e1342015-11-03 16:20:02 +0530832/**
833 * htt_rx_dbg_rxbuf_set() - reset element of rx buff list
834 * @pdev: pdev handle
835 * @netbuf: rx sk_buff
836 * Return: none
837 */
838static inline
839void htt_rx_dbg_rxbuf_reset(struct htt_pdev_t *pdev,
Nirav Shahcbc6d722016-03-01 16:24:53 +0530840 qdf_nbuf_t netbuf)
Govind Singhd79e1342015-11-03 16:20:02 +0530841{
842 uint32_t index;
843
844 if (pdev->rx_buff_list) {
Orhan K AKYILDIZa8e2e6f2016-10-13 22:24:12 -0700845 qdf_spin_lock_bh(&(pdev->rx_buff_list_lock));
Rakshith Suresh Patkarc1079672018-12-20 15:49:55 +0530846 index = QDF_NBUF_CB_RX_MAP_IDX(netbuf);
Govind Singhd79e1342015-11-03 16:20:02 +0530847 if (index < HTT_RX_RING_BUFF_DBG_LIST) {
Orhan K AKYILDIZ0c1b6bf2016-11-28 18:47:24 -0800848 pdev->rx_buff_list[index].recved =
Orhan K AKYILDIZa8e2e6f2016-10-13 22:24:12 -0700849 qdf_get_log_timestamp();
Orhan K AKYILDIZfdd74de2016-12-15 12:08:04 -0800850 pdev->rx_buff_recvd_cum++;
851 } else {
852 pdev->rx_buff_recvd_err++;
Govind Singhd79e1342015-11-03 16:20:02 +0530853 }
Orhan K AKYILDIZfdd74de2016-12-15 12:08:04 -0800854 pdev->rx_buff_list[pdev->rx_buff_index].cpu |=
855 (1 << qdf_get_cpu());
Orhan K AKYILDIZa8e2e6f2016-10-13 22:24:12 -0700856 qdf_spin_unlock_bh(&(pdev->rx_buff_list_lock));
Govind Singhd79e1342015-11-03 16:20:02 +0530857 }
858}
/**
 * htt_rx_dbg_rxbuf_indupd() - add a record for alloc index update
 * @pdev: pdev handle
 * @alloc_index: value of the new rx ring alloc index
 *
 * Inserts a pseudo entry (paddr/nbuf/nbuf_data all zero) into the history
 * list; the recved field carries @alloc_index instead of a timestamp so the
 * record can be recognized when the list is dumped.
 *
 * Return: none
 */
static inline
void htt_rx_dbg_rxbuf_indupd(struct htt_pdev_t *pdev, int alloc_index)
{
	if (pdev->rx_buff_list) {
		qdf_spin_lock_bh(&(pdev->rx_buff_list_lock));
		pdev->rx_buff_list[pdev->rx_buff_index].paddr = 0;
		pdev->rx_buff_list[pdev->rx_buff_index].nbuf = 0;
		pdev->rx_buff_list[pdev->rx_buff_index].nbuf_data = 0;
		pdev->rx_buff_list[pdev->rx_buff_index].posted =
			qdf_get_log_timestamp();
		/* recved doubles as the payload for this pseudo record */
		pdev->rx_buff_list[pdev->rx_buff_index].recved =
			(uint64_t)alloc_index;
		pdev->rx_buff_list[pdev->rx_buff_index].cpu =
			(1 << qdf_get_cpu());
		/* advance the circular cursor */
		if (++pdev->rx_buff_index >=
			HTT_RX_RING_BUFF_DBG_LIST)
			pdev->rx_buff_index = 0;
		qdf_spin_unlock_bh(&(pdev->rx_buff_list_lock));
	}
}
/**
 * htt_rx_dbg_rxbuf_httrxind() - add a record for receipt of htt rx_ind msg
 * @pdev: pdev handle
 * @msdu_cnt: number of msdus reported by the rx indication message
 *
 * Inserts a pseudo entry into the history list tagged with the 'HTTRXMSG'
 * magic in the recved field; the paddr field is reused to carry @msdu_cnt.
 *
 * Return: none
 */
static inline
void htt_rx_dbg_rxbuf_httrxind(struct htt_pdev_t *pdev, unsigned int msdu_cnt)
{
	if (pdev->rx_buff_list) {
		qdf_spin_lock_bh(&(pdev->rx_buff_list_lock));
		/* paddr field reused to store the msdu count */
		pdev->rx_buff_list[pdev->rx_buff_index].paddr = msdu_cnt;
		pdev->rx_buff_list[pdev->rx_buff_index].nbuf = 0;
		pdev->rx_buff_list[pdev->rx_buff_index].nbuf_data = 0;
		pdev->rx_buff_list[pdev->rx_buff_index].posted =
			qdf_get_log_timestamp();
		pdev->rx_buff_list[pdev->rx_buff_index].recved =
			(uint64_t)0x48545452584D5347; /* 'HTTRXMSG' */
		pdev->rx_buff_list[pdev->rx_buff_index].cpu =
			(1 << qdf_get_cpu());
		/* advance the circular cursor */
		if (++pdev->rx_buff_index >=
			HTT_RX_RING_BUFF_DBG_LIST)
			pdev->rx_buff_index = 0;
		qdf_spin_unlock_bh(&(pdev->rx_buff_list_lock));
	}
}
912
913/**
Govind Singhc5ce2902015-10-26 14:32:43 +0530914 * htt_rx_dbg_rxbuf_deinit() - deinit debug rx buff list
915 * @pdev: pdev handle
916 *
917 * Return: none
918 */
919static inline
920void htt_rx_dbg_rxbuf_deinit(struct htt_pdev_t *pdev)
921{
922 if (pdev->rx_buff_list)
Orhan K AKYILDIZ3d926d82017-02-28 18:05:41 -0800923 pdev->rx_buff_list = NULL;
Houston Hoffman2eac7c42016-12-12 12:22:52 -0800924 qdf_spinlock_destroy(&(pdev->rx_buff_list_lock));
Govind Singhc5ce2902015-10-26 14:32:43 +0530925}
Govind Singhd79e1342015-11-03 16:20:02 +0530926#else
/**
 * htt_rx_dbg_rxbuf_init() - no-op stub when DEBUG_RX_RING_BUFFER is off
 * @pdev: pdev handle
 *
 * Return: none
 */
static inline
void htt_rx_dbg_rxbuf_init(struct htt_pdev_t *pdev)
{
}
/**
 * htt_display_rx_buf_debug() - no-op stub when DEBUG_RX_RING_BUFFER is off
 * @pdev: pdev handle
 *
 * Return: 0 always
 */
static inline int htt_display_rx_buf_debug(struct htt_pdev_t *pdev)
{
	return 0;
}
935
Govind Singhd79e1342015-11-03 16:20:02 +0530936static inline
937void htt_rx_dbg_rxbuf_set(struct htt_pdev_t *pdev,
938 uint32_t paddr,
Nirav Shahcbc6d722016-03-01 16:24:53 +0530939 qdf_nbuf_t rx_netbuf)
Govind Singhd79e1342015-11-03 16:20:02 +0530940{
Govind Singhd79e1342015-11-03 16:20:02 +0530941}
/**
 * htt_rx_dbg_rxbuf_reset() - no-op stub when DEBUG_RX_RING_BUFFER is off
 * @pdev: pdev handle
 * @netbuf: rx sk_buff
 *
 * Return: none
 */
static inline
void htt_rx_dbg_rxbuf_reset(struct htt_pdev_t *pdev,
			    qdf_nbuf_t netbuf)
{
}
/**
 * htt_rx_dbg_rxbuf_indupd() - no-op stub when DEBUG_RX_RING_BUFFER is off
 * @pdev: pdev handle
 * @alloc_index: value of the rx ring alloc index (unused)
 *
 * Return: none
 */
static inline
void htt_rx_dbg_rxbuf_indupd(struct htt_pdev_t *pdev,
			     int alloc_index)
{
}
/**
 * htt_rx_dbg_rxbuf_httrxind() - no-op stub when DEBUG_RX_RING_BUFFER is off
 * @pdev: pdev handle
 * @msdu_cnt: msdu count from the rx indication (unused)
 *
 * Return: none
 */
static inline
void htt_rx_dbg_rxbuf_httrxind(struct htt_pdev_t *pdev,
			       unsigned int msdu_cnt)
{
}
/**
 * htt_rx_dbg_rxbuf_deinit() - no-op stub when DEBUG_RX_RING_BUFFER is off
 * @pdev: pdev handle
 *
 * Return: none
 */
static inline
void htt_rx_dbg_rxbuf_deinit(struct htt_pdev_t *pdev)
{
}
Govind Singhd79e1342015-11-03 16:20:02 +0530962#endif
Nirav Shah73713f72018-05-17 14:50:41 +0530963
#ifndef HTT_RX_RING_SIZE_MIN
#define HTT_RX_RING_SIZE_MIN 128 /* slightly > than one large A-MPDU */
#endif

#ifndef HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_SIZE_MAX 2048 /* ~20 ms @ 1 Gbps of 1500B MSDUs */
#endif

#ifndef HTT_RX_AVG_FRM_BYTES
#define HTT_RX_AVG_FRM_BYTES 1000
#endif

/* length in bytes of the 802.11 FCS (frame check sequence) trailer */
#define HTT_FCS_LEN (4)

/* HTT_PKT_DUMP(x): emit debug-dump statement x only in HTT_DEBUG_DATA builds */
#ifdef HTT_DEBUG_DATA
#define HTT_PKT_DUMP(x) x
#else
#define HTT_PKT_DUMP(x) /* no-op */
#endif

/* assert on a zero msdu count only when rx-hash debugging is compiled in */
#ifdef RX_HASH_DEBUG
#define HTT_RX_CHECK_MSDU_COUNT(msdu_count) HTT_ASSERT_ALWAYS(msdu_count)
#else
#define HTT_RX_CHECK_MSDU_COUNT(msdu_count) /* no-op */
#endif

/* offset (in 32-bit words) to the field following a paddr in a list
 * element: a 64-bit paddr occupies two words, a 32-bit paddr one
 */
#if HTT_PADDR64
#define NEXT_FIELD_OFFSET_IN32 2
#else /* ! HTT_PADDR64 */
#define NEXT_FIELD_OFFSET_IN32 1
#endif /* HTT_PADDR64 */

/* pattern stamped into the unused high bits of a 64-bit rx paddr when
 * ENABLE_DEBUG_ADDRESS_MARKING is set
 */
#define RX_PADDR_MAGIC_PATTERN 0xDEAD0000
997
998#if HTT_PADDR64
999static inline qdf_dma_addr_t htt_paddr_trim_to_37(qdf_dma_addr_t paddr)
1000{
1001 qdf_dma_addr_t ret = paddr;
1002
1003 if (sizeof(paddr) > 4)
1004 ret &= 0x1fffffffff;
1005 return ret;
1006}
1007#else /* not 64 bits */
static inline qdf_dma_addr_t htt_paddr_trim_to_37(qdf_dma_addr_t paddr)
{
	/* 32-bit addresses cannot exceed 37 bits; nothing to trim */
	return paddr;
}
1012#endif /* HTT_PADDR64 */
1013
Nirav Shah6ebfe242018-05-09 15:56:12 +05301014#ifdef WLAN_FULL_REORDER_OFFLOAD
Nirav Shah73713f72018-05-17 14:50:41 +05301015#ifdef ENABLE_DEBUG_ADDRESS_MARKING
/**
 * htt_rx_paddr_unmark_high_bits() - validate and strip the debug marking
 *	from the high bits of an rx physical address
 * @paddr: physical address carrying RX_PADDR_MAGIC_PATTERN in its high bits
 *
 * Asserts (HTT_ASSERT_ALWAYS) if the magic pattern is absent, which would
 * indicate a corrupted or unmarked address.
 *
 * Return: @paddr trimmed to 37 bits (unchanged on 32-bit builds)
 */
static inline qdf_dma_addr_t
htt_rx_paddr_unmark_high_bits(qdf_dma_addr_t paddr)
{
	uint32_t markings;

	if (sizeof(qdf_dma_addr_t) > 4) {
		markings = (uint32_t)((paddr >> 16) >> 16);
		/*
		 * check if it is marked correctly:
		 * See the mark_high_bits function above for the expected
		 * pattern.
		 * the LS 5 bits are the high bits of physical address
		 * padded (with 0b0) to 8 bits
		 */
		if ((markings & 0xFFFF0000) != RX_PADDR_MAGIC_PATTERN) {
			qdf_print("paddr not marked correctly: 0x%pK!\n",
				  (void *)paddr);
			HTT_ASSERT_ALWAYS(0);
		}

		/* clear markings for further use */
		paddr = htt_paddr_trim_to_37(paddr);
	}
	return paddr;
}
1041
/**
 * htt_rx_in_ord_paddr_get() - extract an rx physical address from an
 *	in-order indication message and strip the debug marking
 * @u32p: pointer to the first 32-bit word holding the paddr
 *
 * On 64-bit-paddr builds the high half is read from the following word.
 *
 * Return: physical address with debug high-bit markings cleared
 */
static inline
qdf_dma_addr_t htt_rx_in_ord_paddr_get(uint32_t *u32p)
{
	qdf_dma_addr_t paddr = 0;

	paddr = (qdf_dma_addr_t)HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*u32p);
	if (sizeof(qdf_dma_addr_t) > 4) {
		u32p++;
		/* 32 bit architectures dont like <<32 */
		paddr |= (((qdf_dma_addr_t)
			  HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*u32p))
			  << 16 << 16);
	}
	paddr = htt_rx_paddr_unmark_high_bits(paddr);

	return paddr;
}
1059#else
1060#if HTT_PADDR64
/**
 * htt_rx_in_ord_paddr_get() - extract a 64-bit rx physical address from an
 *	in-order indication message (no debug marking compiled in)
 * @u32p: pointer to the first 32-bit word holding the paddr
 *
 * Return: physical address assembled from one or two 32-bit words
 */
static inline
qdf_dma_addr_t htt_rx_in_ord_paddr_get(uint32_t *u32p)
{
	qdf_dma_addr_t paddr = 0;

	paddr = (qdf_dma_addr_t)HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*u32p);
	if (sizeof(qdf_dma_addr_t) > 4) {
		u32p++;
		/* 32 bit architectures dont like <<32 */
		paddr |= (((qdf_dma_addr_t)
			  HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*u32p))
			  << 16 << 16);
	}
	return paddr;
}
1076#else
/**
 * htt_rx_in_ord_paddr_get() - extract a 32-bit rx physical address from an
 *	in-order indication message
 * @u32p: pointer to the 32-bit word holding the paddr
 *
 * Return: physical address read from the single word
 */
static inline
qdf_dma_addr_t htt_rx_in_ord_paddr_get(uint32_t *u32p)
{
	return HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*u32p);
}
1082#endif
1083#endif /* ENABLE_DEBUG_ADDRESS_MARKING */
1084
/**
 * htt_rx_in_order_ring_elems() - number of filled elements in the rx ring
 * @pdev: htt pdev handle
 *
 * Unsigned subtraction of the two ring indices handles index wraparound;
 * masking with size_mask presumes the ring size is a power of two —
 * consistent with the mask's name, but confirm at the ring setup site.
 *
 * Return: count of elements between the target index and the alloc index
 */
static inline
unsigned int htt_rx_in_order_ring_elems(struct htt_pdev_t *pdev)
{
	return (*pdev->rx_ring.alloc_idx.vaddr -
		*pdev->rx_ring.target_idx.vaddr) &
		pdev->rx_ring.size_mask;
}
1092
/**
 * htt_rx_in_order_netbuf_pop() - resolve an rx paddr to its netbuf
 * @pdev: htt pdev handle
 * @paddr: physical address reported in the rx indication
 *
 * Decrements the ring fill count and looks up @paddr (trimmed to 37 bits)
 * in the rx hash list.
 *
 * Return: result of htt_rx_hash_list_lookup() for @paddr
 */
static inline qdf_nbuf_t
htt_rx_in_order_netbuf_pop(htt_pdev_handle pdev, qdf_dma_addr_t paddr)
{
	HTT_ASSERT1(htt_rx_in_order_ring_elems(pdev) != 0);
	pdev->rx_ring.fill_cnt--;
	paddr = htt_paddr_trim_to_37(paddr);
	return htt_rx_hash_list_lookup(pdev, paddr);
}
1101
Nirav Shah6ebfe242018-05-09 15:56:12 +05301102#else
/**
 * htt_rx_in_ord_paddr_get() - stub when WLAN_FULL_REORDER_OFFLOAD is off
 * @u32p: pointer to the 32-bit word holding the paddr (unused)
 *
 * Return: 0 always
 */
static inline
qdf_dma_addr_t htt_rx_in_ord_paddr_get(uint32_t *u32p)
{
	return 0;
}
1108
/**
 * htt_rx_in_order_netbuf_pop() - stub when WLAN_FULL_REORDER_OFFLOAD is off
 * @pdev: htt pdev handle (unused)
 * @paddr: physical address (unused)
 *
 * Return: NULL always
 */
static inline qdf_nbuf_t
htt_rx_in_order_netbuf_pop(htt_pdev_handle pdev, qdf_dma_addr_t paddr)
{
	return NULL;
}
1114#endif
1115
1116#if defined(FEATURE_MONITOR_MODE_SUPPORT) && defined(WLAN_FULL_REORDER_OFFLOAD)
/**
 * htt_rx_mon_amsdu_rx_in_order_pop_ll() - pop an amsdu from the rx ring for
 *	monitor mode with full reorder offload
 * @pdev: htt pdev handle
 * @rx_ind_msg: HTT rx in-order indication message
 * @head_msdu: [out] first msdu of the popped list
 * @tail_msdu: [out] last msdu of the popped list
 * @replenish_cnt: [out] number of buffers to replenish
 *
 * NOTE(review): parameter semantics inferred from names — confirm against
 * the definition in htt_rx.c.
 *
 * Return: see the definition in htt_rx.c
 */
int htt_rx_mon_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
					qdf_nbuf_t rx_ind_msg,
					qdf_nbuf_t *head_msdu,
					qdf_nbuf_t *tail_msdu,
					uint32_t *replenish_cnt);
1122#else
/**
 * htt_rx_mon_amsdu_rx_in_order_pop_ll() - stub when monitor mode support or
 *	full reorder offload is not compiled in
 * @pdev: htt pdev handle (unused)
 * @rx_ind_msg: HTT rx in-order indication message (unused)
 * @head_msdu: [out] first msdu pointer (not written)
 * @tail_msdu: [out] last msdu pointer (not written)
 * @replenish_cnt: [out] replenish count (not written)
 *
 * Return: 0 always
 */
static inline
int htt_rx_mon_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
					qdf_nbuf_t rx_ind_msg,
					qdf_nbuf_t *head_msdu,
					qdf_nbuf_t *tail_msdu,
					uint32_t *replenish_cnt)
{
	return 0;
}
1132#endif
Nirav Shah73713f72018-05-17 14:50:41 +05301133
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001134#endif /* _HTT_INTERNAL__H_ */