blob: 4b035ada638550624389f61d36d88736790a87cb [file] [log] [blame]
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001/*
Akshay Kosigi6a206752019-06-10 23:14:52 +05302 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07003 *
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16 * PERFORMANCE OF THIS SOFTWARE.
17 */
18
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +053019#include "hal_api.h"
Balamurugan Mahalingamf72cb1f2018-06-25 12:18:34 +053020#include "hal_hw_headers.h"
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -070021#include "hal_reo.h"
22#include "hal_tx.h"
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +053023#include "hal_rx.h"
Pratik Gandhidc82a772018-01-30 18:57:05 +053024#include "qdf_module.h"
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -070025
/* TODO: See if the following definition is available in HW headers */
/* Value programmed into UNIFORM_DESCRIPTOR_HEADER.OWNER for REO-owned
 * descriptors (see hal_uniform_desc_hdr_setup callers below).
 */
#define HAL_REO_OWNED 4
/* UNIFORM_DESCRIPTOR_HEADER.BUFFER_TYPE values for the base REO queue
 * descriptor and its extension descriptors respectively.
 */
#define HAL_REO_QUEUE_DESC 8
#define HAL_REO_QUEUE_EXT_DESC 9

/* TODO: Using associated link desc counter 1 for Rx. Check with FW on
 * how these counters are assigned
 */
#define HAL_RX_LINK_DESC_CNTR 1
/* TODO: Following definition should be from HW headers */
/* Same value as HAL_REO_OWNED; used as the owner field in this file */
#define HAL_DESC_REO_OWNED 4
37
/**
 * hal_uniform_desc_hdr_setup - Set up the uniform descriptor header
 * @desc: pointer to the descriptor memory whose header is written
 * @owner: owner info (e.g. HAL_DESC_REO_OWNED)
 * @buffer_type: buffer type (e.g. HAL_REO_QUEUE_DESC /
 *		 HAL_REO_QUEUE_EXT_DESC)
 *
 * Writes only the OWNER and BUFFER_TYPE fields of the first header
 * dword; other bits of the descriptor are left untouched.
 */
static inline void hal_uniform_desc_hdr_setup(uint32_t *desc, uint32_t owner,
		uint32_t buffer_type)
{
	HAL_DESC_SET_FIELD(desc, UNIFORM_DESCRIPTOR_HEADER_0, OWNER,
		owner);
	HAL_DESC_SET_FIELD(desc, UNIFORM_DESCRIPTOR_HEADER_0, BUFFER_TYPE,
		buffer_type);
}
51
#ifndef TID_TO_WME_AC
#define WME_AC_BE 0 /* best effort */
#define WME_AC_BK 1 /* background */
#define WME_AC_VI 2 /* video */
#define WME_AC_VO 3 /* voice */

/* Map a TID to its WME access category: TIDs 0/3 -> BE, 1/2 -> BK,
 * 4/5 -> VI, everything else (6/7 and above) -> VO.
 */
#define TID_TO_WME_AC(_tid) ( \
	(((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
	(((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
	(((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
	WME_AC_VO)
#endif
/* TID value used for non-QoS traffic (outside the 0-15 QoS TID range) */
#define HAL_NON_QOS_TID 16
65
/**
 * hal_reo_qdesc_setup - Setup HW REO queue descriptor
 *
 * @hal_soc_hdl: Opaque HAL SOC handle (currently unused in this body)
 * @tid: TID (also written as the SW RECEIVE_QUEUE_NUMBER metadata)
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number (programmed as SSN if <= 0xfff)
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 *		    (not referenced in this function body)
 * @pn_type: PN check type (HAL_PN_WPA / HAL_PN_WAPI_EVEN /
 *	     HAL_PN_WAPI_UNEVEN, anything else disables PN check)
 *
 * Zeroes the base rx_reo_queue structure and programs the uniform
 * header, queue metadata, BA window, PN settings and SSN. For QoS TIDs
 * it also initializes three rx_reo_queue_ext descriptors immediately
 * following the base descriptor (max BA window assumed; see TODO below).
 */
void hal_reo_qdesc_setup(hal_soc_handle_t hal_soc_hdl, int tid,
			 uint32_t ba_window_size,
			 uint32_t start_seq, void *hw_qdesc_vaddr,
			 qdf_dma_addr_t hw_qdesc_paddr,
			 int pn_type)
{
	uint32_t *reo_queue_desc = (uint32_t *)hw_qdesc_vaddr;
	uint32_t *reo_queue_ext_desc;
	uint32_t reg_val;
	uint32_t pn_enable;
	uint32_t pn_size = 0;

	/* Only the base descriptor is zeroed here; the extension
	 * descriptors are zeroed below when applicable.
	 */
	qdf_mem_zero(hw_qdesc_vaddr, sizeof(struct rx_reo_queue));

	hal_uniform_desc_hdr_setup(reo_queue_desc, HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_desc, UNIFORM_DESCRIPTOR_HEADER_0,
			   RESERVED_0A, 0xDDBEEF);

	/* This a just a SW meta data and will be copied to REO destination
	 * descriptors indicated by hardware.
	 * TODO: Setting TID in this field. See if we should set something else.
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_1,
			   RECEIVE_QUEUE_NUMBER, tid);
	/* Mark the queue descriptor valid */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			   VLD, 1);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
		ASSOCIATED_LINK_DESCRIPTOR_COUNTER, HAL_RX_LINK_DESC_CNTR);

	/*
	 * Fields DISABLE_DUPLICATE_DETECTION and SOFT_REORDER_ENABLE will be 0
	 */

	reg_val = TID_TO_WME_AC(tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, AC, reg_val);

	if (ba_window_size < 1)
		ba_window_size = 1;
	/* WAR to get 2k exception in Non BA case.
	 * Setting window size to 2 to get 2k jump exception
	 * when we receive aggregates in Non BA case
	 */
	if ((ba_window_size == 1) && (tid != HAL_NON_QOS_TID))
		ba_window_size++;
	/* Set RTY bit for non-BA case. Duplicate detection is currently not
	 * done by HW in non-BA case if RTY bit is not set.
	 * TODO: This is a temporary War and should be removed once HW fix is
	 * made to check and discard duplicates even if RTY bit is not set.
	 */
	if (ba_window_size == 1)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, RTY, 1);

	/* HW field is window size minus one */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, BA_WINDOW_SIZE,
			   ba_window_size - 1);

	/* Derive PN-check enable and PN width from the cipher family */
	switch (pn_type) {
	case HAL_PN_WPA:
		pn_enable = 1;
		pn_size = PN_SIZE_48;
		break;
	case HAL_PN_WAPI_EVEN:
	case HAL_PN_WAPI_UNEVEN:
		pn_enable = 1;
		pn_size = PN_SIZE_128;
		break;
	default:
		pn_enable = 0;
		break;
	}

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_CHECK_NEEDED,
			   pn_enable);

	if (pn_type == HAL_PN_WAPI_EVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
				   PN_SHALL_BE_EVEN, 1);
	else if (pn_type == HAL_PN_WAPI_UNEVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
				   PN_SHALL_BE_UNEVEN, 1);

	/*
	 * TODO: Need to check if PN handling in SW needs to be enabled
	 * So far this is not a requirement
	 */

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_SIZE,
			   pn_size);

	/* TODO: Check if RX_REO_QUEUE_2_IGNORE_AMPDU_FLAG need to be set
	 * based on BA window size and/or AMPDU capabilities
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			   IGNORE_AMPDU_FLAG, 1);

	/* SSN field is 12 bits wide; larger start_seq values are skipped */
	if (start_seq <= 0xfff)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SSN,
				   start_seq);

	/* TODO: SVLD should be set to 1 if a valid SSN is received in ADDBA,
	 * but REO is not delivering packets if we set it to 1. Need to enable
	 * this once the issue is resolved
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SVLD, 0);

	/* TODO: Check if we should set start PN for WAPI */

#ifdef notyet
	/* Setup first queue extension if BA window size is more than 1 */
	if (ba_window_size > 1) {
		reo_queue_ext_desc =
			(uint32_t *)(((struct rx_reo_queue *)reo_queue_desc) +
				     1);
		qdf_mem_zero(reo_queue_ext_desc,
			     sizeof(struct rx_reo_queue_ext));
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
	}
	/* Setup second queue extension if BA window size is more than 105 */
	if (ba_window_size > 105) {
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
		qdf_mem_zero(reo_queue_ext_desc,
			     sizeof(struct rx_reo_queue_ext));
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
	}
	/* Setup third queue extension if BA window size is more than 210 */
	if (ba_window_size > 210) {
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
		qdf_mem_zero(reo_queue_ext_desc,
			     sizeof(struct rx_reo_queue_ext));
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
	}
#else
	/* TODO: HW queue descriptors are currently allocated for max BA
	 * window size for all QOS TIDs so that same descriptor can be used
	 * later when ADDBA request is recevied. This should be changed to
	 * allocate HW queue descriptors based on BA window size being
	 * negotiated (0 for non BA cases), and reallocate when BA window
	 * size changes and also send WMI message to FW to change the REO
	 * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
	 */
	if (tid != HAL_NON_QOS_TID) {
		/* The three extension descriptors follow the base
		 * rx_reo_queue structure contiguously in memory.
		 */
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue *)reo_queue_desc) + 1);
		qdf_mem_zero(reo_queue_ext_desc, 3 *
			sizeof(struct rx_reo_queue_ext));
		/* Initialize first reo queue extension descriptor */
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
		/* Fixed pattern in reserved bits for debugging */
		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xADBEEF);
		/* Initialize second reo queue extension descriptor */
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
		/* Fixed pattern in reserved bits for debugging */
		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xBDBEEF);
		/* Initialize third reo queue extension descriptor */
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
		/* Fixed pattern in reserved bits for debugging */
		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xCDBEEF);
	}
#endif
}
qdf_export_symbol(hal_reo_qdesc_setup);
254
Nandha Kishore Easwarane6a27f72018-09-01 23:04:33 +0530255/**
256 * hal_get_ba_aging_timeout - Get BA Aging timeout
257 *
258 * @hal_soc: Opaque HAL SOC handle
259 * @ac: Access category
260 * @value: window size to get
261 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +0530262void hal_get_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
Nandha Kishore Easwarane6a27f72018-09-01 23:04:33 +0530263 uint32_t *value)
264{
Akshay Kosigi8eda31c2019-07-10 14:42:42 +0530265 struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
Nandha Kishore Easwarane6a27f72018-09-01 23:04:33 +0530266
267 switch (ac) {
268 case WME_AC_BE:
269 *value = HAL_REG_READ(soc,
270 HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
271 SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
272 break;
273 case WME_AC_BK:
274 *value = HAL_REG_READ(soc,
275 HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
276 SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
277 break;
278 case WME_AC_VI:
279 *value = HAL_REG_READ(soc,
280 HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
281 SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
282 break;
283 case WME_AC_VO:
284 *value = HAL_REG_READ(soc,
285 HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
286 SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
287 break;
288 default:
289 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
290 "Invalid AC: %d\n", ac);
291 }
292}
293
294qdf_export_symbol(hal_get_ba_aging_timeout);
295
296/**
297 * hal_set_ba_aging_timeout - Set BA Aging timeout
298 *
299 * @hal_soc: Opaque HAL SOC handle
300 * @ac: Access category
301 * ac: 0 - Background, 1 - Best Effort, 2 - Video, 3 - Voice
302 * @value: Input value to set
303 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +0530304void hal_set_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
Nandha Kishore Easwarane6a27f72018-09-01 23:04:33 +0530305 uint32_t value)
306{
Akshay Kosigi8eda31c2019-07-10 14:42:42 +0530307 struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
Nandha Kishore Easwarane6a27f72018-09-01 23:04:33 +0530308
309 switch (ac) {
310 case WME_AC_BE:
311 HAL_REG_WRITE(soc,
312 HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
313 SEQ_WCSS_UMAC_REO_REG_OFFSET),
314 value * 1000);
315 break;
316 case WME_AC_BK:
317 HAL_REG_WRITE(soc,
318 HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
319 SEQ_WCSS_UMAC_REO_REG_OFFSET),
320 value * 1000);
321 break;
322 case WME_AC_VI:
323 HAL_REG_WRITE(soc,
324 HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
325 SEQ_WCSS_UMAC_REO_REG_OFFSET),
326 value * 1000);
327 break;
328 case WME_AC_VO:
329 HAL_REG_WRITE(soc,
330 HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
331 SEQ_WCSS_UMAC_REO_REG_OFFSET),
332 value * 1000);
333 break;
334 default:
335 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
336 "Invalid AC: %d\n", ac);
337 }
338}
339
340qdf_export_symbol(hal_set_ba_aging_timeout);
341
/* Only the low 4 bits are meaningful: REO HW has 4 cache blocking
 * resources tracked in a 4-bit bitmap.
 */
#define BLOCK_RES_MASK		0xF

/**
 * hal_find_one_bit - index of the least-significant set bit of @x
 * @x: bitmap (only bits within BLOCK_RES_MASK considered)
 *
 * Returns 0xFF (uint8_t wrap of -1) when no set bit exists within
 * the mask.
 */
static inline uint8_t hal_find_one_bit(uint8_t x)
{
	uint8_t lsb = (x & (~x + 1)) & BLOCK_RES_MASK;
	uint8_t pos = 0;

	while (lsb) {
		lsb >>= 1;
		pos++;
	}

	return pos - 1;
}

/**
 * hal_find_zero_bit - index of the least-significant clear bit of @x
 * @x: bitmap (only bits within BLOCK_RES_MASK considered)
 *
 * Returns 0xFF (uint8_t wrap of -1) when all bits within the mask
 * are set.
 */
static inline uint8_t hal_find_zero_bit(uint8_t x)
{
	uint8_t lsz = (~x & (x + 1)) & BLOCK_RES_MASK;
	uint8_t pos = 0;

	while (lsz) {
		lsz >>= 1;
		pos++;
	}

	return pos - 1;
}
364
/**
 * hal_reo_cmd_set_descr_addr - Program the 40-bit target address into a
 *				REO command descriptor
 * @reo_desc: command descriptor (already past the TLV header)
 * @type: REO command type; selects which descriptor layout's address
 *	  fields are written
 * @paddr_lo: lower 32 bits of the physical address
 * @paddr_hi: bits 39..32 of the physical address
 *
 * Logs an error and writes nothing for unknown command types.
 */
inline void hal_reo_cmd_set_descr_addr(uint32_t *reo_desc,
				       enum hal_reo_cmd_type type,
				       uint32_t paddr_lo,
				       uint8_t paddr_hi)
{
	switch (type) {
	case CMD_GET_QUEUE_STATS:
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_1,
				   RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2,
				   RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_1,
				   FLUSH_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
				   FLUSH_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_CACHE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_1,
				   FLUSH_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
				   FLUSH_ADDR_39_32, paddr_hi);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_1,
				   RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
				   RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid REO command type", __func__);
		break;
	}
}
401
/**
 * hal_reo_cmd_queue_stats - Post a GET_QUEUE_STATS command on the REO
 *			     command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (std.addr_lo/hi = queue descriptor address,
 *	 std.need_status, u.stats_params.clear)
 *
 * Return: REO command number extracted from the posted descriptor
 * header, or -EBUSY if no command ring entry is available.
 */
inline int hal_reo_cmd_queue_stats(hal_ring_handle_t hal_ring_hdl,
				   hal_soc_handle_t hal_soc_hdl,
				   struct hal_reo_cmd_params *cmd)

{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
			sizeof(struct reo_get_queue_stats));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear everything after the (already valid) command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_get_queue_stats) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_GET_QUEUE_STATS,
				   cmd->std.addr_lo,
				   cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, CLEAR_STATS,
			   cmd->u.stats_params.clear);

	/* When the runtime-PM get succeeds, commit the ring access
	 * normally; otherwise only reap and flag the ring so the
	 * pointer update is flushed later.
	 */
	if (hif_pm_runtime_get(hal_soc->hif_handle) == 0) {
		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		hif_pm_runtime_put(hal_soc->hif_handle);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	/* NOTE(review): reads the descriptor after ring access has ended;
	 * presumably safe because SW owns this memory until HW consumes
	 * it - confirm.
	 */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}
qdf_export_symbol(hal_reo_cmd_queue_stats);
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -0700453
/**
 * hal_reo_cmd_flush_queue - Post a FLUSH_QUEUE command on the REO
 *			     command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (std.addr_lo/hi = descriptor to flush,
 *	 std.need_status, u.fl_queue_params)
 *
 * Return: REO command number from the posted descriptor header, or
 * -EBUSY if no command ring entry is available.
 */
inline int hal_reo_cmd_flush_queue(hal_ring_handle_t hal_ring_hdl,
				   hal_soc_handle_t hal_soc_hdl,
				   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
			sizeof(struct reo_flush_queue));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear everything after the command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_queue) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_QUEUE, cmd->std.addr_lo,
				   cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
			   BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
			   cmd->u.fl_queue_params.block_use_after_flush);

	/* Blocking resource index is only meaningful when blocking is
	 * requested after the flush.
	 */
	if (cmd->u.fl_queue_params.block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
			BLOCK_RESOURCE_INDEX, cmd->u.fl_queue_params.index);
	}

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}
qdf_export_symbol(hal_reo_cmd_flush_queue);
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -0700501
/**
 * hal_reo_cmd_flush_cache - Post a FLUSH_CACHE command on the REO
 *			     command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (std.addr_lo/hi = address to flush,
 *	 std.need_status, u.fl_cache_params)
 *
 * If blocking after flush is requested, a free blocking resource is
 * looked up in hal_soc->reo_res_bitmap and its index recorded in
 * hal_soc->index before the command is posted.
 *
 * Return: REO command number from the posted descriptor header, or
 * -EBUSY if no blocking resource or ring entry is available.
 */
inline int hal_reo_cmd_flush_cache(hal_ring_handle_t hal_ring_hdl,
				   hal_soc_handle_t hal_soc_hdl,
				   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_flush_cache_params *cp;
	uint8_t index = 0;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	cp = &cmd->u.fl_cache_params;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);

	/* We need a cache block resource for this operation, and REO HW has
	 * only 4 such blocking resources. These resources are managed using
	 * reo_res_bitmap, and we return failure if none is available.
	 */
	if (cp->block_use_after_flush) {
		index = hal_find_zero_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			qdf_print("%s, No blocking resource available!",
				  __func__);
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			return -EBUSY;
		}
		/* NOTE(review): only the chosen index is recorded here;
		 * no line in this function sets the bit in
		 * reo_res_bitmap - presumably done elsewhere (e.g. on
		 * command status) - confirm.
		 */
		hal_soc->index = index;
	}

	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		/* Dump ring state to aid debugging the exhaustion */
		hal_srng_dump(hal_ring_handle_to_hal_srng(hal_ring_hdl));
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
			sizeof(struct reo_flush_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear everything after the command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_CACHE, cmd->std.addr_lo,
				   cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   FORWARD_ALL_MPDUS_IN_QUEUE, cp->fwd_mpdus_in_queue);

	/* set it to 0 for now */
	cp->rel_block_index = 0;
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index);

	if (cp->block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
				   CACHE_BLOCK_RESOURCE_INDEX, index);
	}

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		BLOCK_CACHE_USAGE_AFTER_FLUSH, cp->block_use_after_flush);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, FLUSH_ENTIRE_CACHE,
			   cp->flush_all);

	/* Commit normally when runtime-PM get succeeds; otherwise reap
	 * and flag the ring so the pointer update is flushed later.
	 */
	if (hif_pm_runtime_get(hal_soc->hif_handle) == 0) {
		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		hif_pm_runtime_put(hal_soc->hif_handle);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}
qdf_export_symbol(hal_reo_cmd_flush_cache);
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -0700589
/**
 * hal_reo_cmd_unblock_cache - Post an UNBLOCK_CACHE command on the REO
 *			       command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (std.need_status, u.unblk_cache_params)
 *
 * Return: REO command number from the posted descriptor header, or
 * -EBUSY if no blocked resource exists (for UNBLOCK_RES_INDEX) or the
 * command ring is full.
 */
inline int hal_reo_cmd_unblock_cache(hal_ring_handle_t hal_ring_hdl,
				     hal_soc_handle_t hal_soc_hdl,
				     struct hal_reo_cmd_params *cmd)

{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;
	uint8_t index = 0;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		/* NOTE(review): the found index is used only to verify
		 * that some resource is currently blocked; the
		 * descriptor below is programmed with the caller's
		 * cmd->u.unblk_cache_params.index, not this one - confirm
		 * this is intended.
		 */
		index = hal_find_one_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			qdf_print("%s: No blocking resource to unblock!",
				  __func__);
			return -EBUSY;
		}
	}

	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
			sizeof(struct reo_unblock_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear everything after the command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_unblock_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
			   UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
				   CACHE_BLOCK_RESOURCE_INDEX,
				   cmd->u.unblk_cache_params.index);
	}

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}
qdf_export_symbol(hal_reo_cmd_unblock_cache);
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -0700647
/**
 * hal_reo_cmd_flush_timeout_list - Post a FLUSH_TIMEOUT_LIST command on
 *				    the REO command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (std.need_status, u.fl_tim_list_params:
 *	 ac_list, min_rel_desc, min_fwd_buf)
 *
 * Return: REO command number from the posted descriptor header, or
 * -EBUSY if no command ring entry is available.
 */
inline int hal_reo_cmd_flush_timeout_list(hal_ring_handle_t hal_ring_hdl,
					  hal_soc_handle_t hal_soc_hdl,
					  struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
			sizeof(struct reo_flush_timeout_list));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear everything after the command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_timeout_list) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_1, AC_TIMOUT_LIST,
			   cmd->u.fl_tim_list_params.ac_list);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
			   MINIMUM_RELEASE_DESC_COUNT,
			   cmd->u.fl_tim_list_params.min_rel_desc);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
			   MINIMUM_FORWARD_BUF_COUNT,
			   cmd->u.fl_tim_list_params.min_fwd_buf);

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}
qdf_export_symbol(hal_reo_cmd_flush_timeout_list);
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -0700694
Akshay Kosigi8eda31c2019-07-10 14:42:42 +0530695inline int hal_reo_cmd_update_rx_queue(hal_ring_handle_t hal_ring_hdl,
Akshay Kosigi6a206752019-06-10 23:14:52 +0530696 hal_soc_handle_t hal_soc_hdl,
697 struct hal_reo_cmd_params *cmd)
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -0700698{
Akshay Kosigi6a206752019-06-10 23:14:52 +0530699 struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -0700700 uint32_t *reo_desc, val;
701 struct hal_reo_cmd_update_queue_params *p;
702
703 p = &cmd->u.upd_queue_params;
704
Akshay Kosigi8eda31c2019-07-10 14:42:42 +0530705 hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
706 reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
Karunakar Dasinenia8c779b2017-01-11 13:57:55 -0800707 if (!reo_desc) {
708 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Aditya Sathishded018e2018-07-02 16:25:21 +0530709 "%s: Out of cmd ring entries", __func__);
Akshay Kosigi8eda31c2019-07-10 14:42:42 +0530710 hal_srng_access_end(hal_soc, hal_ring_hdl);
Karunakar Dasinenia8c779b2017-01-11 13:57:55 -0800711 return -EBUSY;
712 }
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -0700713
714 HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E,
715 sizeof(struct reo_update_rx_reo_queue));
716
717 /* Offsets of descriptor fields defined in HW headers start from
718 * the field after TLV header */
719 reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
Akshay Kosigi8eda31c2019-07-10 14:42:42 +0530720 qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
Karunakar Dasineni6a526752018-08-02 08:56:19 -0700721 sizeof(struct reo_update_rx_reo_queue) -
722 (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -0700723
724 HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
725 REO_STATUS_REQUIRED, cmd->std.need_status);
726
727 hal_reo_cmd_set_descr_addr(reo_desc, CMD_UPDATE_RX_REO_QUEUE,
728 cmd->std.addr_lo, cmd->std.addr_hi);
729
730 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
731 UPDATE_RECEIVE_QUEUE_NUMBER, p->update_rx_queue_num);
732
733 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, UPDATE_VLD,
734 p->update_vld);
735
736 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
737 UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
738 p->update_assoc_link_desc);
739
740 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
741 UPDATE_DISABLE_DUPLICATE_DETECTION,
742 p->update_disable_dup_detect);
743
744 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
745 UPDATE_DISABLE_DUPLICATE_DETECTION,
746 p->update_disable_dup_detect);
747
748 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
749 UPDATE_SOFT_REORDER_ENABLE,
750 p->update_soft_reorder_enab);
751
752 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
753 UPDATE_AC, p->update_ac);
754
755 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
756 UPDATE_BAR, p->update_bar);
757
758 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
759 UPDATE_BAR, p->update_bar);
760
761 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
762 UPDATE_RTY, p->update_rty);
763
764 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
765 UPDATE_CHK_2K_MODE, p->update_chk_2k_mode);
766
767 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
768 UPDATE_OOR_MODE, p->update_oor_mode);
769
770 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
771 UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size);
772
773 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
774 UPDATE_PN_CHECK_NEEDED, p->update_pn_check_needed);
775
776 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
777 UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even);
778
779 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
780 UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven);
781
782 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
783 UPDATE_PN_HANDLING_ENABLE, p->update_pn_hand_enab);
784
785 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
786 UPDATE_PN_SIZE, p->update_pn_size);
787
788 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
789 UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu);
790
791 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
792 UPDATE_SVLD, p->update_svld);
793
794 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
795 UPDATE_SSN, p->update_ssn);
796
797 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
798 UPDATE_SEQ_2K_ERROR_DETECTED_FLAG,
799 p->update_seq_2k_err_detect);
800
801 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
802 UPDATE_PN_VALID, p->update_pn_valid);
803
804 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
805 UPDATE_PN, p->update_pn);
806
807 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
808 RECEIVE_QUEUE_NUMBER, p->rx_queue_num);
809
810 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
811 VLD, p->vld);
812
813 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
814 ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
815 p->assoc_link_desc);
816
817 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
818 DISABLE_DUPLICATE_DETECTION, p->disable_dup_detect);
819
820 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
821 SOFT_REORDER_ENABLE, p->soft_reorder_enab);
822
823 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, AC, p->ac);
824
825 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
826 BAR, p->bar);
827
828 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
829 CHK_2K_MODE, p->chk_2k_mode);
830
831 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
832 RTY, p->rty);
833
834 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
835 OOR_MODE, p->oor_mode);
836
837 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
838 PN_CHECK_NEEDED, p->pn_check_needed);
839
840 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
841 PN_SHALL_BE_EVEN, p->pn_even);
842
843 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
844 PN_SHALL_BE_UNEVEN, p->pn_uneven);
845
846 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
847 PN_HANDLING_ENABLE, p->pn_hand_enab);
848
849 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
850 IGNORE_AMPDU_FLAG, p->ignore_ampdu);
851
Karunakar Dasineni7957fa92017-02-23 23:05:40 -0800852 if (p->ba_window_size < 1)
853 p->ba_window_size = 1;
sumedh baikadydf4a57c2018-04-08 22:19:22 -0700854 /*
855 * WAR to get 2k exception in Non BA case.
856 * Setting window size to 2 to get 2k jump exception
857 * when we receive aggregates in Non BA case
858 */
859 if (p->ba_window_size == 1)
860 p->ba_window_size++;
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -0700861 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
Karunakar Dasineni7957fa92017-02-23 23:05:40 -0800862 BA_WINDOW_SIZE, p->ba_window_size - 1);
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -0700863
Gurumoorthi Gnanasambandhaned4bcf82017-05-24 00:10:59 +0530864 if (p->pn_size == 24)
865 p->pn_size = PN_SIZE_24;
866 else if (p->pn_size == 48)
867 p->pn_size = PN_SIZE_48;
868 else if (p->pn_size == 128)
869 p->pn_size = PN_SIZE_128;
870
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -0700871 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
872 PN_SIZE, p->pn_size);
873
874 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
875 SVLD, p->svld);
876
877 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
878 SSN, p->ssn);
879
880 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
881 SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect);
882
883 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
884 PN_ERROR_DETECTED_FLAG, p->pn_err_detect);
885
886 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_5,
887 PN_31_0, p->pn_31_0);
888
889 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_6,
890 PN_63_32, p->pn_63_32);
891
892 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_7,
893 PN_95_64, p->pn_95_64);
894
895 HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_8,
896 PN_127_96, p->pn_127_96);
897
Venkata Sharath Chandra Manchala5ee6efd2019-08-01 11:22:04 -0700898 if (hif_pm_runtime_get(hal_soc->hif_handle) == 0) {
899 hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
900 hif_pm_runtime_put(hal_soc->hif_handle);
901 } else {
902 hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
Sravan Kumar Kairam78b01a12019-09-16 14:22:55 +0530903 hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
904 hal_srng_inc_flush_cnt(hal_ring_hdl);
Venkata Sharath Chandra Manchala5ee6efd2019-08-01 11:22:04 -0700905 }
906
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -0700907 val = reo_desc[CMD_HEADER_DW_OFFSET];
908 return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
909 val);
910}
Pratik Gandhidc82a772018-01-30 18:57:05 +0530911qdf_export_symbol(hal_reo_cmd_update_rx_queue);
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -0700912
Akshay Kosigi6a206752019-06-10 23:14:52 +0530913inline void
914hal_reo_queue_stats_status(uint32_t *reo_desc,
915 struct hal_reo_queue_status *st,
916 hal_soc_handle_t hal_soc_hdl)
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -0700917{
Akshay Kosigi6a206752019-06-10 23:14:52 +0530918 struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -0700919 uint32_t val;
920
921 /* Offsets of descriptor fields defined in HW headers start
922 * from the field after TLV header */
923 reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
924
925 /* header */
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +0530926 hal_reo_status_get_header(reo_desc, HAL_REO_QUEUE_STATS_STATUS_TLV,
927 &(st->header), hal_soc);
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -0700928
929 /* SSN */
930 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, SSN)];
931 st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, SSN, val);
932
933 /* current index */
934 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2,
935 CURRENT_INDEX)];
936 st->curr_idx =
937 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2,
938 CURRENT_INDEX, val);
939
940 /* PN bits */
941 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_3,
942 PN_31_0)];
943 st->pn_31_0 =
944 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_3,
945 PN_31_0, val);
946
947 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_4,
948 PN_63_32)];
949 st->pn_63_32 =
950 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_4,
951 PN_63_32, val);
952
953 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_5,
954 PN_95_64)];
955 st->pn_95_64 =
956 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_5,
957 PN_95_64, val);
958
959 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_6,
960 PN_127_96)];
961 st->pn_127_96 =
962 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_6,
963 PN_127_96, val);
964
965 /* timestamps */
966 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_7,
967 LAST_RX_ENQUEUE_TIMESTAMP)];
968 st->last_rx_enq_tstamp =
969 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_7,
970 LAST_RX_ENQUEUE_TIMESTAMP, val);
971
972 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_8,
973 LAST_RX_DEQUEUE_TIMESTAMP)];
974 st->last_rx_deq_tstamp =
975 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_8,
976 LAST_RX_DEQUEUE_TIMESTAMP, val);
977
978 /* rx bitmap */
979 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_9,
980 RX_BITMAP_31_0)];
981 st->rx_bitmap_31_0 =
982 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_9,
983 RX_BITMAP_31_0, val);
984
985 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_10,
986 RX_BITMAP_63_32)];
987 st->rx_bitmap_63_32 =
988 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_10,
989 RX_BITMAP_63_32, val);
990
991 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_11,
992 RX_BITMAP_95_64)];
993 st->rx_bitmap_95_64 =
994 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_11,
995 RX_BITMAP_95_64, val);
996
997 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_12,
998 RX_BITMAP_127_96)];
999 st->rx_bitmap_127_96 =
1000 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_12,
1001 RX_BITMAP_127_96, val);
1002
1003 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_13,
1004 RX_BITMAP_159_128)];
1005 st->rx_bitmap_159_128 =
1006 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_13,
1007 RX_BITMAP_159_128, val);
1008
1009 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_14,
1010 RX_BITMAP_191_160)];
1011 st->rx_bitmap_191_160 =
1012 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_14,
1013 RX_BITMAP_191_160, val);
1014
1015 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_15,
1016 RX_BITMAP_223_192)];
1017 st->rx_bitmap_223_192 =
1018 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_15,
1019 RX_BITMAP_223_192, val);
1020
1021 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_16,
1022 RX_BITMAP_255_224)];
1023 st->rx_bitmap_255_224 =
1024 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_16,
1025 RX_BITMAP_255_224, val);
1026
1027 /* various counts */
1028 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
1029 CURRENT_MPDU_COUNT)];
1030 st->curr_mpdu_cnt =
1031 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
1032 CURRENT_MPDU_COUNT, val);
1033
1034 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
1035 CURRENT_MSDU_COUNT)];
1036 st->curr_msdu_cnt =
1037 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
1038 CURRENT_MSDU_COUNT, val);
1039
1040 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
1041 TIMEOUT_COUNT)];
1042 st->fwd_timeout_cnt =
1043 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
1044 TIMEOUT_COUNT, val);
1045
1046 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
1047 FORWARD_DUE_TO_BAR_COUNT)];
1048 st->fwd_bar_cnt =
1049 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
1050 FORWARD_DUE_TO_BAR_COUNT, val);
1051
1052 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
1053 DUPLICATE_COUNT)];
1054 st->dup_cnt =
1055 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
1056 DUPLICATE_COUNT, val);
1057
1058 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
1059 FRAMES_IN_ORDER_COUNT)];
1060 st->frms_in_order_cnt =
1061 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
1062 FRAMES_IN_ORDER_COUNT, val);
1063
1064 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
1065 BAR_RECEIVED_COUNT)];
1066 st->bar_rcvd_cnt =
1067 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
1068 BAR_RECEIVED_COUNT, val);
1069
1070 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_20,
1071 MPDU_FRAMES_PROCESSED_COUNT)];
1072 st->mpdu_frms_cnt =
1073 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_20,
1074 MPDU_FRAMES_PROCESSED_COUNT, val);
1075
1076 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_21,
1077 MSDU_FRAMES_PROCESSED_COUNT)];
1078 st->msdu_frms_cnt =
1079 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_21,
1080 MSDU_FRAMES_PROCESSED_COUNT, val);
1081
1082 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_22,
1083 TOTAL_PROCESSED_BYTE_COUNT)];
1084 st->total_cnt =
1085 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_22,
1086 TOTAL_PROCESSED_BYTE_COUNT, val);
1087
1088 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
1089 LATE_RECEIVE_MPDU_COUNT)];
1090 st->late_recv_mpdu_cnt =
1091 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
1092 LATE_RECEIVE_MPDU_COUNT, val);
1093
1094 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
1095 WINDOW_JUMP_2K)];
1096 st->win_jump_2k =
1097 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
1098 WINDOW_JUMP_2K, val);
1099
1100 val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
1101 HOLE_COUNT)];
1102 st->hole_cnt =
1103 HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
1104 HOLE_COUNT, val);
1105}
Pratik Gandhidc82a772018-01-30 18:57:05 +05301106qdf_export_symbol(hal_reo_queue_stats_status);
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001107
Akshay Kosigi6a206752019-06-10 23:14:52 +05301108inline void
1109hal_reo_flush_queue_status(uint32_t *reo_desc,
1110 struct hal_reo_flush_queue_status *st,
1111 hal_soc_handle_t hal_soc_hdl)
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001112{
Akshay Kosigi6a206752019-06-10 23:14:52 +05301113 struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001114 uint32_t val;
1115
1116 /* Offsets of descriptor fields defined in HW headers start
1117 * from the field after TLV header */
1118 reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1119
1120 /* header */
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +05301121 hal_reo_status_get_header(reo_desc, HAL_REO_FLUSH_QUEUE_STATUS_TLV,
1122 &(st->header), hal_soc);
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001123
1124 /* error bit */
1125 val = reo_desc[HAL_OFFSET(REO_FLUSH_QUEUE_STATUS_2,
1126 ERROR_DETECTED)];
1127 st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
1128 val);
1129}
Pratik Gandhidc82a772018-01-30 18:57:05 +05301130qdf_export_symbol(hal_reo_flush_queue_status);
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001131
Akshay Kosigi6a206752019-06-10 23:14:52 +05301132inline void
1133hal_reo_flush_cache_status(uint32_t *reo_desc,
1134 struct hal_reo_flush_cache_status *st,
1135 hal_soc_handle_t hal_soc_hdl)
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001136{
Akshay Kosigi6a206752019-06-10 23:14:52 +05301137 struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001138 uint32_t val;
1139
1140 /* Offsets of descriptor fields defined in HW headers start
1141 * from the field after TLV header */
1142 reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1143
1144 /* header */
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +05301145 hal_reo_status_get_header(reo_desc, HAL_REO_FLUSH_CACHE_STATUS_TLV,
1146 &(st->header), hal_soc);
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001147
1148 /* error bit */
1149 val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1150 ERROR_DETECTED)];
1151 st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
1152 val);
1153
1154 /* block error */
1155 val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1156 BLOCK_ERROR_DETAILS)];
1157 st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1158 BLOCK_ERROR_DETAILS,
1159 val);
1160 if (!st->block_error)
Akshay Kosigi6a206752019-06-10 23:14:52 +05301161 qdf_set_bit(hal_soc->index,
1162 (unsigned long *)&hal_soc->reo_res_bitmap);
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001163
1164 /* cache flush status */
1165 val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1166 CACHE_CONTROLLER_FLUSH_STATUS_HIT)];
1167 st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1168 CACHE_CONTROLLER_FLUSH_STATUS_HIT,
1169 val);
1170
1171 /* cache flush descriptor type */
1172 val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1173 CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)];
1174 st->cache_flush_status_desc_type =
1175 HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1176 CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE,
1177 val);
1178
1179 /* cache flush count */
1180 val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1181 CACHE_CONTROLLER_FLUSH_COUNT)];
1182 st->cache_flush_cnt =
1183 HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1184 CACHE_CONTROLLER_FLUSH_COUNT,
1185 val);
1186
1187}
Pratik Gandhidc82a772018-01-30 18:57:05 +05301188qdf_export_symbol(hal_reo_flush_cache_status);
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001189
1190inline void hal_reo_unblock_cache_status(uint32_t *reo_desc,
Akshay Kosigi6a206752019-06-10 23:14:52 +05301191 hal_soc_handle_t hal_soc_hdl,
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001192 struct hal_reo_unblk_cache_status *st)
1193{
Akshay Kosigi6a206752019-06-10 23:14:52 +05301194 struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001195 uint32_t val;
1196
1197 /* Offsets of descriptor fields defined in HW headers start
1198 * from the field after TLV header */
1199 reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1200
1201 /* header */
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +05301202 hal_reo_status_get_header(reo_desc, HAL_REO_UNBLK_CACHE_STATUS_TLV,
Akshay Kosigi6a206752019-06-10 23:14:52 +05301203 &st->header, hal_soc);
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001204
1205 /* error bit */
1206 val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
1207 ERROR_DETECTED)];
1208 st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
1209 ERROR_DETECTED,
1210 val);
1211
1212 /* unblock type */
1213 val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
1214 UNBLOCK_TYPE)];
1215 st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
1216 UNBLOCK_TYPE,
1217 val);
1218
1219 if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
Akshay Kosigi6a206752019-06-10 23:14:52 +05301220 qdf_clear_bit(hal_soc->index,
1221 (unsigned long *)&hal_soc->reo_res_bitmap);
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001222}
Pratik Gandhidc82a772018-01-30 18:57:05 +05301223qdf_export_symbol(hal_reo_unblock_cache_status);
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001224
1225inline void hal_reo_flush_timeout_list_status(
1226 uint32_t *reo_desc,
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +05301227 struct hal_reo_flush_timeout_list_status *st,
Akshay Kosigi6a206752019-06-10 23:14:52 +05301228 hal_soc_handle_t hal_soc_hdl)
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001229
1230{
Akshay Kosigi6a206752019-06-10 23:14:52 +05301231 struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001232 uint32_t val;
1233
1234 /* Offsets of descriptor fields defined in HW headers start
1235 * from the field after TLV header */
1236 reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1237
1238 /* header */
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +05301239 hal_reo_status_get_header(reo_desc, HAL_REO_TIMOUT_LIST_STATUS_TLV,
1240 &(st->header), hal_soc);
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001241
1242 /* error bit */
1243 val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1244 ERROR_DETECTED)];
1245 st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1246 ERROR_DETECTED,
1247 val);
1248
1249 /* list empty */
1250 val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1251 TIMOUT_LIST_EMPTY)];
1252 st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1253 TIMOUT_LIST_EMPTY,
1254 val);
1255
1256 /* release descriptor count */
1257 val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1258 RELEASE_DESC_COUNT)];
1259 st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1260 RELEASE_DESC_COUNT,
1261 val);
1262
1263 /* forward buf count */
1264 val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1265 FORWARD_BUF_COUNT)];
1266 st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1267 FORWARD_BUF_COUNT,
1268 val);
1269}
Pratik Gandhidc82a772018-01-30 18:57:05 +05301270qdf_export_symbol(hal_reo_flush_timeout_list_status);
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001271
1272inline void hal_reo_desc_thres_reached_status(
1273 uint32_t *reo_desc,
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +05301274 struct hal_reo_desc_thres_reached_status *st,
Akshay Kosigi6a206752019-06-10 23:14:52 +05301275 hal_soc_handle_t hal_soc_hdl)
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001276{
Akshay Kosigi6a206752019-06-10 23:14:52 +05301277 struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001278 uint32_t val;
1279
1280 /* Offsets of descriptor fields defined in HW headers start
1281 * from the field after TLV header */
1282 reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1283
1284 /* header */
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +05301285 hal_reo_status_get_header(reo_desc,
1286 HAL_REO_DESC_THRES_STATUS_TLV,
1287 &(st->header), hal_soc);
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001288
1289 /* threshold index */
1290 val = reo_desc[HAL_OFFSET_DW(
1291 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
1292 THRESHOLD_INDEX)];
1293 st->thres_index = HAL_GET_FIELD(
1294 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
1295 THRESHOLD_INDEX,
1296 val);
1297
1298 /* link desc counters */
1299 val = reo_desc[HAL_OFFSET_DW(
1300 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
1301 LINK_DESCRIPTOR_COUNTER0)];
1302 st->link_desc_counter0 = HAL_GET_FIELD(
1303 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
1304 LINK_DESCRIPTOR_COUNTER0,
1305 val);
1306
1307 val = reo_desc[HAL_OFFSET_DW(
1308 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
1309 LINK_DESCRIPTOR_COUNTER1)];
1310 st->link_desc_counter1 = HAL_GET_FIELD(
1311 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
1312 LINK_DESCRIPTOR_COUNTER1,
1313 val);
1314
1315 val = reo_desc[HAL_OFFSET_DW(
1316 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
1317 LINK_DESCRIPTOR_COUNTER2)];
1318 st->link_desc_counter2 = HAL_GET_FIELD(
1319 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
1320 LINK_DESCRIPTOR_COUNTER2,
1321 val);
1322
1323 val = reo_desc[HAL_OFFSET_DW(
1324 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
1325 LINK_DESCRIPTOR_COUNTER_SUM)];
1326 st->link_desc_counter_sum = HAL_GET_FIELD(
1327 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
1328 LINK_DESCRIPTOR_COUNTER_SUM,
1329 val);
1330}
Pratik Gandhidc82a772018-01-30 18:57:05 +05301331qdf_export_symbol(hal_reo_desc_thres_reached_status);
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001332
Akshay Kosigi6a206752019-06-10 23:14:52 +05301333inline void
1334hal_reo_rx_update_queue_status(uint32_t *reo_desc,
1335 struct hal_reo_update_rx_queue_status *st,
1336 hal_soc_handle_t hal_soc_hdl)
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001337{
Akshay Kosigi6a206752019-06-10 23:14:52 +05301338 struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1339
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001340 /* Offsets of descriptor fields defined in HW headers start
1341 * from the field after TLV header */
1342 reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1343
1344 /* header */
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +05301345 hal_reo_status_get_header(reo_desc,
1346 HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV,
1347 &(st->header), hal_soc);
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001348}
Pratik Gandhidc82a772018-01-30 18:57:05 +05301349qdf_export_symbol(hal_reo_rx_update_queue_status);
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001350
1351/**
1352 * hal_reo_init_cmd_ring() - Initialize descriptors of REO command SRNG
1353 * with command number
1354 * @hal_soc: Handle to HAL SoC structure
1355 * @hal_ring: Handle to HAL SRNG structure
1356 *
1357 * Return: none
1358 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301359inline void hal_reo_init_cmd_ring(hal_soc_handle_t hal_soc_hdl,
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301360 hal_ring_handle_t hal_ring_hdl)
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001361{
1362 int cmd_num;
1363 uint32_t *desc_addr;
1364 struct hal_srng_params srng_params;
1365 uint32_t desc_size;
1366 uint32_t num_desc;
Akshay Kosigi6a206752019-06-10 23:14:52 +05301367 struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001368
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301369 hal_get_srng_params(hal_soc_hdl, hal_ring_hdl, &srng_params);
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07001370
1371 desc_addr = (uint32_t *)(srng_params.ring_base_vaddr);
1372 desc_addr += (sizeof(struct tlv_32_hdr) >> 2);
1373 desc_size = hal_srng_get_entrysize(soc, REO_CMD) >> 2;
1374 num_desc = srng_params.num_entries;
1375 cmd_num = 1;
1376 while (num_desc) {
1377 /* Offsets of descriptor fields defined in HW headers start
1378 * from the field after TLV header */
1379 HAL_DESC_SET_FIELD(desc_addr, UNIFORM_REO_CMD_HEADER_0,
1380 REO_CMD_NUMBER, cmd_num);
1381 desc_addr += desc_size;
1382 num_desc--; cmd_num++;
1383 }
1384
1385 soc->reo_res_bitmap = 0;
1386}
Pratik Gandhidc82a772018-01-30 18:57:05 +05301387qdf_export_symbol(hal_reo_init_cmd_ring);