blob: 1a0340665a1fd4dbb6553aeffaade9161c2b9a9f [file] [log] [blame]
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001/*
Tallapragada Kalyaneff377a2019-01-09 19:13:19 +05302 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07003 *
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +05304 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07008 *
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +05309 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16 * PERFORMANCE OF THIS SOFTWARE.
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -070017 */
18
19#ifndef _HAL_API_H_
20#define _HAL_API_H_
21
22#include "qdf_types.h"
Houston Hoffman61dad492017-04-07 17:09:34 -070023#include "qdf_util.h"
Sravan Kumar Kairam78b01a12019-09-16 14:22:55 +053024#include "qdf_atomic.h"
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -070025#include "hal_internal.h"
Houston Hoffman61dad492017-04-07 17:09:34 -070026#define MAX_UNWINDOWED_ADDRESS 0x80000
Nandha Kishore Easwaranfb73acb2019-10-24 17:33:52 +053027#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
28 defined(QCA_WIFI_QCN9000)
Venkata Sharath Chandra Manchala9347b8d2018-06-07 15:26:11 -070029#define WINDOW_ENABLE_BIT 0x40000000
30#else
Houston Hoffman61dad492017-04-07 17:09:34 -070031#define WINDOW_ENABLE_BIT 0x80000000
Venkata Sharath Chandra Manchala9347b8d2018-06-07 15:26:11 -070032#endif
Houston Hoffman61dad492017-04-07 17:09:34 -070033#define WINDOW_REG_ADDRESS 0x310C
34#define WINDOW_SHIFT 19
jiad5661cef2017-11-09 18:24:41 +080035#define WINDOW_VALUE_MASK 0x3F
Houston Hoffman61dad492017-04-07 17:09:34 -070036#define WINDOW_START MAX_UNWINDOWED_ADDRESS
37#define WINDOW_RANGE_MASK 0x7FFFF
38
/*
 * BAR + 4K is always accessible; any access outside this
 * space requires the force wake procedure.
 * NOTE(review): the original annotation claimed OFFSET = 4K - 32 bytes,
 * but 4096 - 32 = 4064 (0xFE0), not 0x4063 — confirm the intended
 * boundary value.
 */
44#define MAPPED_REF_OFF 0x4063
Venkata Sharath Chandra Manchala74a2f412019-11-14 16:57:52 -080045
46#ifdef HAL_CONFIG_SLUB_DEBUG_ON
47#define FORCE_WAKE_DELAY_TIMEOUT 100
48#else
Pramod Simha95c59f22018-08-27 10:03:04 -070049#define FORCE_WAKE_DELAY_TIMEOUT 50
Venkata Sharath Chandra Manchala74a2f412019-11-14 16:57:52 -080050#endif /* HAL_CONFIG_SLUB_DEBUG_ON */
51
Pramod Simha95c59f22018-08-27 10:03:04 -070052#define FORCE_WAKE_DELAY_MS 5
53
Akshay Kosigi8eda31c2019-07-10 14:42:42 +053054/**
55 * hal_ring_desc - opaque handle for DP ring descriptor
56 */
57struct hal_ring_desc;
58typedef struct hal_ring_desc *hal_ring_desc_t;
59
60/**
61 * hal_link_desc - opaque handle for DP link descriptor
62 */
63struct hal_link_desc;
64typedef struct hal_link_desc *hal_link_desc_t;
65
66/**
67 * hal_rxdma_desc - opaque handle for DP rxdma dst ring descriptor
68 */
69struct hal_rxdma_desc;
70typedef struct hal_rxdma_desc *hal_rxdma_desc_t;
71
#ifdef ENABLE_VERBOSE_DEBUG
/**
 * hal_set_verbose_debug() - Enable or disable HAL verbose debug output
 * @flag: true to enable verbose debugging, false to disable
 *
 * Updates the global is_hal_verbose_debug_enabled flag (defined
 * elsewhere in the HAL) that debug trace paths consult at runtime.
 */
static inline void
hal_set_verbose_debug(bool flag)
{
	is_hal_verbose_debug_enabled = flag;
}
#endif
79
#ifdef HAL_REGISTER_WRITE_DEBUG
/**
 * hal_reg_write_result_check() - verify a register write by reading back
 * @hal_soc: HAL soc handle
 * @offset: register offset to read back
 * @exp_val: value that was written and is expected on read-back
 * @ret_confirm: when false, skip the verification entirely
 *
 * Reads the register at @offset and logs both the expected and the
 * actual value if they disagree. Purely diagnostic; never fails hard.
 *
 * Return: none
 */
static inline void hal_reg_write_result_check(struct hal_soc *hal_soc,
					      uint32_t offset,
					      uint32_t exp_val,
					      bool ret_confirm)
{
	uint32_t actual;

	if (!ret_confirm)
		return;

	actual = qdf_ioread32(hal_soc->dev_base_addr + offset);
	if (actual == exp_val)
		return;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  "register offset 0x%x write failed!\n", offset);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  "the expectation 0x%x, actual value 0x%x\n",
		  exp_val,
		  actual);
}
#else
/* no op */
#define hal_reg_write_result_check(_hal_soc, _offset, _exp_val, _ret_confirm)
#endif
114
#if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490)
/*
 * On targets other than QCA6390/QCA6490 the register space is always
 * accessible: force wake is a no-op and register-window access is
 * serialized with the soc-local spinlock.
 */
static inline int hal_force_wake_request(struct hal_soc *soc)
{
	return 0;
}

static inline int hal_force_wake_release(struct hal_soc *soc)
{
	return 0;
}

/* Take the soc-local register access spinlock (irq-save variant) */
static inline void hal_lock_reg_access(struct hal_soc *soc,
				       unsigned long *flags)
{
	qdf_spin_lock_irqsave(&soc->register_access_lock);
}

static inline void hal_unlock_reg_access(struct hal_soc *soc,
					 unsigned long *flags)
{
	qdf_spin_unlock_irqrestore(&soc->register_access_lock);
}

#else
/**
 * hal_force_wake_request() - Vote for the device to stay awake
 * @soc: HAL SOC handle
 *
 * Sends a wake request through pld, then polls pld_is_device_awake()
 * in FORCE_WAKE_DELAY_MS steps for up to FORCE_WAKE_DELAY_TIMEOUT ms.
 * Note the poll uses mdelay(), i.e. it busy-waits.
 *
 * Return: 0 when the device is awake, -EINVAL if the request could not
 * be sent, -ETIMEDOUT if the device did not wake up within the timeout.
 */
static inline int hal_force_wake_request(struct hal_soc *soc)
{
	uint32_t timeout = 0;
	int ret;

	ret = pld_force_wake_request(soc->qdf_dev->dev);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Request send failed %d\n", __func__, ret);
		return -EINVAL;
	}

	/* Busy-wait until the device reports awake or the timeout expires */
	while (!pld_is_device_awake(soc->qdf_dev->dev) &&
	       timeout <= FORCE_WAKE_DELAY_TIMEOUT) {
		mdelay(FORCE_WAKE_DELAY_MS);
		timeout += FORCE_WAKE_DELAY_MS;
	}

	if (pld_is_device_awake(soc->qdf_dev->dev) == true)
		return 0;
	else
		return -ETIMEDOUT;
}

/**
 * hal_force_wake_release() - Drop the force-wake vote taken above
 * @soc: HAL SOC handle
 *
 * Return: status from pld_force_wake_release()
 */
static inline int hal_force_wake_release(struct hal_soc *soc)
{
	return pld_force_wake_release(soc->qdf_dev->dev);
}

/* On these targets register-window locking is delegated to pld */
static inline void hal_lock_reg_access(struct hal_soc *soc,
				       unsigned long *flags)
{
	pld_lock_reg_window(soc->qdf_dev->dev, flags);
}

static inline void hal_unlock_reg_access(struct hal_soc *soc,
					 unsigned long *flags)
{
	pld_unlock_reg_window(soc->qdf_dev->dev, flags);
}
#endif
180
#ifdef PCIE_REG_WINDOW_LOCAL_NO_CACHE
/**
 * hal_select_window() - Program the PCIe register window for @offset
 * @hal_soc: HAL SOC handle
 * @offset: target register offset whose window must be mapped in
 * @ret_confirm: when true, read back the window register to verify
 *
 * No-cache variant: the window register is rewritten unconditionally
 * on every call, even if the same window is already selected.
 * Caller must hold the register access lock.
 */
static inline void hal_select_window(struct hal_soc *hal_soc, uint32_t offset,
				     bool ret_confirm)
{
	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;

	qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
		      WINDOW_ENABLE_BIT | window);
	hal_soc->register_window = window;

	hal_reg_write_result_check(hal_soc, WINDOW_REG_ADDRESS,
				   WINDOW_ENABLE_BIT | window,
				   ret_confirm);
}
#else
/**
 * hal_select_window() - Program the PCIe register window for @offset
 * @hal_soc: HAL SOC handle
 * @offset: target register offset whose window must be mapped in
 * @ret_confirm: when true, read back the window register to verify
 *
 * Cached variant: skips the window-register write when the cached
 * hal_soc->register_window already matches. Caller must hold the
 * register access lock so the cache stays coherent with hardware.
 */
static inline void hal_select_window(struct hal_soc *hal_soc, uint32_t offset,
				     bool ret_confirm)
{
	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;

	if (window != hal_soc->register_window) {
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
			      WINDOW_ENABLE_BIT | window);
		hal_soc->register_window = window;

		hal_reg_write_result_check(
				hal_soc,
				WINDOW_REG_ADDRESS,
				WINDOW_ENABLE_BIT | window,
				ret_confirm);
	}
}
#endif
Houston Hoffman61dad492017-04-07 17:09:34 -0700213
/**
 * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) - 1
 * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS
 * note3: WINDOW_VALUE_MASK is wide enough that any offset addressing
 * past the last available window indicates a bug in the caller
 */
#if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490)
/**
 * hal_write32_mb() - Write a 32-bit device register, windowing-aware
 * @hal_soc: HAL SOC handle
 * @offset: register offset from dev_base_addr
 * @value: value to write
 * @ret_confirm: when true, read back and log if the write did not stick
 *
 * Offsets below MAX_UNWINDOWED_ADDRESS (or any offset when windowing is
 * disabled) are written directly. Otherwise the matching register
 * window is selected under the register access lock and the write goes
 * through the windowed aperture at WINDOW_START.
 */
static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
				  uint32_t value, bool ret_confirm)
{
	unsigned long flags;

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
		hal_reg_write_result_check(hal_soc, offset,
					   value, ret_confirm);
	} else {
		hal_lock_reg_access(hal_soc, &flags);
		hal_select_window(hal_soc, offset, ret_confirm);
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
			      (offset & WINDOW_RANGE_MASK), value);

		/* Read-back check uses the windowed address actually written */
		hal_reg_write_result_check(
				hal_soc,
				WINDOW_START + (offset & WINDOW_RANGE_MASK),
				value, ret_confirm);
		hal_unlock_reg_access(hal_soc, &flags);
	}
}
#else
/**
 * hal_write32_mb() - Write a 32-bit register with force-wake handling
 * @hal_soc: HAL SOC handle
 * @offset: register offset from dev_base_addr
 * @value: value to write
 * @ret_confirm: when true, read back and log if the write did not stick
 *
 * QCA6390/QCA6490 variant: offsets beyond MAPPED_REF_OFF are outside
 * the always-accessible BAR region, so a force-wake vote is taken
 * before the access and released afterwards. A failed wake request is
 * treated as fatal (QDF_BUG) and the write is dropped.
 */
static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
				  uint32_t value, bool ret_confirm)
{
	int ret;
	unsigned long flags;

	if (offset > MAPPED_REF_OFF) {
		ret = hal_force_wake_request(hal_soc);
		if (ret) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: Wake up request failed %d\n",
				  __func__, ret);
			QDF_BUG(0);
			return;
		}
	}

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
		hal_reg_write_result_check(hal_soc, offset,
					   value, ret_confirm);
	} else {
		hal_lock_reg_access(hal_soc, &flags);
		hal_select_window(hal_soc, offset, ret_confirm);
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
			      (offset & WINDOW_RANGE_MASK), value);

		/* Read-back check uses the windowed address actually written */
		hal_reg_write_result_check(
				hal_soc,
				WINDOW_START + (offset & WINDOW_RANGE_MASK),
				value,
				ret_confirm);
		hal_unlock_reg_access(hal_soc, &flags);
	}

	/* Balance the force-wake vote taken above */
	if ((offset > MAPPED_REF_OFF) &&
	    hal_force_wake_release(hal_soc))
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Wake up release failed\n", __func__);
}

#endif
Houston Hoffman61dad492017-04-07 17:09:34 -0700288
289/**
290 * hal_write_address_32_mb - write a value to a register
291 *
292 */
293static inline void hal_write_address_32_mb(struct hal_soc *hal_soc,
294 void __iomem *addr, uint32_t value)
295{
296 uint32_t offset;
297
298 if (!hal_soc->use_register_windowing)
299 return qdf_iowrite32(addr, value);
300
301 offset = addr - hal_soc->dev_base_addr;
Jinwei Chen99ae1c12019-11-01 19:43:30 +0800302 hal_write32_mb(hal_soc, offset, value, false);
Houston Hoffman61dad492017-04-07 17:09:34 -0700303}
304
Venkata Sharath Chandra Manchalae69c9c22019-09-23 18:31:36 -0700305#if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490)
Houston Hoffman61dad492017-04-07 17:09:34 -0700306static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
307{
308 uint32_t ret;
Jinwei Chen7d419462019-10-18 09:56:18 +0800309 unsigned long flags;
Houston Hoffman61dad492017-04-07 17:09:34 -0700310
311 if (!hal_soc->use_register_windowing ||
312 offset < MAX_UNWINDOWED_ADDRESS) {
313 return qdf_ioread32(hal_soc->dev_base_addr + offset);
314 }
315
Jinwei Chen7d419462019-10-18 09:56:18 +0800316 hal_lock_reg_access(hal_soc, &flags);
Jinwei Chen99ae1c12019-11-01 19:43:30 +0800317 hal_select_window(hal_soc, offset, false);
Houston Hoffman61dad492017-04-07 17:09:34 -0700318 ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
319 (offset & WINDOW_RANGE_MASK));
Jinwei Chen7d419462019-10-18 09:56:18 +0800320 hal_unlock_reg_access(hal_soc, &flags);
Houston Hoffman61dad492017-04-07 17:09:34 -0700321
322 return ret;
323}
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -0700324
325/**
326 * hal_read_address_32_mb() - Read 32-bit value from the register
327 * @soc: soc handle
328 * @addr: register address to read
329 *
330 * Return: 32-bit value
331 */
332static inline uint32_t hal_read_address_32_mb(struct hal_soc *soc,
333 void __iomem *addr)
334{
335 uint32_t offset;
336 uint32_t ret;
337
338 if (!soc->use_register_windowing)
339 return qdf_ioread32(addr);
340
341 offset = addr - soc->dev_base_addr;
342 ret = hal_read32_mb(soc, offset);
343 return ret;
344}
Pramod Simha95c59f22018-08-27 10:03:04 -0700345#else
346static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
347{
348 uint32_t ret;
Jinwei Chen7d419462019-10-18 09:56:18 +0800349 unsigned long flags;
Pramod Simha95c59f22018-08-27 10:03:04 -0700350
351 if ((offset > MAPPED_REF_OFF) &&
352 hal_force_wake_request(hal_soc)) {
353 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
354 "%s: Wake up request failed\n", __func__);
355 return -EINVAL;
356 }
357
358 if (!hal_soc->use_register_windowing ||
359 offset < MAX_UNWINDOWED_ADDRESS) {
360 return qdf_ioread32(hal_soc->dev_base_addr + offset);
361 }
362
Jinwei Chen7d419462019-10-18 09:56:18 +0800363 hal_lock_reg_access(hal_soc, &flags);
Jinwei Chen99ae1c12019-11-01 19:43:30 +0800364 hal_select_window(hal_soc, offset, false);
Pramod Simha95c59f22018-08-27 10:03:04 -0700365 ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
366 (offset & WINDOW_RANGE_MASK));
Jinwei Chen7d419462019-10-18 09:56:18 +0800367 hal_unlock_reg_access(hal_soc, &flags);
Pramod Simha95c59f22018-08-27 10:03:04 -0700368
369 if ((offset > MAPPED_REF_OFF) &&
370 hal_force_wake_release(hal_soc))
371 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
372 "%s: Wake up release failed\n", __func__);
373
374 return ret;
375}
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -0700376
377static inline uint32_t hal_read_address_32_mb(struct hal_soc *soc,
378 void __iomem *addr)
379{
380 uint32_t offset;
381 uint32_t ret;
382
383 if (!soc->use_register_windowing)
384 return qdf_ioread32(addr);
385
386 offset = addr - soc->dev_base_addr;
387 ret = hal_read32_mb(soc, offset);
388 return ret;
389}
Pramod Simha95c59f22018-08-27 10:03:04 -0700390#endif
Houston Hoffman61dad492017-04-07 17:09:34 -0700391
392#include "hif_io32.h"
393
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700394/**
Jeff Johnsonf7aed492018-05-12 11:14:55 -0700395 * hal_attach - Initialize HAL layer
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700396 * @hif_handle: Opaque HIF handle
397 * @qdf_dev: QDF device
398 *
399 * Return: Opaque HAL SOC handle
400 * NULL on failure (if given ring is not available)
401 *
402 * This function should be called as part of HIF initialization (for accessing
403 * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
404 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +0530405void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev);
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700406
407/**
408 * hal_detach - Detach HAL layer
409 * @hal_soc: HAL SOC handle
410 *
411 * This function should be called as part of HIF detach
412 *
413 */
414extern void hal_detach(void *hal_soc);
415
/* SRNG type to be passed in APIs hal_srng_get_entrysize and hal_srng_setup */
enum hal_ring_type {
	/* REO rings */
	REO_DST = 0,
	REO_EXCEPTION = 1,
	REO_REINJECT = 2,
	REO_CMD = 3,
	REO_STATUS = 4,
	/* TCL rings */
	TCL_DATA = 5,
	TCL_CMD = 6,
	TCL_STATUS = 7,
	/* Copy engine rings */
	CE_SRC = 8,
	CE_DST = 9,
	CE_DST_STATUS = 10,
	/* WBM rings */
	WBM_IDLE_LINK = 11,
	SW2WBM_RELEASE = 12,
	WBM2SW_RELEASE = 13,
	/* RXDMA rings */
	RXDMA_BUF = 14,
	RXDMA_DST = 15,
	RXDMA_MONITOR_BUF = 16,
	RXDMA_MONITOR_STATUS = 17,
	RXDMA_MONITOR_DST = 18,
	RXDMA_MONITOR_DESC = 19,
	DIR_BUF_RX_DMA_SRC = 20,
#ifdef WLAN_FEATURE_CIF_CFR
	WIFI_POS_SRC,
#endif
	/* Keep last: count of ring types (value depends on feature flags) */
	MAX_RING_TYPES
};
444
Balamurugan Mahalingamd0159642018-07-11 15:02:29 +0530445#define HAL_SRNG_LMAC_RING 0x80000000
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700446/* SRNG flags passed in hal_srng_params.flags */
447#define HAL_SRNG_MSI_SWAP 0x00000008
448#define HAL_SRNG_RING_PTR_SWAP 0x00000010
449#define HAL_SRNG_DATA_TLV_SWAP 0x00000020
450#define HAL_SRNG_LOW_THRES_INTR_ENABLE 0x00010000
451#define HAL_SRNG_MSI_INTR 0x00020000
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +0530452#define HAL_SRNG_CACHED_DESC 0x00040000
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700453
Gurumoorthi Gnanasambandhaned4bcf82017-05-24 00:10:59 +0530454#define PN_SIZE_24 0
455#define PN_SIZE_48 1
456#define PN_SIZE_128 2
457
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700458/**
459 * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
460 * used by callers for calculating the size of memory to be allocated before
461 * calling hal_srng_setup to setup the ring
462 *
463 * @hal_soc: Opaque HAL SOC handle
464 * @ring_type: one of the types from hal_ring_type
465 *
466 */
467extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);
468
Karunakar Dasinenid0ea21f2017-01-31 22:58:15 -0800469/**
470 * hal_srng_max_entries - Returns maximum possible number of ring entries
471 * @hal_soc: Opaque HAL SOC handle
472 * @ring_type: one of the types from hal_ring_type
473 *
474 * Return: Maximum number of entries for the given ring_type
475 */
476uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);
477
Houston Hoffman648a9182017-05-21 23:27:50 -0700478/**
Kai Liub8e12412018-01-12 16:52:26 +0800479 * hal_srng_dump - Dump ring status
480 * @srng: hal srng pointer
481 */
482void hal_srng_dump(struct hal_srng *srng);
483
484/**
Houston Hoffman648a9182017-05-21 23:27:50 -0700485 * hal_srng_get_dir - Returns the direction of the ring
486 * @hal_soc: Opaque HAL SOC handle
487 * @ring_type: one of the types from hal_ring_type
488 *
489 * Return: Ring direction
490 */
491enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type);
492
/* HAL memory information shared with upper layers */
struct hal_mem_info {
	/* dev base virtual addr */
	void *dev_base_addr;
	/* dev base physical addr */
	void *dev_base_paddr;
	/* Remote virtual pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_vaddr;
	/* Remote physical pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_paddr;
	/* Shared memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_vaddr;
	/* Shared physical memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_paddr;
};
508
/* SRNG parameters to be passed to hal_srng_setup */
struct hal_srng_params {
	/* Physical base address of the ring */
	qdf_dma_addr_t ring_base_paddr;
	/* Virtual base address of the ring */
	void *ring_base_vaddr;
	/* Number of entries in ring */
	uint32_t num_entries;
	/* max transfer length */
	uint16_t max_buffer_length;
	/* MSI Address */
	qdf_dma_addr_t msi_addr;
	/* MSI data */
	uint32_t msi_data;
	/* Interrupt timer threshold - in micro seconds */
	uint32_t intr_timer_thres_us;
	/* Interrupt batch counter threshold - in number of ring entries */
	uint32_t intr_batch_cntr_thres_entries;
	/* Low threshold - in number of ring entries
	 * (valid for src rings only)
	 */
	uint32_t low_threshold;
	/* Misc flags (HAL_SRNG_* bits defined above) */
	uint32_t flags;
	/* Unique ring id */
	uint8_t ring_id;
	/* Source or Destination ring */
	enum hal_srng_dir ring_dir;
	/* Size of ring entry */
	uint32_t entry_size;
	/* hw register base address */
	void *hwreg_base[MAX_SRNG_REG_GROUPS];
};
542
Houston Hoffman5141f9d2017-01-05 10:49:17 -0800543/* hal_construct_shadow_config() - initialize the shadow registers for dp rings
544 * @hal_soc: hal handle
545 *
546 * Return: QDF_STATUS_OK on success
547 */
548extern QDF_STATUS hal_construct_shadow_config(void *hal_soc);
549
550/* hal_set_one_shadow_config() - add a config for the specified ring
551 * @hal_soc: hal handle
552 * @ring_type: ring type
553 * @ring_num: ring num
554 *
555 * The ring type and ring num uniquely specify the ring. After this call,
556 * the hp/tp will be added as the next entry int the shadow register
557 * configuration table. The hal code will use the shadow register address
558 * in place of the hp/tp address.
559 *
560 * This function is exposed, so that the CE module can skip configuring shadow
561 * registers for unused ring and rings assigned to the firmware.
562 *
563 * Return: QDF_STATUS_OK on success
564 */
565extern QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
566 int ring_num);
567/**
568 * hal_get_shadow_config() - retrieve the config table
569 * @hal_soc: hal handle
570 * @shadow_config: will point to the table after
571 * @num_shadow_registers_configured: will contain the number of valid entries
572 */
573extern void hal_get_shadow_config(void *hal_soc,
574 struct pld_shadow_reg_v2_cfg **shadow_config,
575 int *num_shadow_registers_configured);
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700576/**
Jeff Johnsonf7aed492018-05-12 11:14:55 -0700577 * hal_srng_setup - Initialize HW SRNG ring.
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700578 *
579 * @hal_soc: Opaque HAL SOC handle
580 * @ring_type: one of the types from hal_ring_type
581 * @ring_num: Ring number if there are multiple rings of
582 * same type (staring from 0)
583 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
584 * @ring_params: SRNG ring params in hal_srng_params structure.
585
586 * Callers are expected to allocate contiguous ring memory of size
587 * 'num_entries * entry_size' bytes and pass the physical and virtual base
588 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
589 * structure. Ring base address should be 8 byte aligned and size of each ring
590 * entry should be queried using the API hal_srng_get_entrysize
591 *
592 * Return: Opaque pointer to ring on success
593 * NULL on failure (if given ring is not available)
594 */
595extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
596 int mac_id, struct hal_srng_params *ring_params);
597
Yun Parkfde6b9e2017-06-26 17:13:11 -0700598/* Remapping ids of REO rings */
599#define REO_REMAP_TCL 0
600#define REO_REMAP_SW1 1
601#define REO_REMAP_SW2 2
602#define REO_REMAP_SW3 3
603#define REO_REMAP_SW4 4
604#define REO_REMAP_RELEASE 5
605#define REO_REMAP_FW 6
606#define REO_REMAP_UNUSED 7
607
608/*
Venkata Sharath Chandra Manchalac9e344d2019-10-23 14:13:14 -0700609 * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0
610 * to map destination to rings
Yun Parkfde6b9e2017-06-26 17:13:11 -0700611 */
Venkata Sharath Chandra Manchalac9e344d2019-10-23 14:13:14 -0700612#define HAL_REO_REMAP_IX0(_VALUE, _OFFSET) \
613 ((_VALUE) << \
Yun Parkfde6b9e2017-06-26 17:13:11 -0700614 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_DEST_RING_MAPPING_ ## \
Venkata Sharath Chandra Manchalac9e344d2019-10-23 14:13:14 -0700615 _OFFSET ## _SHFT))
616
617/*
618 * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_1
619 * to map destination to rings
620 */
621#define HAL_REO_REMAP_IX2(_VALUE, _OFFSET) \
622 ((_VALUE) << \
623 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_DEST_RING_MAPPING_ ## \
624 _OFFSET ## _SHFT))
625
626/*
627 * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3
628 * to map destination to rings
629 */
630#define HAL_REO_REMAP_IX3(_VALUE, _OFFSET) \
631 ((_VALUE) << \
632 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_DEST_RING_MAPPING_ ## \
633 _OFFSET ## _SHFT))
Yun Parkfde6b9e2017-06-26 17:13:11 -0700634
635/**
jiad09526ac2019-04-12 17:42:40 +0800636 * hal_reo_read_write_ctrl_ix - Read or write REO_DESTINATION_RING_CTRL_IX
Akshay Kosigi6a206752019-06-10 23:14:52 +0530637 * @hal_soc_hdl: HAL SOC handle
jiad09526ac2019-04-12 17:42:40 +0800638 * @read: boolean value to indicate if read or write
639 * @ix0: pointer to store IX0 reg value
640 * @ix1: pointer to store IX1 reg value
641 * @ix2: pointer to store IX2 reg value
642 * @ix3: pointer to store IX3 reg value
Yun Parkfde6b9e2017-06-26 17:13:11 -0700643 */
Akshay Kosigi6a206752019-06-10 23:14:52 +0530644void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read,
645 uint32_t *ix0, uint32_t *ix1,
646 uint32_t *ix2, uint32_t *ix3);
Yun Parkfde6b9e2017-06-26 17:13:11 -0700647
648/**
Yun Park601d0d82017-08-28 21:49:31 -0700649 * hal_srng_set_hp_paddr() - Set physical address to dest SRNG head pointer
Yun Parkfde6b9e2017-06-26 17:13:11 -0700650 * @sring: sring pointer
651 * @paddr: physical address
652 */
Yun Park601d0d82017-08-28 21:49:31 -0700653extern void hal_srng_dst_set_hp_paddr(struct hal_srng *sring, uint64_t paddr);
654
655/**
656 * hal_srng_dst_init_hp() - Initilaize head pointer with cached head pointer
657 * @srng: sring pointer
658 * @vaddr: virtual address
659 */
660extern void hal_srng_dst_init_hp(struct hal_srng *srng, uint32_t *vaddr);
Yun Parkfde6b9e2017-06-26 17:13:11 -0700661
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700662/**
663 * hal_srng_cleanup - Deinitialize HW SRNG ring.
664 * @hal_soc: Opaque HAL SOC handle
665 * @hal_srng: Opaque HAL SRNG pointer
666 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530667void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl);
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700668
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530669static inline bool hal_srng_initialized(hal_ring_handle_t hal_ring_hdl)
Houston Hoffman648a9182017-05-21 23:27:50 -0700670{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530671 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Houston Hoffman648a9182017-05-21 23:27:50 -0700672
673 return !!srng->initialized;
674}
675
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700676/**
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +0530677 * hal_srng_dst_peek - Check if there are any entries in the ring (peek)
678 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530679 * @hal_ring_hdl: Destination ring pointer
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +0530680 *
681 * Caller takes responsibility for any locking needs.
682 *
683 * Return: Opaque pointer for next ring entry; NULL on failire
684 */
685static inline
Akshay Kosigi8eda31c2019-07-10 14:42:42 +0530686void *hal_srng_dst_peek(hal_soc_handle_t hal_soc_hdl,
687 hal_ring_handle_t hal_ring_hdl)
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +0530688{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530689 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +0530690
691 if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
692 return (void *)(&srng->ring_base_vaddr[srng->u.dst_ring.tp]);
693
694 return NULL;
695}
696
/**
 * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
 * hal_srng_access_start if locked access is required
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 *
 * Snapshots the HW-updated pointer into the SW cached copy: the tail
 * pointer for source rings, the head pointer for destination rings.
 * For destination rings flagged HAL_SRNG_CACHED_DESC, also syncs the
 * next descriptor from device memory and prefetches it so the
 * subsequent dequeue sees fresh DMA'd contents.
 *
 * Return: 0 on success; error on failure
 */
static inline int
hal_srng_access_start_unlocked(hal_soc_handle_t hal_soc_hdl,
			       hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *desc;

	/* volatile read: *_addr points at memory the HW/FW updates */
	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		srng->u.src_ring.cached_tp =
			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
	else {
		srng->u.dst_ring.cached_hp =
			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);

		if (srng->flags & HAL_SRNG_CACHED_DESC) {
			desc = hal_srng_dst_peek(hal_soc_hdl, hal_ring_hdl);
			if (qdf_likely(desc)) {
				/* Invalidate CPU cache for the descriptor
				 * before reading, then warm it up.
				 */
				qdf_mem_dma_cache_sync(soc->qdf_dev,
						       qdf_mem_virt_to_phys
						       (desc),
						       QDF_DMA_FROM_DEVICE,
						       (srng->entry_size *
							sizeof(uint32_t)));
				qdf_prefetch(desc);
			}
		}
	}

	return 0;
}
737
738/**
739 * hal_srng_access_start - Start (locked) ring access
740 *
741 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530742 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700743 *
744 * Return: 0 on success; error on failire
745 */
Akshay Kosigi6a206752019-06-10 23:14:52 +0530746static inline int hal_srng_access_start(hal_soc_handle_t hal_soc_hdl,
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530747 hal_ring_handle_t hal_ring_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700748{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530749 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700750
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530751 if (qdf_unlikely(!hal_ring_hdl)) {
Krunal Sonief1f0f92018-09-17 21:09:55 -0700752 qdf_print("Error: Invalid hal_ring\n");
753 return -EINVAL;
754 }
755
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700756 SRNG_LOCK(&(srng->lock));
757
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530758 return hal_srng_access_start_unlocked(hal_soc_hdl, hal_ring_hdl);
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700759}
760
761/**
762 * hal_srng_dst_get_next - Get next entry from a destination ring and move
763 * cached tail pointer
764 *
765 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530766 * @hal_ring_hdl: Destination ring pointer
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700767 *
768 * Return: Opaque pointer for next ring entry; NULL on failire
769 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530770static inline
771void *hal_srng_dst_get_next(void *hal_soc,
772 hal_ring_handle_t hal_ring_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700773{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530774 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +0530775 struct hal_soc *soc = (struct hal_soc *)hal_soc;
Karunakar Dasineni6bcbdd52017-08-10 18:31:07 -0700776 uint32_t *desc;
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +0530777 uint32_t *desc_next;
778 uint32_t tp;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700779
Karunakar Dasineni6bcbdd52017-08-10 18:31:07 -0700780 if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) {
781 desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]);
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -0800782 /* TODO: Using % is expensive, but we have to do this since
783 * size of some SRNG rings is not power of 2 (due to descriptor
784 * sizes). Need to create separate API for rings used
785 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
786 * SW2RXDMA and CE rings)
787 */
788 srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
789 srng->ring_size;
790
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +0530791 if (srng->flags & HAL_SRNG_CACHED_DESC) {
792 tp = srng->u.dst_ring.tp;
793 desc_next = &srng->ring_base_vaddr[tp];
794 qdf_mem_dma_cache_sync(soc->qdf_dev,
795 qdf_mem_virt_to_phys(desc_next),
796 QDF_DMA_FROM_DEVICE,
797 (srng->entry_size *
798 sizeof(uint32_t)));
799 qdf_prefetch(desc_next);
800 }
801
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700802 return (void *)desc;
803 }
Karunakar Dasineni6bcbdd52017-08-10 18:31:07 -0700804
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700805 return NULL;
806}
807
808/**
Yun Park601d0d82017-08-28 21:49:31 -0700809 * hal_srng_dst_get_next_hp - Get next entry from a destination ring and move
810 * cached head pointer
811 *
812 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530813 * @hal_ring_hdl: Destination ring pointer
Yun Park601d0d82017-08-28 21:49:31 -0700814 *
815 * Return: Opaque pointer for next ring entry; NULL on failire
816 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530817static inline void *
Akshay Kosigi8eda31c2019-07-10 14:42:42 +0530818hal_srng_dst_get_next_hp(hal_soc_handle_t hal_soc_hdl,
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530819 hal_ring_handle_t hal_ring_hdl)
Yun Park601d0d82017-08-28 21:49:31 -0700820{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530821 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Yun Park601d0d82017-08-28 21:49:31 -0700822 uint32_t *desc;
823 /* TODO: Using % is expensive, but we have to do this since
824 * size of some SRNG rings is not power of 2 (due to descriptor
825 * sizes). Need to create separate API for rings used
826 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
827 * SW2RXDMA and CE rings)
828 */
829 uint32_t next_hp = (srng->u.dst_ring.cached_hp + srng->entry_size) %
830 srng->ring_size;
831
832 if (next_hp != srng->u.dst_ring.tp) {
833 desc = &(srng->ring_base_vaddr[srng->u.dst_ring.cached_hp]);
834 srng->u.dst_ring.cached_hp = next_hp;
835 return (void *)desc;
836 }
837
838 return NULL;
839}
840
841/**
Mohit Khannae5a6e942018-11-28 14:22:48 -0800842 * hal_srng_dst_peek_sync - Check if there are any entries in the ring (peek)
843 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530844 * @hal_ring_hdl: Destination ring pointer
Mohit Khannae5a6e942018-11-28 14:22:48 -0800845 *
846 * Sync cached head pointer with HW.
847 * Caller takes responsibility for any locking needs.
848 *
849 * Return: Opaque pointer for next ring entry; NULL on failire
850 */
851static inline
Akshay Kosigi8eda31c2019-07-10 14:42:42 +0530852void *hal_srng_dst_peek_sync(hal_soc_handle_t hal_soc_hdl,
853 hal_ring_handle_t hal_ring_hdl)
Mohit Khannae5a6e942018-11-28 14:22:48 -0800854{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530855 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Mohit Khannae5a6e942018-11-28 14:22:48 -0800856
857 srng->u.dst_ring.cached_hp =
858 *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
859
860 if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
Karunakar Dasineni6bcbdd52017-08-10 18:31:07 -0700861 return (void *)(&(srng->ring_base_vaddr[srng->u.dst_ring.tp]));
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700862
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700863 return NULL;
864}
865
866/**
Mohit Khannae5a6e942018-11-28 14:22:48 -0800867 * hal_srng_dst_peek_sync_locked - Peek for any entries in the ring
868 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530869 * @hal_ring_hdl: Destination ring pointer
Mohit Khannae5a6e942018-11-28 14:22:48 -0800870 *
871 * Sync cached head pointer with HW.
872 * This function takes up SRNG_LOCK. Should not be called with SRNG lock held.
873 *
874 * Return: Opaque pointer for next ring entry; NULL on failire
875 */
876static inline
Akshay Kosigi8eda31c2019-07-10 14:42:42 +0530877void *hal_srng_dst_peek_sync_locked(hal_soc_handle_t hal_soc_hdl,
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530878 hal_ring_handle_t hal_ring_hdl)
Mohit Khannae5a6e942018-11-28 14:22:48 -0800879{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530880 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Mohit Khannae5a6e942018-11-28 14:22:48 -0800881 void *ring_desc_ptr = NULL;
882
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530883 if (qdf_unlikely(!hal_ring_hdl)) {
Mohit Khannae5a6e942018-11-28 14:22:48 -0800884 qdf_print("Error: Invalid hal_ring\n");
885 return NULL;
886 }
887
888 SRNG_LOCK(&srng->lock);
889
Akshay Kosigi8eda31c2019-07-10 14:42:42 +0530890 ring_desc_ptr = hal_srng_dst_peek_sync(hal_soc_hdl, hal_ring_hdl);
Mohit Khannae5a6e942018-11-28 14:22:48 -0800891
892 SRNG_UNLOCK(&srng->lock);
893
894 return ring_desc_ptr;
895}
896
897/**
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700898 * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
899 * by SW) in destination ring
900 *
901 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530902 * @hal_ring_hdl: Destination ring pointer
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700903 * @sync_hw_ptr: Sync cached head pointer with HW
904 *
905 */
Mohit Khanna80002652019-10-14 23:27:36 -0700906static inline
907uint32_t hal_srng_dst_num_valid(void *hal_soc,
908 hal_ring_handle_t hal_ring_hdl,
909 int sync_hw_ptr)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700910{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530911 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Balamurugan Mahalingamd0159642018-07-11 15:02:29 +0530912 uint32_t hp;
913 uint32_t tp = srng->u.dst_ring.tp;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700914
915 if (sync_hw_ptr) {
Jinwei Chen1cb78172019-02-12 12:36:10 +0800916 hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700917 srng->u.dst_ring.cached_hp = hp;
918 } else {
919 hp = srng->u.dst_ring.cached_hp;
920 }
921
922 if (hp >= tp)
923 return (hp - tp) / srng->entry_size;
924 else
925 return (srng->ring_size - tp + hp) / srng->entry_size;
926}
927
928/**
Mohit Khanna80002652019-10-14 23:27:36 -0700929 * hal_srng_dst_num_valid_locked - Returns num valid entries to be processed
930 *
931 * @hal_soc: Opaque HAL SOC handle
932 * @hal_ring_hdl: Destination ring pointer
933 * @sync_hw_ptr: Sync cached head pointer with HW
934 *
935 * Returns number of valid entries to be processed by the host driver. The
936 * function takes up SRNG lock.
937 *
938 * Return: Number of valid destination entries
939 */
940static inline uint32_t
941hal_srng_dst_num_valid_locked(hal_soc_handle_t hal_soc,
942 hal_ring_handle_t hal_ring_hdl,
943 int sync_hw_ptr)
944{
945 uint32_t num_valid;
946 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
947
948 SRNG_LOCK(&srng->lock);
949 num_valid = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, sync_hw_ptr);
950 SRNG_UNLOCK(&srng->lock);
951
952 return num_valid;
953}
954
955/**
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700956 * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
957 * pointer. This can be used to release any buffers associated with completed
958 * ring entries. Note that this should not be used for posting new descriptor
959 * entries. Posting of new entries should be done only using
960 * hal_srng_src_get_next_reaped when this function is used for reaping.
961 *
962 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530963 * @hal_ring_hdl: Source ring pointer
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700964 *
965 * Return: Opaque pointer for next ring entry; NULL on failire
966 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530967static inline void *
968hal_srng_src_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700969{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530970 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700971 uint32_t *desc;
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -0800972
973 /* TODO: Using % is expensive, but we have to do this since
974 * size of some SRNG rings is not power of 2 (due to descriptor
975 * sizes). Need to create separate API for rings used
976 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
977 * SW2RXDMA and CE rings)
978 */
979 uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
980 srng->ring_size;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700981
982 if (next_reap_hp != srng->u.src_ring.cached_tp) {
983 desc = &(srng->ring_base_vaddr[next_reap_hp]);
984 srng->u.src_ring.reap_hp = next_reap_hp;
985 return (void *)desc;
986 }
987
988 return NULL;
989}
990
991/**
992 * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
993 * already reaped using hal_srng_src_reap_next, for posting new entries to
994 * the ring
995 *
996 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530997 * @hal_ring_hdl: Source ring pointer
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700998 *
999 * Return: Opaque pointer for next (reaped) source ring entry; NULL on failire
1000 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301001static inline void *
1002hal_srng_src_get_next_reaped(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001003{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301004 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001005 uint32_t *desc;
1006
1007 if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
1008 desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001009 srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
1010 srng->ring_size;
1011
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001012 return (void *)desc;
1013 }
1014
1015 return NULL;
1016}
1017
1018/**
Pamidipati, Vijay980ceb92017-07-05 05:20:17 +05301019 * hal_srng_src_pending_reap_next - Reap next entry from a source ring and
1020 * move reap pointer. This API is used in detach path to release any buffers
1021 * associated with ring entries which are pending reap.
1022 *
1023 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301024 * @hal_ring_hdl: Source ring pointer
Pamidipati, Vijay980ceb92017-07-05 05:20:17 +05301025 *
1026 * Return: Opaque pointer for next ring entry; NULL on failire
1027 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301028static inline void *
1029hal_srng_src_pending_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
Pamidipati, Vijay980ceb92017-07-05 05:20:17 +05301030{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301031 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Pamidipati, Vijay980ceb92017-07-05 05:20:17 +05301032 uint32_t *desc;
1033
1034 uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1035 srng->ring_size;
1036
1037 if (next_reap_hp != srng->u.src_ring.hp) {
1038 desc = &(srng->ring_base_vaddr[next_reap_hp]);
1039 srng->u.src_ring.reap_hp = next_reap_hp;
1040 return (void *)desc;
1041 }
1042
1043 return NULL;
1044}
1045
1046/**
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001047 * hal_srng_src_done_val -
1048 *
1049 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301050 * @hal_ring_hdl: Source ring pointer
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001051 *
1052 * Return: Opaque pointer for next ring entry; NULL on failire
1053 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301054static inline uint32_t
1055hal_srng_src_done_val(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001056{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301057 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001058 /* TODO: Using % is expensive, but we have to do this since
1059 * size of some SRNG rings is not power of 2 (due to descriptor
1060 * sizes). Need to create separate API for rings used
1061 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1062 * SW2RXDMA and CE rings)
1063 */
1064 uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1065 srng->ring_size;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001066
1067 if (next_reap_hp == srng->u.src_ring.cached_tp)
1068 return 0;
1069
1070 if (srng->u.src_ring.cached_tp > next_reap_hp)
1071 return (srng->u.src_ring.cached_tp - next_reap_hp) /
1072 srng->entry_size;
1073 else
1074 return ((srng->ring_size - next_reap_hp) +
1075 srng->u.src_ring.cached_tp) / srng->entry_size;
1076}
sumedh baikady72b1c712017-08-24 12:11:46 -07001077
1078/**
Venkata Sharath Chandra Manchalad8b05b52019-05-31 19:25:33 -07001079 * hal_get_entrysize_from_srng() - Retrieve ring entry size
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301080 * @hal_ring_hdl: Source ring pointer
Venkata Sharath Chandra Manchalad8b05b52019-05-31 19:25:33 -07001081 *
1082 * Return: uint8_t
1083 */
1084static inline
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301085uint8_t hal_get_entrysize_from_srng(hal_ring_handle_t hal_ring_hdl)
Venkata Sharath Chandra Manchalad8b05b52019-05-31 19:25:33 -07001086{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301087 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Venkata Sharath Chandra Manchalad8b05b52019-05-31 19:25:33 -07001088
1089 return srng->entry_size;
1090}
1091
1092/**
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07001093 * hal_get_sw_hptp - Get SW head and tail pointer location for any ring
sumedh baikady72b1c712017-08-24 12:11:46 -07001094 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301095 * @hal_ring_hdl: Source ring pointer
sumedh baikady72b1c712017-08-24 12:11:46 -07001096 * @tailp: Tail Pointer
1097 * @headp: Head Pointer
1098 *
1099 * Return: Update tail pointer and head pointer in arguments.
1100 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301101static inline
1102void hal_get_sw_hptp(void *hal_soc, hal_ring_handle_t hal_ring_hdl,
1103 uint32_t *tailp, uint32_t *headp)
sumedh baikady72b1c712017-08-24 12:11:46 -07001104{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301105 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
sumedh baikady72b1c712017-08-24 12:11:46 -07001106
1107 if (srng->ring_dir == HAL_SRNG_SRC_RING) {
Rakesh Pillai56320c12019-06-05 00:25:48 +05301108 *headp = srng->u.src_ring.hp;
1109 *tailp = *srng->u.src_ring.tp_addr;
sumedh baikady72b1c712017-08-24 12:11:46 -07001110 } else {
Rakesh Pillai56320c12019-06-05 00:25:48 +05301111 *tailp = srng->u.dst_ring.tp;
1112 *headp = *srng->u.dst_ring.hp_addr;
sumedh baikady72b1c712017-08-24 12:11:46 -07001113 }
1114}
1115
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001116/**
1117 * hal_srng_src_get_next - Get next entry from a source ring and move cached tail pointer
1118 *
1119 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301120 * @hal_ring_hdl: Source ring pointer
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001121 *
1122 * Return: Opaque pointer for next ring entry; NULL on failire
1123 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301124static inline
1125void *hal_srng_src_get_next(void *hal_soc,
1126 hal_ring_handle_t hal_ring_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001127{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301128 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001129 uint32_t *desc;
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001130 /* TODO: Using % is expensive, but we have to do this since
1131 * size of some SRNG rings is not power of 2 (due to descriptor
1132 * sizes). Need to create separate API for rings used
1133 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1134 * SW2RXDMA and CE rings)
1135 */
1136 uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
1137 srng->ring_size;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001138
1139 if (next_hp != srng->u.src_ring.cached_tp) {
1140 desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
1141 srng->u.src_ring.hp = next_hp;
1142 /* TODO: Since reap function is not used by all rings, we can
1143 * remove the following update of reap_hp in this function
1144 * if we can ensure that only hal_srng_src_get_next_reaped
1145 * is used for the rings requiring reap functionality
1146 */
1147 srng->u.src_ring.reap_hp = next_hp;
1148 return (void *)desc;
1149 }
1150
1151 return NULL;
1152}
1153
1154/**
1155 * hal_srng_src_peek - Get next entry from a ring without moving head pointer.
1156 * hal_srng_src_get_next should be called subsequently to move the head pointer
1157 *
1158 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301159 * @hal_ring_hdl: Source ring pointer
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001160 *
1161 * Return: Opaque pointer for next ring entry; NULL on failire
1162 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301163static inline
1164void *hal_srng_src_peek(hal_soc_handle_t hal_soc_hdl,
1165 hal_ring_handle_t hal_ring_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001166{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301167 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001168 uint32_t *desc;
1169
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001170 /* TODO: Using % is expensive, but we have to do this since
1171 * size of some SRNG rings is not power of 2 (due to descriptor
1172 * sizes). Need to create separate API for rings used
1173 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1174 * SW2RXDMA and CE rings)
1175 */
1176 if (((srng->u.src_ring.hp + srng->entry_size) %
1177 srng->ring_size) != srng->u.src_ring.cached_tp) {
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001178 desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
1179 return (void *)desc;
1180 }
1181
1182 return NULL;
1183}
1184
1185/**
1186 * hal_srng_src_num_avail - Returns number of available entries in src ring
1187 *
1188 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301189 * @hal_ring_hdl: Source ring pointer
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001190 * @sync_hw_ptr: Sync cached tail pointer with HW
1191 *
1192 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301193static inline uint32_t
1194hal_srng_src_num_avail(void *hal_soc,
1195 hal_ring_handle_t hal_ring_hdl, int sync_hw_ptr)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001196{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301197 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Balamurugan Mahalingamd0159642018-07-11 15:02:29 +05301198 uint32_t tp;
1199 uint32_t hp = srng->u.src_ring.hp;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001200
1201 if (sync_hw_ptr) {
1202 tp = *(srng->u.src_ring.tp_addr);
1203 srng->u.src_ring.cached_tp = tp;
1204 } else {
1205 tp = srng->u.src_ring.cached_tp;
1206 }
1207
1208 if (tp > hp)
1209 return ((tp - hp) / srng->entry_size) - 1;
1210 else
1211 return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
1212}
1213
/**
 * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
 * ring head/tail pointers to HW.
 * This should be used only if hal_srng_access_start_unlocked was used to
 * start ring access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 *
 * Return: none
 */
static inline void
hal_srng_access_end_unlocked(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	/* TODO: See if we need a write memory barrier here */
	if (srng->flags & HAL_SRNG_LMAC_RING) {
		/* For LMAC rings, ring pointer updates are done through FW and
		 * hence written to a shared memory location that is read by FW
		 */
		if (srng->ring_dir == HAL_SRNG_SRC_RING) {
			/* SRC ring: SW publishes its head pointer */
			*(srng->u.src_ring.hp_addr) = srng->u.src_ring.hp;
		} else {
			/* DST ring: SW publishes its tail pointer */
			*(srng->u.dst_ring.tp_addr) = srng->u.dst_ring.tp;
		}
	} else {
		/* Non-LMAC rings: pointer update goes through the windowed
		 * 32-bit register write helper instead of shared memory.
		 */
		if (srng->ring_dir == HAL_SRNG_SRC_RING)
			hal_write_address_32_mb(hal_soc,
						srng->u.src_ring.hp_addr,
						srng->u.src_ring.hp);
		else
			hal_write_address_32_mb(hal_soc,
						srng->u.dst_ring.tp_addr,
						srng->u.dst_ring.tp);
	}
}
1251
1252/**
1253 * hal_srng_access_end - Unlock ring access and update cached ring head/tail
1254 * pointers to HW
1255 * This should be used only if hal_srng_access_start to start ring access
1256 *
1257 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301258 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001259 *
1260 * Return: 0 on success; error on failire
1261 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301262static inline void
1263hal_srng_access_end(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001264{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301265 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001266
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301267 if (qdf_unlikely(!hal_ring_hdl)) {
Krunal Sonief1f0f92018-09-17 21:09:55 -07001268 qdf_print("Error: Invalid hal_ring\n");
1269 return;
1270 }
1271
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301272 hal_srng_access_end_unlocked(hal_soc, hal_ring_hdl);
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001273 SRNG_UNLOCK(&(srng->lock));
1274}
1275
Pamidipati, Vijaydfe618e2016-10-09 09:17:24 +05301276/**
1277 * hal_srng_access_end_reap - Unlock ring access
1278 * This should be used only if hal_srng_access_start to start ring access
1279 * and should be used only while reaping SRC ring completions
1280 *
1281 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301282 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
Pamidipati, Vijaydfe618e2016-10-09 09:17:24 +05301283 *
1284 * Return: 0 on success; error on failire
1285 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301286static inline void
1287hal_srng_access_end_reap(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
Pamidipati, Vijaydfe618e2016-10-09 09:17:24 +05301288{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301289 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Yun Park601d0d82017-08-28 21:49:31 -07001290
Pamidipati, Vijaydfe618e2016-10-09 09:17:24 +05301291 SRNG_UNLOCK(&(srng->lock));
1292}
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001293
1294/* TODO: Check if the following definitions is available in HW headers */
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001295#define WBM_IDLE_SCATTER_BUF_SIZE 32704
1296#define NUM_MPDUS_PER_LINK_DESC 6
1297#define NUM_MSDUS_PER_LINK_DESC 7
1298#define REO_QUEUE_DESC_ALIGN 128
1299
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001300#define LINK_DESC_ALIGN 128
1301
Pramod Simhaccb15fb2017-06-19 12:21:13 -07001302#define ADDRESS_MATCH_TAG_VAL 0x5
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001303/* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
1304 * of TX_MPDU_QUEUE_EXT. We are defining a common average count here
1305 */
1306#define NUM_MPDU_LINKS_PER_QUEUE_DESC 12
1307
1308/* TODO: Check with HW team on the scatter buffer size supported. As per WBM
1309 * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
1310 * should be specified in 16 word units. But the number of bits defined for
1311 * this field in HW header files is 5.
1312 */
1313#define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8
1314
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001315
1316/**
1317 * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
1318 * in an idle list
1319 *
1320 * @hal_soc: Opaque HAL SOC handle
1321 *
1322 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301323static inline
1324uint32_t hal_idle_list_scatter_buf_size(hal_soc_handle_t hal_soc_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001325{
1326 return WBM_IDLE_SCATTER_BUF_SIZE;
1327}
1328
/**
 * hal_get_link_desc_size - Get the size of each link descriptor
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 *
 * Return: link descriptor size from the chip-specific ops table;
 *         -EINVAL on an invalid handle or missing op (see NOTE below)
 */
static inline uint32_t hal_get_link_desc_size(hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	if (!hal_soc || !hal_soc->ops) {
		qdf_print("Error: Invalid ops\n");
		QDF_BUG(0);
		/* NOTE(review): -EINVAL through a uint32_t return becomes a
		 * large positive value at the caller; QDF_BUG above should
		 * trip first, but confirm callers never use this value.
		 */
		return -EINVAL;
	}
	if (!hal_soc->ops->hal_get_link_desc_size) {
		qdf_print("Error: Invalid function pointer\n");
		QDF_BUG(0);
		return -EINVAL;
	}
	/* Delegate to the chip-specific implementation */
	return hal_soc->ops->hal_get_link_desc_size();
}
1351
1352/**
1353 * hal_get_link_desc_align - Get the required start address alignment for
1354 * link descriptors
1355 *
1356 * @hal_soc: Opaque HAL SOC handle
1357 *
1358 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301359static inline
1360uint32_t hal_get_link_desc_align(hal_soc_handle_t hal_soc_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001361{
1362 return LINK_DESC_ALIGN;
1363}
1364
1365/**
1366 * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
1367 *
1368 * @hal_soc: Opaque HAL SOC handle
1369 *
1370 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301371static inline
1372uint32_t hal_num_mpdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001373{
1374 return NUM_MPDUS_PER_LINK_DESC;
1375}
1376
1377/**
1378 * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
1379 *
1380 * @hal_soc: Opaque HAL SOC handle
1381 *
1382 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301383static inline
1384uint32_t hal_num_msdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001385{
1386 return NUM_MSDUS_PER_LINK_DESC;
1387}
1388
1389/**
1390 * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
1391 * descriptor can hold
1392 *
1393 * @hal_soc: Opaque HAL SOC handle
1394 *
1395 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301396static inline
1397uint32_t hal_num_mpdu_links_per_queue_desc(hal_soc_handle_t hal_soc_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001398{
1399 return NUM_MPDU_LINKS_PER_QUEUE_DESC;
1400}
1401
1402/**
1403 * hal_idle_list_scatter_buf_num_entries - Get the number of link desc entries
1404 * that the given buffer size
1405 *
1406 * @hal_soc: Opaque HAL SOC handle
1407 * @scatter_buf_size: Size of scatter buffer
1408 *
1409 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301410static inline
1411uint32_t hal_idle_scatter_buf_num_entries(hal_soc_handle_t hal_soc_hdl,
1412 uint32_t scatter_buf_size)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001413{
1414 return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301415 hal_srng_get_entrysize(hal_soc_hdl, WBM_IDLE_LINK);
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001416}
1417
1418/**
Pramod Simhaccb15fb2017-06-19 12:21:13 -07001419 * hal_idle_list_num_scatter_bufs - Get the number of sctater buffer
1420 * each given buffer size
1421 *
1422 * @hal_soc: Opaque HAL SOC handle
1423 * @total_mem: size of memory to be scattered
1424 * @scatter_buf_size: Size of scatter buffer
1425 *
1426 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301427static inline
1428uint32_t hal_idle_list_num_scatter_bufs(hal_soc_handle_t hal_soc_hdl,
1429 uint32_t total_mem,
1430 uint32_t scatter_buf_size)
Pramod Simhaccb15fb2017-06-19 12:21:13 -07001431{
1432 uint8_t rem = (total_mem % (scatter_buf_size -
1433 WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) ? 1 : 0;
1434
1435 uint32_t num_scatter_bufs = (total_mem / (scatter_buf_size -
1436 WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) + rem;
1437
1438 return num_scatter_bufs;
1439}
1440
/* PN (packet number) check variants; passed as the pn_type argument of
 * hal_reo_qdesc_setup() when configuring a REO queue descriptor.
 */
enum hal_pn_type {
	HAL_PN_NONE,
	HAL_PN_WPA,
	HAL_PN_WAPI_EVEN,
	HAL_PN_WAPI_UNEVEN,
};
1447
1448#define HAL_RX_MAX_BA_WINDOW 256
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001449
1450/**
1451 * hal_get_reo_qdesc_align - Get start address alignment for reo
1452 * queue descriptors
1453 *
1454 * @hal_soc: Opaque HAL SOC handle
1455 *
1456 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301457static inline
1458uint32_t hal_get_reo_qdesc_align(hal_soc_handle_t hal_soc_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001459{
1460 return REO_QUEUE_DESC_ALIGN;
1461}
1462
/**
 * hal_reo_qdesc_setup - Setup HW REO queue descriptor
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @tid: TID the queue descriptor is being set up for
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 * @pn_type: PN type (one of the types defined in 'enum hal_pn_type')
 *
 */
void hal_reo_qdesc_setup(hal_soc_handle_t hal_soc_hdl,
			 int tid, uint32_t ba_window_size,
			 uint32_t start_seq, void *hw_qdesc_vaddr,
			 qdf_dma_addr_t hw_qdesc_paddr,
			 int pn_type);
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001479
1480/**
1481 * hal_srng_get_hp_addr - Get head pointer physical address
1482 *
1483 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301484 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001485 *
1486 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301487static inline qdf_dma_addr_t
1488hal_srng_get_hp_addr(void *hal_soc,
1489 hal_ring_handle_t hal_ring_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001490{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301491 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001492 struct hal_soc *hal = (struct hal_soc *)hal_soc;
1493
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001494 if (srng->ring_dir == HAL_SRNG_SRC_RING) {
Manoj Ekbote7980f3e2017-02-06 15:30:00 -08001495 return hal->shadow_wrptr_mem_paddr +
1496 ((unsigned long)(srng->u.src_ring.hp_addr) -
1497 (unsigned long)(hal->shadow_wrptr_mem_vaddr));
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001498 } else {
Manoj Ekbote7980f3e2017-02-06 15:30:00 -08001499 return hal->shadow_rdptr_mem_paddr +
1500 ((unsigned long)(srng->u.dst_ring.hp_addr) -
1501 (unsigned long)(hal->shadow_rdptr_mem_vaddr));
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001502 }
1503}
1504
1505/**
1506 * hal_srng_get_tp_addr - Get tail pointer physical address
1507 *
1508 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301509 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001510 *
1511 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301512static inline qdf_dma_addr_t
1513hal_srng_get_tp_addr(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001514{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301515 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001516 struct hal_soc *hal = (struct hal_soc *)hal_soc;
1517
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001518 if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1519 return hal->shadow_rdptr_mem_paddr +
1520 ((unsigned long)(srng->u.src_ring.tp_addr) -
1521 (unsigned long)(hal->shadow_rdptr_mem_vaddr));
1522 } else {
1523 return hal->shadow_wrptr_mem_paddr +
1524 ((unsigned long)(srng->u.dst_ring.tp_addr) -
1525 (unsigned long)(hal->shadow_wrptr_mem_vaddr));
1526 }
1527}
1528
1529/**
Mohit Khanna80002652019-10-14 23:27:36 -07001530 * hal_srng_get_num_entries - Get total entries in the HAL Srng
1531 *
1532 * @hal_soc: Opaque HAL SOC handle
1533 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1534 *
1535 * Return: total number of entries in hal ring
1536 */
1537static inline
1538uint32_t hal_srng_get_num_entries(hal_soc_handle_t hal_soc_hdl,
1539 hal_ring_handle_t hal_ring_hdl)
1540{
1541 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1542
1543 return srng->num_entries;
1544}
1545
/**
 * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 * @ring_params: SRNG parameters will be returned through this structure
 *
 * Return: none (output delivered through @ring_params)
 */
void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl,
			 hal_ring_handle_t hal_ring_hdl,
			 struct hal_srng_params *ring_params);
Ravi Joshi36f68ad2016-11-09 17:09:47 -08001556
/**
 * hal_get_meminfo - Retrieve hal memory base address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @mem: pointer to structure to be updated with hal mem info
 */
void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem);
Balamurugan Mahalingamd0159642018-07-11 15:02:29 +05301564
/**
 * hal_get_target_type - Return target type
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: target type identifier
 */
uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl);
sumedh baikady1f8f3192018-02-20 17:30:32 -08001571
/**
 * hal_get_ba_aging_timeout - Retrieve BA aging timeout
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ac: Access category
 * @value: pointer to be filled with the timeout duration in millisec
 */
void hal_get_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
			      uint32_t *value);
/**
 * hal_set_ba_aging_timeout - Set BA aging timeout
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ac: Access category
 * @value: timeout duration in millisec
 */
void hal_set_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
			      uint32_t value);
/**
 * hal_srng_dst_hw_init - Private function to initialize SRNG
 * destination ring HW
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 *
 * Dispatches to the chip-specific implementation through the HAL ops table.
 */
static inline void hal_srng_dst_hw_init(struct hal_soc *hal,
					struct hal_srng *srng)
{
	hal->ops->hal_srng_dst_hw_init(hal, srng);
}
sumedh baikady1f8f3192018-02-20 17:30:32 -08001601
/**
 * hal_srng_src_hw_init - Private function to initialize SRNG
 * source ring HW
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 *
 * Dispatches to the chip-specific implementation through the HAL ops table.
 */
static inline void hal_srng_src_hw_init(struct hal_soc *hal,
					struct hal_srng *srng)
{
	hal->ops->hal_srng_src_hw_init(hal, srng);
}
1613
1614/**
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07001615 * hal_get_hw_hptp() - Get HW head and tail pointer value for any ring
1616 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301617 * @hal_ring_hdl: Source ring pointer
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07001618 * @headp: Head Pointer
1619 * @tailp: Tail Pointer
1620 * @ring_type: Ring
1621 *
1622 * Return: Update tail pointer and head pointer in arguments.
1623 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301624static inline
1625void hal_get_hw_hptp(hal_soc_handle_t hal_soc_hdl,
1626 hal_ring_handle_t hal_ring_hdl,
1627 uint32_t *headp, uint32_t *tailp,
1628 uint8_t ring_type)
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07001629{
Akshay Kosigi6a206752019-06-10 23:14:52 +05301630 struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1631
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301632 hal_soc->ops->hal_get_hw_hptp(hal_soc, hal_ring_hdl,
Akshay Kosigi6a206752019-06-10 23:14:52 +05301633 headp, tailp, ring_type);
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07001634}
1635
1636/**
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +05301637 * hal_reo_setup - Initialize HW REO block
1638 *
1639 * @hal_soc: Opaque HAL SOC handle
1640 * @reo_params: parameters needed by HAL for REO config
1641 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301642static inline void hal_reo_setup(hal_soc_handle_t hal_soc_hdl,
1643 void *reoparams)
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +05301644{
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301645 struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +05301646
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301647 hal_soc->ops->hal_reo_setup(hal_soc, reoparams);
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +05301648}
1649
1650/**
1651 * hal_setup_link_idle_list - Setup scattered idle list using the
1652 * buffer list provided
1653 *
1654 * @hal_soc: Opaque HAL SOC handle
1655 * @scatter_bufs_base_paddr: Array of physical base addresses
1656 * @scatter_bufs_base_vaddr: Array of virtual base addresses
1657 * @num_scatter_bufs: Number of scatter buffers in the above lists
1658 * @scatter_buf_size: Size of each scatter buffer
1659 * @last_buf_end_offset: Offset to the last entry
1660 * @num_entries: Total entries of all scatter bufs
1661 *
1662 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301663static inline
1664void hal_setup_link_idle_list(hal_soc_handle_t hal_soc_hdl,
1665 qdf_dma_addr_t scatter_bufs_base_paddr[],
1666 void *scatter_bufs_base_vaddr[],
1667 uint32_t num_scatter_bufs,
1668 uint32_t scatter_buf_size,
1669 uint32_t last_buf_end_offset,
1670 uint32_t num_entries)
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +05301671{
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301672 struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +05301673
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301674 hal_soc->ops->hal_setup_link_idle_list(hal_soc, scatter_bufs_base_paddr,
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +05301675 scatter_bufs_base_vaddr, num_scatter_bufs,
1676 scatter_buf_size, last_buf_end_offset,
1677 num_entries);
1678
1679}
Tallapragada Kalyaneff377a2019-01-09 19:13:19 +05301680
1681/**
1682 * hal_srng_dump_ring_desc() - Dump ring descriptor info
1683 *
1684 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301685 * @hal_ring_hdl: Source ring pointer
Tallapragada Kalyaneff377a2019-01-09 19:13:19 +05301686 * @ring_desc: Opaque ring descriptor handle
1687 */
Akshay Kosigia870c612019-07-08 23:10:30 +05301688static inline void hal_srng_dump_ring_desc(hal_soc_handle_t hal_soc_hdl,
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301689 hal_ring_handle_t hal_ring_hdl,
Akshay Kosigi91c56522019-07-02 11:49:39 +05301690 hal_ring_desc_t ring_desc)
Tallapragada Kalyaneff377a2019-01-09 19:13:19 +05301691{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301692 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Tallapragada Kalyaneff377a2019-01-09 19:13:19 +05301693
Saket Jha16d84322019-07-11 16:09:41 -07001694 QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
Tallapragada Kalyaneff377a2019-01-09 19:13:19 +05301695 ring_desc, (srng->entry_size << 2));
1696}
1697
/**
 * hal_srng_dump_ring() - Dump last 128 descs of the ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 *
 * Walks backwards from the current destination-ring tail pointer and
 * hex-dumps the previous 128 descriptors at DEBUG trace level.
 * NOTE(review): reads only u.dst_ring.tp, so this appears to assume a
 * destination ring despite the "Source ring pointer" doc above -- confirm
 * against callers.
 */
static inline void hal_srng_dump_ring(hal_soc_handle_t hal_soc_hdl,
				      hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t *desc;
	uint32_t tp, i;

	tp = srng->u.dst_ring.tp;

	for (i = 0; i < 128; i++) {
		/* Wrap back to the ring end once offset 0 is reached */
		if (!tp)
			tp = srng->ring_size;

		/* Descriptor ending at word offset 'tp'; entry_size is in
		 * 32-bit words, hence '<< 2' for the byte length below.
		 */
		desc = &srng->ring_base_vaddr[tp - srng->entry_size];
		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP,
				   QDF_TRACE_LEVEL_DEBUG,
				   desc, (srng->entry_size << 2));

		tp -= srng->entry_size;
	}
}
1725
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301726/*
1727 * hal_rxdma_desc_to_hal_ring_desc - API to convert rxdma ring desc
1728 * to opaque dp_ring desc type
1729 * @ring_desc - rxdma ring desc
1730 *
1731 * Return: hal_rxdma_desc_t type
1732 */
1733static inline
1734hal_ring_desc_t hal_rxdma_desc_to_hal_ring_desc(hal_rxdma_desc_t ring_desc)
1735{
1736 return (hal_ring_desc_t)ring_desc;
1737}
Sravan Kumar Kairam78b01a12019-09-16 14:22:55 +05301738
1739/**
1740 * hal_srng_set_event() - Set hal_srng event
1741 * @hal_ring_hdl: Source ring pointer
1742 * @event: SRNG ring event
1743 *
1744 * Return: None
1745 */
1746static inline void hal_srng_set_event(hal_ring_handle_t hal_ring_hdl, int event)
1747{
1748 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1749
1750 qdf_atomic_set_bit(event, &srng->srng_event);
1751}
1752
1753/**
1754 * hal_srng_clear_event() - Clear hal_srng event
1755 * @hal_ring_hdl: Source ring pointer
1756 * @event: SRNG ring event
1757 *
1758 * Return: None
1759 */
1760static inline
1761void hal_srng_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
1762{
1763 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1764
1765 qdf_atomic_clear_bit(event, &srng->srng_event);
1766}
1767
1768/**
1769 * hal_srng_get_clear_event() - Clear srng event and return old value
1770 * @hal_ring_hdl: Source ring pointer
1771 * @event: SRNG ring event
1772 *
1773 * Return: Return old event value
1774 */
1775static inline
1776int hal_srng_get_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
1777{
1778 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1779
1780 return qdf_atomic_test_and_clear_bit(event, &srng->srng_event);
1781}
1782
1783/**
1784 * hal_srng_set_flush_last_ts() - Record last flush time stamp
1785 * @hal_ring_hdl: Source ring pointer
1786 *
1787 * Return: None
1788 */
1789static inline void hal_srng_set_flush_last_ts(hal_ring_handle_t hal_ring_hdl)
1790{
1791 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1792
1793 srng->last_flush_ts = qdf_get_log_timestamp();
1794}
1795
1796/**
1797 * hal_srng_inc_flush_cnt() - Increment flush counter
1798 * @hal_ring_hdl: Source ring pointer
1799 *
1800 * Return: None
1801 */
1802static inline void hal_srng_inc_flush_cnt(hal_ring_handle_t hal_ring_hdl)
1803{
1804 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1805
1806 srng->flush_count++;
1807}
#endif /* _HAL_API_H_ */