blob: b1b7f744db6cd70a2f28a783fafe0206618853e6 [file] [log] [blame]
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001/*
Tallapragada Kalyaneff377a2019-01-09 19:13:19 +05302 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07003 *
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +05304 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07008 *
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +05309 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16 * PERFORMANCE OF THIS SOFTWARE.
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -070017 */
18
19#ifndef _HAL_API_H_
20#define _HAL_API_H_
21
22#include "qdf_types.h"
Houston Hoffman61dad492017-04-07 17:09:34 -070023#include "qdf_util.h"
Sravan Kumar Kairam78b01a12019-09-16 14:22:55 +053024#include "qdf_atomic.h"
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -070025#include "hal_internal.h"
Houston Hoffman61dad492017-04-07 17:09:34 -070026#define MAX_UNWINDOWED_ADDRESS 0x80000
Nandha Kishore Easwaranfb73acb2019-10-24 17:33:52 +053027#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
28 defined(QCA_WIFI_QCN9000)
Venkata Sharath Chandra Manchala9347b8d2018-06-07 15:26:11 -070029#define WINDOW_ENABLE_BIT 0x40000000
30#else
Houston Hoffman61dad492017-04-07 17:09:34 -070031#define WINDOW_ENABLE_BIT 0x80000000
Venkata Sharath Chandra Manchala9347b8d2018-06-07 15:26:11 -070032#endif
Houston Hoffman61dad492017-04-07 17:09:34 -070033#define WINDOW_REG_ADDRESS 0x310C
34#define WINDOW_SHIFT 19
jiad5661cef2017-11-09 18:24:41 +080035#define WINDOW_VALUE_MASK 0x3F
Houston Hoffman61dad492017-04-07 17:09:34 -070036#define WINDOW_START MAX_UNWINDOWED_ADDRESS
37#define WINDOW_RANGE_MASK 0x7FFFF
38
Pramod Simha95c59f22018-08-27 10:03:04 -070039/*
40 * BAR + 4K is always accessible, any access outside this
41 * space requires force wake procedure.
 * OFFSET = 0x4063 (NOTE(review): the stated "4K - 32 bytes" does not
 * match this value -- 4K - 32 = 0xFE0; confirm the intended boundary)
43 */
44#define MAPPED_REF_OFF 0x4063
45#define FORCE_WAKE_DELAY_TIMEOUT 50
46#define FORCE_WAKE_DELAY_MS 5
47
Akshay Kosigi8eda31c2019-07-10 14:42:42 +053048/**
49 * hal_ring_desc - opaque handle for DP ring descriptor
50 */
51struct hal_ring_desc;
52typedef struct hal_ring_desc *hal_ring_desc_t;
53
54/**
55 * hal_link_desc - opaque handle for DP link descriptor
56 */
57struct hal_link_desc;
58typedef struct hal_link_desc *hal_link_desc_t;
59
60/**
61 * hal_rxdma_desc - opaque handle for DP rxdma dst ring descriptor
62 */
63struct hal_rxdma_desc;
64typedef struct hal_rxdma_desc *hal_rxdma_desc_t;
65
#ifdef ENABLE_VERBOSE_DEBUG
/**
 * hal_set_verbose_debug() - enable or disable verbose HAL debug tracing
 * @flag: true to enable verbose debugging, false to disable
 *
 * Updates the global is_hal_verbose_debug_enabled flag (declared elsewhere,
 * presumably hal_internal.h -- verify) that HAL debug trace paths consult.
 *
 * Return: none
 */
static inline void
hal_set_verbose_debug(bool flag)
{
	is_hal_verbose_debug_enabled = flag;
}
#endif
73
#ifdef HAL_REGISTER_WRITE_DEBUG
/**
 * hal_reg_write_result_check() - check register writing result
 * @hal_soc: HAL soc handle
 * @offset: register offset to read
 * @exp_val: the expected value of register
 * @ret_confirm: result confirm flag
 *
 * Reads the register at @offset back and logs a trace if the value does not
 * match @exp_val.  The read-back is performed only when @ret_confirm is
 * true, so fast paths can skip the extra bus read.
 *
 * Return: none
 */
static inline void hal_reg_write_result_check(struct hal_soc *hal_soc,
					      uint32_t offset,
					      uint32_t exp_val,
					      bool ret_confirm)
{
	uint32_t value;

	if (!ret_confirm)
		return;

	value = qdf_ioread32(hal_soc->dev_base_addr + offset);
	if (exp_val != value) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			  "register offset 0x%x write failed!\n", offset);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			  "the expectation 0x%x, actual value 0x%x\n",
			  exp_val,
			  value);
	}
}
#else
/* no op: with HAL_REGISTER_WRITE_DEBUG disabled the check compiles away */
#define hal_reg_write_result_check(_hal_soc, _offset, _exp_val, _ret_confirm)
#endif
108
#if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490)
/**
 * hal_force_wake_request() - request that the target be kept awake
 * @soc: HAL soc handle
 *
 * No-op variant for targets that do not require a force-wake handshake
 * before accessing registers outside the always-mapped region.
 *
 * Return: 0 (always succeeds)
 */
static inline int hal_force_wake_request(struct hal_soc *soc)
{
	return 0;
}

/**
 * hal_force_wake_release() - release a previous force-wake request
 * @soc: HAL soc handle
 *
 * No-op counterpart of hal_force_wake_request() on these targets.
 *
 * Return: 0 (always succeeds)
 */
static inline int hal_force_wake_release(struct hal_soc *soc)
{
	return 0;
}

/**
 * hal_lock_reg_access() - serialize register window accesses
 * @soc: HAL soc handle
 * @flags: saved IRQ flags; unused on this path, the qdf spinlock API
 *         manages flags internally
 *
 * Return: none
 */
static inline void hal_lock_reg_access(struct hal_soc *soc,
				       unsigned long *flags)
{
	qdf_spin_lock_irqsave(&soc->register_access_lock);
}

/**
 * hal_unlock_reg_access() - counterpart of hal_lock_reg_access()
 * @soc: HAL soc handle
 * @flags: saved IRQ flags; unused on this path
 *
 * Return: none
 */
static inline void hal_unlock_reg_access(struct hal_soc *soc,
					 unsigned long *flags)
{
	qdf_spin_unlock_irqrestore(&soc->register_access_lock);
}

#else
/**
 * hal_force_wake_request() - wake the target for register access
 * @soc: HAL soc handle
 *
 * Sends a force-wake request through the PLD layer and then polls
 * pld_is_device_awake() in FORCE_WAKE_DELAY_MS steps for at most
 * FORCE_WAKE_DELAY_TIMEOUT ms.  Note the poll uses mdelay(), i.e. it
 * busy-waits; callers may be in atomic context.
 *
 * Return: 0 once the device reports awake, -EINVAL if the request could
 *         not be sent, -ETIMEDOUT if the device did not wake in time
 */
static inline int hal_force_wake_request(struct hal_soc *soc)
{
	uint32_t timeout = 0;
	int ret;

	ret = pld_force_wake_request(soc->qdf_dev->dev);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Request send failed %d\n", __func__, ret);
		return -EINVAL;
	}

	/* Busy-wait until the device reports awake or the timeout elapses */
	while (!pld_is_device_awake(soc->qdf_dev->dev) &&
	       timeout <= FORCE_WAKE_DELAY_TIMEOUT) {
		mdelay(FORCE_WAKE_DELAY_MS);
		timeout += FORCE_WAKE_DELAY_MS;
	}

	if (pld_is_device_awake(soc->qdf_dev->dev) == true)
		return 0;
	else
		return -ETIMEDOUT;
}

/**
 * hal_force_wake_release() - drop the vote taken by hal_force_wake_request()
 * @soc: HAL soc handle
 *
 * Return: 0 on success, PLD error code otherwise
 */
static inline int hal_force_wake_release(struct hal_soc *soc)
{
	return pld_force_wake_release(soc->qdf_dev->dev);
}

/**
 * hal_lock_reg_access() - serialize register window accesses via PLD
 * @soc: HAL soc handle
 * @flags: storage for saved IRQ flags, filled in by pld_lock_reg_window()
 *
 * Return: none
 */
static inline void hal_lock_reg_access(struct hal_soc *soc,
				       unsigned long *flags)
{
	pld_lock_reg_window(soc->qdf_dev->dev, flags);
}

/**
 * hal_unlock_reg_access() - counterpart of hal_lock_reg_access()
 * @soc: HAL soc handle
 * @flags: IRQ flags previously saved by hal_lock_reg_access()
 *
 * Return: none
 */
static inline void hal_unlock_reg_access(struct hal_soc *soc,
					 unsigned long *flags)
{
	pld_unlock_reg_window(soc->qdf_dev->dev, flags);
}
#endif
174
#ifdef PCIE_REG_WINDOW_LOCAL_NO_CACHE
/**
 * hal_select_window() - program the register window covering @offset
 * @hal_soc: HAL soc handle
 * @offset: full (unwindowed) register offset the caller wants to reach
 * @ret_confirm: when true, read the window register back to confirm
 *
 * Variant that always rewrites the window register, even when the cached
 * hal_soc->register_window already matches (the local cache is not
 * trusted when PCIE_REG_WINDOW_LOCAL_NO_CACHE is set).
 *
 * Return: none
 */
static inline void hal_select_window(struct hal_soc *hal_soc, uint32_t offset,
				     bool ret_confirm)
{
	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;

	qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
		      WINDOW_ENABLE_BIT | window);
	hal_soc->register_window = window;

	hal_reg_write_result_check(hal_soc, WINDOW_REG_ADDRESS,
				   WINDOW_ENABLE_BIT | window,
				   ret_confirm);
}
#else
/**
 * hal_select_window() - program the register window covering @offset
 * @hal_soc: HAL soc handle
 * @offset: full (unwindowed) register offset the caller wants to reach
 * @ret_confirm: when true, read the window register back to confirm
 *
 * Caches the last programmed window in hal_soc->register_window and only
 * touches the hardware when the window actually changes.
 *
 * Return: none
 */
static inline void hal_select_window(struct hal_soc *hal_soc, uint32_t offset,
				     bool ret_confirm)
{
	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
	if (window != hal_soc->register_window) {
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
			      WINDOW_ENABLE_BIT | window);
		hal_soc->register_window = window;

		hal_reg_write_result_check(
				hal_soc,
				WINDOW_REG_ADDRESS,
				WINDOW_ENABLE_BIT | window,
				ret_confirm);
	}
}
#endif
Houston Hoffman61dad492017-04-07 17:09:34 -0700207
208/**
209 * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) -1
210 * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS
211 * note3: WINDOW_VALUE_MASK = big enough that trying to write past that window
212 * would be a bug
213 */
#if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490)
/**
 * hal_write32_mb() - write a 32-bit value to a register by offset
 * @hal_soc: HAL soc handle
 * @offset: register offset from the device base
 * @value: value to program
 * @ret_confirm: when true, read the register back to confirm the write
 *
 * Offsets below MAX_UNWINDOWED_ADDRESS (or all offsets when windowing is
 * disabled) are written directly; otherwise the register window is
 * selected under the register-access lock and the write goes through the
 * windowed aperture at WINDOW_START.
 *
 * Return: none
 */
static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
				  uint32_t value, bool ret_confirm)
{
	unsigned long flags;

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
		hal_reg_write_result_check(hal_soc, offset,
					   value, ret_confirm);
	} else {
		hal_lock_reg_access(hal_soc, &flags);
		hal_select_window(hal_soc, offset, ret_confirm);
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
			      (offset & WINDOW_RANGE_MASK), value);

		hal_reg_write_result_check(
				hal_soc,
				WINDOW_START + (offset & WINDOW_RANGE_MASK),
				value, ret_confirm);
		hal_unlock_reg_access(hal_soc, &flags);
	}
}
#else
/**
 * hal_write32_mb() - write a 32-bit register, handling force wake
 * @hal_soc: HAL soc handle
 * @offset: register offset from the device base
 * @value: value to program
 * @ret_confirm: when true, read the register back to confirm the write
 *
 * Force-wake variant: offsets beyond MAPPED_REF_OFF first require a
 * successful force-wake handshake (QDF_BUG on failure, write dropped).
 * The windowed/unwindowed write path is then identical to the
 * non-force-wake variant, and the force-wake vote is released at the end.
 *
 * Return: none
 */
static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
				  uint32_t value, bool ret_confirm)
{
	int ret;
	unsigned long flags;

	if (offset > MAPPED_REF_OFF) {
		ret = hal_force_wake_request(hal_soc);
		if (ret) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: Wake up request failed %d\n",
				  __func__, ret);
			QDF_BUG(0);
			return;
		}
	}

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
		hal_reg_write_result_check(hal_soc, offset,
					   value, ret_confirm);
	} else {
		hal_lock_reg_access(hal_soc, &flags);
		hal_select_window(hal_soc, offset, ret_confirm);
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
			      (offset & WINDOW_RANGE_MASK), value);

		hal_reg_write_result_check(
				hal_soc,
				WINDOW_START + (offset & WINDOW_RANGE_MASK),
				value,
				ret_confirm);
		hal_unlock_reg_access(hal_soc, &flags);
	}

	if ((offset > MAPPED_REF_OFF) &&
	    hal_force_wake_release(hal_soc))
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Wake up release failed\n", __func__);
}

#endif
Houston Hoffman61dad492017-04-07 17:09:34 -0700282
283/**
284 * hal_write_address_32_mb - write a value to a register
285 *
286 */
287static inline void hal_write_address_32_mb(struct hal_soc *hal_soc,
288 void __iomem *addr, uint32_t value)
289{
290 uint32_t offset;
291
292 if (!hal_soc->use_register_windowing)
293 return qdf_iowrite32(addr, value);
294
295 offset = addr - hal_soc->dev_base_addr;
Jinwei Chen99ae1c12019-11-01 19:43:30 +0800296 hal_write32_mb(hal_soc, offset, value, false);
Houston Hoffman61dad492017-04-07 17:09:34 -0700297}
298
#if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490)
/**
 * hal_read32_mb() - read a 32-bit register by offset
 * @hal_soc: HAL soc handle
 * @offset: register offset from the device base
 *
 * Offsets below MAX_UNWINDOWED_ADDRESS (or all offsets when windowing is
 * disabled) are read directly; otherwise the register window is selected
 * under the register-access lock and the read goes through the windowed
 * aperture at WINDOW_START.
 *
 * Return: 32-bit register value
 */
static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
{
	uint32_t ret;
	unsigned long flags;

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		return qdf_ioread32(hal_soc->dev_base_addr + offset);
	}

	hal_lock_reg_access(hal_soc, &flags);
	hal_select_window(hal_soc, offset, false);
	ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
			   (offset & WINDOW_RANGE_MASK));
	hal_unlock_reg_access(hal_soc, &flags);

	return ret;
}

/**
 * hal_read_address_32_mb() - Read 32-bit value from the register
 * @soc: soc handle
 * @addr: register address to read
 *
 * Return: 32-bit value
 */
static inline uint32_t hal_read_address_32_mb(struct hal_soc *soc,
					      void __iomem *addr)
{
	uint32_t offset;
	uint32_t ret;

	if (!soc->use_register_windowing)
		return qdf_ioread32(addr);

	offset = addr - soc->dev_base_addr;
	ret = hal_read32_mb(soc, offset);
	return ret;
}
#else
/**
 * hal_read32_mb() - read a 32-bit register, handling force wake
 * @hal_soc: HAL soc handle
 * @offset: register offset from the device base
 *
 * Force-wake variant: offsets beyond MAPPED_REF_OFF first require a
 * successful force-wake handshake; the vote is released after the read.
 *
 * Return: 32-bit register value.
 *         NOTE(review): on a wake failure this returns -EINVAL converted
 *         to uint32_t, which a caller cannot distinguish from a valid
 *         register value -- known limitation of this interface.
 */
static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
{
	uint32_t ret;
	unsigned long flags;

	if ((offset > MAPPED_REF_OFF) &&
	    hal_force_wake_request(hal_soc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Wake up request failed\n", __func__);
		return -EINVAL;
	}

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		return qdf_ioread32(hal_soc->dev_base_addr + offset);
	}

	hal_lock_reg_access(hal_soc, &flags);
	hal_select_window(hal_soc, offset, false);
	ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
			   (offset & WINDOW_RANGE_MASK));
	hal_unlock_reg_access(hal_soc, &flags);

	if ((offset > MAPPED_REF_OFF) &&
	    hal_force_wake_release(hal_soc))
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Wake up release failed\n", __func__);

	return ret;
}

/**
 * hal_read_address_32_mb() - Read 32-bit value from the register
 * @soc: soc handle
 * @addr: register address to read
 *
 * Return: 32-bit value
 */
static inline uint32_t hal_read_address_32_mb(struct hal_soc *soc,
					      void __iomem *addr)
{
	uint32_t offset;
	uint32_t ret;

	if (!soc->use_register_windowing)
		return qdf_ioread32(addr);

	offset = addr - soc->dev_base_addr;
	ret = hal_read32_mb(soc, offset);
	return ret;
}
#endif
Houston Hoffman61dad492017-04-07 17:09:34 -0700385
386#include "hif_io32.h"
387
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700388/**
Jeff Johnsonf7aed492018-05-12 11:14:55 -0700389 * hal_attach - Initialize HAL layer
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700390 * @hif_handle: Opaque HIF handle
391 * @qdf_dev: QDF device
392 *
393 * Return: Opaque HAL SOC handle
394 * NULL on failure (if given ring is not available)
395 *
396 * This function should be called as part of HIF initialization (for accessing
397 * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
398 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +0530399void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev);
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700400
401/**
402 * hal_detach - Detach HAL layer
403 * @hal_soc: HAL SOC handle
404 *
405 * This function should be called as part of HIF detach
406 *
407 */
408extern void hal_detach(void *hal_soc);
409
/* SRNG type to be passed in APIs hal_srng_get_entrysize and hal_srng_setup */
enum hal_ring_type {
	/* REO rings */
	REO_DST = 0,
	REO_EXCEPTION = 1,
	REO_REINJECT = 2,
	REO_CMD = 3,
	REO_STATUS = 4,
	/* TCL rings */
	TCL_DATA = 5,
	TCL_CMD = 6,
	TCL_STATUS = 7,
	/* copy engine rings */
	CE_SRC = 8,
	CE_DST = 9,
	CE_DST_STATUS = 10,
	/* WBM rings */
	WBM_IDLE_LINK = 11,
	SW2WBM_RELEASE = 12,
	WBM2SW_RELEASE = 13,
	/* RXDMA rings (including monitor-mode rings) */
	RXDMA_BUF = 14,
	RXDMA_DST = 15,
	RXDMA_MONITOR_BUF = 16,
	RXDMA_MONITOR_STATUS = 17,
	RXDMA_MONITOR_DST = 18,
	RXDMA_MONITOR_DESC = 19,
	DIR_BUF_RX_DMA_SRC = 20,
#ifdef WLAN_FEATURE_CIF_CFR
	WIFI_POS_SRC,
#endif
	MAX_RING_TYPES
};
438
Balamurugan Mahalingamd0159642018-07-11 15:02:29 +0530439#define HAL_SRNG_LMAC_RING 0x80000000
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700440/* SRNG flags passed in hal_srng_params.flags */
441#define HAL_SRNG_MSI_SWAP 0x00000008
442#define HAL_SRNG_RING_PTR_SWAP 0x00000010
443#define HAL_SRNG_DATA_TLV_SWAP 0x00000020
444#define HAL_SRNG_LOW_THRES_INTR_ENABLE 0x00010000
445#define HAL_SRNG_MSI_INTR 0x00020000
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +0530446#define HAL_SRNG_CACHED_DESC 0x00040000
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700447
Gurumoorthi Gnanasambandhaned4bcf82017-05-24 00:10:59 +0530448#define PN_SIZE_24 0
449#define PN_SIZE_48 1
450#define PN_SIZE_128 2
451
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700452/**
453 * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
454 * used by callers for calculating the size of memory to be allocated before
455 * calling hal_srng_setup to setup the ring
456 *
457 * @hal_soc: Opaque HAL SOC handle
458 * @ring_type: one of the types from hal_ring_type
459 *
460 */
461extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);
462
Karunakar Dasinenid0ea21f2017-01-31 22:58:15 -0800463/**
464 * hal_srng_max_entries - Returns maximum possible number of ring entries
465 * @hal_soc: Opaque HAL SOC handle
466 * @ring_type: one of the types from hal_ring_type
467 *
468 * Return: Maximum number of entries for the given ring_type
469 */
470uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);
471
Houston Hoffman648a9182017-05-21 23:27:50 -0700472/**
Kai Liub8e12412018-01-12 16:52:26 +0800473 * hal_srng_dump - Dump ring status
474 * @srng: hal srng pointer
475 */
476void hal_srng_dump(struct hal_srng *srng);
477
478/**
Houston Hoffman648a9182017-05-21 23:27:50 -0700479 * hal_srng_get_dir - Returns the direction of the ring
480 * @hal_soc: Opaque HAL SOC handle
481 * @ring_type: one of the types from hal_ring_type
482 *
483 * Return: Ring direction
484 */
485enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type);
486
/* HAL memory information */
struct hal_mem_info {
	/* dev base virtual addr */
	void *dev_base_addr;
	/* dev base physical addr */
	void *dev_base_paddr;
	/* Remote virtual pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_vaddr;
	/* Remote physical pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_paddr;
	/* Shared memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_vaddr;
	/* Shared physical memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_paddr;
};
502
/* SRNG parameters to be passed to hal_srng_setup */
struct hal_srng_params {
	/* Physical base address of the ring */
	qdf_dma_addr_t ring_base_paddr;
	/* Virtual base address of the ring */
	void *ring_base_vaddr;
	/* Number of entries in ring */
	uint32_t num_entries;
	/* max transfer length */
	uint16_t max_buffer_length;
	/* MSI Address */
	qdf_dma_addr_t msi_addr;
	/* MSI data */
	uint32_t msi_data;
	/* Interrupt timer threshold - in micro seconds */
	uint32_t intr_timer_thres_us;
	/* Interrupt batch counter threshold - in number of ring entries */
	uint32_t intr_batch_cntr_thres_entries;
	/* Low threshold - in number of ring entries
	 * (valid for src rings only)
	 */
	uint32_t low_threshold;
	/* Misc flags (HAL_SRNG_* bits defined above) */
	uint32_t flags;
	/* Unique ring id */
	uint8_t ring_id;
	/* Source or Destination ring */
	enum hal_srng_dir ring_dir;
	/* Size of ring entry */
	uint32_t entry_size;
	/* hw register base address */
	void *hwreg_base[MAX_SRNG_REG_GROUPS];
};
536
Houston Hoffman5141f9d2017-01-05 10:49:17 -0800537/* hal_construct_shadow_config() - initialize the shadow registers for dp rings
538 * @hal_soc: hal handle
539 *
540 * Return: QDF_STATUS_OK on success
541 */
542extern QDF_STATUS hal_construct_shadow_config(void *hal_soc);
543
544/* hal_set_one_shadow_config() - add a config for the specified ring
545 * @hal_soc: hal handle
546 * @ring_type: ring type
547 * @ring_num: ring num
548 *
549 * The ring type and ring num uniquely specify the ring. After this call,
550 * the hp/tp will be added as the next entry int the shadow register
551 * configuration table. The hal code will use the shadow register address
552 * in place of the hp/tp address.
553 *
554 * This function is exposed, so that the CE module can skip configuring shadow
555 * registers for unused ring and rings assigned to the firmware.
556 *
557 * Return: QDF_STATUS_OK on success
558 */
559extern QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
560 int ring_num);
561/**
562 * hal_get_shadow_config() - retrieve the config table
563 * @hal_soc: hal handle
564 * @shadow_config: will point to the table after
565 * @num_shadow_registers_configured: will contain the number of valid entries
566 */
567extern void hal_get_shadow_config(void *hal_soc,
568 struct pld_shadow_reg_v2_cfg **shadow_config,
569 int *num_shadow_registers_configured);
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700570/**
Jeff Johnsonf7aed492018-05-12 11:14:55 -0700571 * hal_srng_setup - Initialize HW SRNG ring.
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700572 *
573 * @hal_soc: Opaque HAL SOC handle
574 * @ring_type: one of the types from hal_ring_type
575 * @ring_num: Ring number if there are multiple rings of
576 * same type (staring from 0)
577 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
578 * @ring_params: SRNG ring params in hal_srng_params structure.
579
580 * Callers are expected to allocate contiguous ring memory of size
581 * 'num_entries * entry_size' bytes and pass the physical and virtual base
582 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
583 * structure. Ring base address should be 8 byte aligned and size of each ring
584 * entry should be queried using the API hal_srng_get_entrysize
585 *
586 * Return: Opaque pointer to ring on success
587 * NULL on failure (if given ring is not available)
588 */
589extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
590 int mac_id, struct hal_srng_params *ring_params);
591
Yun Parkfde6b9e2017-06-26 17:13:11 -0700592/* Remapping ids of REO rings */
593#define REO_REMAP_TCL 0
594#define REO_REMAP_SW1 1
595#define REO_REMAP_SW2 2
596#define REO_REMAP_SW3 3
597#define REO_REMAP_SW4 4
598#define REO_REMAP_RELEASE 5
599#define REO_REMAP_FW 6
600#define REO_REMAP_UNUSED 7
601
602/*
Venkata Sharath Chandra Manchalac9e344d2019-10-23 14:13:14 -0700603 * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0
604 * to map destination to rings
Yun Parkfde6b9e2017-06-26 17:13:11 -0700605 */
Venkata Sharath Chandra Manchalac9e344d2019-10-23 14:13:14 -0700606#define HAL_REO_REMAP_IX0(_VALUE, _OFFSET) \
607 ((_VALUE) << \
Yun Parkfde6b9e2017-06-26 17:13:11 -0700608 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_DEST_RING_MAPPING_ ## \
Venkata Sharath Chandra Manchalac9e344d2019-10-23 14:13:14 -0700609 _OFFSET ## _SHFT))
610
611/*
612 * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_1
613 * to map destination to rings
614 */
615#define HAL_REO_REMAP_IX2(_VALUE, _OFFSET) \
616 ((_VALUE) << \
617 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_DEST_RING_MAPPING_ ## \
618 _OFFSET ## _SHFT))
619
620/*
621 * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3
622 * to map destination to rings
623 */
624#define HAL_REO_REMAP_IX3(_VALUE, _OFFSET) \
625 ((_VALUE) << \
626 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_DEST_RING_MAPPING_ ## \
627 _OFFSET ## _SHFT))
Yun Parkfde6b9e2017-06-26 17:13:11 -0700628
629/**
jiad09526ac2019-04-12 17:42:40 +0800630 * hal_reo_read_write_ctrl_ix - Read or write REO_DESTINATION_RING_CTRL_IX
Akshay Kosigi6a206752019-06-10 23:14:52 +0530631 * @hal_soc_hdl: HAL SOC handle
jiad09526ac2019-04-12 17:42:40 +0800632 * @read: boolean value to indicate if read or write
633 * @ix0: pointer to store IX0 reg value
634 * @ix1: pointer to store IX1 reg value
635 * @ix2: pointer to store IX2 reg value
636 * @ix3: pointer to store IX3 reg value
Yun Parkfde6b9e2017-06-26 17:13:11 -0700637 */
Akshay Kosigi6a206752019-06-10 23:14:52 +0530638void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read,
639 uint32_t *ix0, uint32_t *ix1,
640 uint32_t *ix2, uint32_t *ix3);
Yun Parkfde6b9e2017-06-26 17:13:11 -0700641
642/**
Yun Park601d0d82017-08-28 21:49:31 -0700643 * hal_srng_set_hp_paddr() - Set physical address to dest SRNG head pointer
Yun Parkfde6b9e2017-06-26 17:13:11 -0700644 * @sring: sring pointer
645 * @paddr: physical address
646 */
Yun Park601d0d82017-08-28 21:49:31 -0700647extern void hal_srng_dst_set_hp_paddr(struct hal_srng *sring, uint64_t paddr);
648
649/**
650 * hal_srng_dst_init_hp() - Initilaize head pointer with cached head pointer
651 * @srng: sring pointer
652 * @vaddr: virtual address
653 */
654extern void hal_srng_dst_init_hp(struct hal_srng *srng, uint32_t *vaddr);
Yun Parkfde6b9e2017-06-26 17:13:11 -0700655
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700656/**
657 * hal_srng_cleanup - Deinitialize HW SRNG ring.
658 * @hal_soc: Opaque HAL SOC handle
659 * @hal_srng: Opaque HAL SRNG pointer
660 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530661void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl);
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700662
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530663static inline bool hal_srng_initialized(hal_ring_handle_t hal_ring_hdl)
Houston Hoffman648a9182017-05-21 23:27:50 -0700664{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530665 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Houston Hoffman648a9182017-05-21 23:27:50 -0700666
667 return !!srng->initialized;
668}
669
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700670/**
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +0530671 * hal_srng_dst_peek - Check if there are any entries in the ring (peek)
672 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530673 * @hal_ring_hdl: Destination ring pointer
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +0530674 *
675 * Caller takes responsibility for any locking needs.
676 *
677 * Return: Opaque pointer for next ring entry; NULL on failire
678 */
679static inline
Akshay Kosigi8eda31c2019-07-10 14:42:42 +0530680void *hal_srng_dst_peek(hal_soc_handle_t hal_soc_hdl,
681 hal_ring_handle_t hal_ring_hdl)
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +0530682{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530683 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +0530684
685 if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
686 return (void *)(&srng->ring_base_vaddr[srng->u.dst_ring.tp]);
687
688 return NULL;
689}
690
/**
 * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
 * hal_srng_access_start if locked access is required
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 *
 * Snapshots the HW-owned pointer (tail for source rings, head for
 * destination rings) into the SW cache.  For destination rings with
 * HAL_SRNG_CACHED_DESC set, the next descriptor is additionally
 * cache-invalidated and prefetched so the subsequent get_next is cheap.
 *
 * Return: 0 on success; error on failure
 */
static inline int
hal_srng_access_start_unlocked(hal_soc_handle_t hal_soc_hdl,
			       hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *desc;

	/* volatile read: these locations are updated by HW/FW */
	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		srng->u.src_ring.cached_tp =
			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
	else {
		srng->u.dst_ring.cached_hp =
			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);

		if (srng->flags & HAL_SRNG_CACHED_DESC) {
			desc = hal_srng_dst_peek(hal_soc_hdl, hal_ring_hdl);
			if (qdf_likely(desc)) {
				/* Invalidate CPU cache for the descriptor
				 * (DMA-written by HW), then prefetch it.
				 */
				qdf_mem_dma_cache_sync(soc->qdf_dev,
						       qdf_mem_virt_to_phys
						       (desc),
						       QDF_DMA_FROM_DEVICE,
						       (srng->entry_size *
							sizeof(uint32_t)));
				qdf_prefetch(desc);
			}
		}
	}

	return 0;
}
731
732/**
733 * hal_srng_access_start - Start (locked) ring access
734 *
735 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530736 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700737 *
738 * Return: 0 on success; error on failire
739 */
Akshay Kosigi6a206752019-06-10 23:14:52 +0530740static inline int hal_srng_access_start(hal_soc_handle_t hal_soc_hdl,
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530741 hal_ring_handle_t hal_ring_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700742{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530743 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700744
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530745 if (qdf_unlikely(!hal_ring_hdl)) {
Krunal Sonief1f0f92018-09-17 21:09:55 -0700746 qdf_print("Error: Invalid hal_ring\n");
747 return -EINVAL;
748 }
749
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700750 SRNG_LOCK(&(srng->lock));
751
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530752 return hal_srng_access_start_unlocked(hal_soc_hdl, hal_ring_hdl);
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700753}
754
/**
 * hal_srng_dst_get_next - Get next entry from a destination ring and move
 * cached tail pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Destination ring pointer
 *
 * Returns the entry at the current tail and advances the cached tail by
 * one entry (with wrap-around).  With HAL_SRNG_CACHED_DESC set, the
 * following descriptor is cache-invalidated and prefetched ahead of time.
 *
 * Return: Opaque pointer for next ring entry; NULL when the ring is empty
 */
static inline
void *hal_srng_dst_get_next(void *hal_soc,
			    hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	struct hal_soc *soc = (struct hal_soc *)hal_soc;
	uint32_t *desc;
	uint32_t *desc_next;
	uint32_t tp;

	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) {
		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]);
		/* TODO: Using % is expensive, but we have to do this since
		 * size of some SRNG rings is not power of 2 (due to descriptor
		 * sizes). Need to create separate API for rings used
		 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
		 * SW2RXDMA and CE rings)
		 */
		srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
			srng->ring_size;

		if (srng->flags & HAL_SRNG_CACHED_DESC) {
			/* Warm the cache for the descriptor that the next
			 * call will return.
			 */
			tp = srng->u.dst_ring.tp;
			desc_next = &srng->ring_base_vaddr[tp];
			qdf_mem_dma_cache_sync(soc->qdf_dev,
					       qdf_mem_virt_to_phys(desc_next),
					       QDF_DMA_FROM_DEVICE,
					       (srng->entry_size *
						sizeof(uint32_t)));
			qdf_prefetch(desc_next);
		}

		return (void *)desc;
	}

	return NULL;
}
801
802/**
Yun Park601d0d82017-08-28 21:49:31 -0700803 * hal_srng_dst_get_next_hp - Get next entry from a destination ring and move
804 * cached head pointer
805 *
806 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530807 * @hal_ring_hdl: Destination ring pointer
Yun Park601d0d82017-08-28 21:49:31 -0700808 *
809 * Return: Opaque pointer for next ring entry; NULL on failire
810 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530811static inline void *
Akshay Kosigi8eda31c2019-07-10 14:42:42 +0530812hal_srng_dst_get_next_hp(hal_soc_handle_t hal_soc_hdl,
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530813 hal_ring_handle_t hal_ring_hdl)
Yun Park601d0d82017-08-28 21:49:31 -0700814{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530815 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Yun Park601d0d82017-08-28 21:49:31 -0700816 uint32_t *desc;
817 /* TODO: Using % is expensive, but we have to do this since
818 * size of some SRNG rings is not power of 2 (due to descriptor
819 * sizes). Need to create separate API for rings used
820 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
821 * SW2RXDMA and CE rings)
822 */
823 uint32_t next_hp = (srng->u.dst_ring.cached_hp + srng->entry_size) %
824 srng->ring_size;
825
826 if (next_hp != srng->u.dst_ring.tp) {
827 desc = &(srng->ring_base_vaddr[srng->u.dst_ring.cached_hp]);
828 srng->u.dst_ring.cached_hp = next_hp;
829 return (void *)desc;
830 }
831
832 return NULL;
833}
834
835/**
Mohit Khannae5a6e942018-11-28 14:22:48 -0800836 * hal_srng_dst_peek_sync - Check if there are any entries in the ring (peek)
837 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530838 * @hal_ring_hdl: Destination ring pointer
Mohit Khannae5a6e942018-11-28 14:22:48 -0800839 *
840 * Sync cached head pointer with HW.
841 * Caller takes responsibility for any locking needs.
842 *
843 * Return: Opaque pointer for next ring entry; NULL on failire
844 */
845static inline
Akshay Kosigi8eda31c2019-07-10 14:42:42 +0530846void *hal_srng_dst_peek_sync(hal_soc_handle_t hal_soc_hdl,
847 hal_ring_handle_t hal_ring_hdl)
Mohit Khannae5a6e942018-11-28 14:22:48 -0800848{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530849 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Mohit Khannae5a6e942018-11-28 14:22:48 -0800850
851 srng->u.dst_ring.cached_hp =
852 *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
853
854 if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
Karunakar Dasineni6bcbdd52017-08-10 18:31:07 -0700855 return (void *)(&(srng->ring_base_vaddr[srng->u.dst_ring.tp]));
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700856
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700857 return NULL;
858}
859
860/**
Mohit Khannae5a6e942018-11-28 14:22:48 -0800861 * hal_srng_dst_peek_sync_locked - Peek for any entries in the ring
862 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530863 * @hal_ring_hdl: Destination ring pointer
Mohit Khannae5a6e942018-11-28 14:22:48 -0800864 *
865 * Sync cached head pointer with HW.
866 * This function takes up SRNG_LOCK. Should not be called with SRNG lock held.
867 *
868 * Return: Opaque pointer for next ring entry; NULL on failire
869 */
870static inline
Akshay Kosigi8eda31c2019-07-10 14:42:42 +0530871void *hal_srng_dst_peek_sync_locked(hal_soc_handle_t hal_soc_hdl,
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530872 hal_ring_handle_t hal_ring_hdl)
Mohit Khannae5a6e942018-11-28 14:22:48 -0800873{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530874 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Mohit Khannae5a6e942018-11-28 14:22:48 -0800875 void *ring_desc_ptr = NULL;
876
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530877 if (qdf_unlikely(!hal_ring_hdl)) {
Mohit Khannae5a6e942018-11-28 14:22:48 -0800878 qdf_print("Error: Invalid hal_ring\n");
879 return NULL;
880 }
881
882 SRNG_LOCK(&srng->lock);
883
Akshay Kosigi8eda31c2019-07-10 14:42:42 +0530884 ring_desc_ptr = hal_srng_dst_peek_sync(hal_soc_hdl, hal_ring_hdl);
Mohit Khannae5a6e942018-11-28 14:22:48 -0800885
886 SRNG_UNLOCK(&srng->lock);
887
888 return ring_desc_ptr;
889}
890
891/**
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700892 * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
893 * by SW) in destination ring
894 *
895 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530896 * @hal_ring_hdl: Destination ring pointer
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700897 * @sync_hw_ptr: Sync cached head pointer with HW
898 *
899 */
Mohit Khanna80002652019-10-14 23:27:36 -0700900static inline
901uint32_t hal_srng_dst_num_valid(void *hal_soc,
902 hal_ring_handle_t hal_ring_hdl,
903 int sync_hw_ptr)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700904{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530905 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Balamurugan Mahalingamd0159642018-07-11 15:02:29 +0530906 uint32_t hp;
907 uint32_t tp = srng->u.dst_ring.tp;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700908
909 if (sync_hw_ptr) {
Jinwei Chen1cb78172019-02-12 12:36:10 +0800910 hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700911 srng->u.dst_ring.cached_hp = hp;
912 } else {
913 hp = srng->u.dst_ring.cached_hp;
914 }
915
916 if (hp >= tp)
917 return (hp - tp) / srng->entry_size;
918 else
919 return (srng->ring_size - tp + hp) / srng->entry_size;
920}
921
922/**
Mohit Khanna80002652019-10-14 23:27:36 -0700923 * hal_srng_dst_num_valid_locked - Returns num valid entries to be processed
924 *
925 * @hal_soc: Opaque HAL SOC handle
926 * @hal_ring_hdl: Destination ring pointer
927 * @sync_hw_ptr: Sync cached head pointer with HW
928 *
929 * Returns number of valid entries to be processed by the host driver. The
930 * function takes up SRNG lock.
931 *
932 * Return: Number of valid destination entries
933 */
934static inline uint32_t
935hal_srng_dst_num_valid_locked(hal_soc_handle_t hal_soc,
936 hal_ring_handle_t hal_ring_hdl,
937 int sync_hw_ptr)
938{
939 uint32_t num_valid;
940 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
941
942 SRNG_LOCK(&srng->lock);
943 num_valid = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, sync_hw_ptr);
944 SRNG_UNLOCK(&srng->lock);
945
946 return num_valid;
947}
948
949/**
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700950 * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
951 * pointer. This can be used to release any buffers associated with completed
952 * ring entries. Note that this should not be used for posting new descriptor
953 * entries. Posting of new entries should be done only using
954 * hal_srng_src_get_next_reaped when this function is used for reaping.
955 *
956 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530957 * @hal_ring_hdl: Source ring pointer
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700958 *
959 * Return: Opaque pointer for next ring entry; NULL on failire
960 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530961static inline void *
962hal_srng_src_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700963{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530964 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700965 uint32_t *desc;
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -0800966
967 /* TODO: Using % is expensive, but we have to do this since
968 * size of some SRNG rings is not power of 2 (due to descriptor
969 * sizes). Need to create separate API for rings used
970 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
971 * SW2RXDMA and CE rings)
972 */
973 uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
974 srng->ring_size;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700975
976 if (next_reap_hp != srng->u.src_ring.cached_tp) {
977 desc = &(srng->ring_base_vaddr[next_reap_hp]);
978 srng->u.src_ring.reap_hp = next_reap_hp;
979 return (void *)desc;
980 }
981
982 return NULL;
983}
984
985/**
986 * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
987 * already reaped using hal_srng_src_reap_next, for posting new entries to
988 * the ring
989 *
990 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530991 * @hal_ring_hdl: Source ring pointer
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700992 *
993 * Return: Opaque pointer for next (reaped) source ring entry; NULL on failire
994 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530995static inline void *
996hal_srng_src_get_next_reaped(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700997{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530998 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700999 uint32_t *desc;
1000
1001 if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
1002 desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001003 srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
1004 srng->ring_size;
1005
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001006 return (void *)desc;
1007 }
1008
1009 return NULL;
1010}
1011
1012/**
Pamidipati, Vijay980ceb92017-07-05 05:20:17 +05301013 * hal_srng_src_pending_reap_next - Reap next entry from a source ring and
1014 * move reap pointer. This API is used in detach path to release any buffers
1015 * associated with ring entries which are pending reap.
1016 *
1017 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301018 * @hal_ring_hdl: Source ring pointer
Pamidipati, Vijay980ceb92017-07-05 05:20:17 +05301019 *
1020 * Return: Opaque pointer for next ring entry; NULL on failire
1021 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301022static inline void *
1023hal_srng_src_pending_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
Pamidipati, Vijay980ceb92017-07-05 05:20:17 +05301024{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301025 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Pamidipati, Vijay980ceb92017-07-05 05:20:17 +05301026 uint32_t *desc;
1027
1028 uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1029 srng->ring_size;
1030
1031 if (next_reap_hp != srng->u.src_ring.hp) {
1032 desc = &(srng->ring_base_vaddr[next_reap_hp]);
1033 srng->u.src_ring.reap_hp = next_reap_hp;
1034 return (void *)desc;
1035 }
1036
1037 return NULL;
1038}
1039
1040/**
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001041 * hal_srng_src_done_val -
1042 *
1043 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301044 * @hal_ring_hdl: Source ring pointer
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001045 *
1046 * Return: Opaque pointer for next ring entry; NULL on failire
1047 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301048static inline uint32_t
1049hal_srng_src_done_val(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001050{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301051 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001052 /* TODO: Using % is expensive, but we have to do this since
1053 * size of some SRNG rings is not power of 2 (due to descriptor
1054 * sizes). Need to create separate API for rings used
1055 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1056 * SW2RXDMA and CE rings)
1057 */
1058 uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1059 srng->ring_size;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001060
1061 if (next_reap_hp == srng->u.src_ring.cached_tp)
1062 return 0;
1063
1064 if (srng->u.src_ring.cached_tp > next_reap_hp)
1065 return (srng->u.src_ring.cached_tp - next_reap_hp) /
1066 srng->entry_size;
1067 else
1068 return ((srng->ring_size - next_reap_hp) +
1069 srng->u.src_ring.cached_tp) / srng->entry_size;
1070}
sumedh baikady72b1c712017-08-24 12:11:46 -07001071
1072/**
Venkata Sharath Chandra Manchalad8b05b52019-05-31 19:25:33 -07001073 * hal_get_entrysize_from_srng() - Retrieve ring entry size
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301074 * @hal_ring_hdl: Source ring pointer
Venkata Sharath Chandra Manchalad8b05b52019-05-31 19:25:33 -07001075 *
1076 * Return: uint8_t
1077 */
1078static inline
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301079uint8_t hal_get_entrysize_from_srng(hal_ring_handle_t hal_ring_hdl)
Venkata Sharath Chandra Manchalad8b05b52019-05-31 19:25:33 -07001080{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301081 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Venkata Sharath Chandra Manchalad8b05b52019-05-31 19:25:33 -07001082
1083 return srng->entry_size;
1084}
1085
1086/**
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07001087 * hal_get_sw_hptp - Get SW head and tail pointer location for any ring
sumedh baikady72b1c712017-08-24 12:11:46 -07001088 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301089 * @hal_ring_hdl: Source ring pointer
sumedh baikady72b1c712017-08-24 12:11:46 -07001090 * @tailp: Tail Pointer
1091 * @headp: Head Pointer
1092 *
1093 * Return: Update tail pointer and head pointer in arguments.
1094 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301095static inline
1096void hal_get_sw_hptp(void *hal_soc, hal_ring_handle_t hal_ring_hdl,
1097 uint32_t *tailp, uint32_t *headp)
sumedh baikady72b1c712017-08-24 12:11:46 -07001098{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301099 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
sumedh baikady72b1c712017-08-24 12:11:46 -07001100
1101 if (srng->ring_dir == HAL_SRNG_SRC_RING) {
Rakesh Pillai56320c12019-06-05 00:25:48 +05301102 *headp = srng->u.src_ring.hp;
1103 *tailp = *srng->u.src_ring.tp_addr;
sumedh baikady72b1c712017-08-24 12:11:46 -07001104 } else {
Rakesh Pillai56320c12019-06-05 00:25:48 +05301105 *tailp = srng->u.dst_ring.tp;
1106 *headp = *srng->u.dst_ring.hp_addr;
sumedh baikady72b1c712017-08-24 12:11:46 -07001107 }
1108}
1109
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001110/**
1111 * hal_srng_src_get_next - Get next entry from a source ring and move cached tail pointer
1112 *
1113 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301114 * @hal_ring_hdl: Source ring pointer
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001115 *
1116 * Return: Opaque pointer for next ring entry; NULL on failire
1117 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301118static inline
1119void *hal_srng_src_get_next(void *hal_soc,
1120 hal_ring_handle_t hal_ring_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001121{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301122 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001123 uint32_t *desc;
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001124 /* TODO: Using % is expensive, but we have to do this since
1125 * size of some SRNG rings is not power of 2 (due to descriptor
1126 * sizes). Need to create separate API for rings used
1127 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1128 * SW2RXDMA and CE rings)
1129 */
1130 uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
1131 srng->ring_size;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001132
1133 if (next_hp != srng->u.src_ring.cached_tp) {
1134 desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
1135 srng->u.src_ring.hp = next_hp;
1136 /* TODO: Since reap function is not used by all rings, we can
1137 * remove the following update of reap_hp in this function
1138 * if we can ensure that only hal_srng_src_get_next_reaped
1139 * is used for the rings requiring reap functionality
1140 */
1141 srng->u.src_ring.reap_hp = next_hp;
1142 return (void *)desc;
1143 }
1144
1145 return NULL;
1146}
1147
1148/**
1149 * hal_srng_src_peek - Get next entry from a ring without moving head pointer.
1150 * hal_srng_src_get_next should be called subsequently to move the head pointer
1151 *
1152 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301153 * @hal_ring_hdl: Source ring pointer
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001154 *
1155 * Return: Opaque pointer for next ring entry; NULL on failire
1156 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301157static inline
1158void *hal_srng_src_peek(hal_soc_handle_t hal_soc_hdl,
1159 hal_ring_handle_t hal_ring_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001160{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301161 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001162 uint32_t *desc;
1163
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001164 /* TODO: Using % is expensive, but we have to do this since
1165 * size of some SRNG rings is not power of 2 (due to descriptor
1166 * sizes). Need to create separate API for rings used
1167 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1168 * SW2RXDMA and CE rings)
1169 */
1170 if (((srng->u.src_ring.hp + srng->entry_size) %
1171 srng->ring_size) != srng->u.src_ring.cached_tp) {
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001172 desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
1173 return (void *)desc;
1174 }
1175
1176 return NULL;
1177}
1178
1179/**
1180 * hal_srng_src_num_avail - Returns number of available entries in src ring
1181 *
1182 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301183 * @hal_ring_hdl: Source ring pointer
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001184 * @sync_hw_ptr: Sync cached tail pointer with HW
1185 *
1186 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301187static inline uint32_t
1188hal_srng_src_num_avail(void *hal_soc,
1189 hal_ring_handle_t hal_ring_hdl, int sync_hw_ptr)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001190{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301191 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Balamurugan Mahalingamd0159642018-07-11 15:02:29 +05301192 uint32_t tp;
1193 uint32_t hp = srng->u.src_ring.hp;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001194
1195 if (sync_hw_ptr) {
1196 tp = *(srng->u.src_ring.tp_addr);
1197 srng->u.src_ring.cached_tp = tp;
1198 } else {
1199 tp = srng->u.src_ring.cached_tp;
1200 }
1201
1202 if (tp > hp)
1203 return ((tp - hp) / srng->entry_size) - 1;
1204 else
1205 return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
1206}
1207
1208/**
1209 * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
1210 * ring head/tail pointers to HW.
1211 * This should be used only if hal_srng_access_start_unlocked to start ring
1212 * access
1213 *
1214 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301215 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001216 *
1217 * Return: 0 on success; error on failire
1218 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301219static inline void
1220hal_srng_access_end_unlocked(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001221{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301222 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001223
1224 /* TODO: See if we need a write memory barrier here */
1225 if (srng->flags & HAL_SRNG_LMAC_RING) {
1226 /* For LMAC rings, ring pointer updates are done through FW and
1227 * hence written to a shared memory location that is read by FW
1228 */
Kai Chen6eca1a62017-01-12 10:17:53 -08001229 if (srng->ring_dir == HAL_SRNG_SRC_RING) {
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001230 *(srng->u.src_ring.hp_addr) = srng->u.src_ring.hp;
Kai Chen6eca1a62017-01-12 10:17:53 -08001231 } else {
1232 *(srng->u.dst_ring.tp_addr) = srng->u.dst_ring.tp;
1233 }
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001234 } else {
1235 if (srng->ring_dir == HAL_SRNG_SRC_RING)
Houston Hoffman8bbc9902017-04-10 14:09:51 -07001236 hal_write_address_32_mb(hal_soc,
1237 srng->u.src_ring.hp_addr,
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001238 srng->u.src_ring.hp);
1239 else
Houston Hoffman8bbc9902017-04-10 14:09:51 -07001240 hal_write_address_32_mb(hal_soc,
1241 srng->u.dst_ring.tp_addr,
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001242 srng->u.dst_ring.tp);
1243 }
1244}
1245
1246/**
1247 * hal_srng_access_end - Unlock ring access and update cached ring head/tail
1248 * pointers to HW
1249 * This should be used only if hal_srng_access_start to start ring access
1250 *
1251 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301252 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001253 *
1254 * Return: 0 on success; error on failire
1255 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301256static inline void
1257hal_srng_access_end(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001258{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301259 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001260
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301261 if (qdf_unlikely(!hal_ring_hdl)) {
Krunal Sonief1f0f92018-09-17 21:09:55 -07001262 qdf_print("Error: Invalid hal_ring\n");
1263 return;
1264 }
1265
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301266 hal_srng_access_end_unlocked(hal_soc, hal_ring_hdl);
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001267 SRNG_UNLOCK(&(srng->lock));
1268}
1269
Pamidipati, Vijaydfe618e2016-10-09 09:17:24 +05301270/**
1271 * hal_srng_access_end_reap - Unlock ring access
1272 * This should be used only if hal_srng_access_start to start ring access
1273 * and should be used only while reaping SRC ring completions
1274 *
1275 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301276 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
Pamidipati, Vijaydfe618e2016-10-09 09:17:24 +05301277 *
1278 * Return: 0 on success; error on failire
1279 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301280static inline void
1281hal_srng_access_end_reap(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
Pamidipati, Vijaydfe618e2016-10-09 09:17:24 +05301282{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301283 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Yun Park601d0d82017-08-28 21:49:31 -07001284
Pamidipati, Vijaydfe618e2016-10-09 09:17:24 +05301285 SRNG_UNLOCK(&(srng->lock));
1286}
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001287
1288/* TODO: Check if the following definitions is available in HW headers */
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001289#define WBM_IDLE_SCATTER_BUF_SIZE 32704
1290#define NUM_MPDUS_PER_LINK_DESC 6
1291#define NUM_MSDUS_PER_LINK_DESC 7
1292#define REO_QUEUE_DESC_ALIGN 128
1293
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001294#define LINK_DESC_ALIGN 128
1295
Pramod Simhaccb15fb2017-06-19 12:21:13 -07001296#define ADDRESS_MATCH_TAG_VAL 0x5
/* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
 * case of TX_MPDU_QUEUE_EXT. We are defining a common average count here
 */
1300#define NUM_MPDU_LINKS_PER_QUEUE_DESC 12
1301
1302/* TODO: Check with HW team on the scatter buffer size supported. As per WBM
1303 * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
1304 * should be specified in 16 word units. But the number of bits defined for
1305 * this field in HW header files is 5.
1306 */
1307#define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8
1308
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001309
1310/**
1311 * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
1312 * in an idle list
1313 *
1314 * @hal_soc: Opaque HAL SOC handle
1315 *
1316 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301317static inline
1318uint32_t hal_idle_list_scatter_buf_size(hal_soc_handle_t hal_soc_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001319{
1320 return WBM_IDLE_SCATTER_BUF_SIZE;
1321}
1322
1323/**
1324 * hal_get_link_desc_size - Get the size of each link descriptor
1325 *
1326 * @hal_soc: Opaque HAL SOC handle
1327 *
1328 */
Akshay Kosigi6a206752019-06-10 23:14:52 +05301329static inline uint32_t hal_get_link_desc_size(hal_soc_handle_t hal_soc_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001330{
Akshay Kosigi6a206752019-06-10 23:14:52 +05301331 struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1332
Balamurugan Mahalingamd0159642018-07-11 15:02:29 +05301333 if (!hal_soc || !hal_soc->ops) {
1334 qdf_print("Error: Invalid ops\n");
1335 QDF_BUG(0);
1336 return -EINVAL;
1337 }
1338 if (!hal_soc->ops->hal_get_link_desc_size) {
1339 qdf_print("Error: Invalid function pointer\n");
1340 QDF_BUG(0);
1341 return -EINVAL;
1342 }
1343 return hal_soc->ops->hal_get_link_desc_size();
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001344}
1345
1346/**
1347 * hal_get_link_desc_align - Get the required start address alignment for
1348 * link descriptors
1349 *
1350 * @hal_soc: Opaque HAL SOC handle
1351 *
1352 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301353static inline
1354uint32_t hal_get_link_desc_align(hal_soc_handle_t hal_soc_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001355{
1356 return LINK_DESC_ALIGN;
1357}
1358
1359/**
1360 * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
1361 *
1362 * @hal_soc: Opaque HAL SOC handle
1363 *
1364 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301365static inline
1366uint32_t hal_num_mpdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001367{
1368 return NUM_MPDUS_PER_LINK_DESC;
1369}
1370
1371/**
1372 * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
1373 *
1374 * @hal_soc: Opaque HAL SOC handle
1375 *
1376 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301377static inline
1378uint32_t hal_num_msdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001379{
1380 return NUM_MSDUS_PER_LINK_DESC;
1381}
1382
1383/**
1384 * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
1385 * descriptor can hold
1386 *
1387 * @hal_soc: Opaque HAL SOC handle
1388 *
1389 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301390static inline
1391uint32_t hal_num_mpdu_links_per_queue_desc(hal_soc_handle_t hal_soc_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001392{
1393 return NUM_MPDU_LINKS_PER_QUEUE_DESC;
1394}
1395
1396/**
1397 * hal_idle_list_scatter_buf_num_entries - Get the number of link desc entries
1398 * that the given buffer size
1399 *
1400 * @hal_soc: Opaque HAL SOC handle
1401 * @scatter_buf_size: Size of scatter buffer
1402 *
1403 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301404static inline
1405uint32_t hal_idle_scatter_buf_num_entries(hal_soc_handle_t hal_soc_hdl,
1406 uint32_t scatter_buf_size)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001407{
1408 return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301409 hal_srng_get_entrysize(hal_soc_hdl, WBM_IDLE_LINK);
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001410}
1411
1412/**
Pramod Simhaccb15fb2017-06-19 12:21:13 -07001413 * hal_idle_list_num_scatter_bufs - Get the number of sctater buffer
1414 * each given buffer size
1415 *
1416 * @hal_soc: Opaque HAL SOC handle
1417 * @total_mem: size of memory to be scattered
1418 * @scatter_buf_size: Size of scatter buffer
1419 *
1420 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301421static inline
1422uint32_t hal_idle_list_num_scatter_bufs(hal_soc_handle_t hal_soc_hdl,
1423 uint32_t total_mem,
1424 uint32_t scatter_buf_size)
Pramod Simhaccb15fb2017-06-19 12:21:13 -07001425{
1426 uint8_t rem = (total_mem % (scatter_buf_size -
1427 WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) ? 1 : 0;
1428
1429 uint32_t num_scatter_bufs = (total_mem / (scatter_buf_size -
1430 WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) + rem;
1431
1432 return num_scatter_bufs;
1433}
1434
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001435enum hal_pn_type {
1436 HAL_PN_NONE,
1437 HAL_PN_WPA,
1438 HAL_PN_WAPI_EVEN,
1439 HAL_PN_WAPI_UNEVEN,
1440};
1441
1442#define HAL_RX_MAX_BA_WINDOW 256
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001443
1444/**
1445 * hal_get_reo_qdesc_align - Get start address alignment for reo
1446 * queue descriptors
1447 *
1448 * @hal_soc: Opaque HAL SOC handle
1449 *
1450 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301451static inline
1452uint32_t hal_get_reo_qdesc_align(hal_soc_handle_t hal_soc_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001453{
1454 return REO_QUEUE_DESC_ALIGN;
1455}
1456
1457/**
1458 * hal_reo_qdesc_setup - Setup HW REO queue descriptor
1459 *
1460 * @hal_soc: Opaque HAL SOC handle
1461 * @ba_window_size: BlockAck window size
1462 * @start_seq: Starting sequence number
1463 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
1464 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
1465 * @pn_type: PN type (one of the types defined in 'enum hal_pn_type')
1466 *
1467 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301468void hal_reo_qdesc_setup(hal_soc_handle_t hal_soc_hdl,
1469 int tid, uint32_t ba_window_size,
1470 uint32_t start_seq, void *hw_qdesc_vaddr,
1471 qdf_dma_addr_t hw_qdesc_paddr,
1472 int pn_type);
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001473
1474/**
1475 * hal_srng_get_hp_addr - Get head pointer physical address
1476 *
1477 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301478 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001479 *
1480 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301481static inline qdf_dma_addr_t
1482hal_srng_get_hp_addr(void *hal_soc,
1483 hal_ring_handle_t hal_ring_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001484{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301485 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001486 struct hal_soc *hal = (struct hal_soc *)hal_soc;
1487
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001488 if (srng->ring_dir == HAL_SRNG_SRC_RING) {
Manoj Ekbote7980f3e2017-02-06 15:30:00 -08001489 return hal->shadow_wrptr_mem_paddr +
1490 ((unsigned long)(srng->u.src_ring.hp_addr) -
1491 (unsigned long)(hal->shadow_wrptr_mem_vaddr));
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001492 } else {
Manoj Ekbote7980f3e2017-02-06 15:30:00 -08001493 return hal->shadow_rdptr_mem_paddr +
1494 ((unsigned long)(srng->u.dst_ring.hp_addr) -
1495 (unsigned long)(hal->shadow_rdptr_mem_vaddr));
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001496 }
1497}
1498
1499/**
1500 * hal_srng_get_tp_addr - Get tail pointer physical address
1501 *
1502 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301503 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001504 *
1505 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301506static inline qdf_dma_addr_t
1507hal_srng_get_tp_addr(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001508{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301509 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001510 struct hal_soc *hal = (struct hal_soc *)hal_soc;
1511
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001512 if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1513 return hal->shadow_rdptr_mem_paddr +
1514 ((unsigned long)(srng->u.src_ring.tp_addr) -
1515 (unsigned long)(hal->shadow_rdptr_mem_vaddr));
1516 } else {
1517 return hal->shadow_wrptr_mem_paddr +
1518 ((unsigned long)(srng->u.dst_ring.tp_addr) -
1519 (unsigned long)(hal->shadow_wrptr_mem_vaddr));
1520 }
1521}
1522
1523/**
Mohit Khanna80002652019-10-14 23:27:36 -07001524 * hal_srng_get_num_entries - Get total entries in the HAL Srng
1525 *
1526 * @hal_soc: Opaque HAL SOC handle
1527 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1528 *
1529 * Return: total number of entries in hal ring
1530 */
1531static inline
1532uint32_t hal_srng_get_num_entries(hal_soc_handle_t hal_soc_hdl,
1533 hal_ring_handle_t hal_ring_hdl)
1534{
1535 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1536
1537 return srng->num_entries;
1538}
1539
1540/**
Jeff Johnsonf7aed492018-05-12 11:14:55 -07001541 * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001542 *
1543 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301544 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001545 * @ring_params: SRNG parameters will be returned through this structure
1546 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301547void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl,
1548 hal_ring_handle_t hal_ring_hdl,
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301549 struct hal_srng_params *ring_params);
Ravi Joshi36f68ad2016-11-09 17:09:47 -08001550
/**
 * hal_get_meminfo - Retrieve hal memory base address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @mem: pointer to structure to be updated with hal mem info
 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301557void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem);
Balamurugan Mahalingamd0159642018-07-11 15:02:29 +05301558
/**
 * hal_get_target_type - Return target type
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: target type value
 */
Akshay Kosigi6a206752019-06-10 23:14:52 +05301564uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl);
sumedh baikady1f8f3192018-02-20 17:30:32 -08001565
1566/**
1567 * hal_get_ba_aging_timeout - Retrieve BA aging timeout
1568 *
1569 * @hal_soc: Opaque HAL SOC handle
1570 * @ac: Access category
1571 * @value: timeout duration in millisec
1572 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301573void hal_get_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
sumedh baikady1f8f3192018-02-20 17:30:32 -08001574 uint32_t *value);
/**
 * hal_set_ba_aging_timeout - Set BA aging timeout
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ac: Access category
 * @value: timeout duration in millisec
 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301582void hal_set_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
sumedh baikady1f8f3192018-02-20 17:30:32 -08001583 uint32_t value);
/**
 * hal_srng_dst_hw_init - Private function to initialize SRNG
 * destination ring HW
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 *
 * Return: None
 */
static inline void hal_srng_dst_hw_init(struct hal_soc *hal,
	struct hal_srng *srng)
{
	/* Dispatch to the target-specific implementation in the ops table */
	hal->ops->hal_srng_dst_hw_init(hal, srng);
}
sumedh baikady1f8f3192018-02-20 17:30:32 -08001595
/**
 * hal_srng_src_hw_init - Private function to initialize SRNG
 * source ring HW
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 *
 * Return: None
 */
static inline void hal_srng_src_hw_init(struct hal_soc *hal,
	struct hal_srng *srng)
{
	/* Dispatch to the target-specific implementation in the ops table */
	hal->ops->hal_srng_src_hw_init(hal, srng);
}
1607
1608/**
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07001609 * hal_get_hw_hptp() - Get HW head and tail pointer value for any ring
1610 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301611 * @hal_ring_hdl: Source ring pointer
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07001612 * @headp: Head Pointer
1613 * @tailp: Tail Pointer
1614 * @ring_type: Ring
1615 *
1616 * Return: Update tail pointer and head pointer in arguments.
1617 */
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301618static inline
1619void hal_get_hw_hptp(hal_soc_handle_t hal_soc_hdl,
1620 hal_ring_handle_t hal_ring_hdl,
1621 uint32_t *headp, uint32_t *tailp,
1622 uint8_t ring_type)
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07001623{
Akshay Kosigi6a206752019-06-10 23:14:52 +05301624 struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1625
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301626 hal_soc->ops->hal_get_hw_hptp(hal_soc, hal_ring_hdl,
Akshay Kosigi6a206752019-06-10 23:14:52 +05301627 headp, tailp, ring_type);
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07001628}
1629
1630/**
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +05301631 * hal_reo_setup - Initialize HW REO block
1632 *
1633 * @hal_soc: Opaque HAL SOC handle
1634 * @reo_params: parameters needed by HAL for REO config
1635 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301636static inline void hal_reo_setup(hal_soc_handle_t hal_soc_hdl,
1637 void *reoparams)
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +05301638{
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301639 struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +05301640
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301641 hal_soc->ops->hal_reo_setup(hal_soc, reoparams);
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +05301642}
1643
1644/**
1645 * hal_setup_link_idle_list - Setup scattered idle list using the
1646 * buffer list provided
1647 *
1648 * @hal_soc: Opaque HAL SOC handle
1649 * @scatter_bufs_base_paddr: Array of physical base addresses
1650 * @scatter_bufs_base_vaddr: Array of virtual base addresses
1651 * @num_scatter_bufs: Number of scatter buffers in the above lists
1652 * @scatter_buf_size: Size of each scatter buffer
1653 * @last_buf_end_offset: Offset to the last entry
1654 * @num_entries: Total entries of all scatter bufs
1655 *
1656 */
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301657static inline
1658void hal_setup_link_idle_list(hal_soc_handle_t hal_soc_hdl,
1659 qdf_dma_addr_t scatter_bufs_base_paddr[],
1660 void *scatter_bufs_base_vaddr[],
1661 uint32_t num_scatter_bufs,
1662 uint32_t scatter_buf_size,
1663 uint32_t last_buf_end_offset,
1664 uint32_t num_entries)
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +05301665{
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301666 struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +05301667
Akshay Kosigi8eda31c2019-07-10 14:42:42 +05301668 hal_soc->ops->hal_setup_link_idle_list(hal_soc, scatter_bufs_base_paddr,
Balamurugan Mahalingam5d806412018-07-30 18:04:15 +05301669 scatter_bufs_base_vaddr, num_scatter_bufs,
1670 scatter_buf_size, last_buf_end_offset,
1671 num_entries);
1672
1673}
Tallapragada Kalyaneff377a2019-01-09 19:13:19 +05301674
1675/**
1676 * hal_srng_dump_ring_desc() - Dump ring descriptor info
1677 *
1678 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301679 * @hal_ring_hdl: Source ring pointer
Tallapragada Kalyaneff377a2019-01-09 19:13:19 +05301680 * @ring_desc: Opaque ring descriptor handle
1681 */
Akshay Kosigia870c612019-07-08 23:10:30 +05301682static inline void hal_srng_dump_ring_desc(hal_soc_handle_t hal_soc_hdl,
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301683 hal_ring_handle_t hal_ring_hdl,
Akshay Kosigi91c56522019-07-02 11:49:39 +05301684 hal_ring_desc_t ring_desc)
Tallapragada Kalyaneff377a2019-01-09 19:13:19 +05301685{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301686 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Tallapragada Kalyaneff377a2019-01-09 19:13:19 +05301687
Saket Jha16d84322019-07-11 16:09:41 -07001688 QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
Tallapragada Kalyaneff377a2019-01-09 19:13:19 +05301689 ring_desc, (srng->entry_size << 2));
1690}
1691
1692/**
1693 * hal_srng_dump_ring() - Dump last 128 descs of the ring
1694 *
1695 * @hal_soc: Opaque HAL SOC handle
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301696 * @hal_ring_hdl: Source ring pointer
Tallapragada Kalyaneff377a2019-01-09 19:13:19 +05301697 */
Akshay Kosigia870c612019-07-08 23:10:30 +05301698static inline void hal_srng_dump_ring(hal_soc_handle_t hal_soc_hdl,
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301699 hal_ring_handle_t hal_ring_hdl)
Tallapragada Kalyaneff377a2019-01-09 19:13:19 +05301700{
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301701 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
Tallapragada Kalyaneff377a2019-01-09 19:13:19 +05301702 uint32_t *desc;
1703 uint32_t tp, i;
1704
1705 tp = srng->u.dst_ring.tp;
1706
1707 for (i = 0; i < 128; i++) {
1708 if (!tp)
1709 tp = srng->ring_size;
1710
1711 desc = &srng->ring_base_vaddr[tp - srng->entry_size];
1712 QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP,
nwzhaoea2ffbb2019-01-31 11:43:17 -08001713 QDF_TRACE_LEVEL_DEBUG,
Tallapragada Kalyaneff377a2019-01-09 19:13:19 +05301714 desc, (srng->entry_size << 2));
1715
1716 tp -= srng->entry_size;
1717 }
1718}
1719
/*
 * hal_rxdma_desc_to_hal_ring_desc - API to convert rxdma ring desc
 * to opaque dp_ring desc type
 * @ring_desc - rxdma ring desc
 *
 * Return: hal_ring_desc_t type
 */
static inline
hal_ring_desc_t hal_rxdma_desc_to_hal_ring_desc(hal_rxdma_desc_t ring_desc)
{
	/* Pure type conversion between opaque handle types; no data touched */
	return (hal_ring_desc_t)ring_desc;
}
Sravan Kumar Kairam78b01a12019-09-16 14:22:55 +05301732
1733/**
1734 * hal_srng_set_event() - Set hal_srng event
1735 * @hal_ring_hdl: Source ring pointer
1736 * @event: SRNG ring event
1737 *
1738 * Return: None
1739 */
1740static inline void hal_srng_set_event(hal_ring_handle_t hal_ring_hdl, int event)
1741{
1742 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1743
1744 qdf_atomic_set_bit(event, &srng->srng_event);
1745}
1746
1747/**
1748 * hal_srng_clear_event() - Clear hal_srng event
1749 * @hal_ring_hdl: Source ring pointer
1750 * @event: SRNG ring event
1751 *
1752 * Return: None
1753 */
1754static inline
1755void hal_srng_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
1756{
1757 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1758
1759 qdf_atomic_clear_bit(event, &srng->srng_event);
1760}
1761
1762/**
1763 * hal_srng_get_clear_event() - Clear srng event and return old value
1764 * @hal_ring_hdl: Source ring pointer
1765 * @event: SRNG ring event
1766 *
1767 * Return: Return old event value
1768 */
1769static inline
1770int hal_srng_get_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
1771{
1772 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1773
1774 return qdf_atomic_test_and_clear_bit(event, &srng->srng_event);
1775}
1776
1777/**
1778 * hal_srng_set_flush_last_ts() - Record last flush time stamp
1779 * @hal_ring_hdl: Source ring pointer
1780 *
1781 * Return: None
1782 */
1783static inline void hal_srng_set_flush_last_ts(hal_ring_handle_t hal_ring_hdl)
1784{
1785 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1786
1787 srng->last_flush_ts = qdf_get_log_timestamp();
1788}
1789
1790/**
1791 * hal_srng_inc_flush_cnt() - Increment flush counter
1792 * @hal_ring_hdl: Source ring pointer
1793 *
1794 * Return: None
1795 */
1796static inline void hal_srng_inc_flush_cnt(hal_ring_handle_t hal_ring_hdl)
1797{
1798 struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1799
1800 srng->flush_count++;
1801}
#endif /* _HAL_API_H_ */