blob: 28fe0504e961a6ea8043b5b03a6fcb1c49ed6b7e [file] [log] [blame]
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07001/*
Houston Hoffman5141f9d2017-01-05 10:49:17 -08002 * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -07003 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
6 * met:
7 * * Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * * Redistributions in binary form must reproduce the above
10 * copyright notice, this list of conditions and the following
11 * disclaimer in the documentation and/or other materials provided
12 * with the distribution.
13 * * Neither the name of The Linux Foundation nor the names of its
14 * contributors may be used to endorse or promote products derived
15 * from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
18 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
24 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
26 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
27 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30#ifndef _HAL_API_H_
31#define _HAL_API_H_
32
33#include "qdf_types.h"
34#include "hal_internal.h"
35#include "hif_io32.h"
Leo Chang5ea93a42016-11-03 12:39:49 -070036#include "rx_msdu_link.h"
37#include "rx_reo_queue.h"
38#include "rx_reo_queue_ext.h"
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -070039
/**
 * hal_attach - Initialize HAL layer
 * @hif_handle: Opaque HIF handle
 * @qdf_dev: QDF device
 *
 * Return: Opaque HAL SOC handle
 *	   NULL on failure (if given ring is not available)
 *
 * This function should be called as part of HIF initialization (for accessing
 * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
 */
51extern void *hal_attach(void *hif_handle, qdf_device_t qdf_dev);
52
53/**
54 * hal_detach - Detach HAL layer
55 * @hal_soc: HAL SOC handle
56 *
57 * This function should be called as part of HIF detach
58 *
59 */
60extern void hal_detach(void *hal_soc);
61
/* SRNG type to be passed in APIs hal_srng_get_entrysize and hal_srng_setup */
enum hal_ring_type {
	/* REO (reorder engine) rings */
	REO_DST,
	REO_EXCEPTION,
	REO_REINJECT,
	REO_CMD,
	REO_STATUS,
	/* TCL rings */
	TCL_DATA,
	TCL_CMD,
	TCL_STATUS,
	/* Copy engine rings */
	CE_SRC,
	CE_DST,
	CE_DST_STATUS,
	/* WBM (buffer manager) rings */
	WBM_IDLE_LINK,
	SW2WBM_RELEASE,
	WBM2SW_RELEASE,
	/* RXDMA rings */
	RXDMA_BUF,
	RXDMA_DST,
	RXDMA_MONITOR_BUF,
	RXDMA_MONITOR_STATUS,
	RXDMA_MONITOR_DST,
	/* Keep last: count of ring types, not a valid ring type itself */
	MAX_RING_TYPES
};
85
86/* SRNG flags passed in hal_srng_params.flags */
87#define HAL_SRNG_MSI_SWAP 0x00000008
88#define HAL_SRNG_RING_PTR_SWAP 0x00000010
89#define HAL_SRNG_DATA_TLV_SWAP 0x00000020
90#define HAL_SRNG_LOW_THRES_INTR_ENABLE 0x00010000
91#define HAL_SRNG_MSI_INTR 0x00020000
92
93/**
94 * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
95 * used by callers for calculating the size of memory to be allocated before
96 * calling hal_srng_setup to setup the ring
97 *
98 * @hal_soc: Opaque HAL SOC handle
99 * @ring_type: one of the types from hal_ring_type
100 *
101 */
102extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);
103
Karunakar Dasinenid0ea21f2017-01-31 22:58:15 -0800104/**
105 * hal_srng_max_entries - Returns maximum possible number of ring entries
106 * @hal_soc: Opaque HAL SOC handle
107 * @ring_type: one of the types from hal_ring_type
108 *
109 * Return: Maximum number of entries for the given ring_type
110 */
111uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);
112
/* SRNG parameters to be passed to hal_srng_setup */
struct hal_srng_params {
	/* Physical base address of the ring */
	qdf_dma_addr_t ring_base_paddr;
	/* Virtual base address of the ring */
	void *ring_base_vaddr;
	/* Number of entries in ring */
	uint32_t num_entries;
	/* max transfer length */
	uint16_t max_buffer_length;
	/* MSI Address */
	qdf_dma_addr_t msi_addr;
	/* MSI data */
	uint32_t msi_data;
	/* Interrupt timer threshold - in micro seconds */
	uint32_t intr_timer_thres_us;
	/* Interrupt batch counter threshold - in number of ring entries */
	uint32_t intr_batch_cntr_thres_entries;
	/* Low threshold - in number of ring entries
	 * (valid for src rings only; presumably paired with the
	 * HAL_SRNG_LOW_THRES_INTR_ENABLE flag - confirm in hal_srng_setup)
	 */
	uint32_t low_threshold;
	/* Misc flags - HAL_SRNG_* bits defined above */
	uint32_t flags;
	/* Unique ring id */
	uint8_t ring_id;
};
140
Houston Hoffman5141f9d2017-01-05 10:49:17 -0800141/* hal_construct_shadow_config() - initialize the shadow registers for dp rings
142 * @hal_soc: hal handle
143 *
144 * Return: QDF_STATUS_OK on success
145 */
146extern QDF_STATUS hal_construct_shadow_config(void *hal_soc);
147
148/* hal_set_one_shadow_config() - add a config for the specified ring
149 * @hal_soc: hal handle
150 * @ring_type: ring type
151 * @ring_num: ring num
152 *
153 * The ring type and ring num uniquely specify the ring. After this call,
154 * the hp/tp will be added as the next entry int the shadow register
155 * configuration table. The hal code will use the shadow register address
156 * in place of the hp/tp address.
157 *
158 * This function is exposed, so that the CE module can skip configuring shadow
159 * registers for unused ring and rings assigned to the firmware.
160 *
161 * Return: QDF_STATUS_OK on success
162 */
163extern QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
164 int ring_num);
165/**
166 * hal_get_shadow_config() - retrieve the config table
167 * @hal_soc: hal handle
168 * @shadow_config: will point to the table after
169 * @num_shadow_registers_configured: will contain the number of valid entries
170 */
171extern void hal_get_shadow_config(void *hal_soc,
172 struct pld_shadow_reg_v2_cfg **shadow_config,
173 int *num_shadow_registers_configured);
/**
 * hal_srng_setup - Initialize HW SRNG ring.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of
 *		same type (starting from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 *
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
 * structure. Ring base address should be 8 byte aligned and size of each ring
 * entry should be queried using the API hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *		 NULL on failure (if given ring is not available)
 */
193extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
194 int mac_id, struct hal_srng_params *ring_params);
195
196/**
197 * hal_srng_cleanup - Deinitialize HW SRNG ring.
198 * @hal_soc: Opaque HAL SOC handle
199 * @hal_srng: Opaque HAL SRNG pointer
200 */
201extern void hal_srng_cleanup(void *hal_soc, void *hal_srng);
202
203/**
204 * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
205 * hal_srng_access_start if locked access is required
206 *
207 * @hal_soc: Opaque HAL SOC handle
208 * @hal_ring: Ring pointer (Source or Destination ring)
209 *
210 * Return: 0 on success; error on failire
211 */
212static inline int hal_srng_access_start_unlocked(void *hal_soc, void *hal_ring)
213{
214 struct hal_srng *srng = (struct hal_srng *)hal_ring;
215
216 if (srng->ring_dir == HAL_SRNG_SRC_RING)
217 srng->u.src_ring.cached_tp =
218 *(volatile uint32_t *)(srng->u.src_ring.tp_addr);
219 else
220 srng->u.dst_ring.cached_hp =
221 *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
222
223 return 0;
224}
225
226/**
227 * hal_srng_access_start - Start (locked) ring access
228 *
229 * @hal_soc: Opaque HAL SOC handle
230 * @hal_ring: Ring pointer (Source or Destination ring)
231 *
232 * Return: 0 on success; error on failire
233 */
234static inline int hal_srng_access_start(void *hal_soc, void *hal_ring)
235{
236 struct hal_srng *srng = (struct hal_srng *)hal_ring;
237
238 SRNG_LOCK(&(srng->lock));
239
240 return hal_srng_access_start_unlocked(hal_soc, hal_ring);
241}
242
/**
 * hal_srng_dst_get_next - Get next entry from a destination ring and move
 * cached tail pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_dst_get_next(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	volatile uint32_t *desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]);
	uint32_t desc_loop_cnt;

	/* The last dword of each entry carries a loop count; the entry at
	 * tp is considered valid only when that count matches the SW-tracked
	 * loop count for this ring (presumably stamped by HW each pass
	 * around the ring - confirm against HW descriptor spec)
	 */
	desc_loop_cnt = (desc[srng->entry_size - 1] & SRNG_LOOP_CNT_MASK)
		>> SRNG_LOOP_CNT_LSB;

	if (srng->u.dst_ring.loop_cnt == desc_loop_cnt) {
		/* TODO: Using % is expensive, but we have to do this since
		 * size of some SRNG rings is not power of 2 (due to descriptor
		 * sizes). Need to create separate API for rings used
		 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
		 * SW2RXDMA and CE rings)
		 */
		srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
			srng->ring_size;

		/* Advance the SW loop count whenever tp wraps back to 0 */
		srng->u.dst_ring.loop_cnt = (srng->u.dst_ring.loop_cnt +
			!srng->u.dst_ring.tp) &
			(SRNG_LOOP_CNT_MASK >> SRNG_LOOP_CNT_LSB);
		/* TODO: Confirm if loop count mask is same for all rings */
		return (void *)desc;
	}
	return NULL;
}
279
280/**
281 * hal_srng_dst_peek - Get next entry from a ring without moving tail pointer.
282 * hal_srng_dst_get_next should be called subsequently to move the tail pointer
283 * TODO: See if we need an optimized version of get_next that doesn't check for
284 * loop_cnt
285 *
286 * @hal_soc: Opaque HAL SOC handle
287 * @hal_ring: Destination ring pointer
288 *
289 * Return: Opaque pointer for next ring entry; NULL on failire
290 */
291static inline void *hal_srng_dst_peek(void *hal_soc, void *hal_ring)
292{
293 struct hal_srng *srng = (struct hal_srng *)hal_ring;
294 uint32_t *desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]);
295 uint32_t desc_loop_cnt;
296
297 desc_loop_cnt = (desc[srng->entry_size - 1] & SRNG_LOOP_CNT_MASK)
298 >> SRNG_LOOP_CNT_LSB;
299
300 if (srng->u.dst_ring.loop_cnt == desc_loop_cnt)
301 return (void *)desc;
302 return NULL;
303}
304
305/**
306 * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
307 * by SW) in destination ring
308 *
309 * @hal_soc: Opaque HAL SOC handle
310 * @hal_ring: Destination ring pointer
311 * @sync_hw_ptr: Sync cached head pointer with HW
312 *
313 */
314static inline uint32_t hal_srng_dst_num_valid(void *hal_soc, void *hal_ring,
315 int sync_hw_ptr)
316{
317 struct hal_srng *srng = (struct hal_srng *)hal_ring;
318 uint32 hp;
319 uint32 tp = srng->u.dst_ring.tp;
320
321 if (sync_hw_ptr) {
322 hp = *(srng->u.dst_ring.hp_addr);
323 srng->u.dst_ring.cached_hp = hp;
324 } else {
325 hp = srng->u.dst_ring.cached_hp;
326 }
327
328 if (hp >= tp)
329 return (hp - tp) / srng->entry_size;
330 else
331 return (srng->ring_size - tp + hp) / srng->entry_size;
332}
333
334/**
335 * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
336 * pointer. This can be used to release any buffers associated with completed
337 * ring entries. Note that this should not be used for posting new descriptor
338 * entries. Posting of new entries should be done only using
339 * hal_srng_src_get_next_reaped when this function is used for reaping.
340 *
341 * @hal_soc: Opaque HAL SOC handle
342 * @hal_ring: Source ring pointer
343 *
344 * Return: Opaque pointer for next ring entry; NULL on failire
345 */
346static inline void *hal_srng_src_reap_next(void *hal_soc, void *hal_ring)
347{
348 struct hal_srng *srng = (struct hal_srng *)hal_ring;
349 uint32_t *desc;
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -0800350
351 /* TODO: Using % is expensive, but we have to do this since
352 * size of some SRNG rings is not power of 2 (due to descriptor
353 * sizes). Need to create separate API for rings used
354 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
355 * SW2RXDMA and CE rings)
356 */
357 uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
358 srng->ring_size;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700359
360 if (next_reap_hp != srng->u.src_ring.cached_tp) {
361 desc = &(srng->ring_base_vaddr[next_reap_hp]);
362 srng->u.src_ring.reap_hp = next_reap_hp;
363 return (void *)desc;
364 }
365
366 return NULL;
367}
368
369/**
370 * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
371 * already reaped using hal_srng_src_reap_next, for posting new entries to
372 * the ring
373 *
374 * @hal_soc: Opaque HAL SOC handle
375 * @hal_ring: Source ring pointer
376 *
377 * Return: Opaque pointer for next (reaped) source ring entry; NULL on failire
378 */
379static inline void *hal_srng_src_get_next_reaped(void *hal_soc, void *hal_ring)
380{
381 struct hal_srng *srng = (struct hal_srng *)hal_ring;
382 uint32_t *desc;
383
384 if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
385 desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -0800386 srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
387 srng->ring_size;
388
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700389 return (void *)desc;
390 }
391
392 return NULL;
393}
394
395/**
396 * hal_srng_src_done_val -
397 *
398 * @hal_soc: Opaque HAL SOC handle
399 * @hal_ring: Source ring pointer
400 *
401 * Return: Opaque pointer for next ring entry; NULL on failire
402 */
403static inline uint32_t hal_srng_src_done_val(void *hal_soc, void *hal_ring)
404{
405 struct hal_srng *srng = (struct hal_srng *)hal_ring;
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -0800406 /* TODO: Using % is expensive, but we have to do this since
407 * size of some SRNG rings is not power of 2 (due to descriptor
408 * sizes). Need to create separate API for rings used
409 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
410 * SW2RXDMA and CE rings)
411 */
412 uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
413 srng->ring_size;
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700414
415 if (next_reap_hp == srng->u.src_ring.cached_tp)
416 return 0;
417
418 if (srng->u.src_ring.cached_tp > next_reap_hp)
419 return (srng->u.src_ring.cached_tp - next_reap_hp) /
420 srng->entry_size;
421 else
422 return ((srng->ring_size - next_reap_hp) +
423 srng->u.src_ring.cached_tp) / srng->entry_size;
424}
/**
 * hal_srng_src_get_next - Get next entry from a source ring and move cached
 * head pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_src_get_next(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;
	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
		srng->ring_size;

	/* Ring is full when advancing hp would land on the cached tail;
	 * one slot is always left empty to distinguish full from empty
	 */
	if (next_hp != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		srng->u.src_ring.hp = next_hp;
		/* TODO: Since reap function is not used by all rings, we can
		 * remove the following update of reap_hp in this function
		 * if we can ensure that only hal_srng_src_get_next_reaped
		 * is used for the rings requiring reap functionality
		 */
		srng->u.src_ring.reap_hp = next_hp;
		return (void *)desc;
	}

	return NULL;
}
460
461/**
462 * hal_srng_src_peek - Get next entry from a ring without moving head pointer.
463 * hal_srng_src_get_next should be called subsequently to move the head pointer
464 *
465 * @hal_soc: Opaque HAL SOC handle
466 * @hal_ring: Source ring pointer
467 *
468 * Return: Opaque pointer for next ring entry; NULL on failire
469 */
470static inline void *hal_srng_src_peek(void *hal_soc, void *hal_ring)
471{
472 struct hal_srng *srng = (struct hal_srng *)hal_ring;
473 uint32_t *desc;
474
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -0800475 /* TODO: Using % is expensive, but we have to do this since
476 * size of some SRNG rings is not power of 2 (due to descriptor
477 * sizes). Need to create separate API for rings used
478 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
479 * SW2RXDMA and CE rings)
480 */
481 if (((srng->u.src_ring.hp + srng->entry_size) %
482 srng->ring_size) != srng->u.src_ring.cached_tp) {
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700483 desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
484 return (void *)desc;
485 }
486
487 return NULL;
488}
489
490/**
491 * hal_srng_src_num_avail - Returns number of available entries in src ring
492 *
493 * @hal_soc: Opaque HAL SOC handle
494 * @hal_ring: Source ring pointer
495 * @sync_hw_ptr: Sync cached tail pointer with HW
496 *
497 */
498static inline uint32_t hal_srng_src_num_avail(void *hal_soc,
499 void *hal_ring, int sync_hw_ptr)
500{
501 struct hal_srng *srng = (struct hal_srng *)hal_ring;
502 uint32 tp;
503 uint32 hp = srng->u.src_ring.hp;
504
505 if (sync_hw_ptr) {
506 tp = *(srng->u.src_ring.tp_addr);
507 srng->u.src_ring.cached_tp = tp;
508 } else {
509 tp = srng->u.src_ring.cached_tp;
510 }
511
512 if (tp > hp)
513 return ((tp - hp) / srng->entry_size) - 1;
514 else
515 return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
516}
517
518/**
519 * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
520 * ring head/tail pointers to HW.
521 * This should be used only if hal_srng_access_start_unlocked to start ring
522 * access
523 *
524 * @hal_soc: Opaque HAL SOC handle
525 * @hal_ring: Ring pointer (Source or Destination ring)
526 *
527 * Return: 0 on success; error on failire
528 */
529static inline void hal_srng_access_end_unlocked(void *hal_soc, void *hal_ring)
530{
531 struct hal_srng *srng = (struct hal_srng *)hal_ring;
532
533 /* TODO: See if we need a write memory barrier here */
534 if (srng->flags & HAL_SRNG_LMAC_RING) {
535 /* For LMAC rings, ring pointer updates are done through FW and
536 * hence written to a shared memory location that is read by FW
537 */
538 if (srng->ring_dir == HAL_SRNG_SRC_RING)
539 *(srng->u.src_ring.hp_addr) = srng->u.src_ring.hp;
540 else
541 *(srng->u.src_ring.tp_addr) = srng->u.dst_ring.tp;
542 } else {
543 if (srng->ring_dir == HAL_SRNG_SRC_RING)
544 hif_write32_mb(srng->u.src_ring.hp_addr,
545 srng->u.src_ring.hp);
546 else
547 hif_write32_mb(srng->u.dst_ring.tp_addr,
548 srng->u.dst_ring.tp);
549 }
550}
551
552/**
553 * hal_srng_access_end - Unlock ring access and update cached ring head/tail
554 * pointers to HW
555 * This should be used only if hal_srng_access_start to start ring access
556 *
557 * @hal_soc: Opaque HAL SOC handle
558 * @hal_ring: Ring pointer (Source or Destination ring)
559 *
560 * Return: 0 on success; error on failire
561 */
562static inline void hal_srng_access_end(void *hal_soc, void *hal_ring)
563{
564 struct hal_srng *srng = (struct hal_srng *)hal_ring;
565
566 hal_srng_access_end_unlocked(hal_soc, hal_ring);
567 SRNG_UNLOCK(&(srng->lock));
568}
569
/**
 * hal_srng_access_end_reap - Unlock ring access without writing the cached
 * head/tail pointers to HW.
 * This should be used only if hal_srng_access_start was used to start ring
 * access, and should be used only while reaping SRC ring completions
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: none
 */
static inline void hal_srng_access_end_reap(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	SRNG_UNLOCK(&(srng->lock));
}
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700585
586/* TODO: Check if the following definitions is available in HW headers */
587#define WBM_IDLE_DESC_LIST 1
588#define WBM_IDLE_SCATTER_BUF_SIZE 32704
589#define NUM_MPDUS_PER_LINK_DESC 6
590#define NUM_MSDUS_PER_LINK_DESC 7
591#define REO_QUEUE_DESC_ALIGN 128
592
593#define LINK_DESC_SIZE (NUM_OF_DWORDS_RX_MSDU_LINK << 2)
594#define LINK_DESC_ALIGN 128
595
596/* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
597 * of TX_MPDU_QUEUE_EXT. We are defining a common average count here
598 */
599#define NUM_MPDU_LINKS_PER_QUEUE_DESC 12
600
601/* TODO: Check with HW team on the scatter buffer size supported. As per WBM
602 * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
603 * should be specified in 16 word units. But the number of bits defined for
604 * this field in HW header files is 5.
605 */
606#define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8
607
/**
 * hal_set_link_desc_addr - Setup link descriptor in a buffer_addr_info
 * HW structure
 *
 * @desc: Descriptor entry (from WBM_IDLE_LINK ring)
 * @cookie: SW cookie for the buffer/descriptor
 * @link_desc_paddr: Physical address of link descriptor entry
 *
 */
static inline void hal_set_link_desc_addr(void *desc, uint32_t cookie,
	qdf_dma_addr_t link_desc_paddr)
{
	uint32_t *buf_addr = (uint32_t *)desc;
	/* Split the physical address into low 32 bits and bits 39:32 */
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_0, BUFFER_ADDR_31_0,
		link_desc_paddr & 0xffffffff);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, BUFFER_ADDR_39_32,
		(uint64_t)link_desc_paddr >> 32);
	/* Buffer is returned to the WBM idle descriptor list when released */
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, RETURN_BUFFER_MANAGER,
		WBM_IDLE_DESC_LIST);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, SW_BUFFER_COOKIE,
		cookie);
}
630
/**
 * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
 * in an idle list
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: scatter buffer size in bytes
 */
static inline uint32_t hal_idle_list_scatter_buf_size(void *hal_soc)
{
	return WBM_IDLE_SCATTER_BUF_SIZE;
}
642
/**
 * hal_get_link_desc_size - Get the size of each link descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: link descriptor size in bytes
 */
static inline uint32_t hal_get_link_desc_size(void *hal_soc)
{
	return LINK_DESC_SIZE;
}
653
/**
 * hal_get_link_desc_align - Get the required start address alignment for
 * link descriptors
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: required alignment in bytes
 */
static inline uint32_t hal_get_link_desc_align(void *hal_soc)
{
	return LINK_DESC_ALIGN;
}
665
/**
 * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: number of MPDUs per link descriptor
 */
static inline uint32_t hal_num_mpdus_per_link_desc(void *hal_soc)
{
	return NUM_MPDUS_PER_LINK_DESC;
}
676
/**
 * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: number of MSDUs per link descriptor
 */
static inline uint32_t hal_num_msdus_per_link_desc(void *hal_soc)
{
	return NUM_MSDUS_PER_LINK_DESC;
}
687
/**
 * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
 * descriptor can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: number of MPDU link pointers per queue descriptor
 */
static inline uint32_t hal_num_mpdu_links_per_queue_desc(void *hal_soc)
{
	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
}
699
/**
 * hal_idle_scatter_buf_num_entries - Get the number of link desc entries
 * that fit in the given scatter buffer size
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_buf_size: Size of scatter buffer
 *
 * Return: number of WBM idle-link entries that fit, after reserving space
 *	   at the end for the next-scatter-buffer pointer
 */
static inline uint32_t hal_idle_scatter_buf_num_entries(void *hal_soc,
	uint32_t scatter_buf_size)
{
	return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
		hal_srng_get_entrysize(hal_soc, WBM_IDLE_LINK);
}
714
715/**
716 * hal_idle_scatter_buf_setup - Setup scattered idle list using the buffer list
717 * provided
718 *
719 * @hal_soc: Opaque HAL SOC handle
720 * @idle_scatter_bufs_base_paddr: Array of physical base addresses
721 * @idle_scatter_bufs_base_vaddr: Array of virtual base addresses
722 * @num_scatter_bufs: Number of scatter buffers in the above lists
723 * @scatter_buf_size: Size of each scatter buffer
724 *
725 */
726extern void hal_setup_link_idle_list(void *hal_soc,
727 qdf_dma_addr_t scatter_bufs_base_paddr[],
728 void *scatter_bufs_base_vaddr[], uint32_t num_scatter_bufs,
729 uint32_t scatter_buf_size, uint32_t last_buf_end_offset);
730
731/**
732 * hal_reo_setup - Initialize HW REO block
733 *
734 * @hal_soc: Opaque HAL SOC handle
735 */
736extern void hal_reo_setup(void *hal_soc);
737
738enum hal_pn_type {
739 HAL_PN_NONE,
740 HAL_PN_WPA,
741 HAL_PN_WAPI_EVEN,
742 HAL_PN_WAPI_UNEVEN,
743};
744
745#define HAL_RX_MAX_BA_WINDOW 256
/**
 * hal_get_reo_qdesc_size - Get size of reo queue descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ba_window_size: BlockAck window size
 *
 * Return: size in bytes of the REO queue descriptor, including the
 *	   extension descriptors needed for the given BA window
 */
static inline uint32_t hal_get_reo_qdesc_size(void *hal_soc,
	uint32_t ba_window_size)
{
	/* Window size 0/1 (no aggregation) needs only the base descriptor */
	if (ba_window_size <= 1)
		return sizeof(struct rx_reo_queue);

	/* NOTE(review): the 105/210 thresholds presumably reflect how many
	 * MPDUs the base descriptor plus each rx_reo_queue_ext can track -
	 * confirm against the HW descriptor definitions
	 */
	if (ba_window_size <= 105)
		return sizeof(struct rx_reo_queue) +
			sizeof(struct rx_reo_queue_ext);

	if (ba_window_size <= 210)
		return sizeof(struct rx_reo_queue) +
			(2 * sizeof(struct rx_reo_queue_ext));

	return sizeof(struct rx_reo_queue) +
		(3 * sizeof(struct rx_reo_queue_ext));
}
770
/**
 * hal_get_reo_qdesc_align - Get start address alignment for reo
 * queue descriptors
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: required alignment in bytes
 */
static inline uint32_t hal_get_reo_qdesc_align(void *hal_soc)
{
	return REO_QUEUE_DESC_ALIGN;
}
782
783/**
784 * hal_reo_qdesc_setup - Setup HW REO queue descriptor
785 *
786 * @hal_soc: Opaque HAL SOC handle
787 * @ba_window_size: BlockAck window size
788 * @start_seq: Starting sequence number
789 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
790 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
791 * @pn_type: PN type (one of the types defined in 'enum hal_pn_type')
792 *
793 */
794extern void hal_reo_qdesc_setup(void *hal_soc, int tid, uint32_t ba_window_size,
795 uint32_t start_seq, void *hw_qdesc_vaddr, qdf_dma_addr_t hw_qdesc_paddr,
796 int pn_type);
797
798/**
799 * hal_srng_get_hp_addr - Get head pointer physical address
800 *
801 * @hal_soc: Opaque HAL SOC handle
802 * @hal_ring: Ring pointer (Source or Destination ring)
803 *
804 */
805static inline qdf_dma_addr_t hal_srng_get_hp_addr(void *hal_soc, void *hal_ring)
806{
807 struct hal_srng *srng = (struct hal_srng *)hal_ring;
808 struct hal_soc *hal = (struct hal_soc *)hal_soc;
809
810 if (!(srng->flags & HAL_SRNG_LMAC_RING)) {
811 /* Currently this interface is required only for LMAC rings */
812 return (qdf_dma_addr_t)NULL;
813 }
814
815 if (srng->ring_dir == HAL_SRNG_SRC_RING) {
Manoj Ekbote7980f3e2017-02-06 15:30:00 -0800816 return hal->shadow_wrptr_mem_paddr +
817 ((unsigned long)(srng->u.src_ring.hp_addr) -
818 (unsigned long)(hal->shadow_wrptr_mem_vaddr));
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700819 } else {
Manoj Ekbote7980f3e2017-02-06 15:30:00 -0800820 return hal->shadow_rdptr_mem_paddr +
821 ((unsigned long)(srng->u.dst_ring.hp_addr) -
822 (unsigned long)(hal->shadow_rdptr_mem_vaddr));
Karunakar Dasineni8fbfeea2016-08-31 14:43:27 -0700823 }
824}
825
826/**
827 * hal_srng_get_tp_addr - Get tail pointer physical address
828 *
829 * @hal_soc: Opaque HAL SOC handle
830 * @hal_ring: Ring pointer (Source or Destination ring)
831 *
832 */
833static inline qdf_dma_addr_t hal_srng_get_tp_addr(void *hal_soc, void *hal_ring)
834{
835 struct hal_srng *srng = (struct hal_srng *)hal_ring;
836 struct hal_soc *hal = (struct hal_soc *)hal_soc;
837
838 if (!(srng->flags & HAL_SRNG_LMAC_RING)) {
839 /* Currently this interface is required only for LMAC rings */
840 return (qdf_dma_addr_t)NULL;
841 }
842
843 if (srng->ring_dir == HAL_SRNG_SRC_RING) {
844 return hal->shadow_rdptr_mem_paddr +
845 ((unsigned long)(srng->u.src_ring.tp_addr) -
846 (unsigned long)(hal->shadow_rdptr_mem_vaddr));
847 } else {
848 return hal->shadow_wrptr_mem_paddr +
849 ((unsigned long)(srng->u.dst_ring.tp_addr) -
850 (unsigned long)(hal->shadow_wrptr_mem_vaddr));
851 }
852}
853
/**
 * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 * @ring_params: SRNG parameters will be returned through this structure
 */
861extern void hal_get_srng_params(void *hal_soc, void *hal_ring,
862 struct hal_srng_params *ring_params);
863#endif /* _HAL_API_H_ */