/*
 * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __COPY_ENGINE_API_H__
#define __COPY_ENGINE_API_H__

#include "pld_common.h"
#include "ce_main.h"
#include "hif_main.h"

/* TBDXXX: Use int return values for consistency with Target */

/* TBDXXX: Perhaps merge Host/Target-->common */

/*
 * Copy Engine support: low-level Target-side Copy Engine API.
 * This is a hardware access layer used by code that understands
 * how to use copy engines.
 */

/*
 * A "struct CE_handle *" serves as an opaque pointer-sized
 * handle to a specific copy engine.
 */
struct CE_handle;

/*
 * "Send Completion" callback type for Send Completion Notification.
 *
 * If a Send Completion callback is registered and one or more sends
 * have completed, the callback is invoked.
 *
 * per_ce_send_context is a context supplied by the calling layer
 * (via ce_send_cb_register). It is associated with a copy engine.
 *
 * per_transfer_send_context is a context supplied by the calling layer
 * (via the "send" call). It may be different for each invocation
 * of send.
 *
 * The buffer parameter is the address of the first byte of the first
 * buffer sent (if more than one buffer was sent).
 *
 * nbytes is the number of bytes of that buffer that were sent.
 *
 * transfer_id matches the value used when the buffer or
 * buf_list was sent.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
typedef void (*ce_send_cb)(struct CE_handle *copyeng,
                           void *per_ce_send_context,
                           void *per_transfer_send_context,
                           qdf_dma_addr_t buffer,
                           unsigned int nbytes,
                           unsigned int transfer_id,
                           unsigned int sw_index,
                           unsigned int hw_index,
                           uint32_t toeplitz_hash_result);
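
/*
 * Example: a minimal send-completion handler matching the ce_send_cb
 * typedef above. Illustrative sketch only: the handler, helper, and
 * context names are hypothetical; registration uses
 * ce_send_cb_register(), declared later in this header.
 *
 *   static void example_send_done(struct CE_handle *copyeng,
 *                                 void *per_ce_send_context,
 *                                 void *per_transfer_send_context,
 *                                 qdf_dma_addr_t buffer,
 *                                 unsigned int nbytes,
 *                                 unsigned int transfer_id,
 *                                 unsigned int sw_index,
 *                                 unsigned int hw_index,
 *                                 uint32_t toeplitz_hash_result)
 *   {
 *           // per_transfer_send_context identifies the completed
 *           // transfer; unmap and free its DMA buffer here.
 *           example_tx_buf_unmap_and_free(per_transfer_send_context,
 *                                         buffer, nbytes);
 *   }
 *
 *   ce_send_cb_register(copyeng, example_send_done, example_ce_ctx, 0);
 */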

/*
 * "Buffer Received" callback type for Buffer Received Notification.
 *
 * Implementation note: Pops 1 completed recv buffer from Dest ring
 */
typedef void (*CE_recv_cb)(struct CE_handle *copyeng,
                           void *per_CE_recv_context,
                           void *per_transfer_recv_context,
                           qdf_dma_addr_t buffer,
                           unsigned int nbytes,
                           unsigned int transfer_id,
                           unsigned int flags);

/*
 * Copy Engine Watermark callback type.
 *
 * Allows upper layers to be notified when watermarks are reached:
 *   space is available and/or running short in a source ring
 *   buffers are exhausted and/or abundant in a destination ring
 *
 * The flags parameter indicates which condition triggered this
 * callback. See CE_WM_FLAG_*.
 *
 * Watermark APIs are provided to allow upper layers to "batch"
 * descriptor processing and to throttle/unthrottle.
 */
typedef void (*CE_watermark_cb)(struct CE_handle *copyeng,
                                void *per_CE_wm_context, unsigned int flags);

#define CE_WM_FLAG_SEND_HIGH 1
#define CE_WM_FLAG_SEND_LOW 2
#define CE_WM_FLAG_RECV_HIGH 4
#define CE_WM_FLAG_RECV_LOW 8
#define CE_HTT_TX_CE 4

/**
 * ce_service_srng_init() - Initialization routine for CE services
 *                          in SRNG based targets
 *
 * Return: None
 */
void ce_service_srng_init(void);

/**
 * ce_service_legacy_init() - Initialization routine for CE services
 *                            in legacy targets
 *
 * Return: None
 */
void ce_service_legacy_init(void);

/* A list of buffers to be gathered and sent */
struct ce_sendlist;

/* Copy Engine settable attributes */
struct CE_attr;

/*==================Send=====================================================*/

/* ce_send flags */
/* disable ring's byte swap, even if the default policy is to swap */
#define CE_SEND_FLAG_SWAP_DISABLE 1

/*
 * Queue a source buffer to be sent to an anonymous destination buffer.
 *   copyeng     - which copy engine to use
 *   buffer      - address of buffer
 *   nbytes      - number of bytes to send
 *   transfer_id - arbitrary ID; reflected to destination
 *   flags       - CE_SEND_FLAG_* values
 * Returns 0 on success; otherwise an error status.
 *
 * Note: If no flags are specified, use CE's default data swap mode.
 *
 * Implementation note: pushes 1 buffer to Source ring
 */
int ce_send(struct CE_handle *copyeng,
            void *per_transfer_send_context,
            qdf_dma_addr_t buffer,
            unsigned int nbytes,
            unsigned int transfer_id,
            unsigned int flags,
            unsigned int user_flags);
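
/*
 * Example: queue one DMA-mapped buffer. A sketch, assuming "copyeng"
 * came from ce_init() and "paddr" is the qdf_dma_addr_t the caller
 * obtained when mapping the buffer; flags 0 selects the CE's default
 * data swap mode. EXAMPLE_TRANSFER_ID and the retry helper are
 * hypothetical names.
 *
 *   int ret = ce_send(copyeng, example_xfer_ctx, paddr, nbytes,
 *                     EXAMPLE_TRANSFER_ID, 0, 0);
 *   if (ret)
 *           // Source ring full or error: caller still owns the buffer
 *           example_requeue_for_retry(example_xfer_ctx);
 */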

#ifdef WLAN_FEATURE_FASTPATH
int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
                 unsigned int transfer_id, uint32_t download_len);
#endif

void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls);
extern qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,
                                qdf_nbuf_t msdu,
                                uint32_t transfer_id,
                                uint32_t len,
                                uint32_t sendhead);

QDF_STATUS ce_send_single(struct CE_handle *ce_tx_hdl,
                          qdf_nbuf_t msdu,
                          uint32_t transfer_id,
                          uint32_t len);
/*
 * Register a Send Callback function.
 * This function is called as soon as the contents of a Send
 * have reached the destination, unless disable_interrupts is
 * requested. In this case, the callback is invoked when the
 * send status is polled, shortly after the send completes.
 */
void ce_send_cb_register(struct CE_handle *copyeng,
                         ce_send_cb fn_ptr,
                         void *per_ce_send_context, int disable_interrupts);

/*
 * Return the size of a SendList. This allows the caller to allocate
 * a SendList while the SendList structure remains opaque.
 */
unsigned int ce_sendlist_sizeof(void);

/* Initialize a sendlist */
void ce_sendlist_init(struct ce_sendlist *sendlist);

/* Append a simple buffer (address/length) to a sendlist. */
int ce_sendlist_buf_add(struct ce_sendlist *sendlist,
                        qdf_dma_addr_t buffer,
                        unsigned int nbytes,
                        /* OR-ed with internal flags */
                        uint32_t flags,
                        uint32_t user_flags);

/*
 * Queue a "sendlist" of buffers to be sent using gather to a single
 * anonymous destination buffer.
 *   copyeng     - which copy engine to use
 *   sendlist    - list of simple buffers to send using gather
 *   transfer_id - arbitrary ID; reflected to destination
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes multiple buffers with Gather to Source ring.
 */
int ce_sendlist_send(struct CE_handle *copyeng,
                     void *per_transfer_send_context,
                     struct ce_sendlist *sendlist,
                     unsigned int transfer_id);
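
/*
 * Example: gather-send two fragments as a single transfer. A sketch,
 * assuming frag0_paddr/frag1_paddr are DMA addresses of the fragments.
 * A sendlist may live on the stack (struct ce_sendlist is defined near
 * the bottom of this header); ce_sendlist_sizeof() serves callers that
 * allocate it dynamically instead.
 *
 *   struct ce_sendlist sl;
 *
 *   ce_sendlist_init(&sl);
 *   ce_sendlist_buf_add(&sl, frag0_paddr, frag0_len, 0, 0);
 *   ce_sendlist_buf_add(&sl, frag1_paddr, frag1_len, 0, 0);
 *   if (ce_sendlist_send(copyeng, example_xfer_ctx, &sl,
 *                        EXAMPLE_TRANSFER_ID))
 *           // Not enough Source ring space for all fragments
 *           example_requeue_for_retry(example_xfer_ctx);
 */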

/*==================Recv=====================================================*/

/*
 * Make a buffer available to receive. The buffer must be at least of a
 * minimal size appropriate for this copy engine (src_sz_max attribute).
 *   copyeng                   - which copy engine to use
 *   per_transfer_recv_context - context passed back to caller's recv_cb
 *   buffer                    - address of buffer in CE space
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes a buffer to Dest ring.
 */
int ce_recv_buf_enqueue(struct CE_handle *copyeng,
                        void *per_transfer_recv_context,
                        qdf_dma_addr_t buffer);

/*
 * Register a Receive Callback function.
 * This function is called as soon as data is received
 * from the source.
 */
void ce_recv_cb_register(struct CE_handle *copyeng,
                         CE_recv_cb fn_ptr,
                         void *per_CE_recv_context,
                         int disable_interrupts);
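
/*
 * Example: typical receive-side setup. A sketch, assuming
 * example_recv_done matches the CE_recv_cb typedef above and each
 * posted buffer is at least src_sz_max bytes; the context and array
 * names are hypothetical.
 *
 *   int i;
 *
 *   ce_recv_cb_register(copyeng, example_recv_done, example_ce_ctx, 0);
 *   for (i = 0; i < num_rx_buffers; i++)
 *           if (ce_recv_buf_enqueue(copyeng, example_rx_ctx[i],
 *                                   rx_paddr[i]))
 *                   break;  // Dest ring full
 */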

/*==================CE Watermark=============================================*/

/*
 * Register a Watermark Callback function.
 * This function is called as soon as a watermark level
 * is crossed. A Watermark Callback function is free to
 * handle received data "en masse"; but then some coordination
 * is required with a registered Receive Callback function.
 * [Suggestion: Either handle Receives in a Receive Callback
 * or en masse in a Watermark Callback; but not both.]
 */
void ce_watermark_cb_register(struct CE_handle *copyeng,
                              CE_watermark_cb fn_ptr,
                              void *per_CE_wm_context);

/*
 * Set low/high watermarks for the send/source side of a copy engine.
 *
 * Typically, the destination side CPU manages watermarks for
 * the receive side and the source side CPU manages watermarks
 * for the send side.
 *
 * A low watermark of 0 is never hit (so the watermark function
 * will never be called for a Low Watermark condition).
 *
 * A high watermark equal to nentries is never hit (so the
 * watermark function will never be called for a High Watermark
 * condition).
 */
void ce_send_watermarks_set(struct CE_handle *copyeng,
                            unsigned int low_alert_nentries,
                            unsigned int high_alert_nentries);

/* Set low/high watermarks for the receive/destination side of copy engine. */
void ce_recv_watermarks_set(struct CE_handle *copyeng,
                            unsigned int low_alert_nentries,
                            unsigned int high_alert_nentries);
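
/*
 * Example: watermark-driven flow control. A sketch, assuming a source
 * ring with 512 entries and a handler example_wm_cb that inspects the
 * CE_WM_FLAG_* bits of its flags argument; the thresholds shown are
 * illustrative, not recommended values.
 *
 *   ce_watermark_cb_register(copyeng, example_wm_cb, example_wm_ctx);
 *   ce_send_watermarks_set(copyeng, 32, 480);
 *   ce_recv_watermarks_set(copyeng, 8, 504);
 */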

/*
 * Return the number of entries that can be queued
 * to a ring at an instant in time.
 *
 * For source ring, does not imply that destination-side
 * buffers are available; merely indicates descriptor space
 * in the source ring.
 *
 * For destination ring, does not imply that previously
 * received buffers have been processed; merely indicates
 * descriptor space in destination ring.
 *
 * Mainly for use with CE Watermark callback.
 */
unsigned int ce_send_entries_avail(struct CE_handle *copyeng);
unsigned int ce_recv_entries_avail(struct CE_handle *copyeng);

/* recv flags */
/* Data is byte-swapped */
#define CE_RECV_FLAG_SWAPPED 1

/*
 * Supply data for the next completed unprocessed receive descriptor.
 *
 * For use
 *    with CE Watermark callback,
 *    in a recv_cb function when processing buf_lists,
 *    in a recv_cb function in order to mitigate recv_cb's.
 *
 * Implementation note: Pops buffer from Dest ring.
 */
int ce_completed_recv_next(struct CE_handle *copyeng,
                           void **per_CE_contextp,
                           void **per_transfer_contextp,
                           qdf_dma_addr_t *bufferp,
                           unsigned int *nbytesp,
                           unsigned int *transfer_idp,
                           unsigned int *flagsp);

/*
 * Supply data for the next completed unprocessed send descriptor.
 *
 * For use
 *    with CE Watermark callback,
 *    in a send_cb function in order to mitigate send_cb's.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
int ce_completed_send_next(struct CE_handle *copyeng,
                           void **per_CE_contextp,
                           void **per_transfer_contextp,
                           qdf_dma_addr_t *bufferp,
                           unsigned int *nbytesp,
                           unsigned int *transfer_idp,
                           unsigned int *sw_idx,
                           unsigned int *hw_idx,
                           uint32_t *toeplitz_hash_result);
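
/*
 * Example: batch-reaping completed sends, e.g. from a watermark
 * callback. A sketch, assuming a return of 0 means "one more completed
 * descriptor was popped"; every out-parameter needs valid storage even
 * if the caller ignores it. The free helper is hypothetical.
 *
 *   void *ce_ctx, *xfer_ctx;
 *   qdf_dma_addr_t buf;
 *   unsigned int nbytes, id, sw, hw;
 *   uint32_t hash;
 *
 *   while (ce_completed_send_next(copyeng, &ce_ctx, &xfer_ctx, &buf,
 *                                 &nbytes, &id, &sw, &hw, &hash) == 0)
 *           example_tx_buf_unmap_and_free(xfer_ctx, buf, nbytes);
 */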

/*==================CE Engine Initialization=================================*/

/* Initialize an instance of a CE */
struct CE_handle *ce_init(struct hif_softc *scn,
                          unsigned int CE_id, struct CE_attr *attr);

/*==================CE Engine Shutdown=======================================*/
/*
 * Support clean shutdown by allowing the caller to revoke
 * receive buffers. Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
                    void **per_CE_contextp,
                    void **per_transfer_contextp,
                    qdf_dma_addr_t *bufferp);

/*
 * Support clean shutdown by allowing the caller to cancel
 * pending sends. Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
                    void **per_CE_contextp,
                    void **per_transfer_contextp,
                    qdf_dma_addr_t *bufferp,
                    unsigned int *nbytesp,
                    unsigned int *transfer_idp,
                    uint32_t *toeplitz_hash_result);

void ce_fini(struct CE_handle *copyeng);
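
/*
 * Example: clean shutdown sequence. A sketch, assuming Target DMA has
 * already been stopped for this engine; the free helpers are
 * hypothetical.
 *
 *   void *ce_ctx, *xfer_ctx;
 *   qdf_dma_addr_t buf;
 *   unsigned int nbytes, id;
 *   uint32_t hash;
 *
 *   while (ce_revoke_recv_next(copyeng, &ce_ctx, &xfer_ctx, &buf) ==
 *          QDF_STATUS_SUCCESS)
 *           example_rx_buf_free(xfer_ctx, buf);
 *   while (ce_cancel_send_next(copyeng, &ce_ctx, &xfer_ctx, &buf,
 *                              &nbytes, &id, &hash) ==
 *          QDF_STATUS_SUCCESS)
 *           example_tx_buf_unmap_and_free(xfer_ctx, buf, nbytes);
 *   ce_fini(copyeng);
 */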

/*==================CE Interrupt Handlers====================================*/
void ce_per_engine_service_any(int irq, struct hif_softc *scn);
int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id);
void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int CE_id);

/*===================CE cmpl interrupt Enable/Disable =======================*/
void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn);
void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn);

385/* API to check if any of the copy engine pipes has
386 * pending frames for prcoessing
387 */
Komal Seelam644263d2016-02-22 20:45:49 +0530388bool ce_get_rx_pending(struct hif_softc *scn);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800389
/**
 * war_ce_src_ring_write_idx_set() - Set write index for CE source ring
 * @scn: HIF context
 * @ctrl_addr: CE control register address
 * @write_index: source ring write index to set
 *
 * Return: None
 */
void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
                                   u32 ctrl_addr, unsigned int write_index);

/* CE_attr.flags values */
#define CE_ATTR_NO_SNOOP 0x01 /* Use NonSnooping PCIe accesses? */
#define CE_ATTR_BYTE_SWAP_DATA 0x02 /* Byte swap data words */
#define CE_ATTR_SWIZZLE_DESCRIPTORS 0x04 /* Swizzle descriptors? */
#define CE_ATTR_DISABLE_INTR 0x08 /* no interrupt on copy completion */
#define CE_ATTR_ENABLE_POLL 0x10 /* poll for residue descriptors */
#define CE_ATTR_DIAG 0x20 /* Diag CE */

/**
 * struct CE_attr - Attributes of an instance of a Copy Engine
 * @flags: CE_ATTR_* values
 * @priority: TBD
 * @src_nentries: #entries in source ring - Must be a power of 2
 * @src_sz_max: Max source send size for this CE. This is also the minimum
 *              size of a destination buffer
 * @dest_nentries: #entries in destination ring - Must be a power of 2
 * @reserved: Future Use
 */
struct CE_attr {
	unsigned int flags;
	unsigned int priority;
	unsigned int src_nentries;
	unsigned int src_sz_max;
	unsigned int dest_nentries;
	void *reserved;
};
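
/*
 * Example: attributes for a small bidirectional pipe. A sketch; the
 * values shown are illustrative, and real values are per-target policy
 * taken from the host CE configuration tables.
 *
 *   static struct CE_attr example_attr = {
 *           .flags = 0,
 *           .priority = 0,
 *           .src_nentries = 32,
 *           .src_sz_max = 2048,
 *           .dest_nentries = 32,
 *           .reserved = NULL,
 *   };
 *
 *   struct CE_handle *copyeng = ce_init(scn, CE_id, &example_attr);
 */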

/*
 * When using sendlist_send to transfer multiple buffer fragments, the
 * transfer context of each fragment, except the last one, will be filled
 * with CE_SENDLIST_ITEM_CTXT. ce_completed_send will return success for
 * each fragment done with send and the transfer context would be
 * CE_SENDLIST_ITEM_CTXT. The upper layer can use this to identify the
 * status of a send completion.
 */
#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef)
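
/*
 * Example: skipping intermediate fragments in a send-completion path
 * (sketch):
 *
 *   if (xfer_ctx == CE_SENDLIST_ITEM_CTXT)
 *           return;  // intermediate fragment; wait for the final one
 */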

/*
 * This is an opaque type that is at least large enough to hold
 * a sendlist. A sendlist can only be accessed through CE APIs,
 * but this allows a sendlist to be allocated on the run-time
 * stack. TBDXXX: un-opaque would be simpler...
 */
struct ce_sendlist {
	unsigned int word[62];
};

#define ATH_ISR_NOSCHED 0x0000 /* Do not schedule bottom half/DPC */
#define ATH_ISR_SCHED 0x0001 /* Schedule the bottom half for execution */
#define ATH_ISR_NOTMINE 0x0002 /* For shared IRQs */

#ifdef IPA_OFFLOAD
void ce_ipa_get_resource(struct CE_handle *ce,
                         qdf_shared_mem_t **ce_sr,
                         uint32_t *ce_sr_ring_size,
                         qdf_dma_addr_t *ce_reg_paddr);
#else
/**
 * ce_ipa_get_resource() - get uc resource on copyengine
 * @ce: copyengine context
 * @ce_sr: copyengine source ring resource info
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * The copy engine releases resources to the micro controller.
 * The micro controller needs:
 *  - the copy engine source descriptor base address
 *  - the copy engine source descriptor size
 *  - the PCI BAR address to access the copy engine register
 *
 * Return: None
 */
static inline void ce_ipa_get_resource(struct CE_handle *ce,
                                       qdf_shared_mem_t **ce_sr,
                                       uint32_t *ce_sr_ring_size,
                                       qdf_dma_addr_t *ce_reg_paddr)
{
}
#endif /* IPA_OFFLOAD */

static inline void ce_pkt_error_count_incr(
	struct HIF_CE_state *_hif_state,
	enum ol_ath_hif_pkt_ecodes _hif_ecode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(_hif_state);

	if (_hif_ecode == HIF_PIPE_NO_RESOURCE)
		scn->pkt_stats.hif_pipe_no_resrc_count += 1;
}

bool ce_check_rx_pending(struct CE_state *CE_state);
void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id);
struct ce_ops *ce_services_srng(void);
struct ce_ops *ce_services_legacy(void);
bool ce_srng_based(struct hif_softc *scn);
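
/*
 * Example: selecting the CE service backend. A sketch of the intended
 * dispatch; the actual wiring is done inside the CE core at attach
 * time.
 *
 *   struct ce_ops *ops = ce_srng_based(scn) ?
 *           ce_services_srng() : ce_services_legacy();
 */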

/* Forward declaration */
struct CE_ring_state;

struct ce_ops {
	uint32_t (*ce_get_desc_size)(uint8_t ring_type);
	int (*ce_ring_setup)(struct hif_softc *scn, uint8_t ring_type,
			     uint32_t ce_id, struct CE_ring_state *ring,
			     struct CE_attr *attr);
	int (*ce_send_nolock)(struct CE_handle *copyeng,
			      void *per_transfer_context,
			      qdf_dma_addr_t buffer,
			      uint32_t nbytes,
			      uint32_t transfer_id,
			      uint32_t flags,
			      uint32_t user_flags);
	int (*ce_sendlist_send)(struct CE_handle *copyeng,
				void *per_transfer_context,
				struct ce_sendlist *sendlist,
				unsigned int transfer_id);
	QDF_STATUS (*ce_revoke_recv_next)(struct CE_handle *copyeng,
					  void **per_CE_contextp,
					  void **per_transfer_contextp,
					  qdf_dma_addr_t *bufferp);
	QDF_STATUS (*ce_cancel_send_next)(struct CE_handle *copyeng,
					  void **per_CE_contextp,
					  void **per_transfer_contextp,
					  qdf_dma_addr_t *bufferp,
					  unsigned int *nbytesp,
					  unsigned int *transfer_idp,
					  uint32_t *toeplitz_hash_result);
	int (*ce_recv_buf_enqueue)(struct CE_handle *copyeng,
				   void *per_recv_context,
				   qdf_dma_addr_t buffer);
	bool (*watermark_int)(struct CE_state *CE_state, unsigned int *flags);
	int (*ce_completed_recv_next_nolock)(struct CE_state *CE_state,
					     void **per_CE_contextp,
					     void **per_transfer_contextp,
					     qdf_dma_addr_t *bufferp,
					     unsigned int *nbytesp,
					     unsigned int *transfer_idp,
					     unsigned int *flagsp);
	int (*ce_completed_send_next_nolock)(struct CE_state *CE_state,
					     void **per_CE_contextp,
					     void **per_transfer_contextp,
					     qdf_dma_addr_t *bufferp,
					     unsigned int *nbytesp,
					     unsigned int *transfer_idp,
					     unsigned int *sw_idx,
					     unsigned int *hw_idx,
					     uint32_t *toeplitz_hash_result);
	unsigned int (*ce_recv_entries_done_nolock)(struct hif_softc *scn,
						    struct CE_state *CE_state);
	unsigned int (*ce_send_entries_done_nolock)(struct hif_softc *scn,
						    struct CE_state *CE_state);
	void (*ce_per_engine_handler_adjust)(struct CE_state *CE_state,
					     int disable_copy_compl_intr);
	void (*ce_prepare_shadow_register_v2_cfg)(struct hif_softc *scn,
			struct pld_shadow_reg_v2_cfg **shadow_config,
			int *num_shadow_registers_configured);
};

int hif_ce_bus_early_suspend(struct hif_softc *scn);
int hif_ce_bus_late_resume(struct hif_softc *scn);

/**
 * ce_engine_service_reg() - service a copy engine through the regular path
 * @scn: hif_context
 * @CE_id: Copy engine ID
 *
 * Called from ce_per_engine_service and goes through the regular interrupt
 * handling that does not involve the WLAN fast path feature.
 *
 * Return: void
 */
void ce_engine_service_reg(struct hif_softc *scn, int CE_id);

/**
 * ce_per_engine_service_fast() - CE handler routine to service fastpath msgs
 * @scn: hif_context
 * @ce_id: Copy engine ID
 *
 * Return: void
 */
void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id);

#endif /* __COPY_ENGINE_API_H__ */