/*
 * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#ifndef __COPY_ENGINE_API_H__
#define __COPY_ENGINE_API_H__

#include "pld_common.h"
#include "ce_main.h"
#include "hif_main.h"

/* TBDXXX: Use int return values for consistency with Target */

/* TBDXXX: Perhaps merge Host/Target-->common */

/*
 * Copy Engine support: low-level Target-side Copy Engine API.
 * This is a hardware access layer used by code that understands
 * how to use copy engines.
 */

/*
 * A "struct CE_handle *" serves as an opaque pointer-sized
 * handle to a specific copy engine.
 */
struct CE_handle;

/*
 * "Send Completion" callback type for Send Completion Notification.
 *
 * If a Send Completion callback is registered and one or more sends
 * have completed, the callback is invoked.
 *
 * per_ce_send_context is a context supplied by the calling layer
 * (via ce_send_cb_register). It is associated with a copy engine.
 *
 * per_transfer_send_context is a context supplied by the calling layer
 * (via the "send" call). It may be different for each invocation
 * of send.
 *
 * The buffer parameter is the address of the first byte of the first
 * buffer sent (when the send covered more than one buffer).
 *
 * nbytes is the number of bytes of that buffer that were sent.
 *
 * transfer_id matches the value used when the buffer or
 * buf_list was sent.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
typedef void (*ce_send_cb)(struct CE_handle *copyeng,
			   void *per_ce_send_context,
			   void *per_transfer_send_context,
			   qdf_dma_addr_t buffer,
			   unsigned int nbytes,
			   unsigned int transfer_id,
			   unsigned int sw_index,
			   unsigned int hw_index,
			   uint32_t toeplitz_hash_result);

/*
 * "Buffer Received" callback type for Buffer Received Notification.
 *
 * Implementation note: Pops 1 completed recv buffer from Dest ring
 */
typedef void (*CE_recv_cb)(struct CE_handle *copyeng,
			   void *per_CE_recv_context,
			   void *per_transfer_recv_context,
			   qdf_dma_addr_t buffer,
			   unsigned int nbytes,
			   unsigned int transfer_id,
			   unsigned int flags);

/*
 * Copy Engine Watermark callback type.
 *
 * Allows upper layers to be notified when watermarks are reached:
 *   - space is available and/or running short in a source ring
 *   - buffers are exhausted and/or abundant in a destination ring
 *
 * The flags parameter indicates which condition triggered this
 * callback. See CE_WM_FLAG_*.
 *
 * Watermark APIs are provided to allow upper layers to batch
 * descriptor processing and to throttle/unthrottle.
 */
typedef void (*CE_watermark_cb)(struct CE_handle *copyeng,
				void *per_CE_wm_context, unsigned int flags);

#define CE_WM_FLAG_SEND_HIGH 1
#define CE_WM_FLAG_SEND_LOW 2
#define CE_WM_FLAG_RECV_HIGH 4
#define CE_WM_FLAG_RECV_LOW 8
#define CE_HTT_TX_CE 4
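
/*
 * Illustrative sketch (not part of this API): a watermark callback can
 * decode its flags argument to throttle or unthrottle traffic.
 * resume_tx() and throttle_tx() are hypothetical upper-layer helpers.
 *
 *	static void my_wm_cb(struct CE_handle *ce, void *ctx,
 *			     unsigned int flags)
 *	{
 *		if (flags & CE_WM_FLAG_SEND_LOW)
 *			resume_tx(ctx);
 *		if (flags & CE_WM_FLAG_SEND_HIGH)
 *			throttle_tx(ctx);
 *	}
 */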

/* A list of buffers to be gathered and sent */
struct ce_sendlist;

/* Copy Engine settable attributes */
struct CE_attr;

/*==================Send=====================================================*/

/* ce_send flags */
/* disable ring's byte swap, even if the default policy is to swap */
#define CE_SEND_FLAG_SWAP_DISABLE 1

/*
 * Queue a source buffer to be sent to an anonymous destination buffer.
 * copyeng - which copy engine to use
 * per_transfer_send_context - context echoed back in the send callback
 * buffer - address of buffer
 * nbytes - number of bytes to send
 * transfer_id - arbitrary ID; reflected to destination
 * flags - CE_SEND_FLAG_* values
 * Returns 0 on success; otherwise an error status.
 *
 * Note: If no flags are specified, use CE's default data swap mode.
 *
 * Implementation note: pushes 1 buffer to Source ring
 */
int ce_send(struct CE_handle *copyeng,
	    void *per_transfer_send_context,
	    qdf_dma_addr_t buffer,
	    unsigned int nbytes,
	    unsigned int transfer_id,
	    unsigned int flags,
	    unsigned int user_flags);
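
/*
 * Example usage (illustrative sketch only): queue one DMA-mapped buffer.
 * copyeng, my_ctx, paddr, len, xfer_id, and the retry helper are all
 * hypothetical caller-owned names; a real caller must handle a non-zero
 * (ring full) return.
 *
 *	int status;
 *
 *	status = ce_send(copyeng, my_ctx, paddr, len, xfer_id, 0, 0);
 *	if (status != 0)
 *		defer_and_retry_after_completions(my_ctx);
 */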

#ifdef WLAN_FEATURE_FASTPATH
int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
		 unsigned int transfer_id, uint32_t download_len);

#endif

void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls);
extern qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,
				qdf_nbuf_t msdu,
				uint32_t transfer_id,
				uint32_t len,
				uint32_t sendhead);

extern int ce_send_single(struct CE_handle *ce_tx_hdl,
			  qdf_nbuf_t msdu,
			  uint32_t transfer_id,
			  uint32_t len);
/*
 * Register a Send Callback function.
 * The registered function is called as soon as the contents of a send
 * have reached the destination, unless disable_interrupts is
 * requested. In that case, the callback is invoked when the
 * send status is polled, shortly after the send completes.
 */
void ce_send_cb_register(struct CE_handle *copyeng,
			 ce_send_cb fn_ptr,
			 void *per_ce_send_context, int disable_interrupts);

/*
 * Return the size of a SendList. This allows the caller to allocate
 * a SendList while the SendList structure remains opaque.
 */
unsigned int ce_sendlist_sizeof(void);

/* Initialize a sendlist */
void ce_sendlist_init(struct ce_sendlist *sendlist);

/* Append a simple buffer (address/length) to a sendlist. */
int ce_sendlist_buf_add(struct ce_sendlist *sendlist,
			qdf_dma_addr_t buffer,
			unsigned int nbytes,
			uint32_t flags, /* OR-ed with internal flags */
			uint32_t user_flags);

/*
 * Queue a "sendlist" of buffers to be sent using gather to a single
 * anonymous destination buffer.
 * copyeng - which copy engine to use
 * sendlist - list of simple buffers to send using gather
 * transfer_id - arbitrary ID; reflected to destination
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes multiple buffers with Gather to Source ring.
 */
int ce_sendlist_send(struct CE_handle *copyeng,
		     void *per_transfer_send_context,
		     struct ce_sendlist *sendlist,
		     unsigned int transfer_id);
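
/*
 * Example usage (illustrative sketch only): gather-send two fragments.
 * The DMA addresses, lengths, context, and transfer id are hypothetical
 * caller-owned values; return-status checks are elided for brevity.
 *
 *	struct ce_sendlist sl;
 *
 *	ce_sendlist_init(&sl);
 *	ce_sendlist_buf_add(&sl, hdr_paddr, hdr_len, 0, 0);
 *	ce_sendlist_buf_add(&sl, payload_paddr, payload_len, 0, 0);
 *	ce_sendlist_send(copyeng, my_ctx, &sl, xfer_id);
 */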

/*==================Recv=====================================================*/

/*
 * Make a buffer available to receive. The buffer must be at least the
 * minimal size appropriate for this copy engine (src_sz_max attribute).
 * copyeng - which copy engine to use
 * per_transfer_recv_context - context passed back to caller's recv_cb
 * buffer - address of buffer in CE space
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes a buffer to Dest ring.
 */
int ce_recv_buf_enqueue(struct CE_handle *copyeng,
			void *per_transfer_recv_context,
			qdf_dma_addr_t buffer);
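
/*
 * Example usage (illustrative sketch only): keep the destination ring
 * stocked with receive buffers. alloc_and_map_rx_buf() is a hypothetical
 * helper returning a buffer pointer and filling in its DMA address.
 *
 *	while (ce_recv_entries_avail(copyeng)) {
 *		void *nbuf = alloc_and_map_rx_buf(&paddr);
 *
 *		if (!nbuf || ce_recv_buf_enqueue(copyeng, nbuf, paddr) != 0)
 *			break;
 *	}
 */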

/*
 * Register a Receive Callback function.
 * This function is called as soon as data is received
 * from the source.
 */
void ce_recv_cb_register(struct CE_handle *copyeng,
			 CE_recv_cb fn_ptr,
			 void *per_CE_recv_context,
			 int disable_interrupts);

/*==================CE Watermark=============================================*/

/*
 * Register a Watermark Callback function.
 * This function is called as soon as a watermark level
 * is crossed. A Watermark Callback function is free to
 * handle received data "en masse"; but then some coordination
 * is required with a registered Receive Callback function.
 * [Suggestion: Either handle Receives in a Receive Callback
 * or en masse in a Watermark Callback; but not both.]
 */
void ce_watermark_cb_register(struct CE_handle *copyeng,
			      CE_watermark_cb fn_ptr,
			      void *per_CE_wm_context);

/*
 * Set low/high watermarks for the send/source side of a copy engine.
 *
 * Typically, the destination side CPU manages watermarks for
 * the receive side and the source side CPU manages watermarks
 * for the send side.
 *
 * A low watermark of 0 is never hit (so the watermark function
 * will never be called for a Low Watermark condition).
 *
 * A high watermark equal to nentries is never hit (so the
 * watermark function will never be called for a High Watermark
 * condition).
 */
void ce_send_watermarks_set(struct CE_handle *copyeng,
			    unsigned int low_alert_nentries,
			    unsigned int high_alert_nentries);

/* Set low/high watermarks for the receive/destination side of copy engine. */
void ce_recv_watermarks_set(struct CE_handle *copyeng,
			    unsigned int low_alert_nentries,
			    unsigned int high_alert_nentries);
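
/*
 * Example usage (illustrative sketch only): for a 512-entry source ring,
 * request callbacks when occupancy crosses arbitrary low (64) and high
 * (448) thresholds. my_wm_cb and my_wm_ctx are hypothetical names.
 *
 *	ce_watermark_cb_register(copyeng, my_wm_cb, my_wm_ctx);
 *	ce_send_watermarks_set(copyeng, 64, 448);
 */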

/*
 * Return the number of entries that can be queued
 * to a ring at an instant in time.
 *
 * For a source ring, this does not imply that destination-side
 * buffers are available; it merely indicates descriptor space
 * in the source ring.
 *
 * For a destination ring, this does not imply that previously
 * received buffers have been processed; it merely indicates
 * descriptor space in the destination ring.
 *
 * Mainly for use with the CE Watermark callback.
 */
unsigned int ce_send_entries_avail(struct CE_handle *copyeng);
unsigned int ce_recv_entries_avail(struct CE_handle *copyeng);

/*
 * Return the number of entries in the ring that are ready
 * to be processed by software.
 *
 * For a source ring, the number of descriptors that have
 * been completed and can now be overwritten with new send
 * descriptors.
 *
 * For a destination ring, the number of descriptors that
 * are available to be processed (newly received buffers).
 */
unsigned int ce_send_entries_done(struct CE_handle *copyeng);
unsigned int ce_recv_entries_done(struct CE_handle *copyeng);

/* recv flags */
/* Data is byte-swapped */
#define CE_RECV_FLAG_SWAPPED 1

/*
 * Supply data for the next completed unprocessed receive descriptor.
 *
 * For use
 *   with a CE Watermark callback,
 *   in a recv_cb function when processing buf_lists, or
 *   in a recv_cb function in order to mitigate recv_cb's.
 *
 * Implementation note: Pops buffer from Dest ring.
 */
int ce_completed_recv_next(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   qdf_dma_addr_t *bufferp,
			   unsigned int *nbytesp,
			   unsigned int *transfer_idp,
			   unsigned int *flagsp);
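
/*
 * Example usage (illustrative sketch only): drain all completed receives
 * from a watermark callback to mitigate per-buffer recv_cb invocations.
 * process_rx() is a hypothetical upper-layer handler; the out-parameter
 * variables are declared by the caller.
 *
 *	while (ce_completed_recv_next(copyeng, &ce_ctx, &xfer_ctx, &paddr,
 *				      &nbytes, &id, &flags) == 0)
 *		process_rx(xfer_ctx, nbytes, flags);
 */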

/*
 * Supply data for the next completed unprocessed send descriptor.
 *
 * For use
 *   with a CE Watermark callback, or
 *   in a send_cb function in order to mitigate send_cb's.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
int ce_completed_send_next(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   qdf_dma_addr_t *bufferp,
			   unsigned int *nbytesp,
			   unsigned int *transfer_idp,
			   unsigned int *sw_idx,
			   unsigned int *hw_idx,
			   uint32_t *toeplitz_hash_result);

/*==================CE Engine Initialization=================================*/

/* Initialize an instance of a CE */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr);

/*==================CE Engine Shutdown=======================================*/
/*
 * Support clean shutdown by allowing the caller to revoke
 * receive buffers. Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp);

/*
 * Support clean shutdown by allowing the caller to cancel
 * pending sends. Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp,
		    unsigned int *nbytesp,
		    unsigned int *transfer_idp,
		    uint32_t *toeplitz_hash_result);

void ce_fini(struct CE_handle *copyeng);
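
/*
 * Example usage (illustrative sketch only): reclaim posted receive
 * buffers during shutdown, after Target DMA has been stopped.
 * unmap_and_free_rx_buf() is a hypothetical helper, and passing NULL
 * for an unneeded out-parameter is an assumption of this sketch.
 *
 *	while (ce_revoke_recv_next(copyeng, NULL, &xfer_ctx, &paddr) ==
 *	       QDF_STATUS_SUCCESS)
 *		unmap_and_free_rx_buf(xfer_ctx, paddr);
 *	ce_fini(copyeng);
 */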

/*==================CE Interrupt Handlers====================================*/
void ce_per_engine_service_any(int irq, struct hif_softc *scn);
int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id);
void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int CE_id);

/*===================CE cmpl interrupt Enable/Disable =======================*/
void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn);
void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn);

/* API to check if any of the copy engine pipes has
 * pending frames for processing
 */
bool ce_get_rx_pending(struct hif_softc *scn);

/* CE_attr.flags values */
#define CE_ATTR_NO_SNOOP 0x01 /* Use NonSnooping PCIe accesses? */
#define CE_ATTR_BYTE_SWAP_DATA 0x02 /* Byte swap data words */
#define CE_ATTR_SWIZZLE_DESCRIPTORS 0x04 /* Swizzle descriptors? */
#define CE_ATTR_DISABLE_INTR 0x08 /* no interrupt on copy completion */
#define CE_ATTR_ENABLE_POLL 0x10 /* poll for residue descriptors */
#define CE_ATTR_DIAG 0x20 /* Diag CE */

/* Attributes of an instance of a Copy Engine */
struct CE_attr {
	unsigned int flags;		/* CE_ATTR_* values */
	unsigned int priority;		/* TBD */
	unsigned int src_nentries;	/* #entries in source ring -
					 * Must be a power of 2 */
	unsigned int src_sz_max;	/* Max source send size for this CE.
					 * This is also the minimum size of
					 * a destination buffer. */
	unsigned int dest_nentries;	/* #entries in destination ring -
					 * Must be a power of 2 */
	void *reserved;			/* Future use */
};
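
/*
 * Example (illustrative sketch only): attributes for a 512-entry copy
 * engine with 2048-byte receive buffers; the values are arbitrary but
 * honor the power-of-2 requirement on ring sizes.
 *
 *	struct CE_attr attr = {
 *		.flags = 0,
 *		.src_nentries = 512,
 *		.src_sz_max = 2048,
 *		.dest_nentries = 512,
 *	};
 *	struct CE_handle *ce = ce_init(scn, CE_id, &attr);
 */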

/*
 * When using sendlist_send to transfer multiple buffer fragments, the
 * transfer context of each fragment, except the last one, will be filled
 * with CE_SENDLIST_ITEM_CTXT. ce_completed_send_next returns success for
 * each completed fragment; for all but the last fragment the returned
 * transfer context is CE_SENDLIST_ITEM_CTXT, which the upper layer can
 * use to distinguish intermediate fragments from the final completion.
 */
#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef)
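
/*
 * Example (illustrative sketch only): a send callback can skip the
 * intermediate fragments of a sendlist by checking the context value
 * and acting only on the final completion.
 *
 *	if (per_transfer_send_context == CE_SENDLIST_ITEM_CTXT)
 *		return;
 */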

/*
 * This is an opaque type that is at least large enough to hold
 * a sendlist. A sendlist can only be accessed through CE APIs,
 * but this allows a sendlist to be allocated on the run-time
 * stack. TBDXXX: un-opaque would be simpler...
 */
struct ce_sendlist {
	unsigned int word[62];
};

#define ATH_ISR_NOSCHED 0x0000 /* Do not schedule bottom half/DPC */
#define ATH_ISR_SCHED 0x0001 /* Schedule the bottom half for execution */
#define ATH_ISR_NOTMINE 0x0002 /* for shared IRQ's */

#ifdef IPA_OFFLOAD
void ce_ipa_get_resource(struct CE_handle *ce,
			 qdf_dma_addr_t *ce_sr_base_paddr,
			 uint32_t *ce_sr_ring_size,
			 qdf_dma_addr_t *ce_reg_paddr);
#else
/**
 * ce_ipa_get_resource() - get uc resource on copyengine
 * @ce: copyengine context
 * @ce_sr_base_paddr: copyengine source ring base physical address
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * Copy engine should release resource to micro controller.
 * Micro controller needs
 * - Copy engine source descriptor base address
 * - Copy engine source descriptor size
 * - PCI BAR address to access copy engine register
 *
 * Return: None
 */
static inline void ce_ipa_get_resource(struct CE_handle *ce,
				       qdf_dma_addr_t *ce_sr_base_paddr,
				       uint32_t *ce_sr_ring_size,
				       qdf_dma_addr_t *ce_reg_paddr)
{
}
#endif /* IPA_OFFLOAD */

static inline void ce_pkt_error_count_incr(
	struct HIF_CE_state *_hif_state,
	enum ol_ath_hif_pkt_ecodes _hif_ecode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(_hif_state);

	if (_hif_ecode == HIF_PIPE_NO_RESOURCE)
		scn->pkt_stats.hif_pipe_no_resrc_count += 1;
}

bool ce_check_rx_pending(struct CE_state *CE_state);
void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id);
#if defined(FEATURE_LRO)
int ce_lro_flush_cb_register(struct hif_opaque_softc *scn,
			     void (handler)(void *),
			     void *(lro_init_handler)(void));
int ce_lro_flush_cb_deregister(struct hif_opaque_softc *hif_hdl,
			       void (lro_deinit_cb)(void *));
#endif
struct ce_ops *ce_services_srng(void);
struct ce_ops *ce_services_legacy(void);
bool ce_srng_based(struct hif_softc *scn);
/* Forward declaration */
struct CE_ring_state;

struct ce_ops {
	uint32_t (*ce_get_desc_size)(uint8_t ring_type);
	void (*ce_ring_setup)(struct hif_softc *scn, uint8_t ring_type,
			      uint32_t ce_id, struct CE_ring_state *ring,
			      struct CE_attr *attr);
	int (*ce_send_nolock)(struct CE_handle *copyeng,
			      void *per_transfer_context,
			      qdf_dma_addr_t buffer,
			      uint32_t nbytes,
			      uint32_t transfer_id,
			      uint32_t flags,
			      uint32_t user_flags);
	int (*ce_sendlist_send)(struct CE_handle *copyeng,
				void *per_transfer_context,
				struct ce_sendlist *sendlist,
				unsigned int transfer_id);
	QDF_STATUS (*ce_revoke_recv_next)(struct CE_handle *copyeng,
					  void **per_CE_contextp,
					  void **per_transfer_contextp,
					  qdf_dma_addr_t *bufferp);
	QDF_STATUS (*ce_cancel_send_next)(struct CE_handle *copyeng,
					  void **per_CE_contextp,
					  void **per_transfer_contextp,
					  qdf_dma_addr_t *bufferp,
					  unsigned int *nbytesp,
					  unsigned int *transfer_idp,
					  uint32_t *toeplitz_hash_result);
	int (*ce_recv_buf_enqueue)(struct CE_handle *copyeng,
				   void *per_recv_context,
				   qdf_dma_addr_t buffer);
	bool (*watermark_int)(struct CE_state *CE_state, unsigned int *flags);
	int (*ce_completed_recv_next_nolock)(struct CE_state *CE_state,
					     void **per_CE_contextp,
					     void **per_transfer_contextp,
					     qdf_dma_addr_t *bufferp,
					     unsigned int *nbytesp,
					     unsigned int *transfer_idp,
					     unsigned int *flagsp);
	int (*ce_completed_send_next_nolock)(struct CE_state *CE_state,
					     void **per_CE_contextp,
					     void **per_transfer_contextp,
					     qdf_dma_addr_t *bufferp,
					     unsigned int *nbytesp,
					     unsigned int *transfer_idp,
					     unsigned int *sw_idx,
					     unsigned int *hw_idx,
					     uint32_t *toeplitz_hash_result);
	unsigned int (*ce_recv_entries_done_nolock)(struct hif_softc *scn,
						    struct CE_state *CE_state);
	unsigned int (*ce_send_entries_done_nolock)(struct hif_softc *scn,
						    struct CE_state *CE_state);
	void (*ce_per_engine_handler_adjust)(struct CE_state *CE_state,
					     int disable_copy_compl_intr);
	void (*ce_prepare_shadow_register_v2_cfg)(struct hif_softc *scn,
			struct pld_shadow_reg_v2_cfg **shadow_config,
			int *num_shadow_registers_configured);
};

int hif_ce_bus_early_suspend(struct hif_softc *scn);
int hif_ce_bus_late_resume(struct hif_softc *scn);
#endif /* __COPY_ENGINE_API_H__ */