/*
 * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __COPY_ENGINE_API_H__
#define __COPY_ENGINE_API_H__

#include "pld_common.h"
#include "ce_main.h"
#include "hif_main.h"

/* TBDXXX: Use int return values for consistency with Target */

/* TBDXXX: Perhaps merge Host/Target-->common */

/*
 * Copy Engine support: low-level Target-side Copy Engine API.
 * This is a hardware access layer used by code that understands
 * how to use copy engines.
 */

/*
 * A "struct CE_handle *" serves as an opaque pointer-sized
 * handle to a specific copy engine.
 */
struct CE_handle;

/*
 * "Send Completion" callback type for Send Completion Notification.
 *
 * If a Send Completion callback is registered and one or more sends
 * have completed, the callback is invoked.
 *
 * per_ce_send_context is a context supplied by the calling layer
 * (via ce_send_cb_register). It is associated with a copy engine.
 *
 * per_transfer_send_context is a context supplied by the calling layer
 * (via the "send" call). It may differ for each invocation of send.
 *
 * The buffer parameter is the address of the first byte of the first
 * buffer sent (when more than one buffer was sent).
 *
 * nbytes is the number of bytes of that buffer that were sent.
 *
 * transfer_id matches the value used when the buffer or
 * buf_list was sent.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
typedef void (*ce_send_cb)(struct CE_handle *copyeng,
                           void *per_ce_send_context,
                           void *per_transfer_send_context,
                           qdf_dma_addr_t buffer,
                           unsigned int nbytes,
                           unsigned int transfer_id,
                           unsigned int sw_index,
                           unsigned int hw_index,
                           uint32_t toeplitz_hash_result);
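
/*
 * Example (illustrative sketch only, not part of this API): a minimal
 * send-completion handler matching the ce_send_cb signature. The names
 * my_send_done, my_pipe_ctx, and my_free_tx_buffer are hypothetical.
 *
 *	static void my_send_done(struct CE_handle *copyeng,
 *				 void *per_ce_send_context,
 *				 void *per_transfer_send_context,
 *				 qdf_dma_addr_t buffer,
 *				 unsigned int nbytes,
 *				 unsigned int transfer_id,
 *				 unsigned int sw_index,
 *				 unsigned int hw_index,
 *				 uint32_t toeplitz_hash_result)
 *	{
 *		struct my_pipe_ctx *pipe = per_ce_send_context;
 *
 *		// per_transfer_send_context is whatever was passed to
 *		// the send call for this transfer, e.g. an nbuf to free.
 *		my_free_tx_buffer(pipe, per_transfer_send_context, nbytes);
 *	}
 */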

/*
 * "Buffer Received" callback type for Buffer Received Notification.
 *
 * Implementation note: Pops 1 completed recv buffer from Dest ring
 */
typedef void (*CE_recv_cb)(struct CE_handle *copyeng,
                           void *per_CE_recv_context,
                           void *per_transfer_recv_context,
                           qdf_dma_addr_t buffer,
                           unsigned int nbytes,
                           unsigned int transfer_id,
                           unsigned int flags);

/*
 * Copy Engine Watermark callback type.
 *
 * Allows upper layers to be notified when watermarks are reached:
 *     space is available and/or running short in a source ring
 *     buffers are exhausted and/or abundant in a destination ring
 *
 * The flags parameter indicates which condition triggered this
 * callback. See CE_WM_FLAG_*.
 *
 * Watermark APIs are provided to allow upper layers to "batch"
 * descriptor processing and to throttle/unthrottle.
 */
typedef void (*CE_watermark_cb)(struct CE_handle *copyeng,
                                void *per_CE_wm_context, unsigned int flags);

#define CE_WM_FLAG_SEND_HIGH 1
#define CE_WM_FLAG_SEND_LOW 2
#define CE_WM_FLAG_RECV_HIGH 4
#define CE_WM_FLAG_RECV_LOW 8
#define CE_HTT_TX_CE 4

/* A list of buffers to be gathered and sent */
struct ce_sendlist;

/* Copy Engine settable attributes */
struct CE_attr;

/*==================Send=====================================================*/

/* ce_send flags */
/* disable ring's byte swap, even if the default policy is to swap */
#define CE_SEND_FLAG_SWAP_DISABLE 1

/*
 * Queue a source buffer to be sent to an anonymous destination buffer.
 * copyeng - which copy engine to use
 * buffer - address of buffer
 * nbytes - number of bytes to send
 * transfer_id - arbitrary ID; reflected to destination
 * flags - CE_SEND_FLAG_* values
 * Returns 0 on success; otherwise an error status.
 *
 * Note: If no flags are specified, use CE's default data swap mode.
 *
 * Implementation note: pushes 1 buffer to Source ring
 */
int ce_send(struct CE_handle *copyeng,
            void *per_transfer_send_context,
            qdf_dma_addr_t buffer,
            unsigned int nbytes,
            unsigned int transfer_id,
            unsigned int flags,
            unsigned int user_flags);
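
/*
 * Example (illustrative sketch only): queueing one DMA-mapped buffer.
 * The names my_ce, my_tx_ctx, my_paddr, MY_XFER_ID, and
 * handle_send_failure are hypothetical; error handling is elided.
 *
 *	int status;
 *
 *	status = ce_send(my_ce,		// struct CE_handle *
 *			 my_tx_ctx,	// echoed to the send callback
 *			 my_paddr,	// qdf_dma_addr_t of the payload
 *			 len,		// bytes to send
 *			 MY_XFER_ID,	// reflected to destination
 *			 0,		// default swap policy
 *			 0);		// user_flags
 *	if (status != 0)
 *		// ring full or engine error; caller keeps the buffer
 *		handle_send_failure(my_tx_ctx);
 */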

#ifdef WLAN_FEATURE_FASTPATH
int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
                 unsigned int transfer_id, uint32_t download_len);
#endif

void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls);
extern qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,
                                qdf_nbuf_t msdu,
                                uint32_t transfer_id,
                                uint32_t len,
                                uint32_t sendhead);

extern int ce_send_single(struct CE_handle *ce_tx_hdl,
                          qdf_nbuf_t msdu,
                          uint32_t transfer_id,
                          uint32_t len);
/*
 * Register a Send Callback function.
 * The registered function is called as soon as the contents of a Send
 * have reached the destination, unless disable_interrupts is
 * requested. In that case, the callback is invoked when the
 * send status is polled, shortly after the send completes.
 */
void ce_send_cb_register(struct CE_handle *copyeng,
                         ce_send_cb fn_ptr,
                         void *per_ce_send_context, int disable_interrupts);
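
/*
 * Example (illustrative sketch only): registering the hypothetical
 * my_send_done handler sketched above, with interrupts left enabled.
 *
 *	ce_send_cb_register(my_ce, my_send_done, my_pipe_ctx, 0);
 */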

/*
 * Return the size of a SendList. This allows the caller to allocate
 * a SendList while the SendList structure remains opaque.
 */
unsigned int ce_sendlist_sizeof(void);

/* Initialize a sendlist */
void ce_sendlist_init(struct ce_sendlist *sendlist);

/* Append a simple buffer (address/length) to a sendlist. */
int ce_sendlist_buf_add(struct ce_sendlist *sendlist,
                        qdf_dma_addr_t buffer,
                        unsigned int nbytes,
                        /* OR-ed with internal flags */
                        uint32_t flags,
                        uint32_t user_flags);

/*
 * Queue a "sendlist" of buffers to be sent using gather to a single
 * anonymous destination buffer
 * copyeng - which copy engine to use
 * sendlist - list of simple buffers to send using gather
 * transfer_id - arbitrary ID; reflected to destination
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes multiple buffers with Gather to Source ring.
 */
int ce_sendlist_send(struct CE_handle *copyeng,
                     void *per_transfer_send_context,
                     struct ce_sendlist *sendlist,
                     unsigned int transfer_id);
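
/*
 * Example (illustrative sketch only): gathering two fragments into one
 * transfer. A struct ce_sendlist may live on the run-time stack (see the
 * opaque definition near the end of this header). hdr_paddr,
 * payload_paddr, MY_XFER_ID, and handle_send_failure are hypothetical.
 *
 *	struct ce_sendlist sl;
 *
 *	ce_sendlist_init(&sl);
 *	ce_sendlist_buf_add(&sl, hdr_paddr, hdr_len, 0, 0);
 *	ce_sendlist_buf_add(&sl, payload_paddr, payload_len, 0, 0);
 *	if (ce_sendlist_send(my_ce, my_tx_ctx, &sl, MY_XFER_ID) != 0)
 *		handle_send_failure(my_tx_ctx);
 */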

/*==================Recv=====================================================*/

/*
 * Make a buffer available to receive. The buffer must be at least of a
 * minimal size appropriate for this copy engine (src_sz_max attribute).
 * copyeng - which copy engine to use
 * per_transfer_recv_context - context passed back to caller's recv_cb
 * buffer - address of buffer in CE space
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes a buffer to Dest ring.
 */
int ce_recv_buf_enqueue(struct CE_handle *copyeng,
                        void *per_transfer_recv_context,
                        qdf_dma_addr_t buffer);

/*
 * Register a Receive Callback function.
 * The registered function is called as soon as data is received
 * from the source.
 */
void ce_recv_cb_register(struct CE_handle *copyeng,
                         CE_recv_cb fn_ptr,
                         void *per_CE_recv_context,
                         int disable_interrupts);
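
/*
 * Example (illustrative sketch only): registering a receive handler and
 * pre-posting receive buffers. my_recv_done, my_pipe_ctx, NUM_RX_BUFS,
 * and the my_rx_ctx[]/rx_paddr[] arrays of DMA-mapped buffers are
 * hypothetical.
 *
 *	int i;
 *
 *	ce_recv_cb_register(my_ce, my_recv_done, my_pipe_ctx, 0);
 *	for (i = 0; i < NUM_RX_BUFS; i++)
 *		if (ce_recv_buf_enqueue(my_ce, my_rx_ctx[i], rx_paddr[i]))
 *			break;	// Dest ring full
 */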

/*==================CE Watermark=============================================*/

/*
 * Register a Watermark Callback function.
 * The registered function is called as soon as a watermark level
 * is crossed. A Watermark Callback function is free to
 * handle received data "en masse"; but then some coordination
 * is required with a registered Receive Callback function.
 * [Suggestion: Either handle Receives in a Receive Callback
 * or en masse in a Watermark Callback; but not both.]
 */
void ce_watermark_cb_register(struct CE_handle *copyeng,
                              CE_watermark_cb fn_ptr,
                              void *per_CE_wm_context);

/*
 * Set low/high watermarks for the send/source side of a copy engine.
 *
 * Typically, the destination side CPU manages watermarks for
 * the receive side and the source side CPU manages watermarks
 * for the send side.
 *
 * A low watermark of 0 is never hit (so the watermark function
 * will never be called for a Low Watermark condition).
 *
 * A high watermark equal to nentries is never hit (so the
 * watermark function will never be called for a High Watermark
 * condition).
 */
void ce_send_watermarks_set(struct CE_handle *copyeng,
                            unsigned int low_alert_nentries,
                            unsigned int high_alert_nentries);
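
/*
 * Example (illustrative sketch only): registering a watermark callback
 * and alerting at 32/256 entries. The handler names and my_pipe_ctx are
 * hypothetical; a typical use is to throttle and unthrottle the upper
 * layer's transmit path.
 *
 *	static void my_wm_cb(struct CE_handle *copyeng,
 *			     void *per_CE_wm_context, unsigned int flags)
 *	{
 *		struct my_pipe_ctx *pipe = per_CE_wm_context;
 *
 *		if (flags & CE_WM_FLAG_SEND_LOW)
 *			my_handle_send_low_wm(pipe);
 *		if (flags & CE_WM_FLAG_SEND_HIGH)
 *			my_handle_send_high_wm(pipe);
 *	}
 *
 *	ce_watermark_cb_register(my_ce, my_wm_cb, my_pipe_ctx);
 *	ce_send_watermarks_set(my_ce, 32, 256);
 */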

/* Set low/high watermarks for the receive/destination side of a copy engine. */
void ce_recv_watermarks_set(struct CE_handle *copyeng,
                            unsigned int low_alert_nentries,
                            unsigned int high_alert_nentries);

/*
 * Return the number of entries that can be queued
 * to a ring at an instant in time.
 *
 * For a source ring, this does not imply that destination-side
 * buffers are available; it merely indicates descriptor space
 * in the source ring.
 *
 * For a destination ring, this does not imply that previously
 * received buffers have been processed; it merely indicates
 * descriptor space in the destination ring.
 *
 * Mainly for use with CE Watermark callback.
 */
unsigned int ce_send_entries_avail(struct CE_handle *copyeng);
unsigned int ce_recv_entries_avail(struct CE_handle *copyeng);

/*
 * Return the number of entries in the ring that are ready
 * to be processed by software.
 *
 * For a source ring, this is the number of descriptors that have
 * been completed and can now be overwritten with new send
 * descriptors.
 *
 * For a destination ring, this is the number of descriptors that
 * are available to be processed (newly received buffers).
 */
unsigned int ce_send_entries_done(struct CE_handle *copyeng);
unsigned int ce_recv_entries_done(struct CE_handle *copyeng);

/* recv flags */
/* Data is byte-swapped */
#define CE_RECV_FLAG_SWAPPED 1

/*
 * Supply data for the next completed unprocessed receive descriptor.
 *
 * For use
 *     with the CE Watermark callback,
 *     in a recv_cb function when processing buf_lists,
 *     in a recv_cb function, to reduce the number of recv_cb invocations.
 *
 * Implementation note: Pops buffer from Dest ring.
 */
int ce_completed_recv_next(struct CE_handle *copyeng,
                           void **per_CE_contextp,
                           void **per_transfer_contextp,
                           qdf_dma_addr_t *bufferp,
                           unsigned int *nbytesp,
                           unsigned int *transfer_idp,
                           unsigned int *flagsp);
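
/*
 * Example (illustrative sketch only): draining all completed receives in
 * one pass, e.g. from a watermark callback, assuming a zero return on
 * success. my_process_rx is hypothetical.
 *
 *	void *ce_ctx, *xfer_ctx;
 *	qdf_dma_addr_t paddr;
 *	unsigned int nbytes, id, flags;
 *
 *	while (ce_completed_recv_next(my_ce, &ce_ctx, &xfer_ctx,
 *				      &paddr, &nbytes, &id, &flags) == 0)
 *		my_process_rx(xfer_ctx, nbytes,
 *			      flags & CE_RECV_FLAG_SWAPPED);
 */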

/*
 * Supply data for the next completed unprocessed send descriptor.
 *
 * For use
 *     with the CE Watermark callback,
 *     in a send_cb function, to reduce the number of send_cb invocations.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
int ce_completed_send_next(struct CE_handle *copyeng,
                           void **per_CE_contextp,
                           void **per_transfer_contextp,
                           qdf_dma_addr_t *bufferp,
                           unsigned int *nbytesp,
                           unsigned int *transfer_idp,
                           unsigned int *sw_idx,
                           unsigned int *hw_idx,
                           uint32_t *toeplitz_hash_result);

/*==================CE Engine Initialization=================================*/

/* Initialize an instance of a CE */
struct CE_handle *ce_init(struct hif_softc *scn,
                          unsigned int CE_id, struct CE_attr *attr);

/*==================CE Engine Shutdown=======================================*/
/*
 * Support clean shutdown by allowing the caller to revoke
 * receive buffers. Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
                    void **per_CE_contextp,
                    void **per_transfer_contextp,
                    qdf_dma_addr_t *bufferp);

/*
 * Support clean shutdown by allowing the caller to cancel
 * pending sends. Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
                    void **per_CE_contextp,
                    void **per_transfer_contextp,
                    qdf_dma_addr_t *bufferp,
                    unsigned int *nbytesp,
                    unsigned int *transfer_idp,
                    uint32_t *toeplitz_hash_result);
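
/*
 * Example (illustrative sketch only): reclaiming buffers during a clean
 * shutdown, after target-side DMA has been stopped. my_free_rx and
 * my_free_tx are hypothetical reclaim helpers.
 *
 *	void *ce_ctx, *xfer_ctx;
 *	qdf_dma_addr_t paddr;
 *	unsigned int nbytes, id;
 *	uint32_t hash;
 *
 *	while (ce_revoke_recv_next(my_ce, &ce_ctx, &xfer_ctx,
 *				   &paddr) == QDF_STATUS_SUCCESS)
 *		my_free_rx(xfer_ctx);
 *	while (ce_cancel_send_next(my_ce, &ce_ctx, &xfer_ctx, &paddr,
 *				   &nbytes, &id, &hash) == QDF_STATUS_SUCCESS)
 *		my_free_tx(xfer_ctx);
 *	ce_fini(my_ce);
 */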

void ce_fini(struct CE_handle *copyeng);

/*==================CE Interrupt Handlers====================================*/
void ce_per_engine_service_any(int irq, struct hif_softc *scn);
int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id);
void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int CE_id);

/*===================CE cmpl interrupt Enable/Disable =======================*/
void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn);
void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn);

/* API to check if any of the copy engine pipes has
 * pending frames for processing
 */
bool ce_get_rx_pending(struct hif_softc *scn);

/* CE_attr.flags values */
#define CE_ATTR_NO_SNOOP 0x01 /* Use NonSnooping PCIe accesses? */
#define CE_ATTR_BYTE_SWAP_DATA 0x02 /* Byte swap data words */
#define CE_ATTR_SWIZZLE_DESCRIPTORS 0x04 /* Swizzle descriptors? */
#define CE_ATTR_DISABLE_INTR 0x08 /* no interrupt on copy completion */
#define CE_ATTR_ENABLE_POLL 0x10 /* poll for residue descriptors */
#define CE_ATTR_DIAG 0x20 /* Diag CE */

/**
 * struct CE_attr - Attributes of an instance of a Copy Engine
 * @flags: CE_ATTR_* values
 * @priority: TBD
 * @src_nentries: #entries in source ring - Must be a power of 2
 * @src_sz_max: Max source send size for this CE. This is also the minimum
 *              size of a destination buffer
 * @dest_nentries: #entries in destination ring - Must be a power of 2
 * @reserved: Future Use
 */
struct CE_attr {
	unsigned int flags;
	unsigned int priority;
	unsigned int src_nentries;
	unsigned int src_sz_max;
	unsigned int dest_nentries;
	void *reserved;
};
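
/*
 * Example (illustrative sketch only): describing and initializing one
 * copy engine. The ring sizes and buffer limit shown are arbitrary;
 * my_scn is a hypothetical struct hif_softc pointer, and the NULL check
 * assumes ce_init reports failure that way.
 *
 *	struct CE_attr attr = {
 *		.flags = 0,		// interrupts enabled, no swap
 *		.src_nentries = 512,	// power of 2
 *		.src_sz_max = 2048,	// also minimum dest buffer size
 *		.dest_nentries = 512,	// power of 2
 *	};
 *	struct CE_handle *ce = ce_init(my_scn, 1, &attr);
 *
 *	if (!ce)
 *		return;	// initialization failed
 */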

/*
 * When using sendlist_send to transfer multiple buffer fragments, the
 * transfer context of each fragment, except the last one, will be filled
 * with CE_SENDLIST_ITEM_CTXT. CE_completed_send will return success for
 * each fragment done with send, and the transfer context for all but the
 * last fragment will be CE_SENDLIST_ITEM_CTXT. The upper layer can use
 * this to identify the status of a send completion.
 */
#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef)
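
/*
 * Example (illustrative sketch only): skipping intermediate sendlist
 * fragments inside a send-completion handler such as the hypothetical
 * my_send_done above.
 *
 *	if (per_transfer_send_context == CE_SENDLIST_ITEM_CTXT)
 *		// intermediate gather fragment; the caller's context
 *		// arrives with the final fragment's completion
 *		return;
 *	my_free_tx_buffer(pipe, per_transfer_send_context, nbytes);
 */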

/*
 * This is an opaque type that is at least large enough to hold
 * a sendlist. A sendlist can only be accessed through CE APIs,
 * but this allows a sendlist to be allocated on the run-time
 * stack. TBDXXX: un-opaque would be simpler...
 */
struct ce_sendlist {
	unsigned int word[62];
};

#define ATH_ISR_NOSCHED 0x0000 /* Do not schedule bottom half/DPC */
#define ATH_ISR_SCHED 0x0001 /* Schedule the bottom half for execution */
#define ATH_ISR_NOTMINE 0x0002 /* for shared IRQs */

#ifdef IPA_OFFLOAD
void ce_ipa_get_resource(struct CE_handle *ce,
                         qdf_shared_mem_t **ce_sr,
                         uint32_t *ce_sr_ring_size,
                         qdf_dma_addr_t *ce_reg_paddr);
#else
/**
 * ce_ipa_get_resource() - get uc resource on copyengine
 * @ce: copyengine context
 * @ce_sr: copyengine source ring resource info
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * Copy engine should release resource to micro controller.
 * Micro controller needs
 *  - Copy engine source descriptor base address
 *  - Copy engine source descriptor size
 *  - PCI BAR address to access copy engine register
 *
 * Return: None
 */
static inline void ce_ipa_get_resource(struct CE_handle *ce,
                                       qdf_shared_mem_t **ce_sr,
                                       uint32_t *ce_sr_ring_size,
                                       qdf_dma_addr_t *ce_reg_paddr)
{
}
#endif /* IPA_OFFLOAD */

static inline void ce_pkt_error_count_incr(
	struct HIF_CE_state *_hif_state,
	enum ol_ath_hif_pkt_ecodes _hif_ecode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(_hif_state);

	if (_hif_ecode == HIF_PIPE_NO_RESOURCE)
		scn->pkt_stats.hif_pipe_no_resrc_count += 1;
}

bool ce_check_rx_pending(struct CE_state *CE_state);
void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id);
struct ce_ops *ce_services_srng(void);
struct ce_ops *ce_services_legacy(void);
bool ce_srng_based(struct hif_softc *scn);
/* Forward declaration */
struct CE_ring_state;

struct ce_ops {
	uint32_t (*ce_get_desc_size)(uint8_t ring_type);
	int (*ce_ring_setup)(struct hif_softc *scn, uint8_t ring_type,
			     uint32_t ce_id, struct CE_ring_state *ring,
			     struct CE_attr *attr);
	int (*ce_send_nolock)(struct CE_handle *copyeng,
			      void *per_transfer_context,
			      qdf_dma_addr_t buffer,
			      uint32_t nbytes,
			      uint32_t transfer_id,
			      uint32_t flags,
			      uint32_t user_flags);
	int (*ce_sendlist_send)(struct CE_handle *copyeng,
				void *per_transfer_context,
				struct ce_sendlist *sendlist,
				unsigned int transfer_id);
	QDF_STATUS (*ce_revoke_recv_next)(struct CE_handle *copyeng,
					  void **per_CE_contextp,
					  void **per_transfer_contextp,
					  qdf_dma_addr_t *bufferp);
	QDF_STATUS (*ce_cancel_send_next)(struct CE_handle *copyeng,
					  void **per_CE_contextp,
					  void **per_transfer_contextp,
					  qdf_dma_addr_t *bufferp,
					  unsigned int *nbytesp,
					  unsigned int *transfer_idp,
					  uint32_t *toeplitz_hash_result);
	int (*ce_recv_buf_enqueue)(struct CE_handle *copyeng,
				   void *per_recv_context,
				   qdf_dma_addr_t buffer);
	bool (*watermark_int)(struct CE_state *CE_state, unsigned int *flags);
	int (*ce_completed_recv_next_nolock)(struct CE_state *CE_state,
					     void **per_CE_contextp,
					     void **per_transfer_contextp,
					     qdf_dma_addr_t *bufferp,
					     unsigned int *nbytesp,
					     unsigned int *transfer_idp,
					     unsigned int *flagsp);
	int (*ce_completed_send_next_nolock)(struct CE_state *CE_state,
					     void **per_CE_contextp,
					     void **per_transfer_contextp,
					     qdf_dma_addr_t *bufferp,
					     unsigned int *nbytesp,
					     unsigned int *transfer_idp,
					     unsigned int *sw_idx,
					     unsigned int *hw_idx,
					     uint32_t *toeplitz_hash_result);
	unsigned int (*ce_recv_entries_done_nolock)(struct hif_softc *scn,
						    struct CE_state *CE_state);
	unsigned int (*ce_send_entries_done_nolock)(struct hif_softc *scn,
						    struct CE_state *CE_state);
	void (*ce_per_engine_handler_adjust)(struct CE_state *CE_state,
					     int disable_copy_compl_intr);
	void (*ce_prepare_shadow_register_v2_cfg)(struct hif_softc *scn,
			struct pld_shadow_reg_v2_cfg **shadow_config,
			int *num_shadow_registers_configured);
};
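
/*
 * Example (illustrative sketch only): how a caller might bind the
 * matching service backend; ce_srng_based() distinguishes SRNG-based
 * targets from legacy ones.
 *
 *	struct ce_ops *ops = ce_srng_based(scn) ?
 *				ce_services_srng() : ce_services_legacy();
 */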

int hif_ce_bus_early_suspend(struct hif_softc *scn);
int hif_ce_bus_late_resume(struct hif_softc *scn);
#endif /* __COPY_ENGINE_API_H__ */