/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#ifndef __COPY_ENGINE_API_H__
#define __COPY_ENGINE_API_H__

#include "ce_main.h"
#include "hif_main.h"

/* TBDXXX: Use int return values for consistency with Target */

/* TBDXXX: Perhaps merge Host/Target-->common */

/*
 * Copy Engine support: low-level Target-side Copy Engine API.
 * This is a hardware access layer used by code that understands
 * how to use copy engines.
 */

/*
 * A "struct CE_handle *" serves as an opaque pointer-sized
 * handle to a specific copy engine.
 */
struct CE_handle;

/*
 * "Send Completion" callback type for Send Completion Notification.
 *
 * If a Send Completion callback is registered and one or more sends
 * have completed, the callback is invoked.
 *
 * per_ce_send_context is a context supplied by the calling layer
 * (via ce_send_cb_register). It is associated with a copy engine.
 *
 * per_transfer_send_context is a context supplied by the calling layer
 * (via the "send" call). It may be different for each invocation
 * of send.
 *
 * The buffer parameter is the address of the first byte of the first
 * buffer sent (when more than one buffer was sent).
 *
 * nbytes is the number of bytes of that buffer that were sent.
 *
 * transfer_id matches the value used when the buffer or
 * buf_list was sent.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
typedef void (*ce_send_cb)(struct CE_handle *copyeng,
                           void *per_ce_send_context,
                           void *per_transfer_send_context,
                           qdf_dma_addr_t buffer,
                           unsigned int nbytes,
                           unsigned int transfer_id,
                           unsigned int sw_index,
                           unsigned int hw_index,
                           uint32_t toeplitz_hash_result);

/*
 * "Buffer Received" callback type for Buffer Received Notification.
 *
 * Implementation note: Pops 1 completed recv buffer from Dest ring
 */
typedef void (*CE_recv_cb)(struct CE_handle *copyeng,
                           void *per_CE_recv_context,
                           void *per_transfer_recv_context,
                           qdf_dma_addr_t buffer,
                           unsigned int nbytes,
                           unsigned int transfer_id,
                           unsigned int flags);

/*
 * Copy Engine Watermark callback type.
 *
 * Allows upper layers to be notified when watermarks are reached:
 *   space is available and/or running short in a source ring
 *   buffers are exhausted and/or abundant in a destination ring
 *
 * The flags parameter indicates which condition triggered this
 * callback. See CE_WM_FLAG_*.
 *
 * Watermark APIs are provided to allow upper layers to "batch"
 * descriptor processing and to throttle/unthrottle.
 */
typedef void (*CE_watermark_cb)(struct CE_handle *copyeng,
                                void *per_CE_wm_context, unsigned int flags);

#define CE_WM_FLAG_SEND_HIGH 1
#define CE_WM_FLAG_SEND_LOW 2
#define CE_WM_FLAG_RECV_HIGH 4
#define CE_WM_FLAG_RECV_LOW 8
#define CE_HTT_TX_CE 4

/* A list of buffers to be gathered and sent */
struct ce_sendlist;

/* Copy Engine settable attributes */
struct CE_attr;

/*==================Send=====================================================*/

/* ce_send flags */
/* disable ring's byte swap, even if the default policy is to swap */
#define CE_SEND_FLAG_SWAP_DISABLE 1

/*
 * Queue a source buffer to be sent to an anonymous destination buffer.
 *   copyeng     - which copy engine to use
 *   buffer      - address of buffer
 *   nbytes      - number of bytes to send
 *   transfer_id - arbitrary ID; reflected to destination
 *   flags       - CE_SEND_FLAG_* values
 * Returns 0 on success; otherwise an error status.
 *
 * Note: If no flags are specified, the CE's default data swap mode is used.
 *
 * Implementation note: pushes 1 buffer to Source ring
 */
int ce_send(struct CE_handle *copyeng,
            void *per_transfer_send_context,
            qdf_dma_addr_t buffer,
            unsigned int nbytes,
            unsigned int transfer_id,
            unsigned int flags,
            unsigned int user_flags);
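
/*
 * Illustrative sketch (not part of this API): queue one DMA-mapped
 * buffer on a copy engine. The handle "ce_hdl", the mapped address
 * "frag_paddr", the context "pkt_ctx" and the ID "xfer_id" are
 * assumed to come from the caller's setup code.
 *
 *      // flags = 0: use the CE's default data swap mode
 *      if (ce_send(ce_hdl, pkt_ctx, frag_paddr, nbytes,
 *                  xfer_id, 0, 0) != 0)
 *              qdf_print("ce_send failed: no source ring space?");
 */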

#ifdef WLAN_FEATURE_FASTPATH
int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
                 unsigned int transfer_id, uint32_t download_len);

#endif

void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls);
extern qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,
                                qdf_nbuf_t msdu,
                                uint32_t transfer_id,
                                uint32_t len,
                                uint32_t sendhead);

extern int ce_send_single(struct CE_handle *ce_tx_hdl,
                          qdf_nbuf_t msdu,
                          uint32_t transfer_id,
                          uint32_t len);
/*
 * Register a Send Callback function.
 * This function is called as soon as the contents of a Send
 * have reached the destination, unless disable_interrupts is
 * requested. In that case, the callback is invoked when the
 * send status is polled, shortly after the send completes.
 */
void ce_send_cb_register(struct CE_handle *copyeng,
                         ce_send_cb fn_ptr,
                         void *per_ce_send_context, int disable_interrupts);
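
/*
 * Sketch of a completion handler matching the ce_send_cb typedef
 * above (illustrative only; the context types and names are the
 * caller's own):
 *
 *      static void my_send_done(struct CE_handle *copyeng,
 *                               void *per_ce_send_context,
 *                               void *per_transfer_send_context,
 *                               qdf_dma_addr_t buffer,
 *                               unsigned int nbytes,
 *                               unsigned int transfer_id,
 *                               unsigned int sw_index,
 *                               unsigned int hw_index,
 *                               uint32_t toeplitz_hash_result)
 *      {
 *              // unmap and free the buffer carried in
 *              // per_transfer_send_context here
 *      }
 *
 *      ce_send_cb_register(ce_hdl, my_send_done, my_ce_ctx, 0);
 */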

/*
 * Return the size of a SendList. This allows the caller to allocate
 * a SendList while the SendList structure remains opaque.
 */
unsigned int ce_sendlist_sizeof(void);

/* Initialize a sendlist */
void ce_sendlist_init(struct ce_sendlist *sendlist);

/* Append a simple buffer (address/length) to a sendlist. */
int ce_sendlist_buf_add(struct ce_sendlist *sendlist,
                        qdf_dma_addr_t buffer,
                        unsigned int nbytes,
                        uint32_t flags, /* OR-ed with internal flags */
                        uint32_t user_flags);

/*
 * Queue a "sendlist" of buffers to be sent using gather to a single
 * anonymous destination buffer
 *   copyeng     - which copy engine to use
 *   sendlist    - list of simple buffers to send using gather
 *   transfer_id - arbitrary ID; reflected to destination
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes multiple buffers with Gather to Source ring.
 */
int ce_sendlist_send(struct CE_handle *copyeng,
                     void *per_transfer_send_context,
                     struct ce_sendlist *sendlist,
                     unsigned int transfer_id);
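
/*
 * Illustrative gather-send sketch: a sendlist lives on the stack, is
 * initialized, filled with two DMA-mapped fragments, then queued as a
 * single transfer. The addresses hdr_paddr/payload_paddr and the
 * lengths are assumed to come from the caller's mapping code.
 *
 *      struct ce_sendlist sl;
 *
 *      ce_sendlist_init(&sl);
 *      ce_sendlist_buf_add(&sl, hdr_paddr, hdr_len, 0, 0);
 *      ce_sendlist_buf_add(&sl, payload_paddr, payload_len, 0, 0);
 *      if (ce_sendlist_send(ce_hdl, pkt_ctx, &sl, xfer_id) != 0)
 *              qdf_print("gather send failed");
 */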

/*==================Recv=====================================================*/

/*
 * Make a buffer available to receive. The buffer must be at least of a
 * minimal size appropriate for this copy engine (src_sz_max attribute).
 *   copyeng                   - which copy engine to use
 *   per_transfer_recv_context - context passed back to caller's recv_cb
 *   buffer                    - address of buffer in CE space
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes a buffer to Dest ring.
 */
int ce_recv_buf_enqueue(struct CE_handle *copyeng,
                        void *per_transfer_recv_context,
                        qdf_dma_addr_t buffer);

/*
 * Register a Receive Callback function.
 * This function is called as soon as data is received
 * from the source.
 */
void ce_recv_cb_register(struct CE_handle *copyeng,
                         CE_recv_cb fn_ptr,
                         void *per_CE_recv_context,
                         int disable_interrupts);
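
/*
 * Illustrative receive-path sketch: register a handler, then keep the
 * destination ring replenished. "struct rx_buf" and
 * "alloc_and_map_rx_buf" are hypothetical stand-ins for the caller's
 * nbuf allocation and DMA-mapping logic.
 *
 *      static void my_recv_cb(struct CE_handle *copyeng, void *ce_ctx,
 *                             void *xfer_ctx, qdf_dma_addr_t buffer,
 *                             unsigned int nbytes,
 *                             unsigned int transfer_id,
 *                             unsigned int flags)
 *      {
 *              // consume nbytes of the buffer, then recycle it:
 *              ce_recv_buf_enqueue(copyeng, xfer_ctx, buffer);
 *      }
 *
 *      ce_recv_cb_register(ce_hdl, my_recv_cb, my_ce_ctx, 0);
 *      while (ce_recv_entries_avail(ce_hdl)) {
 *              struct rx_buf *rb = alloc_and_map_rx_buf();
 *
 *              ce_recv_buf_enqueue(ce_hdl, rb, rb->paddr);
 *      }
 */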

/*==================CE Watermark=============================================*/

/*
 * Register a Watermark Callback function.
 * This function is called as soon as a watermark level
 * is crossed. A Watermark Callback function is free to
 * handle received data "en masse"; but then some coordination
 * is required with a registered Receive Callback function.
 * [Suggestion: Either handle Receives in a Receive Callback
 * or en masse in a Watermark Callback; but not both.]
 */
void ce_watermark_cb_register(struct CE_handle *copyeng,
                              CE_watermark_cb fn_ptr,
                              void *per_CE_wm_context);

/*
 * Set low/high watermarks for the send/source side of a copy engine.
 *
 * Typically, the destination side CPU manages watermarks for
 * the receive side and the source side CPU manages watermarks
 * for the send side.
 *
 * A low watermark of 0 is never hit (so the watermark function
 * will never be called for a Low Watermark condition).
 *
 * A high watermark equal to nentries is never hit (so the
 * watermark function will never be called for a High Watermark
 * condition).
 */
void ce_send_watermarks_set(struct CE_handle *copyeng,
                            unsigned int low_alert_nentries,
                            unsigned int high_alert_nentries);

/* Set low/high watermarks for the receive/destination side of copy engine. */
void ce_recv_watermarks_set(struct CE_handle *copyeng,
                            unsigned int low_alert_nentries,
                            unsigned int high_alert_nentries);
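
/*
 * Illustrative throttling sketch: alert when the source ring is
 * nearly full, and again once it has drained. The entry counts are
 * example values only (per the note above, 0 and nentries never
 * fire); my_tx_pause/my_tx_resume are hypothetical throttle hooks.
 *
 *      static void my_wm_cb(struct CE_handle *copyeng, void *wm_ctx,
 *                           unsigned int flags)
 *      {
 *              if (flags & CE_WM_FLAG_SEND_HIGH)
 *                      my_tx_pause();   // ring nearly full
 *              if (flags & CE_WM_FLAG_SEND_LOW)
 *                      my_tx_resume();  // ring drained again
 *      }
 *
 *      ce_watermark_cb_register(ce_hdl, my_wm_cb, my_wm_ctx);
 *      ce_send_watermarks_set(ce_hdl, 2, ring_entries - 2);
 */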

/*
 * Return the number of entries that can be queued
 * to a ring at an instant in time.
 *
 * For source ring, does not imply that destination-side
 * buffers are available; merely indicates descriptor space
 * in the source ring.
 *
 * For destination ring, does not imply that previously
 * received buffers have been processed; merely indicates
 * descriptor space in destination ring.
 *
 * Mainly for use with CE Watermark callback.
 */
unsigned int ce_send_entries_avail(struct CE_handle *copyeng);
unsigned int ce_recv_entries_avail(struct CE_handle *copyeng);

/*
 * Return the number of entries in the ring that are ready
 * to be processed by software.
 *
 * For source ring, the number of descriptors that have
 * been completed and can now be overwritten with new send
 * descriptors.
 *
 * For destination ring, the number of descriptors that
 * are available to be processed (newly received buffers).
 */
unsigned int ce_send_entries_done(struct CE_handle *copyeng);
unsigned int ce_recv_entries_done(struct CE_handle *copyeng);

/* recv flags */
/* Data is byte-swapped */
#define CE_RECV_FLAG_SWAPPED 1

/*
 * Supply data for the next completed unprocessed receive descriptor.
 *
 * For use
 *   with CE Watermark callback,
 *   in a recv_cb function when processing buf_lists, or
 *   in a recv_cb function in order to mitigate recv_cb's.
 *
 * Implementation note: Pops buffer from Dest ring.
 */
int ce_completed_recv_next(struct CE_handle *copyeng,
                           void **per_CE_contextp,
                           void **per_transfer_contextp,
                           qdf_dma_addr_t *bufferp,
                           unsigned int *nbytesp,
                           unsigned int *transfer_idp,
                           unsigned int *flagsp);

/*
 * Supply data for the next completed unprocessed send descriptor.
 *
 * For use
 *   with CE Watermark callback, or
 *   in a send_cb function in order to mitigate send_cb's.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
int ce_completed_send_next(struct CE_handle *copyeng,
                           void **per_CE_contextp,
                           void **per_transfer_contextp,
                           qdf_dma_addr_t *bufferp,
                           unsigned int *nbytesp,
                           unsigned int *transfer_idp,
                           unsigned int *sw_idx,
                           unsigned int *hw_idx,
                           uint32_t *toeplitz_hash_result);
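
/*
 * Illustrative batch-drain sketch for a watermark or mitigated
 * callback: pop every completed receive descriptor in one pass
 * instead of taking one callback per buffer. Assumes the
 * 0-on-success return convention documented for the APIs above.
 *
 *      void *ce_ctx, *xfer_ctx;
 *      qdf_dma_addr_t paddr;
 *      unsigned int nbytes, id, flags;
 *
 *      while (ce_completed_recv_next(ce_hdl, &ce_ctx, &xfer_ctx,
 *                                    &paddr, &nbytes, &id,
 *                                    &flags) == 0) {
 *              // process nbytes of the buffer, then repost it:
 *              ce_recv_buf_enqueue(ce_hdl, xfer_ctx, paddr);
 *      }
 */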

/*==================CE Engine Initialization=================================*/

/* Initialize an instance of a CE */
struct CE_handle *ce_init(struct hif_softc *scn,
                          unsigned int CE_id, struct CE_attr *attr);

/*==================CE Engine Shutdown=======================================*/
/*
 * Support clean shutdown by allowing the caller to revoke
 * receive buffers. Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
                    void **per_CE_contextp,
                    void **per_transfer_contextp,
                    qdf_dma_addr_t *bufferp);

/*
 * Support clean shutdown by allowing the caller to cancel
 * pending sends. Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
                    void **per_CE_contextp,
                    void **per_transfer_contextp,
                    qdf_dma_addr_t *bufferp,
                    unsigned int *nbytesp,
                    unsigned int *transfer_idp,
                    uint32_t *toeplitz_hash_result);

void ce_fini(struct CE_handle *copyeng);
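
/*
 * Illustrative shutdown sketch: with target DMA already stopped,
 * drain both rings so the owner can unmap and free every outstanding
 * buffer, then tear the engine down.
 *
 *      void *ce_ctx, *xfer_ctx;
 *      qdf_dma_addr_t paddr;
 *      unsigned int nbytes, id;
 *      uint32_t hash;
 *
 *      while (ce_revoke_recv_next(ce_hdl, &ce_ctx, &xfer_ctx,
 *                                 &paddr) == QDF_STATUS_SUCCESS) {
 *              // free the rx buffer behind xfer_ctx
 *      }
 *      while (ce_cancel_send_next(ce_hdl, &ce_ctx, &xfer_ctx, &paddr,
 *                                 &nbytes, &id,
 *                                 &hash) == QDF_STATUS_SUCCESS) {
 *              // free the pending tx buffer behind xfer_ctx
 *      }
 *      ce_fini(ce_hdl);
 */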

/*==================CE Interrupt Handlers====================================*/
void ce_per_engine_service_any(int irq, struct hif_softc *scn);
int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id);
void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int CE_id);

/*===================CE cmpl interrupt Enable/Disable =======================*/
void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn);
void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn);

/* API to check if any of the copy engine pipes has
 * pending frames for processing
 */
bool ce_get_rx_pending(struct hif_softc *scn);

/* CE_attr.flags values */
#define CE_ATTR_NO_SNOOP 0x01            /* Use NonSnooping PCIe accesses? */
#define CE_ATTR_BYTE_SWAP_DATA 0x02      /* Byte swap data words */
#define CE_ATTR_SWIZZLE_DESCRIPTORS 0x04 /* Swizzle descriptors? */
#define CE_ATTR_DISABLE_INTR 0x08        /* no interrupt on copy completion */
#define CE_ATTR_ENABLE_POLL 0x10         /* poll for residue descriptors */

/* Attributes of an instance of a Copy Engine */
struct CE_attr {
        unsigned int flags;         /* CE_ATTR_* values */
        unsigned int priority;      /* TBD */
        unsigned int src_nentries;  /* #entries in source ring -
                                     * Must be a power of 2 */
        unsigned int src_sz_max;    /* Max source send size for this CE.
                                     * This is also the minimum size of
                                     * a destination buffer. */
        unsigned int dest_nentries; /* #entries in destination ring -
                                     * Must be a power of 2 */
        void *reserved;             /* Future use */
};
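
/*
 * Illustrative bring-up sketch: describe a CE and instantiate it.
 * The ring sizes and max send size are example values only (both
 * ring counts must be powers of 2); "scn" is the caller's hif_softc
 * and the CE id of 1 is arbitrary.
 *
 *      struct CE_attr attr = {
 *              .flags = 0,
 *              .src_nentries = 512,
 *              .src_sz_max = 2048,
 *              .dest_nentries = 512,
 *      };
 *      struct CE_handle *ce_hdl = ce_init(scn, 1, &attr);
 *
 *      if (!ce_hdl)
 *              qdf_print("CE init failed");
 */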

/*
 * When using sendlist_send to transfer multiple buffer fragments, the
 * transfer context of each fragment, except the last one, will be
 * filled with CE_SENDLIST_ITEM_CTXT. CE_completed_send will return
 * success for each fragment sent, with the transfer context set to
 * CE_SENDLIST_ITEM_CTXT. The upper layer can use this to identify the
 * status of a send completion.
 */
#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef)

/*
 * This is an opaque type that is at least large enough to hold
 * a sendlist. A sendlist can only be accessed through CE APIs,
 * but this allows a sendlist to be allocated on the run-time
 * stack. TBDXXX: un-opaque would be simpler...
 */
struct ce_sendlist {
        unsigned int word[62];
};

#define ATH_ISR_NOSCHED 0x0000  /* Do not schedule bottom half/DPC */
#define ATH_ISR_SCHED 0x0001    /* Schedule the bottom half for execution */
#define ATH_ISR_NOTMINE 0x0002  /* for shared IRQ's */

#ifdef IPA_OFFLOAD
void ce_ipa_get_resource(struct CE_handle *ce,
                         qdf_dma_addr_t *ce_sr_base_paddr,
                         uint32_t *ce_sr_ring_size,
                         qdf_dma_addr_t *ce_reg_paddr);
#else
/**
 * ce_ipa_get_resource() - get uc resource on copyengine
 * @ce: copyengine context
 * @ce_sr_base_paddr: copyengine source ring base physical address
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * Copy engine should release resources to the micro controller.
 * The micro controller needs
 *  - the copy engine source descriptor base address
 *  - the copy engine source descriptor size
 *  - the PCI BAR address to access the copy engine register
 *
 * Return: None
 */
static inline void ce_ipa_get_resource(struct CE_handle *ce,
                                       qdf_dma_addr_t *ce_sr_base_paddr,
                                       uint32_t *ce_sr_ring_size,
                                       qdf_dma_addr_t *ce_reg_paddr)
{
}
#endif /* IPA_OFFLOAD */

static inline void ce_pkt_error_count_incr(
        struct HIF_CE_state *_hif_state,
        enum ol_ath_hif_pkt_ecodes _hif_ecode)
{
        struct hif_softc *scn = HIF_GET_SOFTC(_hif_state);

        if (_hif_ecode == HIF_PIPE_NO_RESOURCE)
                scn->pkt_stats.hif_pipe_no_resrc_count += 1;
}

bool ce_check_rx_pending(struct CE_state *CE_state);
#if defined(FEATURE_LRO)
int ce_lro_flush_cb_register(struct hif_opaque_softc *scn,
                             void (handler)(void *), void *data);
int ce_lro_flush_cb_deregister(struct hif_opaque_softc *scn);
#endif
struct ce_ops *ce_services_srng(void);
struct ce_ops *ce_services_legacy(void);
bool ce_srng_based(struct hif_softc *scn);
/* Forward declaration */
struct CE_ring_state;

struct ce_ops {
        uint32_t (*ce_get_desc_size)(uint8_t ring_type);
        void (*ce_ring_setup)(struct hif_softc *scn, uint8_t ring_type,
                              uint32_t ce_id, struct CE_ring_state *ring,
                              struct CE_attr *attr);
        int (*ce_send_nolock)(struct CE_handle *copyeng,
                              void *per_transfer_context,
                              qdf_dma_addr_t buffer,
                              uint32_t nbytes,
                              uint32_t transfer_id,
                              uint32_t flags,
                              uint32_t user_flags);
        int (*ce_sendlist_send)(struct CE_handle *copyeng,
                                void *per_transfer_context,
                                struct ce_sendlist *sendlist,
                                unsigned int transfer_id);
        QDF_STATUS (*ce_revoke_recv_next)(struct CE_handle *copyeng,
                                          void **per_CE_contextp,
                                          void **per_transfer_contextp,
                                          qdf_dma_addr_t *bufferp);
        QDF_STATUS (*ce_cancel_send_next)(struct CE_handle *copyeng,
                                          void **per_CE_contextp,
                                          void **per_transfer_contextp,
                                          qdf_dma_addr_t *bufferp,
                                          unsigned int *nbytesp,
                                          unsigned int *transfer_idp,
                                          uint32_t *toeplitz_hash_result);
        int (*ce_recv_buf_enqueue)(struct CE_handle *copyeng,
                                   void *per_recv_context,
                                   qdf_dma_addr_t buffer);
        bool (*watermark_int)(struct CE_state *CE_state,
                              unsigned int *flags);
        int (*ce_completed_recv_next_nolock)(struct CE_state *CE_state,
                                             void **per_CE_contextp,
                                             void **per_transfer_contextp,
                                             qdf_dma_addr_t *bufferp,
                                             unsigned int *nbytesp,
                                             unsigned int *transfer_idp,
                                             unsigned int *flagsp);
        int (*ce_completed_send_next_nolock)(struct CE_state *CE_state,
                                             void **per_CE_contextp,
                                             void **per_transfer_contextp,
                                             qdf_dma_addr_t *bufferp,
                                             unsigned int *nbytesp,
                                             unsigned int *transfer_idp,
                                             unsigned int *sw_idx,
                                             unsigned int *hw_idx,
                                             uint32_t *toeplitz_hash_result);
        unsigned int (*ce_recv_entries_done_nolock)(struct hif_softc *scn,
                                                    struct CE_state *CE_state);
        unsigned int (*ce_send_entries_done_nolock)(struct hif_softc *scn,
                                                    struct CE_state *CE_state);
        void (*ce_per_engine_handler_adjust)(struct CE_state *CE_state,
                                             int disable_copy_compl_intr);
};
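
/*
 * Illustrative backend selection: the service vtable above is picked
 * once per target, depending on whether it uses SRNG-style rings.
 *
 *      struct ce_ops *ops = ce_srng_based(scn) ?
 *              ce_services_srng() : ce_services_legacy();
 */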
#endif /* __COPY_ENGINE_API_H__ */