/*
 * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __COPY_ENGINE_API_H__
#define __COPY_ENGINE_API_H__

#include "pld_common.h"
#include "ce_main.h"
#include "hif_main.h"

/* TBDXXX: Use int return values for consistency with Target */

/* TBDXXX: Perhaps merge Host/Target-->common */

/*
 * Copy Engine support: low-level Target-side Copy Engine API.
 * This is a hardware access layer used by code that understands
 * how to use copy engines.
 */

/*
 * A "struct CE_handle *" serves as an opaque pointer-sized
 * handle to a specific copy engine.
 */
struct CE_handle;

/*
 * "Send Completion" callback type for Send Completion Notification.
 *
 * If a Send Completion callback is registered and one or more sends
 * have completed, the callback is invoked.
 *
 * per_ce_send_context is a context supplied by the calling layer
 * (via ce_send_cb_register). It is associated with a copy engine.
 *
 * per_transfer_send_context is a context supplied by the calling layer
 * (via the "send" call). It may be different for each invocation
 * of send.
 *
 * The buffer parameter is the address of the first byte sent of the
 * first buffer (if more than one buffer was sent).
 *
 * nbytes is the number of bytes of that buffer that were sent.
 *
 * transfer_id matches the value used when the buffer or
 * buf_list was sent.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
typedef void (*ce_send_cb)(struct CE_handle *copyeng,
			   void *per_ce_send_context,
			   void *per_transfer_send_context,
			   qdf_dma_addr_t buffer,
			   unsigned int nbytes,
			   unsigned int transfer_id,
			   unsigned int sw_index,
			   unsigned int hw_index,
			   uint32_t toeplitz_hash_result);

/*
 * "Buffer Received" callback type for Buffer Received Notification.
 *
 * Implementation note: Pops 1 completed recv buffer from Dest ring
 */
typedef void (*CE_recv_cb)(struct CE_handle *copyeng,
			   void *per_CE_recv_context,
			   void *per_transfer_recv_context,
			   qdf_dma_addr_t buffer,
			   unsigned int nbytes,
			   unsigned int transfer_id,
			   unsigned int flags);

/*
 * Copy Engine Watermark callback type.
 *
 * Allows upper layers to be notified when watermarks are reached:
 *	space is available and/or running short in a source ring
 *	buffers are exhausted and/or abundant in a destination ring
 *
 * The flags parameter indicates which condition triggered this
 * callback. See CE_WM_FLAG_*.
 *
 * Watermark APIs are provided to allow upper layers to "batch"
 * descriptor processing and to throttle/unthrottle.
 */
typedef void (*CE_watermark_cb)(struct CE_handle *copyeng,
				void *per_CE_wm_context, unsigned int flags);

#define CE_WM_FLAG_SEND_HIGH 1
#define CE_WM_FLAG_SEND_LOW 2
#define CE_WM_FLAG_RECV_HIGH 4
#define CE_WM_FLAG_RECV_LOW 8
#define CE_HTT_TX_CE 4

/**
 * ce_service_srng_init() - Initialization routine for CE services
 *			    in SRNG based targets
 *
 * Return: None
 */
void ce_service_srng_init(void);

/**
 * ce_service_legacy_init() - Initialization routine for CE services
 *			      in legacy targets
 *
 * Return: None
 */
void ce_service_legacy_init(void);

/* A list of buffers to be gathered and sent */
struct ce_sendlist;

/* Copy Engine settable attributes */
struct CE_attr;

/*==================Send=====================================================*/

/* ce_send flags */
/* disable ring's byte swap, even if the default policy is to swap */
#define CE_SEND_FLAG_SWAP_DISABLE 1

/*
 * Queue a source buffer to be sent to an anonymous destination buffer.
 * copyeng - which copy engine to use
 * per_transfer_send_context - context echoed back in the caller's
 *	send completion callback
 * buffer - address of buffer
 * nbytes - number of bytes to send
 * transfer_id - arbitrary ID; reflected to destination
 * flags - CE_SEND_FLAG_* values
 * Returns 0 on success; otherwise an error status.
 *
 * Note: If no flags are specified, use CE's default data swap mode.
 *
 * Implementation note: pushes 1 buffer to Source ring
 */
int ce_send(struct CE_handle *copyeng,
	    void *per_transfer_send_context,
	    qdf_dma_addr_t buffer,
	    unsigned int nbytes,
	    unsigned int transfer_id,
	    unsigned int flags,
	    unsigned int user_flags);
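
/*
 * Usage sketch (illustrative, not part of the API): "ce_hdl", "my_ctx",
 * "paddr", "len" and MY_XFER_ID are hypothetical caller state; "paddr"
 * is assumed to be an already DMA-mapped buffer address.
 *
 *	if (ce_send(ce_hdl, my_ctx, paddr, len, MY_XFER_ID, 0, 0) != 0)
 *		handle_send_queue_full();	(hypothetical error path)
 */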

#ifdef WLAN_FEATURE_FASTPATH
int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
		 unsigned int transfer_id, uint32_t download_len);
#endif

void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls);
extern qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,
				qdf_nbuf_t msdu,
				uint32_t transfer_id,
				uint32_t len,
				uint32_t sendhead);

extern int ce_send_single(struct CE_handle *ce_tx_hdl,
			  qdf_nbuf_t msdu,
			  uint32_t transfer_id,
			  uint32_t len);

/*
 * Register a Send Callback function.
 * This function is called as soon as the contents of a Send
 * have reached the destination, unless disable_interrupts is
 * requested. In that case, the callback is invoked when the
 * send status is polled, shortly after the send completes.
 */
void ce_send_cb_register(struct CE_handle *copyeng,
			 ce_send_cb fn_ptr,
			 void *per_ce_send_context, int disable_interrupts);
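
/*
 * Usage sketch (illustrative): registering a completion handler.
 * "my_send_done", "my_ce_ctx" and recycle_buffer() are hypothetical.
 *
 *	static void my_send_done(struct CE_handle *copyeng, void *ce_ctx,
 *				 void *xfer_ctx, qdf_dma_addr_t buf,
 *				 unsigned int nbytes, unsigned int id,
 *				 unsigned int sw_idx, unsigned int hw_idx,
 *				 uint32_t hash)
 *	{
 *		recycle_buffer(xfer_ctx);
 *	}
 *
 *	ce_send_cb_register(ce_hdl, my_send_done, my_ce_ctx, 0);
 */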

/*
 * Return the size of a SendList. This allows the caller to allocate
 * a SendList while the SendList structure remains opaque.
 */
unsigned int ce_sendlist_sizeof(void);

/* Initialize a sendlist */
void ce_sendlist_init(struct ce_sendlist *sendlist);

/* Append a simple buffer (address/length) to a sendlist. */
int ce_sendlist_buf_add(struct ce_sendlist *sendlist,
			qdf_dma_addr_t buffer,
			unsigned int nbytes,
			/* OR-ed with internal flags */
			uint32_t flags,
			uint32_t user_flags);

/*
 * Queue a "sendlist" of buffers to be sent using gather to a single
 * anonymous destination buffer.
 * copyeng - which copy engine to use
 * sendlist - list of simple buffers to send using gather
 * transfer_id - arbitrary ID; reflected to destination
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes multiple buffers with Gather to Source ring.
 */
int ce_sendlist_send(struct CE_handle *copyeng,
		     void *per_transfer_send_context,
		     struct ce_sendlist *sendlist,
		     unsigned int transfer_id);
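
/*
 * Usage sketch (illustrative; "ce_hdl", "my_ctx", the "frag*" addresses
 * and lengths are hypothetical, already DMA-mapped fragments):
 *
 *	struct ce_sendlist sl;
 *
 *	ce_sendlist_init(&sl);
 *	ce_sendlist_buf_add(&sl, frag0_paddr, frag0_len, 0, 0);
 *	ce_sendlist_buf_add(&sl, frag1_paddr, frag1_len, 0, 0);
 *	if (ce_sendlist_send(ce_hdl, my_ctx, &sl, MY_XFER_ID) != 0)
 *		handle_error();	(hypothetical)
 */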

/*==================Recv=====================================================*/

/*
 * Make a buffer available to receive. The buffer must be at least as
 * large as the minimal size appropriate for this copy engine (the
 * src_sz_max attribute).
 * copyeng - which copy engine to use
 * per_transfer_recv_context - context passed back to caller's recv_cb
 * buffer - address of buffer in CE space
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes a buffer to Dest ring.
 */
int ce_recv_buf_enqueue(struct CE_handle *copyeng,
			void *per_transfer_recv_context,
			qdf_dma_addr_t buffer);
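
/*
 * Usage sketch (illustrative; alloc_and_map_rx_buf() is a hypothetical
 * helper that allocates a receive buffer and returns its DMA address):
 *
 *	void *ctx;
 *	qdf_dma_addr_t paddr;
 *
 *	while (alloc_and_map_rx_buf(&ctx, &paddr) == 0 &&
 *	       ce_recv_buf_enqueue(ce_hdl, ctx, paddr) == 0)
 *		continue;	(keep the destination ring replenished)
 */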

/*
 * Register a Receive Callback function.
 * This function is called as soon as data is received
 * from the source.
 */
void ce_recv_cb_register(struct CE_handle *copyeng,
			 CE_recv_cb fn_ptr,
			 void *per_CE_recv_context,
			 int disable_interrupts);

/*==================CE Watermark=============================================*/

/*
 * Register a Watermark Callback function.
 * This function is called as soon as a watermark level
 * is crossed. A Watermark Callback function is free to
 * handle received data "en masse"; but then some coordination
 * is required with a registered Receive Callback function.
 * [Suggestion: Either handle Receives in a Receive Callback
 * or en masse in a Watermark Callback; but not both.]
 */
void ce_watermark_cb_register(struct CE_handle *copyeng,
			      CE_watermark_cb fn_ptr,
			      void *per_CE_wm_context);

/*
 * Set low/high watermarks for the send/source side of a copy engine.
 *
 * Typically, the destination side CPU manages watermarks for
 * the receive side and the source side CPU manages watermarks
 * for the send side.
 *
 * A low watermark of 0 is never hit (so the watermark function
 * will never be called for a Low Watermark condition).
 *
 * A high watermark equal to nentries is never hit (so the
 * watermark function will never be called for a High Watermark
 * condition).
 */
void ce_send_watermarks_set(struct CE_handle *copyeng,
			    unsigned int low_alert_nentries,
			    unsigned int high_alert_nentries);

/* Set low/high watermarks for the receive/destination side of a copy
 * engine.
 */
void ce_recv_watermarks_set(struct CE_handle *copyeng,
			    unsigned int low_alert_nentries,
			    unsigned int high_alert_nentries);
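
/*
 * Sketch (illustrative thresholds; "ce_hdl", "my_wm_cb", "my_wm_ctx"
 * and "src_nentries" are hypothetical caller state): arm send-side
 * watermark alerts at 2 and src_nentries - 2 entries, then register
 * the handler that will receive CE_WM_FLAG_SEND_LOW/HIGH.
 *
 *	ce_send_watermarks_set(ce_hdl, 2, src_nentries - 2);
 *	ce_watermark_cb_register(ce_hdl, my_wm_cb, my_wm_ctx);
 */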

/*
 * Return the number of entries that can be queued
 * to a ring at an instant in time.
 *
 * For source ring, does not imply that destination-side
 * buffers are available; merely indicates descriptor space
 * in the source ring.
 *
 * For destination ring, does not imply that previously
 * received buffers have been processed; merely indicates
 * descriptor space in destination ring.
 *
 * Mainly for use with CE Watermark callback.
 */
unsigned int ce_send_entries_avail(struct CE_handle *copyeng);
unsigned int ce_recv_entries_avail(struct CE_handle *copyeng);
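
/*
 * Sketch (illustrative): inside a CE_watermark_cb, replenish the
 * destination ring only while descriptor space remains;
 * post_one_rx_buffer() is a hypothetical helper built on
 * ce_recv_buf_enqueue().
 *
 *	while (ce_recv_entries_avail(copyeng) > 0 &&
 *	       post_one_rx_buffer(copyeng) == 0)
 *		continue;
 */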

/*
 * Return the number of entries in the ring that are ready
 * to be processed by software.
 *
 * For source ring, the number of descriptors that have
 * been completed and can now be overwritten with new send
 * descriptors.
 *
 * For destination ring, the number of descriptors that
 * are available to be processed (newly received buffers).
 */
unsigned int ce_send_entries_done(struct CE_handle *copyeng);
unsigned int ce_recv_entries_done(struct CE_handle *copyeng);

/* recv flags */
/* Data is byte-swapped */
#define CE_RECV_FLAG_SWAPPED 1

/*
 * Supply data for the next completed unprocessed receive descriptor.
 *
 * For use
 *	with CE Watermark callback,
 *	in a recv_cb function when processing buf_lists,
 *	in a recv_cb function in order to mitigate recv_cb's.
 *
 * Implementation note: Pops buffer from Dest ring.
 */
int ce_completed_recv_next(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   qdf_dma_addr_t *bufferp,
			   unsigned int *nbytesp,
			   unsigned int *transfer_idp,
			   unsigned int *flagsp);
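
/*
 * Sketch (illustrative): draining every completed receive, e.g. from a
 * watermark callback. process_rx() is a hypothetical consumer, and
 * success is assumed to be reported as 0.
 *
 *	void *ce_ctx, *xfer_ctx;
 *	qdf_dma_addr_t paddr;
 *	unsigned int nbytes, id, flags;
 *
 *	while (ce_completed_recv_next(copyeng, &ce_ctx, &xfer_ctx, &paddr,
 *				      &nbytes, &id, &flags) == 0)
 *		process_rx(xfer_ctx, nbytes, flags);
 */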

/*
 * Supply data for the next completed unprocessed send descriptor.
 *
 * For use
 *	with CE Watermark callback,
 *	in a send_cb function in order to mitigate send_cb's.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
int ce_completed_send_next(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   qdf_dma_addr_t *bufferp,
			   unsigned int *nbytesp,
			   unsigned int *transfer_idp,
			   unsigned int *sw_idx,
			   unsigned int *hw_idx,
			   uint32_t *toeplitz_hash_result);

/*==================CE Engine Initialization=================================*/

/* Initialize an instance of a CE */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr);

/*==================CE Engine Shutdown=======================================*/
/*
 * Support clean shutdown by allowing the caller to revoke
 * receive buffers. Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp);

/*
 * Support clean shutdown by allowing the caller to cancel
 * pending sends. Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp,
		    unsigned int *nbytesp,
		    unsigned int *transfer_idp,
		    uint32_t *toeplitz_hash_result);

void ce_fini(struct CE_handle *copyeng);

/*==================CE Interrupt Handlers====================================*/
void ce_per_engine_service_any(int irq, struct hif_softc *scn);
int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id);
void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int CE_id);

/*===================CE cmpl interrupt Enable/Disable =======================*/
void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn);
void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn);

/* API to check if any of the copy engine pipes has
 * pending frames for processing
 */
bool ce_get_rx_pending(struct hif_softc *scn);

/**
 * war_ce_src_ring_write_idx_set() - Set write index for CE source ring
 * @scn: HIF context
 * @ctrl_addr: CE control register address
 * @write_index: write index to set
 *
 * Return: None
 */
void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
				   u32 ctrl_addr, unsigned int write_index);

/* CE_attr.flags values */
#define CE_ATTR_NO_SNOOP 0x01	/* Use NonSnooping PCIe accesses? */
#define CE_ATTR_BYTE_SWAP_DATA 0x02	/* Byte swap data words */
#define CE_ATTR_SWIZZLE_DESCRIPTORS 0x04	/* Swizzle descriptors? */
#define CE_ATTR_DISABLE_INTR 0x08	/* no interrupt on copy completion */
#define CE_ATTR_ENABLE_POLL 0x10	/* poll for residue descriptors */
#define CE_ATTR_DIAG 0x20	/* Diag CE */

/**
 * struct CE_attr - Attributes of an instance of a Copy Engine
 * @flags: CE_ATTR_* values
 * @priority: TBD
 * @src_nentries: #entries in source ring - Must be a power of 2
 * @src_sz_max: Max source send size for this CE. This is also the minimum
 *	size of a destination buffer
 * @dest_nentries: #entries in destination ring - Must be a power of 2
 * @reserved: Future Use
 */
struct CE_attr {
	unsigned int flags;
	unsigned int priority;
	unsigned int src_nentries;
	unsigned int src_sz_max;
	unsigned int dest_nentries;
	void *reserved;
};
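
/*
 * Sketch (illustrative attribute values, not a recommended
 * configuration; "scn" and "ce_id" come from the caller's HIF setup):
 *
 *	struct CE_attr attr = {
 *		.flags = 0,
 *		.src_nentries = 32,	(must be a power of 2)
 *		.src_sz_max = 2048,
 *		.dest_nentries = 32,	(must be a power of 2)
 *	};
 *	struct CE_handle *ce_hdl = ce_init(scn, ce_id, &attr);
 */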

/*
 * When using sendlist_send to transfer multiple buffer fragments, the
 * transfer context of each fragment, except the last one, will be
 * filled with CE_SENDLIST_ITEM_CTXT. CE_completed_send will return
 * success for each fragment that completes, with the transfer context
 * set to CE_SENDLIST_ITEM_CTXT for all but the last. Upper layers can
 * use this to identify the status of a send completion.
 */
#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef)
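
/*
 * Sketch (illustrative): a ce_send_cb can use the marker to ignore
 * intermediate sendlist fragments and act only on the final one;
 * complete_transfer() is hypothetical.
 *
 *	if (per_transfer_send_context == CE_SENDLIST_ITEM_CTXT)
 *		return;	(intermediate fragment of a sendlist)
 *	complete_transfer(per_transfer_send_context);
 */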

/*
 * This is an opaque type that is at least large enough to hold
 * a sendlist. A sendlist can only be accessed through CE APIs,
 * but this allows a sendlist to be allocated on the run-time
 * stack. TBDXXX: un-opaque would be simpler...
 */
struct ce_sendlist {
	unsigned int word[62];
};

#define ATH_ISR_NOSCHED 0x0000	/* Do not schedule bottom half/DPC */
#define ATH_ISR_SCHED 0x0001	/* Schedule the bottom half for execution */
#define ATH_ISR_NOTMINE 0x0002	/* for shared IRQ's */

#ifdef IPA_OFFLOAD
void ce_ipa_get_resource(struct CE_handle *ce,
			 qdf_shared_mem_t **ce_sr,
			 uint32_t *ce_sr_ring_size,
			 qdf_dma_addr_t *ce_reg_paddr);
#else
/**
 * ce_ipa_get_resource() - get uc resource on copyengine
 * @ce: copyengine context
 * @ce_sr: copyengine source ring resource info
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * Copy engine should release resource to micro controller.
 * The micro controller needs
 *	- Copy engine source descriptor base address
 *	- Copy engine source descriptor size
 *	- PCI BAR address to access copy engine register
 *
 * Return: None
 */
static inline void ce_ipa_get_resource(struct CE_handle *ce,
				       qdf_shared_mem_t **ce_sr,
				       uint32_t *ce_sr_ring_size,
				       qdf_dma_addr_t *ce_reg_paddr)
{
}
#endif /* IPA_OFFLOAD */

static inline void ce_pkt_error_count_incr(
	struct HIF_CE_state *_hif_state,
	enum ol_ath_hif_pkt_ecodes _hif_ecode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(_hif_state);

	if (_hif_ecode == HIF_PIPE_NO_RESOURCE)
		scn->pkt_stats.hif_pipe_no_resrc_count += 1;
}

bool ce_check_rx_pending(struct CE_state *CE_state);
void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id);
struct ce_ops *ce_services_srng(void);
struct ce_ops *ce_services_legacy(void);
bool ce_srng_based(struct hif_softc *scn);

/* Forward declaration */
struct CE_ring_state;

struct ce_ops {
	uint32_t (*ce_get_desc_size)(uint8_t ring_type);
	int (*ce_ring_setup)(struct hif_softc *scn, uint8_t ring_type,
			     uint32_t ce_id, struct CE_ring_state *ring,
			     struct CE_attr *attr);
	int (*ce_send_nolock)(struct CE_handle *copyeng,
			      void *per_transfer_context,
			      qdf_dma_addr_t buffer,
			      uint32_t nbytes,
			      uint32_t transfer_id,
			      uint32_t flags,
			      uint32_t user_flags);
	int (*ce_sendlist_send)(struct CE_handle *copyeng,
				void *per_transfer_context,
				struct ce_sendlist *sendlist,
				unsigned int transfer_id);
	QDF_STATUS (*ce_revoke_recv_next)(struct CE_handle *copyeng,
					  void **per_CE_contextp,
					  void **per_transfer_contextp,
					  qdf_dma_addr_t *bufferp);
	QDF_STATUS (*ce_cancel_send_next)(struct CE_handle *copyeng,
					  void **per_CE_contextp,
					  void **per_transfer_contextp,
					  qdf_dma_addr_t *bufferp,
					  unsigned int *nbytesp,
					  unsigned int *transfer_idp,
					  uint32_t *toeplitz_hash_result);
	int (*ce_recv_buf_enqueue)(struct CE_handle *copyeng,
				   void *per_recv_context,
				   qdf_dma_addr_t buffer);
	bool (*watermark_int)(struct CE_state *CE_state, unsigned int *flags);
	int (*ce_completed_recv_next_nolock)(struct CE_state *CE_state,
					     void **per_CE_contextp,
					     void **per_transfer_contextp,
					     qdf_dma_addr_t *bufferp,
					     unsigned int *nbytesp,
					     unsigned int *transfer_idp,
					     unsigned int *flagsp);
	int (*ce_completed_send_next_nolock)(struct CE_state *CE_state,
					     void **per_CE_contextp,
					     void **per_transfer_contextp,
					     qdf_dma_addr_t *bufferp,
					     unsigned int *nbytesp,
					     unsigned int *transfer_idp,
					     unsigned int *sw_idx,
					     unsigned int *hw_idx,
					     uint32_t *toeplitz_hash_result);
	unsigned int (*ce_recv_entries_done_nolock)(struct hif_softc *scn,
						    struct CE_state *CE_state);
	unsigned int (*ce_send_entries_done_nolock)(struct hif_softc *scn,
						    struct CE_state *CE_state);
	void (*ce_per_engine_handler_adjust)(struct CE_state *CE_state,
					     int disable_copy_compl_intr);
	void (*ce_prepare_shadow_register_v2_cfg)(struct hif_softc *scn,
			struct pld_shadow_reg_v2_cfg **shadow_config,
			int *num_shadow_registers_configured);
};

int hif_ce_bus_early_suspend(struct hif_softc *scn);
int hif_ce_bus_late_resume(struct hif_softc *scn);

#endif /* __COPY_ENGINE_API_H__ */