/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#ifndef __COPY_ENGINE_API_H__
#define __COPY_ENGINE_API_H__

#include "ce_main.h"
#include "hif_main.h"

/* TBDXXX: Use int return values for consistency with Target */

/* TBDXXX: Perhaps merge Host/Target-->common */

/*
 * Copy Engine support: low-level Target-side Copy Engine API.
 * This is a hardware access layer used by code that understands
 * how to use copy engines.
 */

/*
 * A "struct CE_handle *" serves as an opaque pointer-sized
 * handle to a specific copy engine.
 */
struct CE_handle;

/*
 * "Send Completion" callback type for Send Completion Notification.
 *
 * If a Send Completion callback is registered and one or more sends
 * have completed, the callback is invoked.
 *
 * per_ce_send_context is a context supplied by the calling layer
 * (via ce_send_cb_register). It is associated with a copy engine.
 *
 * per_transfer_send_context is a context supplied by the calling layer
 * (via the "send" call). It may differ for each invocation of send.
 *
 * The buffer parameter is the address of the first byte of the first
 * buffer sent (when more than one buffer is sent).
 *
 * nbytes is the number of bytes of that buffer that were sent.
 *
 * transfer_id matches the value used when the buffer or
 * buf_list was sent.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
typedef void (*ce_send_cb)(struct CE_handle *copyeng,
			   void *per_ce_send_context,
			   void *per_transfer_send_context,
			   qdf_dma_addr_t buffer,
			   unsigned int nbytes,
			   unsigned int transfer_id,
			   unsigned int sw_index,
			   unsigned int hw_index,
			   uint32_t toeplitz_hash_result);

/*
 * "Buffer Received" callback type for Buffer Received Notification.
 *
 * Implementation note: Pops 1 completed recv buffer from Dest ring
 */
typedef void (*CE_recv_cb)(struct CE_handle *copyeng,
			   void *per_CE_recv_context,
			   void *per_transfer_recv_context,
			   qdf_dma_addr_t buffer,
			   unsigned int nbytes,
			   unsigned int transfer_id,
			   unsigned int flags);

/*
 * Copy Engine Watermark callback type.
 *
 * Allows upper layers to be notified when watermarks are reached:
 *   - space is available and/or running short in a source ring
 *   - buffers are exhausted and/or abundant in a destination ring
 *
 * The flags parameter indicates which condition triggered this
 * callback. See CE_WM_FLAG_*.
 *
 * Watermark APIs are provided to allow upper layers to "batch"
 * descriptor processing and to allow upper layers to
 * throttle/unthrottle.
 */
typedef void (*CE_watermark_cb)(struct CE_handle *copyeng,
				void *per_CE_wm_context, unsigned int flags);

#define CE_WM_FLAG_SEND_HIGH 1
#define CE_WM_FLAG_SEND_LOW  2
#define CE_WM_FLAG_RECV_HIGH 4
#define CE_WM_FLAG_RECV_LOW  8
#define CE_HTT_TX_CE         4

/* A list of buffers to be gathered and sent */
struct ce_sendlist;

/* Copy Engine settable attributes */
struct CE_attr;

/*==================Send=====================================================*/

/* ce_send flags */
/* disable ring's byte swap, even if the default policy is to swap */
#define CE_SEND_FLAG_SWAP_DISABLE 1

/*
 * Queue a source buffer to be sent to an anonymous destination buffer.
 * copyeng - which copy engine to use
 * per_transfer_send_context - context passed back in the send callback
 * buffer - address of buffer
 * nbytes - number of bytes to send
 * transfer_id - arbitrary ID; reflected to destination
 * flags - CE_SEND_FLAG_* values
 * user_flags - per-transfer flags supplied by the caller
 * Returns 0 on success; otherwise an error status.
 *
 * Note: If no flags are specified, use CE's default data swap mode.
 *
 * Implementation note: pushes 1 buffer to Source ring
 */
int ce_send(struct CE_handle *copyeng,
	    void *per_transfer_send_context,
	    qdf_dma_addr_t buffer,
	    unsigned int nbytes,
	    unsigned int transfer_id,
	    unsigned int flags,
	    unsigned int user_flags);
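
/*
 * Example (sketch): posting one pre-mapped buffer. The handle ce_hdl,
 * the context my_pkt_ctx, the DMA address frag_paddr, the length len,
 * and the id xfer_id are hypothetical caller-owned values; flags and
 * user_flags of 0 request the CE's default data swap mode.
 *
 *	int rc = ce_send(ce_hdl, my_pkt_ctx, frag_paddr, len,
 *			 xfer_id, 0, 0);
 *	if (rc != 0)
 *		defer_and_retry();  hypothetical: nonzero is an error
 *				    status, e.g. no descriptor space
 */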

#ifdef WLAN_FEATURE_FASTPATH
int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t *msdus,
		 unsigned int num_msdus, unsigned int transfer_id);

#endif
void ce_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);

void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls);
extern qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,
				qdf_nbuf_t msdu,
				uint32_t transfer_id,
				uint32_t len,
				uint32_t sendhead);

extern int ce_send_single(struct CE_handle *ce_tx_hdl,
			  qdf_nbuf_t msdu,
			  uint32_t transfer_id,
			  uint32_t len);
/*
 * Register a Send Callback function.
 * This function is called as soon as the contents of a Send
 * have reached the destination, unless disable_interrupts is
 * requested. In that case, the callback is invoked when the
 * send status is polled, shortly after the send completes.
 */
void ce_send_cb_register(struct CE_handle *copyeng,
			 ce_send_cb fn_ptr,
			 void *per_ce_send_context, int disable_interrupts);
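
/*
 * Example (sketch): a minimal send-completion handler and its
 * registration. my_layer_ctx and free_tx_resources() are hypothetical;
 * interrupts stay enabled (disable_interrupts = 0).
 *
 *	static void my_send_done(struct CE_handle *copyeng,
 *				 void *per_ce_send_context,
 *				 void *per_transfer_send_context,
 *				 qdf_dma_addr_t buffer,
 *				 unsigned int nbytes,
 *				 unsigned int transfer_id,
 *				 unsigned int sw_index,
 *				 unsigned int hw_index,
 *				 uint32_t toeplitz_hash_result)
 *	{
 *		free_tx_resources(per_transfer_send_context);
 *	}
 *
 *	ce_send_cb_register(ce_hdl, my_send_done, my_layer_ctx, 0);
 */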

/*
 * Return the size of a SendList. This allows the caller to allocate
 * a SendList while the SendList structure remains opaque.
 */
unsigned int ce_sendlist_sizeof(void);

/* Initialize a sendlist */
void ce_sendlist_init(struct ce_sendlist *sendlist);

/* Append a simple buffer (address/length) to a sendlist. */
int ce_sendlist_buf_add(struct ce_sendlist *sendlist,
			qdf_dma_addr_t buffer,
			unsigned int nbytes,
			uint32_t flags, /* OR-ed with internal flags */
			uint32_t user_flags);

/*
 * Queue a "sendlist" of buffers to be sent using gather to a single
 * anonymous destination buffer.
 * copyeng - which copy engine to use
 * sendlist - list of simple buffers to send using gather
 * transfer_id - arbitrary ID; reflected to destination
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes multiple buffers with Gather to Source ring.
 */
int ce_sendlist_send(struct CE_handle *copyeng,
		     void *per_transfer_send_context,
		     struct ce_sendlist *sendlist,
		     unsigned int transfer_id);
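
/*
 * Example (sketch): gathering two pre-mapped fragments into a single
 * transfer. hdr_paddr/payload_paddr and their lengths are hypothetical;
 * the opaque struct ce_sendlist is sized so it can live on the stack.
 *
 *	struct ce_sendlist sl;
 *
 *	ce_sendlist_init(&sl);
 *	ce_sendlist_buf_add(&sl, hdr_paddr, hdr_len, 0, 0);
 *	ce_sendlist_buf_add(&sl, payload_paddr, payload_len, 0, 0);
 *	if (ce_sendlist_send(ce_hdl, my_pkt_ctx, &sl, xfer_id) != 0)
 *		defer_and_retry();  hypothetical error path
 */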

/*==================Recv=====================================================*/

/*
 * Make a buffer available to receive. The buffer must be at least
 * the minimal size appropriate for this copy engine (src_sz_max
 * attribute).
 * copyeng - which copy engine to use
 * per_transfer_recv_context - context passed back to caller's recv_cb
 * buffer - address of buffer in CE space
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes a buffer to Dest ring.
 */
int ce_recv_buf_enqueue(struct CE_handle *copyeng,
			void *per_transfer_recv_context,
			qdf_dma_addr_t buffer);

/*
 * Register a Receive Callback function.
 * This function is called as soon as data is received
 * from the source.
 */
void ce_recv_cb_register(struct CE_handle *copyeng,
			 CE_recv_cb fn_ptr,
			 void *per_CE_recv_context,
			 int disable_interrupts);
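
/*
 * Example (sketch): a receive handler that hands the data up and
 * immediately recycles the buffer into the Dest ring.
 * deliver_to_stack() and my_layer_ctx are hypothetical.
 *
 *	static void my_recv_cb(struct CE_handle *copyeng,
 *			       void *per_CE_recv_context,
 *			       void *per_transfer_recv_context,
 *			       qdf_dma_addr_t buffer,
 *			       unsigned int nbytes,
 *			       unsigned int transfer_id,
 *			       unsigned int flags)
 *	{
 *		deliver_to_stack(per_transfer_recv_context, nbytes);
 *		ce_recv_buf_enqueue(copyeng, per_transfer_recv_context,
 *				    buffer);
 *	}
 *
 *	ce_recv_cb_register(ce_hdl, my_recv_cb, my_layer_ctx, 0);
 */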

/*==================CE Watermark=============================================*/

/*
 * Register a Watermark Callback function.
 * This function is called as soon as a watermark level
 * is crossed. A Watermark Callback function is free to
 * handle received data "en masse"; but then some coordination
 * is required with a registered Receive Callback function.
 * [Suggestion: Either handle Receives in a Receive Callback
 * or en masse in a Watermark Callback; but not both.]
 */
void ce_watermark_cb_register(struct CE_handle *copyeng,
			      CE_watermark_cb fn_ptr,
			      void *per_CE_wm_context);

/*
 * Set low/high watermarks for the send/source side of a copy engine.
 *
 * Typically, the destination side CPU manages watermarks for
 * the receive side and the source side CPU manages watermarks
 * for the send side.
 *
 * A low watermark of 0 is never hit (so the watermark function
 * will never be called for a Low Watermark condition).
 *
 * A high watermark equal to nentries is never hit (so the
 * watermark function will never be called for a High Watermark
 * condition).
 */
void ce_send_watermarks_set(struct CE_handle *copyeng,
			    unsigned int low_alert_nentries,
			    unsigned int high_alert_nentries);

/* Set low/high watermarks for the receive/destination side of copy engine. */
void ce_recv_watermarks_set(struct CE_handle *copyeng,
			    unsigned int low_alert_nentries,
			    unsigned int high_alert_nentries);
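
/*
 * Example (sketch): throttling transmit with send-side watermarks on
 * a 512-entry source ring. pause_tx()/resume_tx() are hypothetical
 * upper-layer hooks, and this sketch assumes CE_WM_FLAG_SEND_HIGH
 * fires as the ring fills and CE_WM_FLAG_SEND_LOW as it drains.
 *
 *	static void my_wm_cb(struct CE_handle *copyeng,
 *			     void *per_CE_wm_context, unsigned int flags)
 *	{
 *		if (flags & CE_WM_FLAG_SEND_HIGH)
 *			pause_tx(per_CE_wm_context);
 *		if (flags & CE_WM_FLAG_SEND_LOW)
 *			resume_tx(per_CE_wm_context);
 *	}
 *
 *	ce_watermark_cb_register(ce_hdl, my_wm_cb, my_layer_ctx);
 *	ce_send_watermarks_set(ce_hdl, 32, 480);
 */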

/*
 * Return the number of entries that can be queued
 * to a ring at an instant in time.
 *
 * For source ring, does not imply that destination-side
 * buffers are available; merely indicates descriptor space
 * in the source ring.
 *
 * For destination ring, does not imply that previously
 * received buffers have been processed; merely indicates
 * descriptor space in destination ring.
 *
 * Mainly for use with CE Watermark callback.
 */
unsigned int ce_send_entries_avail(struct CE_handle *copyeng);
unsigned int ce_recv_entries_avail(struct CE_handle *copyeng);

/*
 * Return the number of entries in the ring that are ready
 * to be processed by software.
 *
 * For source ring, the number of descriptors that have
 * been completed and can now be overwritten with new send
 * descriptors.
 *
 * For destination ring, the number of descriptors that
 * are available to be processed (newly received buffers).
 */
unsigned int ce_send_entries_done(struct CE_handle *copyeng);
unsigned int ce_recv_entries_done(struct CE_handle *copyeng);

/* recv flags */
/* Data is byte-swapped */
#define CE_RECV_FLAG_SWAPPED 1

/*
 * Supply data for the next completed unprocessed receive descriptor.
 *
 * For use
 *   with a CE Watermark callback,
 *   in a recv_cb function when processing buf_lists,
 *   in a recv_cb function in order to mitigate recv_cb's.
 *
 * Implementation note: Pops buffer from Dest ring.
 */
int ce_completed_recv_next(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   qdf_dma_addr_t *bufferp,
			   unsigned int *nbytesp,
			   unsigned int *transfer_idp,
			   unsigned int *flagsp);
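
/*
 * Example (sketch): draining completed receives "en masse", e.g. from
 * a watermark callback. The 0-on-success return convention is an
 * assumption of this sketch; process_rx() is hypothetical.
 *
 *	void *ce_ctx, *xfer_ctx;
 *	qdf_dma_addr_t paddr;
 *	unsigned int nbytes, id, flags;
 *
 *	while (ce_completed_recv_next(copyeng, &ce_ctx, &xfer_ctx,
 *				      &paddr, &nbytes, &id, &flags) == 0)
 *		process_rx(xfer_ctx, nbytes,
 *			   flags & CE_RECV_FLAG_SWAPPED);
 */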

/*
 * Supply data for the next completed unprocessed send descriptor.
 *
 * For use
 *   with a CE Watermark callback,
 *   in a send_cb function in order to mitigate send_cb's.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
int ce_completed_send_next(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   qdf_dma_addr_t *bufferp,
			   unsigned int *nbytesp,
			   unsigned int *transfer_idp,
			   unsigned int *sw_idx,
			   unsigned int *hw_idx,
			   uint32_t *toeplitz_hash_result);
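
/*
 * Example (sketch): reaping completed sends from a poll path. The
 * 0-on-success return convention is an assumption of this sketch;
 * recycle_tx_buf() is hypothetical. Fragments queued via a sendlist
 * complete with the CE_SENDLIST_ITEM_CTXT marker context (see below).
 *
 *	void *ce_ctx, *xfer_ctx;
 *	qdf_dma_addr_t paddr;
 *	unsigned int nbytes, id, sw, hw;
 *	uint32_t hash;
 *
 *	while (ce_completed_send_next(copyeng, &ce_ctx, &xfer_ctx,
 *				      &paddr, &nbytes, &id, &sw, &hw,
 *				      &hash) == 0)
 *		recycle_tx_buf(xfer_ctx);
 */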

/*==================CE Engine Initialization=================================*/

/* Initialize an instance of a CE */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr);

/*==================CE Engine Shutdown=======================================*/
/*
 * Support clean shutdown by allowing the caller to revoke
 * receive buffers. Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp);

/*
 * Support clean shutdown by allowing the caller to cancel
 * pending sends. Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp,
		    unsigned int *nbytesp,
		    unsigned int *transfer_idp,
		    uint32_t *toeplitz_hash_result);

void ce_fini(struct CE_handle *copyeng);
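
/*
 * Example (sketch): reclaiming posted receive buffers during a clean
 * shutdown, after Target DMA has been stopped. unmap_and_free() is
 * hypothetical.
 *
 *	void *ce_ctx, *xfer_ctx;
 *	qdf_dma_addr_t paddr;
 *
 *	while (ce_revoke_recv_next(copyeng, &ce_ctx, &xfer_ctx,
 *				   &paddr) == QDF_STATUS_SUCCESS)
 *		unmap_and_free(xfer_ctx);
 *	ce_fini(copyeng);
 */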

/*==================CE Interrupt Handlers====================================*/
void ce_per_engine_service_any(int irq, struct hif_softc *scn);
int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id);
void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int CE_id);

/*===================CE cmpl interrupt Enable/Disable =======================*/
void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn);
void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn);

/* API to check if any of the copy engine pipes has
 * pending frames for processing
 */
bool ce_get_rx_pending(struct hif_softc *scn);

/* CE_attr.flags values */
#define CE_ATTR_NO_SNOOP            0x01 /* Use NonSnooping PCIe accesses? */
#define CE_ATTR_BYTE_SWAP_DATA      0x02 /* Byte swap data words */
#define CE_ATTR_SWIZZLE_DESCRIPTORS 0x04 /* Swizzle descriptors? */
#define CE_ATTR_DISABLE_INTR        0x08 /* no interrupt on copy completion */
#define CE_ATTR_ENABLE_POLL         0x10 /* poll for residue descriptors */

/* Attributes of an instance of a Copy Engine */
struct CE_attr {
	unsigned int flags;         /* CE_ATTR_* values */
	unsigned int priority;      /* TBD */
	unsigned int src_nentries;  /* #entries in source ring -
				     * Must be a power of 2 */
	unsigned int src_sz_max;    /* Max source send size for this CE.
				     * This is also the minimum size of
				     * a destination buffer. */
	unsigned int dest_nentries; /* #entries in destination ring -
				     * Must be a power of 2 */
	void *reserved;             /* Future use */
};
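
/*
 * Example (sketch): bringing up one copy engine. The attribute values
 * and the CE id are illustrative; scn comes from the HIF layer, and a
 * NULL return on failure is an assumption of this sketch.
 *
 *	struct CE_attr attr = {
 *		.flags = 0,
 *		.src_nentries = 512,
 *		.src_sz_max = 2048,
 *		.dest_nentries = 512,
 *	};
 *	struct CE_handle *ce_hdl = ce_init(scn, 5, &attr);
 *
 *	if (!ce_hdl)
 *		fail_probe();  hypothetical error path
 */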

/*
 * When using sendlist_send to transfer multiple buffer fragments, the
 * transfer context of each fragment, except the last one, will be
 * filled with CE_SENDLIST_ITEM_CTXT. ce_completed_send_next will
 * return success for each fragment sent, and the transfer context for
 * those fragments will be CE_SENDLIST_ITEM_CTXT. The upper layer can
 * use this to identify the status of a send completion.
 */
#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef)

/*
 * This is an opaque type that is at least large enough to hold
 * a sendlist. A sendlist can only be accessed through CE APIs,
 * but this allows a sendlist to be allocated on the run-time
 * stack. TBDXXX: un-opaque would be simpler...
 */
struct ce_sendlist {
	unsigned int word[62];
};

#define ATH_ISR_NOSCHED 0x0000 /* Do not schedule bottom half/DPC */
#define ATH_ISR_SCHED   0x0001 /* Schedule the bottom half for execution */
#define ATH_ISR_NOTMINE 0x0002 /* For shared IRQs */

#ifdef IPA_OFFLOAD
void ce_ipa_get_resource(struct CE_handle *ce,
			 qdf_dma_addr_t *ce_sr_base_paddr,
			 uint32_t *ce_sr_ring_size,
			 qdf_dma_addr_t *ce_reg_paddr);
#else
/**
 * ce_ipa_get_resource() - get uc resource on copyengine
 * @ce: copyengine context
 * @ce_sr_base_paddr: copyengine source ring base physical address
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * Copy engine should release resource to micro controller
 * Micro controller needs
 * - Copy engine source descriptor base address
 * - Copy engine source descriptor size
 * - PCI BAR address to access copy engine register
 *
 * Return: None
 */
static inline void ce_ipa_get_resource(struct CE_handle *ce,
				       qdf_dma_addr_t *ce_sr_base_paddr,
				       uint32_t *ce_sr_ring_size,
				       qdf_dma_addr_t *ce_reg_paddr)
{
	return;
}
#endif /* IPA_OFFLOAD */
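
/*
 * Example (sketch): exporting CE source-ring info for the IPA micro
 * controller. The out-parameters are filled by the call; ce_hdl is
 * hypothetical.
 *
 *	qdf_dma_addr_t sr_base, reg_base;
 *	uint32_t sr_size;
 *
 *	ce_ipa_get_resource(ce_hdl, &sr_base, &sr_size, &reg_base);
 */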

static inline void ce_pkt_error_count_incr(
	struct HIF_CE_state *_hif_state,
	enum ol_ath_hif_pkt_ecodes _hif_ecode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(_hif_state);

	if (_hif_ecode == HIF_PIPE_NO_RESOURCE)
		scn->pkt_stats.hif_pipe_no_resrc_count += 1;
}

bool ce_check_rx_pending(struct CE_state *CE_state);
#if defined(FEATURE_LRO)
int ce_lro_flush_cb_register(struct hif_opaque_softc *scn,
			     void (handler)(void *), void *data);
int ce_lro_flush_cb_deregister(struct hif_opaque_softc *scn);
#endif
#endif /* __COPY_ENGINE_API_H__ */