/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#ifndef __COPY_ENGINE_API_H__
#define __COPY_ENGINE_API_H__

#include "ce_main.h"
#include "hif_main.h"

/* TBDXXX: Use int return values for consistency with Target */

/* TBDXXX: Perhaps merge Host/Target-->common */

/*
 * Copy Engine support: low-level Target-side Copy Engine API.
 * This is a hardware access layer used by code that understands
 * how to use copy engines.
 */

/*
 * A "struct CE_handle *" serves as an opaque pointer-sized
 * handle to a specific copy engine.
 */
struct CE_handle;

/*
 * "Send Completion" callback type for Send Completion Notification.
 *
 * If a Send Completion callback is registered and one or more sends
 * have completed, the callback is invoked.
 *
 * per_ce_send_context is a context supplied by the calling layer
 * (via ce_send_cb_register). It is associated with a copy engine.
 *
 * per_transfer_send_context is context supplied by the calling layer
 * (via the "send" call). It may be different for each invocation
 * of send.
 *
 * The buffer parameter is the address of the first byte of the first
 * buffer sent (when more than one buffer was sent).
 *
 * nbytes is the number of bytes of that buffer that were sent.
 *
 * transfer_id matches the value used when the buffer or
 * buf_list was sent.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
typedef void (*ce_send_cb)(struct CE_handle *copyeng,
			   void *per_ce_send_context,
			   void *per_transfer_send_context,
			   cdf_dma_addr_t buffer,
			   unsigned int nbytes,
			   unsigned int transfer_id,
			   unsigned int sw_index,
			   unsigned int hw_index,
			   uint32_t toeplitz_hash_result);

/*
 * "Buffer Received" callback type for Buffer Received Notification.
 *
 * Implementation note: Pops 1 completed recv buffer from Dest ring
 */
typedef void (*CE_recv_cb)(struct CE_handle *copyeng,
			   void *per_CE_recv_context,
			   void *per_transfer_recv_context,
			   cdf_dma_addr_t buffer,
			   unsigned int nbytes,
			   unsigned int transfer_id,
			   unsigned int flags);

/*
 * Copy Engine Watermark callback type.
 *
 * Allows upper layers to be notified when watermarks are reached:
 *   space is available and/or running short in a source ring
 *   buffers are exhausted and/or abundant in a destination ring
 *
 * The flags parameter indicates which condition triggered this
 * callback. See CE_WM_FLAG_*.
 *
 * Watermark APIs are provided to allow upper layers to "batch"
 * descriptor processing and to throttle/unthrottle.
 */
typedef void (*CE_watermark_cb)(struct CE_handle *copyeng,
				void *per_CE_wm_context, unsigned int flags);

#define CE_WM_FLAG_SEND_HIGH 1
#define CE_WM_FLAG_SEND_LOW 2
#define CE_WM_FLAG_RECV_HIGH 4
#define CE_WM_FLAG_RECV_LOW 8

/* A list of buffers to be gathered and sent */
struct ce_sendlist;

/* Copy Engine settable attributes */
struct CE_attr;

/*==================Send=====================================================*/

/* ce_send flags */
/* disable ring's byte swap, even if the default policy is to swap */
#define CE_SEND_FLAG_SWAP_DISABLE 1

/*
 * Queue a source buffer to be sent to an anonymous destination buffer.
 *   copyeng     - which copy engine to use
 *   buffer      - address of buffer
 *   nbytes      - number of bytes to send
 *   transfer_id - arbitrary ID; reflected to destination
 *   flags       - CE_SEND_FLAG_* values
 * Returns 0 on success; otherwise an error status.
 *
 * Note: If no flags are specified, use CE's default data swap mode.
 *
 * Implementation note: pushes 1 buffer to Source ring
 */
int ce_send(struct CE_handle *copyeng,
	    void *per_transfer_send_context,
	    cdf_dma_addr_t buffer,
	    unsigned int nbytes,
	    unsigned int transfer_id,
	    unsigned int flags,
	    unsigned int user_flags);
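
/*
 * Example use of ce_send() (illustrative sketch only; ce_hdl, my_pkt_ctx,
 * frag_paddr, frag_len and MY_XFER_ID are hypothetical caller-side names,
 * not part of this API):
 *
 *	if (ce_send(ce_hdl, my_pkt_ctx, frag_paddr, frag_len,
 *		    MY_XFER_ID, 0, 0) != 0) {
 *		... no descriptor space; retry later, e.g. from a
 *		    watermark callback
 *	}
 */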

#ifdef WLAN_FEATURE_FASTPATH
int ce_send_fast(struct CE_handle *copyeng, cdf_nbuf_t *msdus,
		 unsigned int num_msdus, unsigned int transfer_id);

#endif
void ce_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);

/*
 * Register a Send Callback function.
 * This function is called as soon as the contents of a Send
 * have reached the destination, unless disable_interrupts is
 * requested. In this case, the callback is invoked when the
 * send status is polled, shortly after the send completes.
 */
void ce_send_cb_register(struct CE_handle *copyeng,
			 ce_send_cb fn_ptr,
			 void *per_ce_send_context, int disable_interrupts);
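
/*
 * Example registration (illustrative sketch; my_send_done, my_pipe and
 * ce_hdl are hypothetical caller-side names):
 *
 *	static void my_send_done(struct CE_handle *copyeng, void *ce_ctx,
 *				 void *xfer_ctx, cdf_dma_addr_t buffer,
 *				 unsigned int nbytes, unsigned int transfer_id,
 *				 unsigned int sw_index, unsigned int hw_index,
 *				 uint32_t toeplitz_hash_result)
 *	{
 *		... unmap/free the send buffer described by xfer_ctx
 *	}
 *
 *	ce_send_cb_register(ce_hdl, my_send_done, my_pipe, 0);
 */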

/*
 * Return the size of a SendList. This allows the caller to allocate
 * a SendList while the SendList structure remains opaque.
 */
unsigned int ce_sendlist_sizeof(void);

/* Initialize a sendlist */
void ce_sendlist_init(struct ce_sendlist *sendlist);

/* Append a simple buffer (address/length) to a sendlist. */
int ce_sendlist_buf_add(struct ce_sendlist *sendlist,
			cdf_dma_addr_t buffer,
			unsigned int nbytes,
			uint32_t flags, /* OR-ed with internal flags */
			uint32_t user_flags);

/*
 * Queue a "sendlist" of buffers to be sent using gather to a single
 * anonymous destination buffer
 *   copyeng     - which copy engine to use
 *   sendlist    - list of simple buffers to send using gather
 *   transfer_id - arbitrary ID; reflected to destination
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes multiple buffers with Gather to Source ring.
 */
int ce_sendlist_send(struct CE_handle *copyeng,
		     void *per_transfer_send_context,
		     struct ce_sendlist *sendlist,
		     unsigned int transfer_id);
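
/*
 * Example gather send (illustrative sketch; hdr_paddr/hdr_len,
 * payload_paddr/payload_len, my_pkt_ctx and MY_XFER_ID are hypothetical):
 *
 *	struct ce_sendlist sl;
 *
 *	ce_sendlist_init(&sl);
 *	ce_sendlist_buf_add(&sl, hdr_paddr, hdr_len, 0, 0);
 *	ce_sendlist_buf_add(&sl, payload_paddr, payload_len, 0, 0);
 *	if (ce_sendlist_send(ce_hdl, my_pkt_ctx, &sl, MY_XFER_ID) != 0) {
 *		... not enough descriptor space for the whole list
 *	}
 */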

/*==================Recv=====================================================*/

/*
 * Make a buffer available to receive. The buffer must be at least of a
 * minimal size appropriate for this copy engine (src_sz_max attribute).
 *   copyeng                   - which copy engine to use
 *   per_transfer_recv_context - context passed back to caller's recv_cb
 *   buffer                    - address of buffer in CE space
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes a buffer to Dest ring.
 */
int ce_recv_buf_enqueue(struct CE_handle *copyeng,
			void *per_transfer_recv_context,
			cdf_dma_addr_t buffer);

/*
 * Register a Receive Callback function.
 * This function is called as soon as data is received
 * from the source.
 */
void ce_recv_cb_register(struct CE_handle *copyeng,
			 CE_recv_cb fn_ptr,
			 void *per_CE_recv_context,
			 int disable_interrupts);
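
/*
 * Example receive setup (illustrative sketch; my_rx_done, my_pipe,
 * my_rx_desc and rx_paddr are hypothetical; each posted buffer must be
 * at least src_sz_max bytes):
 *
 *	static void my_rx_done(struct CE_handle *copyeng, void *ce_ctx,
 *			       void *xfer_ctx, cdf_dma_addr_t buffer,
 *			       unsigned int nbytes, unsigned int transfer_id,
 *			       unsigned int flags)
 *	{
 *		... hand nbytes of data up and re-post a fresh buffer
 *	}
 *
 *	ce_recv_cb_register(ce_hdl, my_rx_done, my_pipe, 0);
 *	ce_recv_buf_enqueue(ce_hdl, my_rx_desc, rx_paddr);
 */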

/*==================CE Watermark=============================================*/

/*
 * Register a Watermark Callback function.
 * This function is called as soon as a watermark level
 * is crossed. A Watermark Callback function is free to
 * handle received data "en masse"; but then some coordination
 * is required with a registered Receive Callback function.
 * [Suggestion: Either handle Receives in a Receive Callback
 * or en masse in a Watermark Callback; but not both.]
 */
void ce_watermark_cb_register(struct CE_handle *copyeng,
			      CE_watermark_cb fn_ptr,
			      void *per_CE_wm_context);
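
/*
 * Example watermark callback (illustrative sketch; my_wm_cb and my_pipe
 * are hypothetical):
 *
 *	static void my_wm_cb(struct CE_handle *copyeng, void *wm_ctx,
 *			     unsigned int flags)
 *	{
 *		if (flags & CE_WM_FLAG_SEND_LOW)
 *			... source ring crossed its low watermark
 *		if (flags & CE_WM_FLAG_RECV_HIGH)
 *			... destination ring crossed its high watermark
 *	}
 *
 *	ce_watermark_cb_register(ce_hdl, my_wm_cb, my_pipe);
 */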

/*
 * Set low/high watermarks for the send/source side of a copy engine.
 *
 * Typically, the destination side CPU manages watermarks for
 * the receive side and the source side CPU manages watermarks
 * for the send side.
 *
 * A low watermark of 0 is never hit (so the watermark function
 * will never be called for a Low Watermark condition).
 *
 * A high watermark equal to nentries is never hit (so the
 * watermark function will never be called for a High Watermark
 * condition).
 */
void ce_send_watermarks_set(struct CE_handle *copyeng,
			    unsigned int low_alert_nentries,
			    unsigned int high_alert_nentries);

/* Set low/high watermarks for the receive/destination side of copy engine. */
void ce_recv_watermarks_set(struct CE_handle *copyeng,
			    unsigned int low_alert_nentries,
			    unsigned int high_alert_nentries);
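
/*
 * Example (illustrative sketch; the thresholds are arbitrary and attr is
 * assumed to be the CE_attr used at ce_init() time, with more than a few
 * ring entries):
 *
 *	ce_send_watermarks_set(ce_hdl, 2, attr.src_nentries - 2);
 *	ce_recv_watermarks_set(ce_hdl, 2, attr.dest_nentries - 2);
 */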

/*
 * Return the number of entries that can be queued
 * to a ring at an instant in time.
 *
 * For source ring, does not imply that destination-side
 * buffers are available; merely indicates descriptor space
 * in the source ring.
 *
 * For destination ring, does not imply that previously
 * received buffers have been processed; merely indicates
 * descriptor space in destination ring.
 *
 * Mainly for use with CE Watermark callback.
 */
unsigned int ce_send_entries_avail(struct CE_handle *copyeng);
unsigned int ce_recv_entries_avail(struct CE_handle *copyeng);

/*
 * Return the number of entries in the ring that are ready
 * to be processed by software.
 *
 * For source ring, the number of descriptors that have
 * been completed and can now be overwritten with new send
 * descriptors.
 *
 * For destination ring, the number of descriptors that
 * are available to be processed (newly received buffers).
 */
unsigned int ce_send_entries_done(struct CE_handle *copyeng);
unsigned int ce_recv_entries_done(struct CE_handle *copyeng);
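
/*
 * Example (illustrative sketch): before queuing a multi-fragment
 * sendlist, a caller might first check for descriptor space:
 *
 *	if (ce_send_entries_avail(ce_hdl) < num_frags)
 *		... defer rather than let ce_sendlist_send() fail
 */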

/* recv flags */
/* Data is byte-swapped */
#define CE_RECV_FLAG_SWAPPED 1

void ce_enable_msi(struct hif_softc *scn,
		   unsigned int CE_id,
		   uint32_t msi_addr_lo,
		   uint32_t msi_addr_hi,
		   uint32_t msi_data);
/*
 * Supply data for the next completed unprocessed receive descriptor.
 *
 * For use
 *   with CE Watermark callback,
 *   in a recv_cb function when processing buf_lists
 *   in a recv_cb function in order to mitigate recv_cb's.
 *
 * Implementation note: Pops buffer from Dest ring.
 */
int ce_completed_recv_next(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   cdf_dma_addr_t *bufferp,
			   unsigned int *nbytesp,
			   unsigned int *transfer_idp,
			   unsigned int *flagsp);
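
/*
 * Example drain loop (illustrative sketch; like the other APIs above, a
 * return of 0 is assumed to mean one more completed descriptor was
 * handed back):
 *
 *	void *ce_ctx, *xfer_ctx;
 *	cdf_dma_addr_t paddr;
 *	unsigned int nbytes, id, flags;
 *
 *	while (ce_completed_recv_next(ce_hdl, &ce_ctx, &xfer_ctx, &paddr,
 *				      &nbytes, &id, &flags) == 0) {
 *		... process nbytes from the buffer tracked by xfer_ctx,
 *		    then re-post it with ce_recv_buf_enqueue()
 *	}
 */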

/*
 * Supply data for the next completed unprocessed send descriptor.
 *
 * For use
 *   with CE Watermark callback
 *   in a send_cb function in order to mitigate send_cb's.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
int ce_completed_send_next(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   cdf_dma_addr_t *bufferp,
			   unsigned int *nbytesp,
			   unsigned int *transfer_idp,
			   unsigned int *sw_idx,
			   unsigned int *hw_idx,
			   uint32_t *toeplitz_hash_result);

/*==================CE Engine Initialization=================================*/

/* Initialize an instance of a CE */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr);

/*==================CE Engine Shutdown=======================================*/
/*
 * Support clean shutdown by allowing the caller to revoke
 * receive buffers. Target DMA must be stopped before using
 * this API.
 */
CDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    cdf_dma_addr_t *bufferp);

/*
 * Support clean shutdown by allowing the caller to cancel
 * pending sends. Target DMA must be stopped before using
 * this API.
 */
CDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    cdf_dma_addr_t *bufferp,
		    unsigned int *nbytesp,
		    unsigned int *transfer_idp,
		    uint32_t *toeplitz_hash_result);

void ce_fini(struct CE_handle *copyeng);
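
/*
 * Example shutdown order (illustrative sketch; assumes Target DMA is
 * already stopped and that CDF_STATUS_SUCCESS signals one more revoked
 * or cancelled entry):
 *
 *	while (ce_revoke_recv_next(ce_hdl, &ce_ctx, &xfer_ctx,
 *				   &paddr) == CDF_STATUS_SUCCESS)
 *		... unmap and free the revoked receive buffer
 *
 *	while (ce_cancel_send_next(ce_hdl, &ce_ctx, &xfer_ctx, &paddr,
 *				   &nbytes, &id, &hash) == CDF_STATUS_SUCCESS)
 *		... unmap and free the cancelled send buffer
 *
 *	ce_fini(ce_hdl);
 */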

/*==================CE Interrupt Handlers====================================*/
void ce_per_engine_service_any(int irq, struct hif_softc *scn);
int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id);
void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int CE_id);

/*===================CE cmpl interrupt Enable/Disable =======================*/
void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn);
void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn);

/* API to check if any of the copy engine pipes has
 * pending frames for processing
 */
bool ce_get_rx_pending(struct hif_softc *scn);

/* CE_attr.flags values */
#define CE_ATTR_NO_SNOOP 0x01            /* Use NonSnooping PCIe accesses? */
#define CE_ATTR_BYTE_SWAP_DATA 0x02      /* Byte swap data words */
#define CE_ATTR_SWIZZLE_DESCRIPTORS 0x04 /* Swizzle descriptors? */
#define CE_ATTR_DISABLE_INTR 0x08        /* no interrupt on copy completion */
#define CE_ATTR_ENABLE_POLL 0x10         /* poll for residue descriptors */

/* Attributes of an instance of a Copy Engine */
struct CE_attr {
	unsigned int flags;         /* CE_ATTR_* values */
	unsigned int priority;      /* TBD */
	unsigned int src_nentries;  /* #entries in source ring -
				     * Must be a power of 2 */
	unsigned int src_sz_max;    /* Max source send size for this CE.
				     * This is also the minimum size of
				     * a destination buffer. */
	unsigned int dest_nentries; /* #entries in destination ring -
				     * Must be a power of 2 */
	void *reserved;             /* Future use */
};
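
/*
 * Example attribute block (illustrative sketch; the sizes are arbitrary
 * and only respect the constraints above: power-of-2 ring sizes, and
 * src_sz_max doubling as the minimum destination buffer size):
 *
 *	struct CE_attr attr = {
 *		.flags = 0,
 *		.src_nentries = 32,
 *		.src_sz_max = 2048,
 *		.dest_nentries = 32,
 *	};
 *	struct CE_handle *ce_hdl = ce_init(scn, CE_id, &attr);
 */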

/*
 * When using sendlist_send to transfer multiple buffer fragments, the
 * transfer context of each fragment, except the last one, will be filled
 * with CE_SENDLIST_ITEM_CTXT. CE_completed_send will return success for
 * each fragment done with send, and the transfer context will be
 * CE_SENDLIST_ITEM_CTXT. The upper layer can use this to identify the
 * status of a send completion.
 */
#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef)
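
/*
 * Example check in a send-completion drain loop (illustrative sketch;
 * a return of 0 from ce_completed_send_next() is assumed to mean one
 * more completed descriptor was handed back):
 *
 *	while (ce_completed_send_next(ce_hdl, &ce_ctx, &xfer_ctx, &paddr,
 *				      &nbytes, &id, &sw_idx, &hw_idx,
 *				      &hash) == 0) {
 *		if (xfer_ctx == CE_SENDLIST_ITEM_CTXT) {
 *			... intermediate sendlist fragment; nothing to free
 *			continue;
 *		}
 *		... xfer_ctx is the caller's per_transfer_send_context
 *	}
 */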

/*
 * This is an opaque type that is at least large enough to hold
 * a sendlist. A sendlist can only be accessed through CE APIs,
 * but this allows a sendlist to be allocated on the run-time
 * stack. TBDXXX: un-opaque would be simpler...
 */
struct ce_sendlist {
	unsigned int word[62];
};

#define ATH_ISR_NOSCHED 0x0000 /* Do not schedule bottom half/DPC */
#define ATH_ISR_SCHED 0x0001   /* Schedule the bottom half for execution */
#define ATH_ISR_NOTMINE 0x0002 /* for shared IRQ's */

#ifdef IPA_OFFLOAD
void ce_ipa_get_resource(struct CE_handle *ce,
			 cdf_dma_addr_t *ce_sr_base_paddr,
			 uint32_t *ce_sr_ring_size,
			 cdf_dma_addr_t *ce_reg_paddr);
#else
/**
 * ce_ipa_get_resource() - get uc resource on copyengine
 * @ce: copyengine context
 * @ce_sr_base_paddr: copyengine source ring base physical address
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * Copy engine should release resource to micro controller
 * Micro controller needs
 *  - Copy engine source descriptor base address
 *  - Copy engine source descriptor size
 *  - PCI BAR address to access copy engine register
 *
 * Return: None
 */
static inline void ce_ipa_get_resource(struct CE_handle *ce,
				       cdf_dma_addr_t *ce_sr_base_paddr,
				       uint32_t *ce_sr_ring_size,
				       cdf_dma_addr_t *ce_reg_paddr)
{
	return;
}
#endif /* IPA_OFFLOAD */

static inline void ce_pkt_error_count_incr(
	struct HIF_CE_state *_hif_state,
	enum ol_ath_hif_pkt_ecodes _hif_ecode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(_hif_state);

	if (_hif_ecode == HIF_PIPE_NO_RESOURCE)
		scn->pkt_stats.hif_pipe_no_resrc_count += 1;
}

bool ce_check_rx_pending(struct hif_softc *scn, int ce_id);
#if defined(FEATURE_LRO)
void ce_lro_flush_cb_register(struct hif_softc *scn,
			      void (handler)(void *), void *data);
void ce_lro_flush_cb_deregister(struct hif_softc *scn);
#endif
#endif /* __COPY_ENGINE_API_H__ */