/*
2 * Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
3 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28#ifndef __COPY_ENGINE_API_H__
29#define __COPY_ENGINE_API_H__
30
31#include "ce_main.h"
32/* TBDXXX: Use int return values for consistency with Target */
33
34/* TBDXXX: Perhaps merge Host/Target-->common */
35
36/*
37 * Copy Engine support: low-level Target-side Copy Engine API.
38 * This is a hardware access layer used by code that understands
39 * how to use copy engines.
40 */
41
42/*
43 * A "struct CE_handle *" serves as an opaque pointer-sized
44 * handle to a specific copy engine.
45 */
46struct CE_handle;
47
/*
 * "Send Completion" callback type for Send Completion Notification.
 *
 * If a Send Completion callback is registered and one or more sends
 * have completed, the callback is invoked.
 *
 * per_ce_send_context is a context supplied by the calling layer
 * (via ce_send_cb_register). It is associated with a copy engine.
 *
 * per_transfer_send_context is context supplied by the calling layer
 * (via the "send" call). It may be different for each invocation
 * of send.
 *
 * The buffer parameter is the first byte sent of the first buffer
 * sent (if more than one buffer).
 *
 * nbytes is the number of bytes of that buffer that were sent.
 *
 * transfer_id matches the value used when the buffer or
 * buf_list was sent.
 *
 * sw_index/hw_index: presumably the source-ring software/hardware read
 * indices at completion time -- NOTE(review): confirm against
 * ce_completed_send_next(), which exposes the same pair.
 *
 * toeplitz_hash_result: hash value reported for this transfer --
 * NOTE(review): semantics not visible in this header; confirm with the
 * CE hardware documentation.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
typedef void (*ce_send_cb)(struct CE_handle *copyeng,
			   void *per_ce_send_context,
			   void *per_transfer_send_context,
			   cdf_dma_addr_t buffer,
			   unsigned int nbytes,
			   unsigned int transfer_id,
			   unsigned int sw_index,
			   unsigned int hw_index,
			   uint32_t toeplitz_hash_result);
80
/*
 * "Buffer Received" callback type for Buffer Received Notification.
 *
 * per_CE_recv_context is the context registered via ce_recv_cb_register;
 * per_transfer_recv_context is the context supplied with the buffer
 * when it was posted via ce_recv_buf_enqueue.
 *
 * buffer/nbytes describe the received data; transfer_id is the
 * arbitrary ID reflected from the send side.
 *
 * flags carries CE_RECV_FLAG_* values (e.g. CE_RECV_FLAG_SWAPPED).
 *
 * Implementation note: Pops 1 completed recv buffer from Dest ring
 */
typedef void (*CE_recv_cb)(struct CE_handle *copyeng,
			   void *per_CE_recv_context,
			   void *per_transfer_recv_context,
			   cdf_dma_addr_t buffer,
			   unsigned int nbytes,
			   unsigned int transfer_id,
			   unsigned int flags);
93
94/*
95 * Copy Engine Watermark callback type.
96 *
97 * Allows upper layers to be notified when watermarks are reached:
98 * space is available and/or running short in a source ring
99 * buffers are exhausted and/or abundant in a destination ring
100 *
101 * The flags parameter indicates which condition triggered this
102 * callback. See CE_WM_FLAG_*.
103 *
104 * Watermark APIs are provided to allow upper layers "batch"
105 * descriptor processing and to allow upper layers to
106 * throttle/unthrottle.
107 */
108typedef void (*CE_watermark_cb)(struct CE_handle *copyeng,
109 void *per_CE_wm_context, unsigned int flags);
110
111#define CE_WM_FLAG_SEND_HIGH 1
112#define CE_WM_FLAG_SEND_LOW 2
113#define CE_WM_FLAG_RECV_HIGH 4
114#define CE_WM_FLAG_RECV_LOW 8
115
116/* A list of buffers to be gathered and sent */
117struct ce_sendlist;
118
119/* Copy Engine settable attributes */
120struct CE_attr;
121
122/*==================Send=====================================================*/
123
124/* ce_send flags */
125/* disable ring's byte swap, even if the default policy is to swap */
126#define CE_SEND_FLAG_SWAP_DISABLE 1
127
128/*
129 * Queue a source buffer to be sent to an anonymous destination buffer.
130 * copyeng - which copy engine to use
131 * buffer - address of buffer
132 * nbytes - number of bytes to send
133 * transfer_id - arbitrary ID; reflected to destination
134 * flags - CE_SEND_FLAG_* values
135 * Returns 0 on success; otherwise an error status.
136 *
137 * Note: If no flags are specified, use CE's default data swap mode.
138 *
139 * Implementation note: pushes 1 buffer to Source ring
140 */
141int ce_send(struct CE_handle *copyeng,
142 void *per_transfer_send_context,
143 cdf_dma_addr_t buffer,
144 unsigned int nbytes,
145 unsigned int transfer_id,
146 unsigned int flags,
147 unsigned int user_flags);
148
149#ifdef WLAN_FEATURE_FASTPATH
150int ce_send_fast(struct CE_handle *copyeng, cdf_nbuf_t *msdus,
151 unsigned int num_msdus, unsigned int transfer_id);
152
153#endif
154void ce_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
155
156/*
157 * Register a Send Callback function.
158 * This function is called as soon as the contents of a Send
159 * have reached the destination, unless disable_interrupts is
160 * requested. In this case, the callback is invoked when the
161 * send status is polled, shortly after the send completes.
162 */
163void ce_send_cb_register(struct CE_handle *copyeng,
164 ce_send_cb fn_ptr,
165 void *per_ce_send_context, int disable_interrupts);
166
167/*
168 * Return the size of a SendList. This allows the caller to allocate
169 * a SendList while the SendList structure remains opaque.
170 */
171unsigned int ce_sendlist_sizeof(void);
172
173/* Initialize a sendlist */
174void ce_sendlist_init(struct ce_sendlist *sendlist);
175
176/* Append a simple buffer (address/length) to a sendlist. */
177int ce_sendlist_buf_add(struct ce_sendlist *sendlist,
178 cdf_dma_addr_t buffer,
179 unsigned int nbytes,
180 uint32_t flags, /* OR-ed with internal flags */
181 uint32_t user_flags);
182
183/*
184 * Queue a "sendlist" of buffers to be sent using gather to a single
185 * anonymous destination buffer
186 * copyeng - which copy engine to use
187 * sendlist - list of simple buffers to send using gather
188 * transfer_id - arbitrary ID; reflected to destination
189 * Returns 0 on success; otherwise an error status.
190 *
 * Implementation note: Pushes multiple buffers with Gather to Source ring.
192 */
193int ce_sendlist_send(struct CE_handle *copyeng,
194 void *per_transfer_send_context,
195 struct ce_sendlist *sendlist,
196 unsigned int transfer_id);
197
198/*==================Recv=====================================================*/
199
200/*
201 * Make a buffer available to receive. The buffer must be at least of a
202 * minimal size appropriate for this copy engine (src_sz_max attribute).
203 * copyeng - which copy engine to use
204 * per_transfer_recv_context - context passed back to caller's recv_cb
205 * buffer - address of buffer in CE space
206 * Returns 0 on success; otherwise an error status.
207 *
 * Implementation note: Pushes a buffer to Dest ring.
209 */
210int ce_recv_buf_enqueue(struct CE_handle *copyeng,
211 void *per_transfer_recv_context,
212 cdf_dma_addr_t buffer);
213
214/*
215 * Register a Receive Callback function.
216 * This function is called as soon as data is received
217 * from the source.
218 */
219void ce_recv_cb_register(struct CE_handle *copyeng,
220 CE_recv_cb fn_ptr,
221 void *per_CE_recv_context,
222 int disable_interrupts);
223
224/*==================CE Watermark=============================================*/
225
226/*
227 * Register a Watermark Callback function.
228 * This function is called as soon as a watermark level
229 * is crossed. A Watermark Callback function is free to
230 * handle received data "en masse"; but then some coordination
231 * is required with a registered Receive Callback function.
232 * [Suggestion: Either handle Receives in a Receive Callback
233 * or en masse in a Watermark Callback; but not both.]
234 */
235void ce_watermark_cb_register(struct CE_handle *copyeng,
236 CE_watermark_cb fn_ptr,
237 void *per_CE_wm_context);
238
239/*
240 * Set low/high watermarks for the send/source side of a copy engine.
241 *
242 * Typically, the destination side CPU manages watermarks for
243 * the receive side and the source side CPU manages watermarks
244 * for the send side.
245 *
246 * A low watermark of 0 is never hit (so the watermark function
247 * will never be called for a Low Watermark condition).
248 *
249 * A high watermark equal to nentries is never hit (so the
250 * watermark function will never be called for a High Watermark
251 * condition).
252 */
253void ce_send_watermarks_set(struct CE_handle *copyeng,
254 unsigned int low_alert_nentries,
255 unsigned int high_alert_nentries);
256
257/* Set low/high watermarks for the receive/destination side of copy engine. */
258void ce_recv_watermarks_set(struct CE_handle *copyeng,
259 unsigned int low_alert_nentries,
260 unsigned int high_alert_nentries);
261
262/*
263 * Return the number of entries that can be queued
264 * to a ring at an instant in time.
265 *
266 * For source ring, does not imply that destination-side
267 * buffers are available; merely indicates descriptor space
268 * in the source ring.
269 *
270 * For destination ring, does not imply that previously
271 * received buffers have been processed; merely indicates
272 * descriptor space in destination ring.
273 *
274 * Mainly for use with CE Watermark callback.
275 */
276unsigned int ce_send_entries_avail(struct CE_handle *copyeng);
277unsigned int ce_recv_entries_avail(struct CE_handle *copyeng);
278
279/*
280 * Return the number of entries in the ring that are ready
281 * to be processed by software.
282 *
283 * For source ring, the number of descriptors that have
284 * been completed and can now be overwritten with new send
285 * descriptors.
286 *
287 * For destination ring, the number of descriptors that
288 * are available to be processed (newly received buffers).
289 */
290unsigned int ce_send_entries_done(struct CE_handle *copyeng);
291unsigned int ce_recv_entries_done(struct CE_handle *copyeng);
292
293/* recv flags */
294/* Data is byte-swapped */
295#define CE_RECV_FLAG_SWAPPED 1
296
297void ce_enable_msi(struct ol_softc *scn,
298 unsigned int CE_id,
299 uint32_t msi_addr_lo,
300 uint32_t msi_addr_hi,
301 uint32_t msi_data);
302/*
303 * Supply data for the next completed unprocessed receive descriptor.
304 *
305 * For use
306 * with CE Watermark callback,
307 * in a recv_cb function when processing buf_lists
308 * in a recv_cb function in order to mitigate recv_cb's.
309 *
 * Implementation note: Pops buffer from Dest ring.
311 */
312int ce_completed_recv_next(struct CE_handle *copyeng,
313 void **per_CE_contextp,
314 void **per_transfer_contextp,
315 cdf_dma_addr_t *bufferp,
316 unsigned int *nbytesp,
317 unsigned int *transfer_idp,
318 unsigned int *flagsp);
319
320/*
321 * Supply data for the next completed unprocessed send descriptor.
322 *
323 * For use
324 * with CE Watermark callback
325 * in a send_cb function in order to mitigate send_cb's.
326 *
327 * Implementation note: Pops 1 completed send buffer from Source ring
328 */
329int ce_completed_send_next(struct CE_handle *copyeng,
330 void **per_CE_contextp,
331 void **per_transfer_contextp,
332 cdf_dma_addr_t *bufferp,
333 unsigned int *nbytesp,
334 unsigned int *transfer_idp,
335 unsigned int *sw_idx,
336 unsigned int *hw_idx,
337 uint32_t *toeplitz_hash_result);
338
339/*==================CE Engine Initialization=================================*/
340
341/* Initialize an instance of a CE */
342struct CE_handle *ce_init(struct ol_softc *scn,
343 unsigned int CE_id, struct CE_attr *attr);
344
345/*==================CE Engine Shutdown=======================================*/
346/*
347 * Support clean shutdown by allowing the caller to revoke
348 * receive buffers. Target DMA must be stopped before using
349 * this API.
350 */
351CDF_STATUS
352ce_revoke_recv_next(struct CE_handle *copyeng,
353 void **per_CE_contextp,
354 void **per_transfer_contextp,
355 cdf_dma_addr_t *bufferp);
356
357/*
358 * Support clean shutdown by allowing the caller to cancel
359 * pending sends. Target DMA must be stopped before using
360 * this API.
361 */
362CDF_STATUS
363ce_cancel_send_next(struct CE_handle *copyeng,
364 void **per_CE_contextp,
365 void **per_transfer_contextp,
366 cdf_dma_addr_t *bufferp,
367 unsigned int *nbytesp,
368 unsigned int *transfer_idp,
369 uint32_t *toeplitz_hash_result);
370
371void ce_fini(struct CE_handle *copyeng);
372
373/*==================CE Interrupt Handlers====================================*/
374void ce_per_engine_service_any(int irq, struct ol_softc *scn);
375int ce_per_engine_service(struct ol_softc *scn, unsigned int CE_id);
376void ce_per_engine_servicereap(struct ol_softc *scn, unsigned int CE_id);
377
378/*===================CE cmpl interrupt Enable/Disable =======================*/
379void ce_disable_any_copy_compl_intr(struct ol_softc *scn);
380void ce_enable_any_copy_compl_intr(struct ol_softc *scn);
381void ce_disable_any_copy_compl_intr_nolock(struct ol_softc *scn);
382void ce_enable_any_copy_compl_intr_nolock(struct ol_softc *scn);
383
/* API to check if any of the copy engine pipes has
 * pending frames for processing
 */
387bool ce_get_rx_pending(struct ol_softc *scn);
388
389/* CE_attr.flags values */
390#define CE_ATTR_NO_SNOOP 0x01 /* Use NonSnooping PCIe accesses? */
391#define CE_ATTR_BYTE_SWAP_DATA 0x02 /* Byte swap data words */
392#define CE_ATTR_SWIZZLE_DESCRIPTORS 0x04 /* Swizzle descriptors? */
393#define CE_ATTR_DISABLE_INTR 0x08 /* no interrupt on copy completion */
394#define CE_ATTR_ENABLE_POLL 0x10 /* poll for residue descriptors */
395
/* Attributes of an instance of a Copy Engine; passed to ce_init() */
struct CE_attr {
	unsigned int flags;         /* CE_ATTR_* values */
	unsigned int priority;      /* TBD */
	unsigned int src_nentries;  /* #entries in source ring -
	                             * Must be a power of 2 */
	unsigned int src_sz_max;    /* Max source send size for this CE.
	                             * This is also the minimum size of
	                             * a destination buffer. */
	unsigned int dest_nentries; /* #entries in destination ring -
	                             * Must be a power of 2 */
	void *reserved;             /* Future use */
};
409
410/*
411 * When using sendlist_send to transfer multiple buffer fragments, the
412 * transfer context of each fragment, except last one, will be filled
413 * with CE_SENDLIST_ITEM_CTXT. CE_completed_send will return success for
414 * each fragment done with send and the transfer context would be
415 * CE_SENDLIST_ITEM_CTXT. Upper layer could use this to identify the
416 * status of a send completion.
417 */
418#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef)
419
/*
 * This is an opaque type that is at least large enough to hold
 * a sendlist. A sendlist can only be accessed through CE APIs,
 * but this allows a sendlist to be allocated on the run-time
 * stack. TBDXXX: un-opaque would be simpler...
 */
struct ce_sendlist {
	/* Opaque storage; the real layout is private to the CE layer.
	 * NOTE(review): the size (62 words) must stay in sync with the
	 * internal sendlist structure -- see ce_sendlist_sizeof(). */
	unsigned int word[62];
};
429
430#define ATH_ISR_NOSCHED 0x0000 /* Do not schedule bottom half/DPC */
431#define ATH_ISR_SCHED 0x0001 /* Schedule the bottom half for execution */
432#define ATH_ISR_NOTMINE 0x0002 /* for shared IRQ's */
433
434#ifdef IPA_OFFLOAD
435/*
436 * Copy engine should release resource to micro controller
437 * Micro controller needs
438 - Copy engine source descriptor base address
439 - Copy engine source descriptor size
440 - PCI BAR address to access copy engine regiser
441 */
442void ce_ipa_get_resource(struct CE_handle *ce,
443 uint32_t *ce_sr_base_paddr,
444 uint32_t *ce_sr_ring_size,
445 cdf_dma_addr_t *ce_reg_paddr);
446#else
447static inline void ce_ipa_get_resource(struct CE_handle *ce,
448 uint32_t *ce_sr_base_paddr,
449 uint32_t *ce_sr_ring_size,
450 cdf_dma_addr_t *ce_reg_paddr)
451{
452 return;
453}
454#endif /* IPA_OFFLOAD */
455
456static inline void ce_pkt_error_count_incr(
457 struct HIF_CE_state *_hif_state,
458 enum ol_ath_hif_pkt_ecodes _hif_ecode)
459{
460 if (_hif_ecode == HIF_PIPE_NO_RESOURCE)
461 (_hif_state->scn->pkt_stats.hif_pipe_no_resrc_count)
462 += 1;
463}
464
465int hif_completion_thread(struct HIF_CE_state *hif_state);
466bool ce_check_rx_pending(struct ol_softc *scn, int ce_id);
467#if defined(FEATURE_LRO)
468void ce_lro_flush_cb_register(struct ol_softc *scn,
469 void (handler)(void *), void *data);
470#endif
471#endif /* __COPY_ENGINE_API_H__ */