/*
 * Copyright (c) 2016 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hif.h"
#include "hif_io32.h"
#include "reg_struct.h"
#include "ce_api.h"
#include "ce_main.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "qdf_lock.h"
#include "regtable.h"
#include "hif_main.h"
#include "hif_debug.h"
#include "hal_api.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem, than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established, and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 * Target TX hash result: toeplitz_hash_result
 */
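
/*
 * Illustrative usage sketch (not compiled; the CE calls are the functions
 * defined in this file, the caller-side details are hypothetical): a
 * HIF-level caller typically keeps the destination ring stocked with
 * anonymous receive buffers and posts sends with a per-transfer context
 * (e.g. the nbuf) that is echoed back on completion. The *_nolock variants
 * assume the caller handles locking -- ce_sendlist_send_srng(), for
 * example, takes CE_state->ce_index_lock before calling
 * ce_send_nolock_srng().
 *
 *   void example_hif_usage(struct CE_handle *tx_ce, struct CE_handle *rx_ce,
 *                          qdf_nbuf_t tx_nbuf, qdf_nbuf_t rx_nbuf,
 *                          qdf_dma_addr_t tx_paddr, qdf_dma_addr_t rx_paddr)
 *   {
 *           // Keep the destination ring stocked with recv buffers.
 *           ce_recv_buf_enqueue_srng(rx_ce, rx_nbuf, rx_paddr);
 *
 *           // Post a single send; transfer_id 0 and flags 0 are
 *           // placeholders chosen for the example.
 *           ce_send_nolock_srng(tx_ce, tx_nbuf, tx_paddr,
 *                               qdf_nbuf_len(tx_nbuf), 0, 0, 0);
 *   }
 *
 * On completion, ce_completed_send_next_nolock_srng() and
 * ce_completed_recv_next_nolock_srng() hand the same per-transfer
 * contexts (tx_nbuf / rx_nbuf above) back to the caller.
 */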

#define CE_ADDR_COPY(desc, dma_addr) do {\
                (desc)->buffer_addr_lo = (uint32_t)((dma_addr) &\
                                                        0xFFFFFFFF);\
                (desc)->buffer_addr_hi =\
                        (uint32_t)(((dma_addr) >> 32) & 0xFF);\
        } while (0)
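
/*
 * CE_ADDR_COPY splits a DMA address into the descriptor's buffer_addr_lo
 * (low 32 bits) and buffer_addr_hi (the next 8 bits, i.e. bits 39:32 of
 * what is assumed here to be a 40-bit address). For example, a
 * hypothetical dma_addr of 0x12_3456_7890 yields:
 *
 *   buffer_addr_lo = 0x34567890;
 *   buffer_addr_hi = 0x12;
 */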

int
ce_send_nolock_srng(struct CE_handle *copyeng,
                    void *per_transfer_context,
                    qdf_dma_addr_t buffer,
                    uint32_t nbytes,
                    uint32_t transfer_id,
                    uint32_t flags,
                    uint32_t user_flags)
{
        int status;
        struct CE_state *CE_state = (struct CE_state *)copyeng;
        struct CE_ring_state *src_ring = CE_state->src_ring;
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int write_index = src_ring->write_index;
        uint64_t dma_addr = buffer;
        struct hif_softc *scn = CE_state->scn;

        if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
                return QDF_STATUS_E_FAILURE;
        if (unlikely(hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx,
                                            false) <= 0)) {
                OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
                Q_TARGET_ACCESS_END(scn);
                return QDF_STATUS_E_FAILURE;
        }
        {
                enum hif_ce_event_type event_type = HIF_TX_GATHER_DESC_POST;
                struct ce_srng_src_desc *src_desc;

                if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
                        Q_TARGET_ACCESS_END(scn);
                        return QDF_STATUS_E_FAILURE;
                }

                src_desc = hal_srng_src_get_next_reaped(scn->hal_soc,
                                                        src_ring->srng_ctx);

                /* Update the low 32 bits and high 8 bits of the
                 * source buffer address.
                 */
                src_desc->buffer_addr_lo =
                        (uint32_t)(dma_addr & 0xFFFFFFFF);
                src_desc->buffer_addr_hi =
                        (uint32_t)((dma_addr >> 32) & 0xFF);

                src_desc->meta_data = transfer_id;

                /*
                 * Set the swap bit if:
                 * typical sends on this CE are swapped (host is big-endian)
                 * and this send doesn't disable the swapping
                 * (data is not bytestream)
                 */
                src_desc->byte_swap =
                        (((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
                          != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
                src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
                src_desc->nbytes = nbytes;

                src_ring->per_transfer_context[write_index] =
                        per_transfer_context;
                write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

                hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);

                /* src_ring->write_index hasn't been updated even though
                 * the register has already been written to.
                 */
                hif_record_ce_desc_event(scn, CE_state->id, event_type,
                                (union ce_desc *)src_desc,
                                per_transfer_context,
                                src_ring->write_index);

                src_ring->write_index = write_index;
                status = QDF_STATUS_SUCCESS;
        }
        Q_TARGET_ACCESS_END(scn);
        return status;
}

int
ce_sendlist_send_srng(struct CE_handle *copyeng,
                      void *per_transfer_context,
                      struct ce_sendlist *sendlist, unsigned int transfer_id)
{
        int status = -ENOMEM;
        struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
        struct CE_state *CE_state = (struct CE_state *)copyeng;
        struct CE_ring_state *src_ring = CE_state->src_ring;
        unsigned int num_items = sl->num_items;
        unsigned int sw_index;
        unsigned int write_index;
        struct hif_softc *scn = CE_state->scn;

        QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

        qdf_spin_lock_bh(&CE_state->ce_index_lock);
        sw_index = src_ring->sw_index;
        write_index = src_ring->write_index;

        if (hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx, false) >=
            num_items) {
                struct ce_sendlist_item *item;
                int i;

                /* handle all but the last item uniformly */
                for (i = 0; i < num_items - 1; i++) {
                        item = &sl->item[i];
                        /* TBDXXX: Support extensible sendlist_types? */
                        QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
                        status = ce_send_nolock_srng(copyeng,
                                        CE_SENDLIST_ITEM_CTXT,
                                        (qdf_dma_addr_t)item->data,
                                        item->u.nbytes, transfer_id,
                                        item->flags | CE_SEND_FLAG_GATHER,
                                        item->user_flags);
                        QDF_ASSERT(status == QDF_STATUS_SUCCESS);
                }
                /* provide valid context pointer for final item */
                item = &sl->item[i];
                /* TBDXXX: Support extensible sendlist_types? */
                QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
                status = ce_send_nolock_srng(copyeng, per_transfer_context,
                                             (qdf_dma_addr_t)item->data,
                                             item->u.nbytes,
                                             transfer_id, item->flags,
                                             item->user_flags);
                QDF_ASSERT(status == QDF_STATUS_SUCCESS);
                QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
                                             QDF_NBUF_TX_PKT_CE);
                DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
                        QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
                        (uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
                        sizeof(((qdf_nbuf_t)per_transfer_context)->data),
                        QDF_TX));
        } else {
                /*
                 * Probably not worth the additional complexity to support
                 * partial sends with continuation or notification. We expect
                 * to use large rings and small sendlists. If we can't handle
                 * the entire request at once, punt it back to the caller.
                 */
        }
        qdf_spin_unlock_bh(&CE_state->ce_index_lock);

        return status;
}

#define SLOTS_PER_DATAPATH_TX 2

#ifndef AH_NEED_TX_DATA_SWAP
#define AH_NEED_TX_DATA_SWAP 0
#endif

/**
 * ce_recv_buf_enqueue_srng() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: 0 if the buffer is enqueued
 */
int
ce_recv_buf_enqueue_srng(struct CE_handle *copyeng,
                         void *per_recv_context, qdf_dma_addr_t buffer)
{
        int status;
        struct CE_state *CE_state = (struct CE_state *)copyeng;
        struct CE_ring_state *dest_ring = CE_state->dest_ring;
        unsigned int nentries_mask = dest_ring->nentries_mask;
        unsigned int write_index;
        unsigned int sw_index;
        uint64_t dma_addr = buffer;
        struct hif_softc *scn = CE_state->scn;

        qdf_spin_lock_bh(&CE_state->ce_index_lock);
        write_index = dest_ring->write_index;
        sw_index = dest_ring->sw_index;

        if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
                qdf_spin_unlock_bh(&CE_state->ce_index_lock);
                return -EIO;
        }

        if (hal_srng_access_start(scn->hal_soc, dest_ring->srng_ctx)) {
                Q_TARGET_ACCESS_END(scn);
                qdf_spin_unlock_bh(&CE_state->ce_index_lock);
                return QDF_STATUS_E_FAILURE;
        }

        if ((hal_srng_src_num_avail(scn->hal_soc,
                                    dest_ring->srng_ctx, false) > 0)) {
                struct ce_srng_dest_desc *dest_desc =
                        hal_srng_src_get_next(scn->hal_soc,
                                              dest_ring->srng_ctx);

                if (dest_desc == NULL) {
                        status = QDF_STATUS_E_FAILURE;
                } else {
                        CE_ADDR_COPY(dest_desc, dma_addr);

                        dest_ring->per_transfer_context[write_index] =
                                per_recv_context;

                        /* Update Destination Ring Write Index */
                        write_index = CE_RING_IDX_INCR(nentries_mask,
                                                       write_index);
                        status = QDF_STATUS_SUCCESS;
                }
        } else
                status = QDF_STATUS_E_FAILURE;

        dest_ring->write_index = write_index;
        hal_srng_access_end(scn->hal_soc, dest_ring->srng_ctx);
        Q_TARGET_ACCESS_END(scn);
        qdf_spin_unlock_bh(&CE_state->ce_index_lock);
        return status;
}
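
/*
 * Illustrative sketch (not compiled): refilling the destination ring from
 * an nbuf. The mapping helpers shown (qdf_nbuf_map_single(),
 * qdf_nbuf_get_frag_paddr()) are assumed QDF calls for this purpose; the
 * actual refill path is owned by the HIF layer and may differ.
 *
 *   static int example_post_rx_buffer(struct hif_softc *scn,
 *                                     struct CE_handle *ce_hdl,
 *                                     qdf_nbuf_t nbuf)
 *   {
 *           qdf_dma_addr_t paddr;
 *
 *           // DMA-map the nbuf for device-to-host transfers.
 *           if (qdf_nbuf_map_single(scn->qdf_dev, nbuf, QDF_DMA_FROM_DEVICE))
 *                   return QDF_STATUS_E_FAILURE;
 *
 *           paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
 *
 *           // The nbuf itself is the per-transfer context echoed back
 *           // by ce_completed_recv_next_nolock_srng().
 *           return ce_recv_buf_enqueue_srng(ce_hdl, nbuf, paddr);
 *   }
 */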

/**
 * ce_send_watermarks_set_srng
 */
void
ce_send_watermarks_set_srng(struct CE_handle *copyeng,
                            unsigned int low_alert_nentries,
                            unsigned int high_alert_nentries)
{
        /* TODO */
}

/*
 * ce_recv_watermarks_set_srng
 */
void
ce_recv_watermarks_set_srng(struct CE_handle *copyeng,
                            unsigned int low_alert_nentries,
                            unsigned int high_alert_nentries)
{
        /* TODO */
}

unsigned int ce_send_entries_avail_srng(struct CE_handle *copyeng)
{
        struct CE_state *CE_state = (struct CE_state *)copyeng;
        struct CE_ring_state *src_ring = CE_state->src_ring;
        struct hif_softc *scn = CE_state->scn;

        return hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx, false);
}

unsigned int ce_recv_entries_avail_srng(struct CE_handle *copyeng)
{
        struct CE_state *CE_state = (struct CE_state *)copyeng;
        struct CE_ring_state *dest_ring = CE_state->dest_ring;
        struct hif_softc *scn = CE_state->scn;

        return hal_srng_src_num_avail(scn->hal_soc, dest_ring->srng_ctx, false);
}

/*
 * Guts of ce_recv_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
unsigned int
ce_recv_entries_done_nolock_srng(struct hif_softc *scn,
                                 struct CE_state *CE_state)
{
        struct CE_ring_state *status_ring = CE_state->status_ring;

        return hal_srng_dst_num_valid(scn->hal_soc,
                                      status_ring->srng_ctx, false);
}

/*
 * Guts of ce_send_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
unsigned int
ce_send_entries_done_nolock_srng(struct hif_softc *scn,
                                 struct CE_state *CE_state)
{
        struct CE_ring_state *src_ring = CE_state->src_ring;
        int count = 0;

        if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx))
                return 0;

        count = hal_srng_src_done_val(scn->hal_soc, src_ring->srng_ctx);

        hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);

        return count;
}

/* Debug support */
void *ce_debug_cmplrn_context_srng; /* completed recv next context */
void *ce_debug_cmplsn_context_srng; /* completed send next context */

/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
int
ce_completed_recv_next_nolock_srng(struct CE_state *CE_state,
                                   void **per_CE_contextp,
                                   void **per_transfer_contextp,
                                   qdf_dma_addr_t *bufferp,
                                   unsigned int *nbytesp,
                                   unsigned int *transfer_idp,
                                   unsigned int *flagsp)
{
        int status;
        struct CE_ring_state *dest_ring = CE_state->dest_ring;
        struct CE_ring_state *status_ring = CE_state->status_ring;
        unsigned int nentries_mask = dest_ring->nentries_mask;
        unsigned int sw_index = dest_ring->sw_index;
        struct hif_softc *scn = CE_state->scn;
        struct ce_srng_dest_status_desc *dest_status;
        int nbytes;
        struct ce_srng_dest_status_desc dest_status_info;

        if (hal_srng_access_start(scn->hal_soc, status_ring->srng_ctx)) {
                status = QDF_STATUS_E_FAILURE;
                goto done;
        }

        dest_status = hal_srng_dst_get_next(scn->hal_soc,
                                            status_ring->srng_ctx);

        if (dest_status == NULL) {
                status = QDF_STATUS_E_FAILURE;
                goto done;
        }
        /*
         * By copying the dest_status descriptor to local memory, we avoid
         * an extra memory read from non-cacheable memory.
         */
        dest_status_info = *dest_status;
        nbytes = dest_status_info.nbytes;
        if (nbytes == 0) {
                /*
                 * This closes a relatively unusual race where the Host
                 * sees the updated DRRI before the update to the
                 * corresponding descriptor has completed. We treat this
                 * as a descriptor that is not yet done.
                 */
                status = QDF_STATUS_E_FAILURE;
                goto done;
        }

        dest_status->nbytes = 0;

        *nbytesp = nbytes;
        *transfer_idp = dest_status_info.meta_data;
        *flagsp = (dest_status_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

        if (per_CE_contextp)
                *per_CE_contextp = CE_state->recv_context;

        /* NOTE: sw_index is more like a read_index in this context. It has
         * a one-to-one mapping with the status ring.
         * Get the per-transfer context from dest_ring.
         */
        ce_debug_cmplrn_context_srng =
                dest_ring->per_transfer_context[sw_index];

        if (per_transfer_contextp)
                *per_transfer_contextp = ce_debug_cmplrn_context_srng;

        dest_ring->per_transfer_context[sw_index] = 0; /* sanity */

        /* Update sw_index */
        sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
        dest_ring->sw_index = sw_index;
        status = QDF_STATUS_SUCCESS;

done:
        hal_srng_access_end(scn->hal_soc, status_ring->srng_ctx);

        return status;
}

QDF_STATUS
ce_revoke_recv_next_srng(struct CE_handle *copyeng,
                         void **per_CE_contextp,
                         void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
{
        QDF_STATUS status = QDF_STATUS_E_FAILURE;

        return status;
}

/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
int
ce_completed_send_next_nolock_srng(struct CE_state *CE_state,
                                   void **per_CE_contextp,
                                   void **per_transfer_contextp,
                                   qdf_dma_addr_t *bufferp,
                                   unsigned int *nbytesp,
                                   unsigned int *transfer_idp,
                                   unsigned int *sw_idx,
                                   unsigned int *hw_idx,
                                   uint32_t *toeplitz_hash_result)
{
        int status = QDF_STATUS_E_FAILURE;
        struct CE_ring_state *src_ring = CE_state->src_ring;
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int sw_index = src_ring->sw_index;
        struct hif_softc *scn = CE_state->scn;
        struct ce_srng_src_desc *src_desc;

        if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
                status = QDF_STATUS_E_FAILURE;
                return status;
        }

        src_desc = hal_srng_src_reap_next(scn->hal_soc, src_ring->srng_ctx);
        if (src_desc) {
                /* Return data from completed source descriptor */
                *bufferp = (qdf_dma_addr_t)
                        (((uint64_t)(src_desc)->buffer_addr_lo +
                          ((uint64_t)((src_desc)->buffer_addr_hi &
                                      0xFF) << 32)));
                *nbytesp = src_desc->nbytes;
                *transfer_idp = src_desc->meta_data;
                *toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/

                if (per_CE_contextp)
                        *per_CE_contextp = CE_state->send_context;

                /* sw_index is used more like a read index */
                ce_debug_cmplsn_context_srng =
                        src_ring->per_transfer_context[sw_index];
                if (per_transfer_contextp)
                        *per_transfer_contextp = ce_debug_cmplsn_context_srng;

                src_ring->per_transfer_context[sw_index] = 0; /* sanity */

                /* Update sw_index */
                sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
                src_ring->sw_index = sw_index;
                status = QDF_STATUS_SUCCESS;
        }
        hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);

        return status;
}

/* NB: Modelled after ce_completed_send_next */
QDF_STATUS
ce_cancel_send_next_srng(struct CE_handle *copyeng,
                         void **per_CE_contextp,
                         void **per_transfer_contextp,
                         qdf_dma_addr_t *bufferp,
                         unsigned int *nbytesp,
                         unsigned int *transfer_idp,
                         uint32_t *toeplitz_hash_result)
{
        return 0;
}

/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
#define CE_WM_SHFT 1

/*
 * Number of times to check for any pending tx/rx completion on
 * a copy engine; this count should be big enough. Once we hit
 * this threshold we'll not check for any Tx/Rx completion in the same
 * interrupt handling. Note that this threshold is only used for
 * Rx interrupt processing; it can be used for Tx as well if we
 * suspect an infinite loop in checking for pending Tx completion.
 */
#define CE_TXRX_COMP_CHECK_THRESHOLD 20

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with target_lock held.
 */
static void
ce_per_engine_handler_adjust_srng(struct CE_state *CE_state,
                                  int disable_copy_compl_intr)
{
}

bool ce_check_int_watermark_srng(struct CE_state *CE_state, unsigned int *flags)
{
        /* TODO */
        return false;
}

uint32_t ce_get_desc_size_srng(uint8_t ring_type)
{
        switch (ring_type) {
        case CE_RING_SRC:
                return sizeof(struct ce_srng_src_desc);
        case CE_RING_DEST:
                return sizeof(struct ce_srng_dest_desc);
        case CE_RING_STATUS:
                return sizeof(struct ce_srng_dest_status_desc);
        default:
                return 0;
        }
        return 0;
}

void ce_srng_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
                            struct CE_ring_state *src_ring)
{
        struct hal_srng_params ring_params = {0};

        ring_params.ring_base_paddr = src_ring->base_addr_CE_space;
        ring_params.ring_base_vaddr = src_ring->base_addr_owner_space;
        ring_params.num_entries = src_ring->nentries;
        /*
         * The minimum increment for the timer is 8us.
         * A value of 0 disables the timer.
         * A non-zero default value caused continuous interrupts to
         * fire with MSI enabled. Need to revisit usage of the timer.
         */
        ring_params.intr_timer_thres_us = 0;
        ring_params.intr_batch_cntr_thres_entries = 1;

        /* TODO
         * ring_params.msi_addr = XXX;
         * ring_params.msi_data = XXX;
         * ring_params.flags = XXX;
         */

        src_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_SRC, ce_id, 0,
                                            &ring_params);
}

void ce_srng_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
                             struct CE_ring_state *dest_ring,
                             struct CE_attr *attr)
{
        struct hal_srng_params ring_params = {0};

        ring_params.ring_base_paddr = dest_ring->base_addr_CE_space;
        ring_params.ring_base_vaddr = dest_ring->base_addr_owner_space;
        ring_params.num_entries = dest_ring->nentries;
        ring_params.intr_timer_thres_us = 0;
        ring_params.intr_batch_cntr_thres_entries = 1;
        ring_params.max_buffer_length = attr->src_sz_max;

        /* TODO
         * ring_params.msi_addr = XXX;
         * ring_params.msi_data = XXX;
         * ring_params.flags = XXX;
         */

        /* The dest ring is a "source-style" ring from the host's point of
         * view: the host produces (posts) receive buffers into it.
         */
        dest_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST, ce_id, 0,
                                             &ring_params);
}

void ce_srng_status_ring_setup(struct hif_softc *scn, uint32_t ce_id,
                               struct CE_ring_state *status_ring)
{
        struct hal_srng_params ring_params = {0};

        ring_params.ring_base_paddr = status_ring->base_addr_CE_space;
        ring_params.ring_base_vaddr = status_ring->base_addr_owner_space;
        ring_params.num_entries = status_ring->nentries;
        ring_params.intr_timer_thres_us = 0;
        ring_params.intr_batch_cntr_thres_entries = 1;

        /* TODO
         * ring_params.msi_addr = XXX;
         * ring_params.msi_data = XXX;
         * ring_params.flags = XXX;
         */

        status_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST_STATUS,
                                               ce_id, 0, &ring_params);
}

void ce_ring_setup_srng(struct hif_softc *scn, uint8_t ring_type,
                        uint32_t ce_id, struct CE_ring_state *ring,
                        struct CE_attr *attr)
{
        switch (ring_type) {
        case CE_RING_SRC:
                ce_srng_src_ring_setup(scn, ce_id, ring);
                break;
        case CE_RING_DEST:
                ce_srng_dest_ring_setup(scn, ce_id, ring, attr);
                break;
        case CE_RING_STATUS:
                ce_srng_status_ring_setup(scn, ce_id, ring);
                break;
        default:
                qdf_assert(0);
                break;
        }
}

struct ce_ops ce_service_srng = {
        .ce_get_desc_size = ce_get_desc_size_srng,
        .ce_ring_setup = ce_ring_setup_srng,
        .ce_sendlist_send = ce_sendlist_send_srng,
        .ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_srng,
        .ce_revoke_recv_next = ce_revoke_recv_next_srng,
        .ce_cancel_send_next = ce_cancel_send_next_srng,
        .ce_recv_buf_enqueue = ce_recv_buf_enqueue_srng,
        .ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_srng,
        .ce_send_nolock = ce_send_nolock_srng,
        .watermark_int = ce_check_int_watermark_srng,
        .ce_completed_send_next_nolock = ce_completed_send_next_nolock_srng,
        .ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_srng,
        .ce_send_entries_done_nolock = ce_send_entries_done_nolock_srng,
};

struct ce_ops *ce_services_srng(void)
{
        return &ce_service_srng;
}