/*
 * Copyright (c) 2016 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hif.h"
#include "hif_io32.h"
#include "reg_struct.h"
#include "ce_api.h"
#include "ce_main.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "qdf_lock.h"
#include "regtable.h"
#include "hif_main.h"
#include "hif_debug.h"
#include "hal_api.h"
#include "pld_common.h"
/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem -- than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 * Target TX hash result: toeplitz_hash_result
 */

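/*
 * Illustrative sketch (not compiled) of how a HIF caller typically drives
 * these entry points, assuming it already holds a CE_handle and takes care
 * of locking; buffers and contexts below are placeholders:
 *
 *	ce_recv_buf_enqueue_srng(copyeng, rx_nbuf, rx_paddr);
 *
 *	ce_send_nolock_srng(copyeng, tx_nbuf, tx_paddr, nbytes,
 *			    transfer_id, 0, 0);
 *
 *	ce_completed_send_next_nolock_srng(CE_state, &per_ce_ctx,
 *					   &per_xfer_ctx, &paddr, &nbytes,
 *					   &transfer_id, &sw_idx, &hw_idx,
 *					   &toeplitz_hash_result);
 *
 * The destination side keeps its ring stocked with receive buffers, the
 * source side posts sends, and completed sends are reaped to recover the
 * per-transfer context supplied at post time.
 */
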
#define CE_ADDR_COPY(desc, dma_addr) do {\
		(desc)->buffer_addr_lo = (uint32_t)((dma_addr) &\
							0xFFFFFFFF);\
		(desc)->buffer_addr_hi =\
			(uint32_t)(((dma_addr) >> 32) & 0xFF);\
	} while (0)

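/**
 * ce_send_nolock_srng() - post a single send descriptor to a copy engine
 * @copyeng: copy engine handle
 * @per_transfer_context: context echoed back when the send completes
 * @buffer: physical address of the buffer to send
 * @nbytes: number of bytes to send
 * @transfer_id: caller-supplied id, carried in the descriptor meta data
 * @flags: CE_SEND_FLAG_* flags (gather, swap disable)
 * @user_flags: per-send user flags (currently unused here)
 *
 * Caller is responsible for any locking; ce_index_lock is not taken here.
 *
 * Return: QDF_STATUS_SUCCESS if the descriptor was posted,
 *	   QDF_STATUS_E_FAILURE otherwise
 */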
int
ce_send_nolock_srng(struct CE_handle *copyeng,
		    void *per_transfer_context,
		    qdf_dma_addr_t buffer,
		    uint32_t nbytes,
		    uint32_t transfer_id,
		    uint32_t flags,
		    uint32_t user_flags)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index = src_ring->write_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return QDF_STATUS_E_FAILURE;
	if (unlikely(hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx,
					    false) <= 0)) {
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		Q_TARGET_ACCESS_END(scn);
		return QDF_STATUS_E_FAILURE;
	}
	{
		enum hif_ce_event_type event_type = HIF_TX_GATHER_DESC_POST;
		struct ce_srng_src_desc *src_desc;

		if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
			Q_TARGET_ACCESS_END(scn);
			return QDF_STATUS_E_FAILURE;
		}

		src_desc = hal_srng_src_get_next_reaped(scn->hal_soc,
							src_ring->srng_ctx);
		if (!src_desc) {
			/* should not happen after the num_avail check above */
			hal_srng_access_end_reap(scn->hal_soc,
						 src_ring->srng_ctx);
			Q_TARGET_ACCESS_END(scn);
			return QDF_STATUS_E_FAILURE;
		}

		/* Update low 32 bits source descriptor address */
		src_desc->buffer_addr_lo =
			(uint32_t)(dma_addr & 0xFFFFFFFF);
		src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0xFF);

		src_desc->meta_data = transfer_id;

		/*
		 * Set the swap bit if:
		 * typical sends on this CE are swapped (host is big-endian)
		 * and this send doesn't disable the swapping
		 * (data is not bytestream)
		 */
		src_desc->byte_swap =
			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
		src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
		src_desc->nbytes = nbytes;

		src_ring->per_transfer_context[write_index] =
			per_transfer_context;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);

		/* src_ring->write_index hasn't been updated yet, even though
		 * the register has already been written to.
		 */
		hif_record_ce_desc_event(scn, CE_state->id, event_type,
				(union ce_desc *)src_desc, per_transfer_context,
				src_ring->write_index);

		src_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	}
	Q_TARGET_ACCESS_END(scn);
	return status;
}

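/**
 * ce_sendlist_send_srng() - send a list of buffers as one gather transfer
 * @copyeng: copy engine handle
 * @per_transfer_context: context echoed back when the final item completes
 * @sendlist: list of simple buffers to send
 * @transfer_id: caller-supplied id, carried in each descriptor
 *
 * All but the final item are posted with CE_SEND_FLAG_GATHER so the target
 * treats the list as a single logical transfer. If the source ring cannot
 * hold the entire list, nothing is posted and an error is returned.
 *
 * Return: QDF_STATUS_SUCCESS on success, -ENOMEM if the ring lacks space
 */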
int
ce_sendlist_send_srng(struct CE_handle *copyeng,
		      void *per_transfer_context,
		      struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	int status = -ENOMEM;
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int num_items = sl->num_items;
	unsigned int sw_index;
	unsigned int write_index;
	struct hif_softc *scn = CE_state->scn;

	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx, false) >=
	    num_items) {
		struct ce_sendlist_item *item;
		int i;

		/* handle all but the last item uniformly */
		for (i = 0; i < num_items - 1; i++) {
			item = &sl->item[i];
			/* TBDXXX: Support extensible sendlist_types? */
			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
			status = ce_send_nolock_srng(copyeng,
					CE_SENDLIST_ITEM_CTXT,
					(qdf_dma_addr_t)item->data,
					item->u.nbytes, transfer_id,
					item->flags | CE_SEND_FLAG_GATHER,
					item->user_flags);
			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		}
		/* provide valid context pointer for final item */
		item = &sl->item[i];
		/* TBDXXX: Support extensible sendlist_types? */
		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
		status = ce_send_nolock_srng(copyeng, per_transfer_context,
					(qdf_dma_addr_t)item->data,
					item->u.nbytes,
					transfer_id, item->flags,
					item->user_flags);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
					     QDF_NBUF_TX_PKT_CE);
		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
			(uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
			sizeof(((qdf_nbuf_t)per_transfer_context)->data),
			QDF_TX));
	} else {
		/*
		 * Probably not worth the additional complexity to support
		 * partial sends with continuation or notification. We expect
		 * to use large rings and small sendlists. If we can't handle
		 * the entire request at once, punt it back to the caller.
		 */
	}
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

#define SLOTS_PER_DATAPATH_TX 2

#ifndef AH_NEED_TX_DATA_SWAP
#define AH_NEED_TX_DATA_SWAP 0
#endif

/**
 * ce_recv_buf_enqueue_srng() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: QDF_STATUS_SUCCESS if the buffer is enqueued, an error otherwise
 */
int
ce_recv_buf_enqueue_srng(struct CE_handle *copyeng,
			 void *per_recv_context, qdf_dma_addr_t buffer)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return -EIO;
	}

	if (hal_srng_access_start(scn->hal_soc, dest_ring->srng_ctx)) {
		Q_TARGET_ACCESS_END(scn);
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return QDF_STATUS_E_FAILURE;
	}

	if ((hal_srng_src_num_avail(scn->hal_soc,
				    dest_ring->srng_ctx, false) > 0)) {
		struct ce_srng_dest_desc *dest_desc =
				hal_srng_src_get_next(scn->hal_soc,
						      dest_ring->srng_ctx);

		if (dest_desc == NULL) {
			status = QDF_STATUS_E_FAILURE;
		} else {

			CE_ADDR_COPY(dest_desc, dma_addr);

			dest_ring->per_transfer_context[write_index] =
				per_recv_context;

			/* Update Destination Ring Write Index */
			write_index = CE_RING_IDX_INCR(nentries_mask,
						       write_index);
			status = QDF_STATUS_SUCCESS;
		}
	} else
		status = QDF_STATUS_E_FAILURE;

	dest_ring->write_index = write_index;
	hal_srng_access_end(scn->hal_soc, dest_ring->srng_ctx);
	Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
	return status;
}

/**
 * ce_send_watermarks_set_srng() - set send-side watermark thresholds
 * @copyeng: copy engine handle
 * @low_alert_nentries: low watermark, in ring entries
 * @high_alert_nentries: high watermark, in ring entries
 *
 * Not yet implemented for SRNG based copy engines.
 */
void
ce_send_watermarks_set_srng(struct CE_handle *copyeng,
			    unsigned int low_alert_nentries,
			    unsigned int high_alert_nentries)
{
	/* TODO */
}

/**
 * ce_recv_watermarks_set_srng() - set recv-side watermark thresholds
 * @copyeng: copy engine handle
 * @low_alert_nentries: low watermark, in ring entries
 * @high_alert_nentries: high watermark, in ring entries
 *
 * Not yet implemented for SRNG based copy engines.
 */
void
ce_recv_watermarks_set_srng(struct CE_handle *copyeng,
			    unsigned int low_alert_nentries,
			    unsigned int high_alert_nentries)
{
	/* TODO */
}

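/**
 * ce_send_entries_avail_srng() - free entries in a CE source ring
 * @copyeng: copy engine handle
 *
 * Return: number of source ring entries available for new sends
 */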
unsigned int ce_send_entries_avail_srng(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	struct hif_softc *scn = CE_state->scn;

	return hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx, false);
}

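/**
 * ce_recv_entries_avail_srng() - free entries in a CE destination ring
 * @copyeng: copy engine handle
 *
 * Return: number of destination ring entries available for recv buffers
 */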
unsigned int ce_recv_entries_avail_srng(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	struct hif_softc *scn = CE_state->scn;

	return hal_srng_src_num_avail(scn->hal_soc, dest_ring->srng_ctx, false);
}

/*
 * Guts of ce_recv_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
unsigned int
ce_recv_entries_done_nolock_srng(struct hif_softc *scn,
				 struct CE_state *CE_state)
{
	struct CE_ring_state *status_ring = CE_state->status_ring;

	return hal_srng_dst_num_valid(scn->hal_soc,
				      status_ring->srng_ctx, false);
}

/*
 * Guts of ce_send_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
unsigned int
ce_send_entries_done_nolock_srng(struct hif_softc *scn,
				 struct CE_state *CE_state)
{
	struct CE_ring_state *src_ring = CE_state->src_ring;
	int count = 0;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx))
		return 0;

	count = hal_srng_src_done_val(scn->hal_soc, src_ring->srng_ctx);

	hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);

	return count;
}

/* Debug support */
void *ce_debug_cmplrn_context_srng;	/* completed recv next context */
void *ce_debug_cmplsn_context_srng;	/* completed send next context */

/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
int
ce_completed_recv_next_nolock_srng(struct CE_state *CE_state,
				   void **per_CE_contextp,
				   void **per_transfer_contextp,
				   qdf_dma_addr_t *bufferp,
				   unsigned int *nbytesp,
				   unsigned int *transfer_idp,
				   unsigned int *flagsp)
{
	int status;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	struct CE_ring_state *status_ring = CE_state->status_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_dest_status_desc *dest_status;
	int nbytes;
	struct ce_srng_dest_status_desc dest_status_info;

	if (hal_srng_access_start(scn->hal_soc, status_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	dest_status = hal_srng_dst_get_next(scn->hal_soc,
					    status_ring->srng_ctx);

	if (dest_status == NULL) {
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}
	/*
	 * Copy the dest_desc_info element to local memory so that further
	 * reads avoid extra accesses to non-cacheable memory.
	 */
	dest_status_info = *dest_status;
	nbytes = dest_status_info.nbytes;
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	dest_status->nbytes = 0;

	*nbytesp = nbytes;
	*transfer_idp = dest_status_info.meta_data;
	*flagsp = (dest_status_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with the status ring.
	 * Get the per-transfer context from dest_ring.
	 */
	ce_debug_cmplrn_context_srng =
		dest_ring->per_transfer_context[sw_index];

	if (per_transfer_contextp)
		*per_transfer_contextp = ce_debug_cmplrn_context_srng;

	dest_ring->per_transfer_context[sw_index] = 0;	/* sanity */

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	status = QDF_STATUS_SUCCESS;

done:
	hal_srng_access_end(scn->hal_soc, status_ring->srng_ctx);

	return status;
}

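/**
 * ce_revoke_recv_next_srng() - revoke a previously posted recv buffer
 * @copyeng: copy engine handle
 * @per_CE_contextp: set to the per-CE recv context, if non-NULL
 * @per_transfer_contextp: set to the per-transfer context, if non-NULL
 * @bufferp: set to the physical address of the revoked buffer
 *
 * Not implemented for SRNG based copy engines.
 *
 * Return: QDF_STATUS_E_FAILURE
 */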
QDF_STATUS
ce_revoke_recv_next_srng(struct CE_handle *copyeng,
			 void **per_CE_contextp,
			 void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
{
	QDF_STATUS status = QDF_STATUS_E_FAILURE;

	return status;
}

/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
int
ce_completed_send_next_nolock_srng(struct CE_state *CE_state,
				   void **per_CE_contextp,
				   void **per_transfer_contextp,
				   qdf_dma_addr_t *bufferp,
				   unsigned int *nbytesp,
				   unsigned int *transfer_idp,
				   unsigned int *sw_idx,
				   unsigned int *hw_idx,
				   uint32_t *toeplitz_hash_result)
{
	int status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_src_desc *src_desc;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		return status;
	}

	src_desc = hal_srng_src_reap_next(scn->hal_soc, src_ring->srng_ctx);
	if (src_desc) {

		/* Return data from completed source descriptor */
		*bufferp = (qdf_dma_addr_t)
			(((uint64_t)(src_desc)->buffer_addr_lo +
			  ((uint64_t)((src_desc)->buffer_addr_hi &
				      0xFF) << 32)));
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
		*toeplitz_hash_result = 0; /* src_desc->toeplitz_hash_result */

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		/* sw_index is used more like a read index */
		ce_debug_cmplsn_context_srng =
			src_ring->per_transfer_context[sw_index];
		if (per_transfer_contextp)
			*per_transfer_contextp = ce_debug_cmplsn_context_srng;

		src_ring->per_transfer_context[sw_index] = 0; /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}
	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return status;
}

/* NB: Modelled after ce_completed_send_next */
QDF_STATUS
ce_cancel_send_next_srng(struct CE_handle *copyeng,
			 void **per_CE_contextp,
			 void **per_transfer_contextp,
			 qdf_dma_addr_t *bufferp,
			 unsigned int *nbytesp,
			 unsigned int *transfer_idp,
			 uint32_t *toeplitz_hash_result)
{
	return 0;
}

/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
#define CE_WM_SHFT 1

/*
 * Number of times to check for any pending Tx/Rx completion on a copy
 * engine; this count should be big enough. Once we hit this threshold
 * we stop checking for any further Tx/Rx completions in the same
 * interrupt handling pass. Note that this threshold is currently used
 * only for Rx interrupt processing; it can be used for Tx as well if
 * we suspect an infinite loop while checking for pending Tx completions.
 */
#define CE_TXRX_COMP_CHECK_THRESHOLD 20

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with target_lock held.
 */
static void
ce_per_engine_handler_adjust_srng(struct CE_state *CE_state,
				  int disable_copy_compl_intr)
{
}

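/**
 * ce_check_int_watermark_srng() - check for a watermark interrupt
 * @CE_state: copy engine state
 * @flags: set to CE_WM_FLAG_* values when a watermark is crossed
 *
 * Watermark interrupts are not yet supported for SRNG based copy engines.
 *
 * Return: false
 */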
bool ce_check_int_watermark_srng(struct CE_state *CE_state,
				 unsigned int *flags)
{
	/* TODO */
	return false;
}

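/**
 * ce_get_desc_size_srng() - descriptor size for a CE ring type
 * @ring_type: CE_RING_SRC, CE_RING_DEST or CE_RING_STATUS
 *
 * Return: descriptor size in bytes, or 0 for an unknown ring type
 */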
uint32_t ce_get_desc_size_srng(uint8_t ring_type)
{
	switch (ring_type) {
	case CE_RING_SRC:
		return sizeof(struct ce_srng_src_desc);
	case CE_RING_DEST:
		return sizeof(struct ce_srng_dest_desc);
	case CE_RING_STATUS:
		return sizeof(struct ce_srng_dest_status_desc);
	default:
		return 0;
	}
	return 0;
}

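/**
 * ce_srng_msi_ring_params_setup() - fill in MSI parameters for a CE ring
 * @scn: hif context
 * @ce_id: copy engine id
 * @ring_params: SRNG parameters passed on to hal_srng_setup()
 *
 * Queries the platform driver for the MSI assignment of the "CE" block.
 * If one exists, the MSI address and data are programmed into @ring_params
 * and HAL_SRNG_MSI_INTR is set; otherwise @ring_params is left untouched.
 */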
static void ce_srng_msi_ring_params_setup(struct hif_softc *scn, uint32_t ce_id,
					  struct hal_srng_params *ring_params)
{
	uint32_t addr_low;
	uint32_t addr_high;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	int ret;

	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);

	/* msi config not found */
	if (ret)
		return;

	HIF_INFO("%s: ce_id %d, msi_start: %d, msi_count %d", __func__, ce_id,
		 msi_data_start, msi_data_count);

	pld_get_msi_address(scn->qdf_dev->dev, &addr_low, &addr_high);

	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (ce_id % msi_data_count) + msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;

	HIF_INFO("%s: ce_id %d, msi_addr %p, msi_data %d", __func__, ce_id,
		 (void *)ring_params->msi_addr, ring_params->msi_data);
}

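/**
 * ce_srng_src_ring_setup() - register a CE source ring with the HAL SRNG layer
 * @scn: hif context
 * @ce_id: copy engine id
 * @src_ring: source ring state to back with an SRNG
 */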
void ce_srng_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
			    struct CE_ring_state *src_ring)
{
	struct hal_srng_params ring_params = {0};

	HIF_INFO("%s: ce_id %d", __func__, ce_id);

	ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);

	ring_params.ring_base_paddr = src_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = src_ring->base_addr_owner_space;
	ring_params.num_entries = src_ring->nentries;
	/*
	 * The minimum increment for the timer is 8us.
	 * A default value of 0 disables the timer.
	 * A valid default value caused continuous interrupts to
	 * fire with MSI enabled; need to revisit usage of the timer.
	 */
	ring_params.intr_timer_thres_us = 0;
	ring_params.intr_batch_cntr_thres_entries = 1;

	/* TODO
	 * ring_params.msi_addr = XXX;
	 * ring_params.msi_data = XXX;
	 * ring_params.flags = XXX;
	 */

	src_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_SRC, ce_id, 0,
					    &ring_params);
}

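/**
 * ce_srng_dest_ring_setup() - register a CE destination ring with the HAL
 * @scn: hif context
 * @ce_id: copy engine id
 * @dest_ring: destination ring state to back with an SRNG
 * @attr: CE attributes; src_sz_max bounds the receive buffer length
 */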
void ce_srng_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
			     struct CE_ring_state *dest_ring,
			     struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};

	HIF_INFO("%s: ce_id %d", __func__, ce_id);

	ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);

	ring_params.ring_base_paddr = dest_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = dest_ring->base_addr_owner_space;
	ring_params.num_entries = dest_ring->nentries;
	ring_params.intr_timer_thres_us = 0;
	ring_params.intr_batch_cntr_thres_entries = 1;
	ring_params.max_buffer_length = attr->src_sz_max;

	/* TODO
	 * ring_params.msi_addr = XXX;
	 * ring_params.msi_data = XXX;
	 * ring_params.flags = XXX;
	 */

	/* Dest ring is also source ring */
	dest_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST, ce_id, 0,
					     &ring_params);
}

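/**
 * ce_srng_status_ring_setup() - register a CE status ring with the HAL
 * @scn: hif context
 * @ce_id: copy engine id
 * @status_ring: destination status ring state to back with an SRNG
 */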
void ce_srng_status_ring_setup(struct hif_softc *scn, uint32_t ce_id,
			       struct CE_ring_state *status_ring)
{
	struct hal_srng_params ring_params = {0};

	HIF_INFO("%s: ce_id %d", __func__, ce_id);

	ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);

	ring_params.ring_base_paddr = status_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = status_ring->base_addr_owner_space;
	ring_params.num_entries = status_ring->nentries;
	ring_params.intr_timer_thres_us = 0;
	ring_params.intr_batch_cntr_thres_entries = 1;

	/* TODO
	 * ring_params.msi_addr = XXX;
	 * ring_params.msi_data = XXX;
	 * ring_params.flags = XXX;
	 */

	status_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST_STATUS,
					       ce_id, 0, &ring_params);
}

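/**
 * ce_ring_setup_srng() - set up one ring of a copy engine
 * @scn: hif context
 * @ring_type: CE_RING_SRC, CE_RING_DEST or CE_RING_STATUS
 * @ce_id: copy engine id
 * @ring: ring state to set up
 * @attr: CE attributes for this copy engine
 */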
void ce_ring_setup_srng(struct hif_softc *scn, uint8_t ring_type,
			uint32_t ce_id, struct CE_ring_state *ring,
			struct CE_attr *attr)
{
	switch (ring_type) {
	case CE_RING_SRC:
		ce_srng_src_ring_setup(scn, ce_id, ring);
		break;
	case CE_RING_DEST:
		ce_srng_dest_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_STATUS:
		ce_srng_status_ring_setup(scn, ce_id, ring);
		break;
	default:
		qdf_assert(0);
		break;
	}
}

struct ce_ops ce_service_srng = {
	.ce_get_desc_size = ce_get_desc_size_srng,
	.ce_ring_setup = ce_ring_setup_srng,
	.ce_sendlist_send = ce_sendlist_send_srng,
	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_srng,
	.ce_revoke_recv_next = ce_revoke_recv_next_srng,
	.ce_cancel_send_next = ce_cancel_send_next_srng,
	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_srng,
	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_srng,
	.ce_send_nolock = ce_send_nolock_srng,
	.watermark_int = ce_check_int_watermark_srng,
	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_srng,
	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_srng,
	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_srng,
};

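/**
 * ce_services_srng() - copy engine service table for SRNG based targets
 *
 * Return: pointer to the ce_ops table implemented by this file
 */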
struct ce_ops *ce_services_srng(void)
{
	return &ce_service_srng;
}