/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hif.h"
#include "hif_io32.h"
#include "reg_struct.h"
#include "ce_api.h"
#include "ce_main.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "qdf_lock.h"
#include "regtable.h"
#include "hif_main.h"
#include "hif_debug.h"
#include "hal_api.h"
#include "pld_common.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem, than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 * The Target TX hash result is reported via toeplitz_hash_result.
 */
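
/*
 * Illustrative sketch only, not compiled into the driver: how a caller
 * might rely on the per-transfer context echo described above. The
 * hif_post_and_reap_example() wrapper and its locals are hypothetical,
 * and the exact signatures of the public CE API calls used here are an
 * assumption; the underlying behaviour is implemented below by
 * ce_recv_buf_enqueue_srng() and ce_completed_recv_next_nolock_srng(),
 * reached through the ce_ops table registered at the end of this file.
 */
#if 0
static void hif_post_and_reap_example(struct CE_handle *copyeng,
				      qdf_nbuf_t nbuf, qdf_dma_addr_t paddr)
{
	void *per_ce_ctx, *per_xfer_ctx;
	qdf_dma_addr_t buf;
	unsigned int nbytes, transfer_id, flags;

	/* Post: the nbuf pointer rides along as the per-transfer context */
	ce_recv_buf_enqueue(copyeng, (void *)nbuf, paddr);

	/* On completion the same pointer is echoed back to the caller */
	if (ce_completed_recv_next(copyeng, &per_ce_ctx, &per_xfer_ctx,
				   &buf, &nbytes, &transfer_id, &flags) ==
	    QDF_STATUS_SUCCESS)
		qdf_assert(per_xfer_ctx == (void *)nbuf);
}
#endif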

#define CE_ADDR_COPY(desc, dma_addr) do {\
		(desc)->buffer_addr_lo = (uint32_t)((dma_addr) &\
							  0xFFFFFFFF);\
		(desc)->buffer_addr_hi =\
			(uint32_t)(((dma_addr) >> 32) & 0xFF);\
	} while (0)
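
/*
 * Example with illustrative values: for dma_addr = 0x123456789aULL the
 * macro above yields buffer_addr_lo = 0x3456789a and buffer_addr_hi = 0x12,
 * i.e. a 40-bit DMA address split across the two descriptor fields (any
 * bits above bit 39 are dropped by the 0xFF mask).
 */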

static int
ce_send_nolock_srng(struct CE_handle *copyeng,
		    void *per_transfer_context,
		    qdf_dma_addr_t buffer,
		    uint32_t nbytes,
		    uint32_t transfer_id,
		    uint32_t flags,
		    uint32_t user_flags)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index = src_ring->write_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return QDF_STATUS_E_FAILURE;
	if (unlikely(hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx,
					    false) <= 0)) {
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		Q_TARGET_ACCESS_END(scn);
		return QDF_STATUS_E_FAILURE;
	}
	{
		enum hif_ce_event_type event_type = HIF_TX_GATHER_DESC_POST;
		struct ce_srng_src_desc *src_desc;

		if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
			Q_TARGET_ACCESS_END(scn);
			return QDF_STATUS_E_FAILURE;
		}

		src_desc = hal_srng_src_get_next_reaped(scn->hal_soc,
							src_ring->srng_ctx);

		/* Update low 32 bits source descriptor address */
		src_desc->buffer_addr_lo =
			(uint32_t)(dma_addr & 0xFFFFFFFF);
		src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0xFF);

		src_desc->meta_data = transfer_id;

		/*
		 * Set the swap bit if:
		 * typical sends on this CE are swapped (host is big-endian)
		 * and this send doesn't disable the swapping
		 * (data is not bytestream)
		 */
		src_desc->byte_swap =
			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
		src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
		src_desc->nbytes = nbytes;

		src_ring->per_transfer_context[write_index] =
			per_transfer_context;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);

		/* src_ring->write_index hasn't been updated even though
		 * the register has already been written to.
		 */
		hif_record_ce_desc_event(scn, CE_state->id, event_type,
			(union ce_desc *)src_desc, per_transfer_context,
			src_ring->write_index);

		src_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	}
	Q_TARGET_ACCESS_END(scn);
	return status;
}

static int
ce_sendlist_send_srng(struct CE_handle *copyeng,
		      void *per_transfer_context,
		      struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	int status = -ENOMEM;
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int num_items = sl->num_items;
	unsigned int sw_index;
	unsigned int write_index;
	struct hif_softc *scn = CE_state->scn;

	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx, false) >=
	    num_items) {
		struct ce_sendlist_item *item;
		int i;

		/* handle all but the last item uniformly */
		for (i = 0; i < num_items - 1; i++) {
			item = &sl->item[i];
			/* TBDXXX: Support extensible sendlist_types? */
			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
			status = ce_send_nolock_srng(copyeng,
					CE_SENDLIST_ITEM_CTXT,
					(qdf_dma_addr_t)item->data,
					item->u.nbytes, transfer_id,
					item->flags | CE_SEND_FLAG_GATHER,
					item->user_flags);
			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		}
		/* provide valid context pointer for final item */
		item = &sl->item[i];
		/* TBDXXX: Support extensible sendlist_types? */
		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
		status = ce_send_nolock_srng(copyeng, per_transfer_context,
					     (qdf_dma_addr_t)item->data,
					     item->u.nbytes,
					     transfer_id, item->flags,
					     item->user_flags);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
					     QDF_NBUF_TX_PKT_CE);
		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
			QDF_TRACE_DEFAULT_PDEV_ID,
			(uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
			sizeof(((qdf_nbuf_t)per_transfer_context)->data), QDF_TX));
	} else {
		/*
		 * Probably not worth the additional complexity to support
		 * partial sends with continuation or notification. We expect
		 * to use large rings and small sendlists. If we can't handle
		 * the entire request at once, punt it back to the caller.
		 */
	}
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

#define SLOTS_PER_DATAPATH_TX 2

#ifndef AH_NEED_TX_DATA_SWAP
#define AH_NEED_TX_DATA_SWAP 0
#endif
/**
 * ce_recv_buf_enqueue_srng() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: 0 if the buffer is enqueued
 */
static int
ce_recv_buf_enqueue_srng(struct CE_handle *copyeng,
			 void *per_recv_context, qdf_dma_addr_t buffer)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return -EIO;
	}

	if (hal_srng_access_start(scn->hal_soc, dest_ring->srng_ctx)) {
		Q_TARGET_ACCESS_END(scn);
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return QDF_STATUS_E_FAILURE;
	}

	if ((hal_srng_src_num_avail(scn->hal_soc,
				    dest_ring->srng_ctx, false) > 0)) {
		struct ce_srng_dest_desc *dest_desc =
				hal_srng_src_get_next(scn->hal_soc,
						      dest_ring->srng_ctx);

		if (dest_desc == NULL) {
			status = QDF_STATUS_E_FAILURE;
		} else {

			CE_ADDR_COPY(dest_desc, dma_addr);

			dest_ring->per_transfer_context[write_index] =
				per_recv_context;

			/* Update Destination Ring Write Index */
			write_index = CE_RING_IDX_INCR(nentries_mask,
						       write_index);
			status = QDF_STATUS_SUCCESS;
		}
	} else
		status = QDF_STATUS_E_FAILURE;

	dest_ring->write_index = write_index;
	hal_srng_access_end(scn->hal_soc, dest_ring->srng_ctx);
	Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
	return status;
}

/*
 * Guts of ce_recv_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
static unsigned int
ce_recv_entries_done_nolock_srng(struct hif_softc *scn,
				 struct CE_state *CE_state)
{
	struct CE_ring_state *status_ring = CE_state->status_ring;

	return hal_srng_dst_num_valid(scn->hal_soc,
				      status_ring->srng_ctx, false);
}

/*
 * Guts of ce_send_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
static unsigned int
ce_send_entries_done_nolock_srng(struct hif_softc *scn,
				 struct CE_state *CE_state)
{
	struct CE_ring_state *src_ring = CE_state->src_ring;
	int count = 0;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx))
		return 0;

	count = hal_srng_src_done_val(scn->hal_soc, src_ring->srng_ctx);

	hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);

	return count;
}

/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static int
ce_completed_recv_next_nolock_srng(struct CE_state *CE_state,
				   void **per_CE_contextp,
				   void **per_transfer_contextp,
				   qdf_dma_addr_t *bufferp,
				   unsigned int *nbytesp,
				   unsigned int *transfer_idp,
				   unsigned int *flagsp)
{
	int status;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	struct CE_ring_state *status_ring = CE_state->status_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_dest_status_desc *dest_status;
	int nbytes;
	struct ce_srng_dest_status_desc dest_status_info;

	if (hal_srng_access_start(scn->hal_soc, status_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	dest_status = hal_srng_dst_get_next(scn->hal_soc,
					    status_ring->srng_ctx);

	if (dest_status == NULL) {
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}
	/*
	 * By copying the dest_desc_info element to local memory, we can
	 * avoid an extra memory read from non-cacheable memory.
	 */
	dest_status_info = *dest_status;
	nbytes = dest_status_info.nbytes;
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	dest_status->nbytes = 0;

	*nbytesp = nbytes;
	*transfer_idp = dest_status_info.meta_data;
	*flagsp = (dest_status_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with the status ring.
	 * Get the per-transfer context from dest_ring.
	 */
	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	dest_ring->per_transfer_context[sw_index] = 0; /* sanity */

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	status = QDF_STATUS_SUCCESS;

done:
	hal_srng_access_end(scn->hal_soc, status_ring->srng_ctx);

	return status;
}

static QDF_STATUS
ce_revoke_recv_next_srng(struct CE_handle *copyeng,
			 void **per_CE_contextp,
			 void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int sw_index;

	if (!dest_ring)
		return QDF_STATUS_E_FAILURE;

	sw_index = dest_ring->sw_index;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with the status ring.
	 * Get the per-transfer context from dest_ring.
	 */
	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	if (dest_ring->per_transfer_context[sw_index] == NULL)
		return QDF_STATUS_E_FAILURE;

	/* provide end condition */
	dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(dest_ring->nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	return QDF_STATUS_SUCCESS;
}

/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static int
ce_completed_send_next_nolock_srng(struct CE_state *CE_state,
				   void **per_CE_contextp,
				   void **per_transfer_contextp,
				   qdf_dma_addr_t *bufferp,
				   unsigned int *nbytesp,
				   unsigned int *transfer_idp,
				   unsigned int *sw_idx,
				   unsigned int *hw_idx,
				   uint32_t *toeplitz_hash_result)
{
	int status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_src_desc *src_desc;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		return status;
	}

	src_desc = hal_srng_src_reap_next(scn->hal_soc, src_ring->srng_ctx);
	if (src_desc) {

		/* Return data from completed source descriptor */
		*bufferp = (qdf_dma_addr_t)
			(((uint64_t)(src_desc)->buffer_addr_lo +
			  ((uint64_t)((src_desc)->buffer_addr_hi &
				      0xFF) << 32)));
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		/* sw_index is used more like read index */
		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		src_ring->per_transfer_context[sw_index] = 0; /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}
	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return status;
}

/* NB: Modelled after ce_completed_send_next */
static QDF_STATUS
ce_cancel_send_next_srng(struct CE_handle *copyeng,
			 void **per_CE_contextp,
			 void **per_transfer_contextp,
			 qdf_dma_addr_t *bufferp,
			 unsigned int *nbytesp,
			 unsigned int *transfer_idp,
			 uint32_t *toeplitz_hash_result)
{
	struct CE_state *CE_state;
	int status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	struct hif_softc *scn;
	struct ce_srng_src_desc *src_desc;

	CE_state = (struct CE_state *)copyeng;
	src_ring = CE_state->src_ring;
	if (!src_ring)
		return QDF_STATUS_E_FAILURE;

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	scn = CE_state->scn;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		return status;
	}

	src_desc = hal_srng_src_pending_reap_next(scn->hal_soc,
						  src_ring->srng_ctx);
	if (src_desc) {
		/* Return data from completed source descriptor */
		*bufferp = (qdf_dma_addr_t)
			(((uint64_t)(src_desc)->buffer_addr_lo +
			  ((uint64_t)((src_desc)->buffer_addr_hi &
				      0xFF) << 32)));
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		/* sw_index is used more like read index */
		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		src_ring->per_transfer_context[sw_index] = 0; /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}
	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return status;
}

/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
#define CE_WM_SHFT 1

/*
 * Number of times to check for any pending tx/rx completion on
 * a copy engine; this count should be big enough. Once we hit
 * this threshold we'll not check for any Tx/Rx completion in the same
 * interrupt handling. Note that this threshold is only used for
 * Rx interrupt processing; it can be used for Tx as well if we
 * suspect any infinite loop in checking for pending Tx completion.
 */
#define CE_TXRX_COMP_CHECK_THRESHOLD 20

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with target_lock held.
 */
static void
ce_per_engine_handler_adjust_srng(struct CE_state *CE_state,
				  int disable_copy_compl_intr)
{
}

static bool ce_check_int_watermark_srng(struct CE_state *CE_state,
					unsigned int *flags)
{
	/* TODO */
	return false;
}

static uint32_t ce_get_desc_size_srng(uint8_t ring_type)
{
	switch (ring_type) {
	case CE_RING_SRC:
		return sizeof(struct ce_srng_src_desc);
	case CE_RING_DEST:
		return sizeof(struct ce_srng_dest_desc);
	case CE_RING_STATUS:
		return sizeof(struct ce_srng_dest_status_desc);
	default:
		return 0;
	}
	return 0;
}

static void ce_srng_msi_ring_params_setup(struct hif_softc *scn, uint32_t ce_id,
					  struct hal_srng_params *ring_params)
{
	uint32_t addr_low;
	uint32_t addr_high;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	int ret;

	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);

	/* msi config not found */
	if (ret)
		return;

	pld_get_msi_address(scn->qdf_dev->dev, &addr_low, &addr_high);

	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (ce_id % msi_data_count) + msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;

	HIF_DBG("%s: ce_id %d, msi_addr %pK, msi_data %d", __func__, ce_id,
		(void *)ring_params->msi_addr, ring_params->msi_data);
}
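
/*
 * Example with illustrative values: if pld_get_user_msi_assignment()
 * reported msi_data_start = 40 and msi_data_count = 3, the assignment
 * above gives ce_id 0, 1, 2, 3, 4 the MSI data values 40, 41, 42, 40, 41,
 * so the available MSI vectors are shared round-robin across copy engines.
 */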

static void ce_srng_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				   struct CE_ring_state *src_ring,
				   struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};

	HIF_INFO("%s: ce_id %d", __func__, ce_id);

	ring_params.ring_base_paddr = src_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = src_ring->base_addr_owner_space;
	ring_params.num_entries = src_ring->nentries;
	/*
	 * The minimum increment for the timer is 8us.
	 * A default value of 0 disables the timer.
	 * A valid default value caused continuous interrupts to
	 * fire with MSI enabled. Need to revisit usage of the timer.
	 */

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);

		ring_params.intr_timer_thres_us = 0;
		ring_params.intr_batch_cntr_thres_entries = 1;
	}

	src_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_SRC, ce_id, 0,
					    &ring_params);
}

/**
 * ce_srng_initialize_dest_timer_interrupt_war() - WAR initialization
 * @dest_ring: ring being initialized
 * @ring_params: pointer to initialized parameters
 *
 * For Napier & Hawkeye v1, the status ring timer interrupts do not work.
 * As a workaround, the host configures the destination rings to be a proxy
 * for work needing to be done.
 *
 * The interrupts are set up such that if the destination ring is less than
 * fully posted, there is likely undone work for the status ring that the
 * host should process.
 *
 * There is a timing bug in srng based copy engines such that a fully posted
 * srng based copy engine has 2 empty entries instead of just one. The copy
 * engine data structures work with 1 empty entry, but the software
 * frequently fails to post the last entry due to the race condition.
 */
static void ce_srng_initialize_dest_timer_interrupt_war(
		struct CE_ring_state *dest_ring,
		struct hal_srng_params *ring_params)
{
	int num_buffers_when_fully_posted = dest_ring->nentries - 2;

	ring_params->low_threshold = num_buffers_when_fully_posted - 1;
	ring_params->intr_timer_thres_us = 1024;
	ring_params->intr_batch_cntr_thres_entries = 0;
	ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
}
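
/*
 * Example with an illustrative ring size: for a destination ring with
 * nentries = 32, num_buffers_when_fully_posted is 30 and the low threshold
 * is programmed to 29 entries. Dropping below that level (combined with the
 * 1024 us timer) raises the proxy interrupt used in place of the broken
 * status ring timer interrupt.
 */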

static void ce_srng_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				    struct CE_ring_state *dest_ring,
				    struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};
	bool status_ring_timer_thresh_work_arround = true;

	HIF_INFO("%s: ce_id %d", __func__, ce_id);

	ring_params.ring_base_paddr = dest_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = dest_ring->base_addr_owner_space;
	ring_params.num_entries = dest_ring->nentries;
	ring_params.max_buffer_length = attr->src_sz_max;

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
		if (status_ring_timer_thresh_work_arround) {
			ce_srng_initialize_dest_timer_interrupt_war(
					dest_ring, &ring_params);
		} else {
			/* normal behavior for future chips */
			ring_params.low_threshold = dest_ring->nentries >> 3;
			ring_params.intr_timer_thres_us = 100000;
			ring_params.intr_batch_cntr_thres_entries = 0;
			ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
		}
	}

	/* Dest ring is also source ring */
	dest_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST, ce_id, 0,
					     &ring_params);
}

static void ce_srng_status_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				      struct CE_ring_state *status_ring,
				      struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};

	HIF_INFO("%s: ce_id %d", __func__, ce_id);

	ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);

	ring_params.ring_base_paddr = status_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = status_ring->base_addr_owner_space;
	ring_params.num_entries = status_ring->nentries;

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ring_params.intr_timer_thres_us = 0x1000;
		ring_params.intr_batch_cntr_thres_entries = 0x1;
	}

	status_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST_STATUS,
					       ce_id, 0, &ring_params);
}

static int ce_ring_setup_srng(struct hif_softc *scn, uint8_t ring_type,
			      uint32_t ce_id, struct CE_ring_state *ring,
			      struct CE_attr *attr)
{
	switch (ring_type) {
	case CE_RING_SRC:
		ce_srng_src_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_DEST:
		ce_srng_dest_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_STATUS:
		ce_srng_status_ring_setup(scn, ce_id, ring, attr);
		break;
	default:
		qdf_assert(0);
		break;
	}

	return 0;
}

static void ce_construct_shadow_config_srng(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int ce_id;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (hif_state->host_ce_config[ce_id].src_nentries)
			hal_set_one_shadow_config(scn->hal_soc,
						  CE_SRC, ce_id);

		if (hif_state->host_ce_config[ce_id].dest_nentries) {
			hal_set_one_shadow_config(scn->hal_soc,
						  CE_DST, ce_id);

			hal_set_one_shadow_config(scn->hal_soc,
						  CE_DST_STATUS, ce_id);
		}
	}
}

static void ce_prepare_shadow_register_v2_cfg_srng(struct hif_softc *scn,
			struct pld_shadow_reg_v2_cfg **shadow_config,
			int *num_shadow_registers_configured)
{
	if (scn->hal_soc == NULL) {
		HIF_ERROR("%s: hal not initialized: not initializing shadow config",
			  __func__);
		return;
	}

	hal_get_shadow_config(scn->hal_soc, shadow_config,
			      num_shadow_registers_configured);

	if (*num_shadow_registers_configured != 0) {
		HIF_ERROR("%s: hal shadow register configuration already constructed",
			  __func__);

		/* return with the original configuration */
		return;
	}

	hal_construct_shadow_config(scn->hal_soc);
	ce_construct_shadow_config_srng(scn);

	/* get updated configuration */
	hal_get_shadow_config(scn->hal_soc, shadow_config,
			      num_shadow_registers_configured);
}

static struct ce_ops ce_service_srng = {
	.ce_get_desc_size = ce_get_desc_size_srng,
	.ce_ring_setup = ce_ring_setup_srng,
	.ce_sendlist_send = ce_sendlist_send_srng,
	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_srng,
	.ce_revoke_recv_next = ce_revoke_recv_next_srng,
	.ce_cancel_send_next = ce_cancel_send_next_srng,
	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_srng,
	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_srng,
	.ce_send_nolock = ce_send_nolock_srng,
	.watermark_int = ce_check_int_watermark_srng,
	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_srng,
	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_srng,
	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_srng,
	.ce_prepare_shadow_register_v2_cfg =
		ce_prepare_shadow_register_v2_cfg_srng,
};

struct ce_ops *ce_services_srng()
{
	return &ce_service_srng;
}
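
/*
 * Illustrative sketch only, not compiled into the driver: how the service
 * table returned above might be consumed by a hypothetical caller. The
 * hif_ce_service_example() wrapper and its locals are made up for the
 * example; ce_services_srng(), the ce_ops members and the CE_RING_*
 * identifiers are the ones used in this file.
 */
#if 0
static void hif_ce_service_example(void)
{
	struct ce_ops *ops = ce_services_srng();

	/* descriptor sizes drive the ring allocations done by the caller */
	uint32_t src_desc_sz = ops->ce_get_desc_size(CE_RING_SRC);
	uint32_t dst_desc_sz = ops->ce_get_desc_size(CE_RING_DEST);
	uint32_t sts_desc_sz = ops->ce_get_desc_size(CE_RING_STATUS);

	(void)src_desc_sz;
	(void)dst_desc_sz;
	(void)sts_desc_sz;
}
#endif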