/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hif.h"
#include "hif_io32.h"
#include "reg_struct.h"
#include "ce_api.h"
#include "ce_main.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "qdf_lock.h"
#include "regtable.h"
#include "hif_main.h"
#include "hif_debug.h"
#include "hal_api.h"
#include "pld_common.h"
#include "qdf_module.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem, than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 * Target TX hash result: toeplitz_hash_result
 */
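
/*
 * Illustrative caller-side sketch (not part of this file) of how the
 * per-transfer contexts described above are typically used, assuming the
 * ce_send()/ce_recv_buf_enqueue() wrappers declared in ce_api.h; names
 * such as my_ce_hdl, my_nbuf and my_nbuf_paddr are placeholders:
 *
 *	// Post a receive buffer; my_nbuf is echoed back on recv completion.
 *	ce_recv_buf_enqueue(my_ce_hdl, (void *)my_nbuf, my_nbuf_paddr);
 *
 *	// Send a buffer; my_tx_nbuf is echoed back in the send-done callback.
 *	ce_send(my_ce_hdl, (void *)my_tx_nbuf, my_tx_paddr, nbytes,
 *		transfer_id, 0, 0);
 */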

#define CE_ADDR_COPY(desc, dma_addr) do {\
		(desc)->buffer_addr_lo = (uint32_t)((dma_addr) &\
							0xFFFFFFFF);\
		(desc)->buffer_addr_hi =\
			(uint32_t)(((dma_addr) >> 32) & 0xFF);\
	} while (0)
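
/*
 * Example (illustrative only): for dma_addr = 0x1234567890 the macro above
 * yields buffer_addr_lo = 0x34567890 and buffer_addr_hi = 0x12, i.e. a
 * 40-bit DMA address split across the two descriptor fields.
 */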

static int
ce_send_nolock_srng(struct CE_handle *copyeng,
		    void *per_transfer_context,
		    qdf_dma_addr_t buffer,
		    uint32_t nbytes,
		    uint32_t transfer_id,
		    uint32_t flags,
		    uint32_t user_flags)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index = src_ring->write_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return QDF_STATUS_E_FAILURE;
	if (unlikely(hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx,
					    false) <= 0)) {
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		Q_TARGET_ACCESS_END(scn);
		return QDF_STATUS_E_FAILURE;
	}
	{
		enum hif_ce_event_type event_type = HIF_TX_GATHER_DESC_POST;
		struct ce_srng_src_desc *src_desc;

		if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
			Q_TARGET_ACCESS_END(scn);
			return QDF_STATUS_E_FAILURE;
		}

		src_desc = hal_srng_src_get_next_reaped(scn->hal_soc,
							src_ring->srng_ctx);
		if (!src_desc) {
			Q_TARGET_ACCESS_END(scn);
			return QDF_STATUS_E_INVAL;
		}

		/* Update low 32 bits source descriptor address */
		src_desc->buffer_addr_lo =
			(uint32_t)(dma_addr & 0xFFFFFFFF);
		src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0xFF);

		src_desc->meta_data = transfer_id;

		/*
		 * Set the swap bit if:
		 * typical sends on this CE are swapped (host is big-endian)
		 * and this send doesn't disable the swapping
		 * (data is not bytestream)
		 */
		src_desc->byte_swap =
			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
		src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
		src_desc->nbytes = nbytes;

		src_ring->per_transfer_context[write_index] =
			per_transfer_context;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);

		/* src_ring->write_index hasn't been updated even though
		 * the register has already been written to.
		 */
		hif_record_ce_desc_event(scn, CE_state->id, event_type,
			(union ce_desc *)src_desc, per_transfer_context,
			src_ring->write_index, nbytes);

		src_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	}
	Q_TARGET_ACCESS_END(scn);
	return status;
}

static int
ce_sendlist_send_srng(struct CE_handle *copyeng,
		      void *per_transfer_context,
		      struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	int status = -ENOMEM;
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int num_items = sl->num_items;
	unsigned int sw_index;
	unsigned int write_index;
	struct hif_softc *scn = CE_state->scn;

	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx, false) >=
	    num_items) {
		struct ce_sendlist_item *item;
		int i;

		/* handle all but the last item uniformly */
		for (i = 0; i < num_items - 1; i++) {
			item = &sl->item[i];
			/* TBDXXX: Support extensible sendlist_types? */
			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
			status = ce_send_nolock_srng(copyeng,
					CE_SENDLIST_ITEM_CTXT,
					(qdf_dma_addr_t)item->data,
					item->u.nbytes, transfer_id,
					item->flags | CE_SEND_FLAG_GATHER,
					item->user_flags);
			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		}
		/* provide valid context pointer for final item */
		item = &sl->item[i];
		/* TBDXXX: Support extensible sendlist_types? */
		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
		status = ce_send_nolock_srng(copyeng, per_transfer_context,
					(qdf_dma_addr_t)item->data,
					item->u.nbytes,
					transfer_id, item->flags,
					item->user_flags);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
					     QDF_NBUF_TX_PKT_CE);
		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
			QDF_TRACE_DEFAULT_PDEV_ID,
			(uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
			sizeof(((qdf_nbuf_t)per_transfer_context)->data),
			QDF_TX));
	} else {
		/*
		 * Probably not worth the additional complexity to support
		 * partial sends with continuation or notification. We expect
		 * to use large rings and small sendlists. If we can't handle
		 * the entire request at once, punt it back to the caller.
		 */
	}
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}
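
/*
 * Illustrative caller-side sketch (not part of this file) of building and
 * sending a gather list, assuming the ce_sendlist_init(), ce_sendlist_buf_add()
 * and ce_sendlist_send() wrappers declared in ce_api.h; my_ce_hdl, hdr_paddr,
 * payload_paddr and my_nbuf are placeholder names:
 *
 *	struct ce_sendlist sendlist;
 *
 *	ce_sendlist_init(&sendlist);
 *	ce_sendlist_buf_add(&sendlist, hdr_paddr, hdr_len, 0, 0);
 *	ce_sendlist_buf_add(&sendlist, payload_paddr, payload_len, 0, 0);
 *	ce_sendlist_send(my_ce_hdl, (void *)my_nbuf, &sendlist, transfer_id);
 */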

#define SLOTS_PER_DATAPATH_TX 2

#ifndef AH_NEED_TX_DATA_SWAP
#define AH_NEED_TX_DATA_SWAP 0
#endif
/**
 * ce_recv_buf_enqueue_srng() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: QDF_STATUS_SUCCESS (0) if the buffer is enqueued, an error
 * code otherwise
 */
static int
ce_recv_buf_enqueue_srng(struct CE_handle *copyeng,
			 void *per_recv_context, qdf_dma_addr_t buffer)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return -EIO;
	}

	if (hal_srng_access_start(scn->hal_soc, dest_ring->srng_ctx)) {
		/* balance the successful Q_TARGET_ACCESS_BEGIN above */
		Q_TARGET_ACCESS_END(scn);
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return QDF_STATUS_E_FAILURE;
	}

	if ((hal_srng_src_num_avail(scn->hal_soc,
				    dest_ring->srng_ctx, false) > 0)) {
		struct ce_srng_dest_desc *dest_desc =
				hal_srng_src_get_next(scn->hal_soc,
						      dest_ring->srng_ctx);

		if (dest_desc == NULL) {
			status = QDF_STATUS_E_FAILURE;
		} else {

			CE_ADDR_COPY(dest_desc, dma_addr);

			dest_ring->per_transfer_context[write_index] =
				per_recv_context;

			/* Update Destination Ring Write Index */
			write_index = CE_RING_IDX_INCR(nentries_mask,
						       write_index);
			status = QDF_STATUS_SUCCESS;
		}
	} else
		status = QDF_STATUS_E_FAILURE;

	dest_ring->write_index = write_index;
	hal_srng_access_end(scn->hal_soc, dest_ring->srng_ctx);
	Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
	return status;
}

/*
 * Guts of ce_recv_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
static unsigned int
ce_recv_entries_done_nolock_srng(struct hif_softc *scn,
				 struct CE_state *CE_state)
{
	struct CE_ring_state *status_ring = CE_state->status_ring;

	return hal_srng_dst_num_valid(scn->hal_soc,
				      status_ring->srng_ctx, false);
}

/*
 * Guts of ce_send_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
static unsigned int
ce_send_entries_done_nolock_srng(struct hif_softc *scn,
				 struct CE_state *CE_state)
{

	struct CE_ring_state *src_ring = CE_state->src_ring;
	int count = 0;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx))
		return 0;

	count = hal_srng_src_done_val(scn->hal_soc, src_ring->srng_ctx);

	hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);

	return count;
}

/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static int
ce_completed_recv_next_nolock_srng(struct CE_state *CE_state,
				   void **per_CE_contextp,
				   void **per_transfer_contextp,
				   qdf_dma_addr_t *bufferp,
				   unsigned int *nbytesp,
				   unsigned int *transfer_idp,
				   unsigned int *flagsp)
{
	int status;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	struct CE_ring_state *status_ring = CE_state->status_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_dest_status_desc *dest_status;
	int nbytes;
	struct ce_srng_dest_status_desc dest_status_info;

	if (hal_srng_access_start(scn->hal_soc, status_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	dest_status = hal_srng_dst_get_next(scn->hal_soc,
					    status_ring->srng_ctx);

	if (dest_status == NULL) {
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}
	/*
	 * By copying the dest_desc_info element to local memory, we
	 * avoid an extra memory read from non-cacheable memory.
	 */
	dest_status_info = *dest_status;
	nbytes = dest_status_info.nbytes;
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	dest_status->nbytes = 0;

	*nbytesp = nbytes;
	*transfer_idp = dest_status_info.meta_data;
	*flagsp = (dest_status_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with the status ring.
	 * Get the per-transfer context from dest_ring.
	 */
	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	dest_ring->per_transfer_context[sw_index] = 0; /* sanity */

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	status = QDF_STATUS_SUCCESS;

done:
	hal_srng_access_end(scn->hal_soc, status_ring->srng_ctx);

	return status;
}

static QDF_STATUS
ce_revoke_recv_next_srng(struct CE_handle *copyeng,
			 void **per_CE_contextp,
			 void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int sw_index;

	if (!dest_ring)
		return QDF_STATUS_E_FAILURE;

	sw_index = dest_ring->sw_index;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with the status ring.
	 * Get the per-transfer context from dest_ring.
	 */
	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	if (dest_ring->per_transfer_context[sw_index] == NULL)
		return QDF_STATUS_E_FAILURE;

	/* provide end condition */
	dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(dest_ring->nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	return QDF_STATUS_SUCCESS;
}

/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static int
ce_completed_send_next_nolock_srng(struct CE_state *CE_state,
				   void **per_CE_contextp,
				   void **per_transfer_contextp,
				   qdf_dma_addr_t *bufferp,
				   unsigned int *nbytesp,
				   unsigned int *transfer_idp,
				   unsigned int *sw_idx,
				   unsigned int *hw_idx,
				   uint32_t *toeplitz_hash_result)
{
	int status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int swi = src_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_src_desc *src_desc;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		return status;
	}

	src_desc = hal_srng_src_reap_next(scn->hal_soc, src_ring->srng_ctx);
	if (src_desc) {
		hif_record_ce_desc_event(scn, CE_state->id,
					 HIF_TX_DESC_COMPLETION,
					 (union ce_desc *)src_desc,
					 src_ring->per_transfer_context[swi],
					 swi, src_desc->nbytes);

		/* Return data from completed source descriptor */
		*bufferp = (qdf_dma_addr_t)
			(((uint64_t)(src_desc)->buffer_addr_lo +
			  ((uint64_t)((src_desc)->buffer_addr_hi &
				  0xFF) << 32)));
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		/* sw_index is used more like read index */
		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		src_ring->per_transfer_context[sw_index] = 0; /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}
	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return status;
}

/* NB: Modelled after ce_completed_send_next */
static QDF_STATUS
ce_cancel_send_next_srng(struct CE_handle *copyeng,
			 void **per_CE_contextp,
			 void **per_transfer_contextp,
			 qdf_dma_addr_t *bufferp,
			 unsigned int *nbytesp,
			 unsigned int *transfer_idp,
			 uint32_t *toeplitz_hash_result)
{
	struct CE_state *CE_state;
	int status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	struct hif_softc *scn;
	struct ce_srng_src_desc *src_desc;

	CE_state = (struct CE_state *)copyeng;
	src_ring = CE_state->src_ring;
	if (!src_ring)
		return QDF_STATUS_E_FAILURE;

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	scn = CE_state->scn;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		return status;
	}

	src_desc = hal_srng_src_pending_reap_next(scn->hal_soc,
						  src_ring->srng_ctx);
	if (src_desc) {
		/* Return data from completed source descriptor */
		*bufferp = (qdf_dma_addr_t)
			(((uint64_t)(src_desc)->buffer_addr_lo +
			  ((uint64_t)((src_desc)->buffer_addr_hi &
				  0xFF) << 32)));
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		/* sw_index is used more like read index */
		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		src_ring->per_transfer_context[sw_index] = 0; /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}
	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return status;
}

/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
#define CE_WM_SHFT 1

/*
 * Number of times to check for any pending Tx/Rx completion on
 * a copy engine; this count should be big enough. Once we hit
 * this threshold we stop checking for any Tx/Rx completion in the
 * same interrupt handling pass. Note that this threshold is only
 * used for Rx interrupt processing; it can be used for Tx as well
 * if we suspect any infinite loop in checking for pending Tx
 * completion.
 */
#define CE_TXRX_COMP_CHECK_THRESHOLD 20
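
/*
 * Illustrative (hypothetical) sketch of how such a threshold bounds a
 * completion-polling loop; the real loop lives in the per-engine service
 * code, not in this file, and ce_comp_pending()/ce_process_one_comp() are
 * placeholder names:
 *
 *	unsigned int checks = 0;
 *
 *	while (ce_comp_pending(CE_state) &&
 *	       checks++ < CE_TXRX_COMP_CHECK_THRESHOLD)
 *		ce_process_one_comp(CE_state);
 */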

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with target_lock held.
 */
static void
ce_per_engine_handler_adjust_srng(struct CE_state *CE_state,
				  int disable_copy_compl_intr)
{
}

static bool ce_check_int_watermark_srng(struct CE_state *CE_state,
					unsigned int *flags)
{
	/*TODO*/
	return false;
}

static uint32_t ce_get_desc_size_srng(uint8_t ring_type)
{
	switch (ring_type) {
	case CE_RING_SRC:
		return sizeof(struct ce_srng_src_desc);
	case CE_RING_DEST:
		return sizeof(struct ce_srng_dest_desc);
	case CE_RING_STATUS:
		return sizeof(struct ce_srng_dest_status_desc);
	default:
		return 0;
	}
	return 0;
}

static void ce_srng_msi_ring_params_setup(struct hif_softc *scn, uint32_t ce_id,
					  struct hal_srng_params *ring_params)
{
	uint32_t addr_low;
	uint32_t addr_high;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	int ret;

	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);

	/* msi config not found */
	if (ret)
		return;

	pld_get_msi_address(scn->qdf_dev->dev, &addr_low, &addr_high);

	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (ce_id % msi_data_count) + msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;

	HIF_DBG("%s: ce_id %d, msi_addr %pK, msi_data %d", __func__, ce_id,
		(void *)ring_params->msi_addr, ring_params->msi_data);
}

static void ce_srng_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				   struct CE_ring_state *src_ring,
				   struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};

	HIF_INFO("%s: ce_id %d", __func__, ce_id);

	ring_params.ring_base_paddr = src_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = src_ring->base_addr_owner_space;
	ring_params.num_entries = src_ring->nentries;
	/*
	 * The minimum increment for the timer is 8us
	 * A default value of 0 disables the timer
	 * A valid default value caused continuous interrupts to
	 * fire with MSI enabled. Need to revisit usage of the timer
	 */

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);

		ring_params.intr_timer_thres_us = 0;
		ring_params.intr_batch_cntr_thres_entries = 1;
	}

	src_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_SRC, ce_id, 0,
					    &ring_params);
}

/**
 * ce_srng_initialize_dest_timer_interrupt_war() - war initialization
 * @dest_ring: ring being initialized
 * @ring_params: pointer to initialized parameters
 *
 * For Napier & Hawkeye v1, the status ring timer interrupts do not work.
 * As a workaround, the host configures the destination rings to be a proxy
 * for work needing to be done.
 *
 * The interrupts are set up such that if the destination ring is less than
 * fully posted, there is likely undone work for the status ring that the
 * host should process.
 *
 * There is a timing bug in srng based copy engines such that a fully posted
 * srng based copy engine has 2 empty entries instead of just one. The copy
 * engine data structures work with 1 empty entry, but the software
 * frequently fails to post the last entry due to the race condition.
 */
static void ce_srng_initialize_dest_timer_interrupt_war(
		struct CE_ring_state *dest_ring,
		struct hal_srng_params *ring_params)
{
	int num_buffers_when_fully_posted = dest_ring->nentries - 2;

	ring_params->low_threshold = num_buffers_when_fully_posted - 1;
	ring_params->intr_timer_thres_us = 1024;
	ring_params->intr_batch_cntr_thres_entries = 0;
	ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
}

static void ce_srng_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				    struct CE_ring_state *dest_ring,
				    struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};
	bool status_ring_timer_thresh_work_arround = true;

	HIF_INFO("%s: ce_id %d", __func__, ce_id);

	ring_params.ring_base_paddr = dest_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = dest_ring->base_addr_owner_space;
	ring_params.num_entries = dest_ring->nentries;
	ring_params.max_buffer_length = attr->src_sz_max;

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
		if (status_ring_timer_thresh_work_arround) {
			ce_srng_initialize_dest_timer_interrupt_war(
					dest_ring, &ring_params);
		} else {
			/* normal behavior for future chips */
			ring_params.low_threshold = dest_ring->nentries >> 3;
			ring_params.intr_timer_thres_us = 100000;
			ring_params.intr_batch_cntr_thres_entries = 0;
			ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
		}
	}

	/* Dest ring is also source ring */
	dest_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST, ce_id, 0,
					     &ring_params);
}

static void ce_srng_status_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				      struct CE_ring_state *status_ring,
				      struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};

	HIF_INFO("%s: ce_id %d", __func__, ce_id);

	ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);

	ring_params.ring_base_paddr = status_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = status_ring->base_addr_owner_space;
	ring_params.num_entries = status_ring->nentries;

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ring_params.intr_timer_thres_us = 0x1000;
		ring_params.intr_batch_cntr_thres_entries = 0x1;
	}

	status_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST_STATUS,
					       ce_id, 0, &ring_params);
}

static int ce_ring_setup_srng(struct hif_softc *scn, uint8_t ring_type,
			      uint32_t ce_id, struct CE_ring_state *ring,
			      struct CE_attr *attr)
{
	switch (ring_type) {
	case CE_RING_SRC:
		ce_srng_src_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_DEST:
		ce_srng_dest_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_STATUS:
		ce_srng_status_ring_setup(scn, ce_id, ring, attr);
		break;
	default:
		qdf_assert(0);
		break;
	}

	return 0;
}

static void ce_construct_shadow_config_srng(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int ce_id;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (hif_state->host_ce_config[ce_id].src_nentries)
			hal_set_one_shadow_config(scn->hal_soc,
						  CE_SRC, ce_id);

		if (hif_state->host_ce_config[ce_id].dest_nentries) {
			hal_set_one_shadow_config(scn->hal_soc,
						  CE_DST, ce_id);

			hal_set_one_shadow_config(scn->hal_soc,
						  CE_DST_STATUS, ce_id);
		}
	}
}

static void ce_prepare_shadow_register_v2_cfg_srng(struct hif_softc *scn,
			struct pld_shadow_reg_v2_cfg **shadow_config,
			int *num_shadow_registers_configured)
{
	if (scn->hal_soc == NULL) {
		HIF_ERROR("%s: hal not initialized: not initializing shadow config",
			  __func__);
		return;
	}

	hal_get_shadow_config(scn->hal_soc, shadow_config,
			      num_shadow_registers_configured);

	if (*num_shadow_registers_configured != 0) {
		HIF_ERROR("%s: hal shadow register configuration already constructed",
			  __func__);

		/* return with original configuration */
		return;
	}

	hal_construct_shadow_config(scn->hal_soc);
	ce_construct_shadow_config_srng(scn);

	/* get updated configuration */
	hal_get_shadow_config(scn->hal_soc, shadow_config,
			      num_shadow_registers_configured);
}

static struct ce_ops ce_service_srng = {
	.ce_get_desc_size = ce_get_desc_size_srng,
	.ce_ring_setup = ce_ring_setup_srng,
	.ce_sendlist_send = ce_sendlist_send_srng,
	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_srng,
	.ce_revoke_recv_next = ce_revoke_recv_next_srng,
	.ce_cancel_send_next = ce_cancel_send_next_srng,
	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_srng,
	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_srng,
	.ce_send_nolock = ce_send_nolock_srng,
	.watermark_int = ce_check_int_watermark_srng,
	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_srng,
	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_srng,
	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_srng,
	.ce_prepare_shadow_register_v2_cfg =
		ce_prepare_shadow_register_v2_cfg_srng,
};

struct ce_ops *ce_services_srng(void)
{
	return &ce_service_srng;
}
qdf_export_symbol(ce_services_srng);