/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hif.h"
#include "hif_io32.h"
#include "reg_struct.h"
#include "ce_api.h"
#include "ce_main.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "qdf_lock.h"
#include "regtable.h"
#include "hif_main.h"
#include "hif_debug.h"
#include "hal_api.h"
#include "pld_common.h"
#include "qdf_module.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer, perhaps more
 * than strictly needed. They are provided mainly for maximum
 * flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 * Completed sends can additionally report the Target TX Toeplitz
 * hash result (toeplitz_hash_result).
 */

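/*
 * Illustrative usage sketch (hypothetical, for orientation only):
 * callers reach this SRNG service through the ce_ops table returned
 * by ce_services_srng(). The my_* names below are assumptions made up
 * for this sketch and do not appear elsewhere in the driver.
 *
 *	struct ce_ops *ops = ce_services_srng();
 *
 *	// Post an anonymous receive buffer; my_rx_nbuf is echoed back
 *	// later as the per-transfer context of the completed receive.
 *	ops->ce_recv_buf_enqueue(copyeng, my_rx_nbuf, my_rx_paddr);
 *
 *	// Send one buffer; my_tx_nbuf is echoed back on completion.
 *	ops->ce_send_nolock(copyeng, my_tx_nbuf, my_tx_paddr, nbytes,
 *			    transfer_id, 0, 0);
 */

/*
 * CE_ADDR_COPY() splits a DMA address across the address fields of a
 * destination descriptor: the low 32 bits go into buffer_addr_lo and
 * the next 8 bits into buffer_addr_hi, i.e. a 40-bit address in total.
 */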
#define CE_ADDR_COPY(desc, dma_addr) do {\
		(desc)->buffer_addr_lo = (uint32_t)((dma_addr) &\
							  0xFFFFFFFF);\
		(desc)->buffer_addr_hi =\
			(uint32_t)(((dma_addr) >> 32) & 0xFF);\
	} while (0)

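/**
 * ce_send_nolock_srng() - post a single send descriptor to an SRNG CE
 * @copyeng: copy engine handle
 * @per_transfer_context: caller context echoed back on send completion
 * @buffer: DMA address of the buffer to send
 * @nbytes: number of bytes to send
 * @transfer_id: transfer meta data placed in the source descriptor
 * @flags: CE_SEND_FLAG_* flags for this transfer
 * @user_flags: per-transfer user flags (not used by the SRNG service)
 *
 * The caller takes responsibility for any necessary locking.
 *
 * Return: QDF_STATUS_SUCCESS on success, a QDF error code otherwise
 */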
static int
ce_send_nolock_srng(struct CE_handle *copyeng,
			   void *per_transfer_context,
			   qdf_dma_addr_t buffer,
			   uint32_t nbytes,
			   uint32_t transfer_id,
			   uint32_t flags,
			   uint32_t user_flags)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index = src_ring->write_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return QDF_STATUS_E_FAILURE;
	if (unlikely(hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx,
					false) <= 0)) {
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		Q_TARGET_ACCESS_END(scn);
		return QDF_STATUS_E_FAILURE;
	}
	{
		enum hif_ce_event_type event_type = HIF_TX_GATHER_DESC_POST;
		struct ce_srng_src_desc *src_desc;

		if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
			Q_TARGET_ACCESS_END(scn);
			return QDF_STATUS_E_FAILURE;
		}

		src_desc = hal_srng_src_get_next_reaped(scn->hal_soc,
				src_ring->srng_ctx);
		if (!src_desc) {
			Q_TARGET_ACCESS_END(scn);
			return QDF_STATUS_E_INVAL;
		}

		/* Update low 32 bits source descriptor address */
		src_desc->buffer_addr_lo =
			(uint32_t)(dma_addr & 0xFFFFFFFF);
		src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0xFF);

		src_desc->meta_data = transfer_id;

		/*
		 * Set the swap bit if:
		 * typical sends on this CE are swapped (host is big-endian)
		 * and this send doesn't disable the swapping
		 * (data is not bytestream)
		 */
		src_desc->byte_swap =
			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
		src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
		src_desc->nbytes = nbytes;

		src_ring->per_transfer_context[write_index] =
			per_transfer_context;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);

		/* src_ring->write_index hasn't been updated yet, even
		 * though the register has already been written to.
		 */
		hif_record_ce_desc_event(scn, CE_state->id, event_type,
			(union ce_desc *)src_desc, per_transfer_context,
			src_ring->write_index, nbytes);

		src_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	}
	Q_TARGET_ACCESS_END(scn);
	return status;
}

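/**
 * ce_sendlist_send_srng() - send a list of buffers as one gather transfer
 * @copyeng: copy engine handle
 * @per_transfer_context: caller context echoed back when the final
 *	item of the list completes
 * @sendlist: list of buffers to send
 * @transfer_id: transfer meta data placed in each source descriptor
 *
 * All items except the last are posted with CE_SEND_FLAG_GATHER so the
 * hardware delivers them into a single destination buffer. If the ring
 * does not have room for the entire list, nothing is posted and an
 * error is returned.
 *
 * Return: QDF_STATUS_SUCCESS on success, an error code otherwise
 */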
static int
ce_sendlist_send_srng(struct CE_handle *copyeng,
		 void *per_transfer_context,
		 struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	int status = -ENOMEM;
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int num_items = sl->num_items;
	unsigned int sw_index;
	unsigned int write_index;
	struct hif_softc *scn = CE_state->scn;

	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx, false) >=
	    num_items) {
		struct ce_sendlist_item *item;
		int i;

		/* handle all but the last item uniformly */
		for (i = 0; i < num_items - 1; i++) {
			item = &sl->item[i];
			/* TBDXXX: Support extensible sendlist_types? */
			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
			status = ce_send_nolock_srng(copyeng,
					CE_SENDLIST_ITEM_CTXT,
					(qdf_dma_addr_t) item->data,
					item->u.nbytes, transfer_id,
					item->flags | CE_SEND_FLAG_GATHER,
					item->user_flags);
			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		}
		/* provide valid context pointer for final item */
		item = &sl->item[i];
		/* TBDXXX: Support extensible sendlist_types? */
		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
		status = ce_send_nolock_srng(copyeng, per_transfer_context,
					(qdf_dma_addr_t) item->data,
					item->u.nbytes,
					transfer_id, item->flags,
					item->user_flags);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
					QDF_NBUF_TX_PKT_CE);
		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
			QDF_TRACE_DEFAULT_PDEV_ID,
			(uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
			sizeof(((qdf_nbuf_t)per_transfer_context)->data), QDF_TX));
	} else {
		/*
		 * Probably not worth the additional complexity to support
		 * partial sends with continuation or notification. We expect
		 * to use large rings and small sendlists. If we can't handle
		 * the entire request at once, punt it back to the caller.
		 */
	}
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

#define SLOTS_PER_DATAPATH_TX 2

#ifndef AH_NEED_TX_DATA_SWAP
#define AH_NEED_TX_DATA_SWAP 0
#endif
/**
 * ce_recv_buf_enqueue_srng() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: QDF_STATUS_SUCCESS if the buffer is enqueued, an error code
 *	otherwise
 */
static int
ce_recv_buf_enqueue_srng(struct CE_handle *copyeng,
		    void *per_recv_context, qdf_dma_addr_t buffer)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return -EIO;
	}

	if (hal_srng_access_start(scn->hal_soc, dest_ring->srng_ctx)) {
		/* balance the Q_TARGET_ACCESS_BEGIN above before bailing */
		Q_TARGET_ACCESS_END(scn);
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return QDF_STATUS_E_FAILURE;
	}

	if ((hal_srng_src_num_avail(scn->hal_soc,
					dest_ring->srng_ctx, false) > 0)) {
		struct ce_srng_dest_desc *dest_desc =
				hal_srng_src_get_next(scn->hal_soc,
							dest_ring->srng_ctx);

		if (dest_desc == NULL) {
			status = QDF_STATUS_E_FAILURE;
		} else {

			CE_ADDR_COPY(dest_desc, dma_addr);

			dest_ring->per_transfer_context[write_index] =
				per_recv_context;

			/* Update Destination Ring Write Index */
			write_index = CE_RING_IDX_INCR(nentries_mask,
						       write_index);
			status = QDF_STATUS_SUCCESS;
		}
	} else
		status = QDF_STATUS_E_FAILURE;

	dest_ring->write_index = write_index;
	hal_srng_access_end(scn->hal_soc, dest_ring->srng_ctx);
	Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
	return status;
}

/*
 * Guts of ce_recv_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
static unsigned int
ce_recv_entries_done_nolock_srng(struct hif_softc *scn,
			    struct CE_state *CE_state)
{
	struct CE_ring_state *status_ring = CE_state->status_ring;

	return hal_srng_dst_num_valid(scn->hal_soc,
				status_ring->srng_ctx, false);
}

/*
 * Guts of ce_send_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
static unsigned int
ce_send_entries_done_nolock_srng(struct hif_softc *scn,
					struct CE_state *CE_state)
{
	struct CE_ring_state *src_ring = CE_state->src_ring;
	int count = 0;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx))
		return 0;

	count = hal_srng_src_done_val(scn->hal_soc, src_ring->srng_ctx);

	hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);

	return count;
}

/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static int
ce_completed_recv_next_nolock_srng(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      qdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *flagsp)
{
	int status;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	struct CE_ring_state *status_ring = CE_state->status_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_dest_status_desc *dest_status;
	int nbytes;
	struct ce_srng_dest_status_desc dest_status_info;

	if (hal_srng_access_start(scn->hal_soc, status_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	dest_status = hal_srng_dst_get_next(scn->hal_soc,
					status_ring->srng_ctx);

	if (dest_status == NULL) {
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}
	/*
	 * By copying the dest_desc_info element to local memory, we avoid
	 * extra memory reads from non-cacheable memory.
	 */
	dest_status_info = *dest_status;
	nbytes = dest_status_info.nbytes;
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	dest_status->nbytes = 0;

	*nbytesp = nbytes;
	*transfer_idp = dest_status_info.meta_data;
	*flagsp = (dest_status_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with the status ring.
	 * Get the per-transfer context from dest_ring.
	 */
	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	dest_ring->per_transfer_context[sw_index] = 0; /* sanity */

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	status = QDF_STATUS_SUCCESS;

done:
	hal_srng_access_end(scn->hal_soc, status_ring->srng_ctx);

	return status;
}

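/**
 * ce_revoke_recv_next_srng() - revoke a previously posted recv buffer
 * @copyeng: copy engine handle
 * @per_CE_contextp: filled with the per-CE recv context, if non-NULL
 * @per_transfer_contextp: filled with the per-transfer context of the
 *	revoked buffer, if non-NULL
 * @bufferp: not used by the SRNG service
 *
 * Pulls back a receive buffer that was posted but never filled by the
 * target, typically during shutdown or ring cleanup.
 *
 * Return: QDF_STATUS_SUCCESS if a buffer was revoked,
 *	QDF_STATUS_E_FAILURE when there is nothing left to revoke
 */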
static QDF_STATUS
ce_revoke_recv_next_srng(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int sw_index;

	if (!dest_ring)
		return QDF_STATUS_E_FAILURE;

	sw_index = dest_ring->sw_index;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with the status ring.
	 * Get the per-transfer context from dest_ring.
	 */
	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	if (dest_ring->per_transfer_context[sw_index] == NULL)
		return QDF_STATUS_E_FAILURE;

	/* provide end condition */
	dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(dest_ring->nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	return QDF_STATUS_SUCCESS;
}

/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static int
ce_completed_send_next_nolock_srng(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      qdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *sw_idx,
			      unsigned int *hw_idx,
			      uint32_t *toeplitz_hash_result)
{
	int status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int swi = src_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_src_desc *src_desc;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		return status;
	}

	src_desc = hal_srng_src_reap_next(scn->hal_soc, src_ring->srng_ctx);
	if (src_desc) {
		hif_record_ce_desc_event(scn, CE_state->id,
					HIF_TX_DESC_COMPLETION,
					(union ce_desc *)src_desc,
					src_ring->per_transfer_context[swi],
					swi, src_desc->nbytes);

		/* Return data from completed source descriptor */
		*bufferp = (qdf_dma_addr_t)
			(((uint64_t)(src_desc)->buffer_addr_lo +
			  ((uint64_t)((src_desc)->buffer_addr_hi &
				  0xFF) << 32)));
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		/* sw_index is used more like read index */
		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		src_ring->per_transfer_context[sw_index] = 0; /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}
	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return status;
}

/* NB: Modelled after ce_completed_send_next */
static QDF_STATUS
ce_cancel_send_next_srng(struct CE_handle *copyeng,
		void **per_CE_contextp,
		void **per_transfer_contextp,
		qdf_dma_addr_t *bufferp,
		unsigned int *nbytesp,
		unsigned int *transfer_idp,
		uint32_t *toeplitz_hash_result)
{
	struct CE_state *CE_state;
	int status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	struct hif_softc *scn;
	struct ce_srng_src_desc *src_desc;

	CE_state = (struct CE_state *)copyeng;
	src_ring = CE_state->src_ring;
	if (!src_ring)
		return QDF_STATUS_E_FAILURE;

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	scn = CE_state->scn;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		return status;
	}

	src_desc = hal_srng_src_pending_reap_next(scn->hal_soc,
			src_ring->srng_ctx);
	if (src_desc) {
		/* Return data from completed source descriptor */
		*bufferp = (qdf_dma_addr_t)
			(((uint64_t)(src_desc)->buffer_addr_lo +
			  ((uint64_t)((src_desc)->buffer_addr_hi &
				  0xFF) << 32)));
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		/* sw_index is used more like read index */
		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		src_ring->per_transfer_context[sw_index] = 0; /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}
	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return status;
}

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with target_lock held.
 */
static void
ce_per_engine_handler_adjust_srng(struct CE_state *CE_state,
			     int disable_copy_compl_intr)
{
}

static bool ce_check_int_watermark_srng(struct CE_state *CE_state,
					unsigned int *flags)
{
	/*TODO*/
	return false;
}

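/**
 * ce_get_desc_size_srng() - get the HW descriptor size for a CE ring type
 * @ring_type: CE_RING_SRC, CE_RING_DEST or CE_RING_STATUS
 *
 * Return: size in bytes of one descriptor of the given ring type,
 *	or 0 for an unknown ring type
 */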
static uint32_t ce_get_desc_size_srng(uint8_t ring_type)
{
	switch (ring_type) {
	case CE_RING_SRC:
		return sizeof(struct ce_srng_src_desc);
	case CE_RING_DEST:
		return sizeof(struct ce_srng_dest_desc);
	case CE_RING_STATUS:
		return sizeof(struct ce_srng_dest_status_desc);
	default:
		return 0;
	}
	return 0;
}

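/**
 * ce_srng_msi_ring_params_setup() - fill MSI fields of the ring params
 * @scn: hif context
 * @ce_id: copy engine id
 * @ring_params: ring params structure being populated
 *
 * Queries the platform driver for the MSI assignment of the "CE" block
 * and, if one exists, programs the MSI address and per-CE MSI data into
 * @ring_params and sets HAL_SRNG_MSI_INTR. Does nothing when no MSI
 * configuration is found.
 */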
static void ce_srng_msi_ring_params_setup(struct hif_softc *scn, uint32_t ce_id,
			      struct hal_srng_params *ring_params)
{
	uint32_t addr_low;
	uint32_t addr_high;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	int ret;

	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);

	/* msi config not found */
	if (ret)
		return;

	pld_get_msi_address(scn->qdf_dev->dev, &addr_low, &addr_high);

	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (ce_id % msi_data_count) + msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;

	HIF_DBG("%s: ce_id %d, msi_addr %pK, msi_data %d", __func__, ce_id,
		  (void *)ring_params->msi_addr, ring_params->msi_data);
}

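/**
 * ce_srng_src_ring_setup() - set up the HAL SRNG for a CE source ring
 * @scn: hif context
 * @ce_id: copy engine id
 * @src_ring: CE source ring state
 * @attr: CE attributes for this copy engine
 *
 * Populates the hal_srng_params from the ring state, configures MSI and
 * interrupt batching when interrupts are not disabled for this CE, and
 * creates the HAL ring via hal_srng_setup().
 */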
static void ce_srng_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				   struct CE_ring_state *src_ring,
				   struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};

	HIF_INFO("%s: ce_id %d", __func__, ce_id);

	ring_params.ring_base_paddr = src_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = src_ring->base_addr_owner_space;
	ring_params.num_entries = src_ring->nentries;
	/*
	 * The minimum increment for the timer is 8us.
	 * A value of 0 disables the timer.
	 * A valid non-zero value caused continuous interrupts to
	 * fire with MSI enabled. Need to revisit usage of the timer.
	 */

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);

		ring_params.intr_timer_thres_us = 0;
		ring_params.intr_batch_cntr_thres_entries = 1;
	}

	src_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_SRC, ce_id, 0,
					    &ring_params);
}

/**
 * ce_srng_initialize_dest_timer_interrupt_war() - war initialization
 * @dest_ring: ring being initialized
 * @ring_params: pointer to initialized parameters
 *
 * For Napier & Hawkeye v1, the status ring timer interrupts do not work.
 * As a workaround, the host configures the destination ring to be a proxy
 * for work needing to be done.
 *
 * The interrupts are set up such that if the destination ring is less than
 * fully posted, there is likely undone work for the status ring that the
 * host should process.
 *
 * There is a timing bug in srng based copy engines such that a fully posted
 * srng based copy engine has 2 empty entries instead of just one. The copy
 * engine data structures work with 1 empty entry, but the software
 * frequently fails to post the last entry due to the race condition.
 */
static void ce_srng_initialize_dest_timer_interrupt_war(
		struct CE_ring_state *dest_ring,
		struct hal_srng_params *ring_params)
{
	int num_buffers_when_fully_posted = dest_ring->nentries - 2;

	ring_params->low_threshold = num_buffers_when_fully_posted - 1;
	ring_params->intr_timer_thres_us = 1024;
	ring_params->intr_batch_cntr_thres_entries = 0;
	ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
}

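/**
 * ce_srng_dest_ring_setup() - set up the HAL SRNG for a CE destination ring
 * @scn: hif context
 * @ce_id: copy engine id
 * @dest_ring: CE destination ring state
 * @attr: CE attributes for this copy engine
 *
 * Populates the hal_srng_params from the ring state and, when interrupts
 * are enabled for this CE, applies either the destination-ring timer
 * interrupt workaround above or the normal low-threshold settings before
 * creating the HAL ring via hal_srng_setup().
 */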
static void ce_srng_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				    struct CE_ring_state *dest_ring,
				    struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};
	bool status_ring_timer_thresh_work_arround = true;

	HIF_INFO("%s: ce_id %d", __func__, ce_id);

	ring_params.ring_base_paddr = dest_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = dest_ring->base_addr_owner_space;
	ring_params.num_entries = dest_ring->nentries;
	ring_params.max_buffer_length = attr->src_sz_max;

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
		if (status_ring_timer_thresh_work_arround) {
			ce_srng_initialize_dest_timer_interrupt_war(
					dest_ring, &ring_params);
		} else {
			/* normal behavior for future chips */
			ring_params.low_threshold = dest_ring->nentries >> 3;
			ring_params.intr_timer_thres_us = 100000;
			ring_params.intr_batch_cntr_thres_entries = 0;
			ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
		}
	}

	/* Dest ring is also source ring */
	dest_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST, ce_id, 0,
					     &ring_params);
}

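/**
 * ce_srng_status_ring_setup() - set up the HAL SRNG for a CE status ring
 * @scn: hif context
 * @ce_id: copy engine id
 * @status_ring: CE destination status ring state
 * @attr: CE attributes for this copy engine
 *
 * Populates the hal_srng_params from the ring state, configures MSI for
 * the ring, applies interrupt thresholds when interrupts are enabled for
 * this CE, and creates the HAL ring via hal_srng_setup().
 */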
static void ce_srng_status_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				      struct CE_ring_state *status_ring,
				      struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};

	HIF_INFO("%s: ce_id %d", __func__, ce_id);

	ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);

	ring_params.ring_base_paddr = status_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = status_ring->base_addr_owner_space;
	ring_params.num_entries = status_ring->nentries;

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ring_params.intr_timer_thres_us = 0x1000;
		ring_params.intr_batch_cntr_thres_entries = 0x1;
	}

	status_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST_STATUS,
					       ce_id, 0, &ring_params);
}

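/**
 * ce_ring_setup_srng() - set up a CE ring of the requested type
 * @scn: hif context
 * @ring_type: CE_RING_SRC, CE_RING_DEST or CE_RING_STATUS
 * @ce_id: copy engine id
 * @ring: CE ring state to attach the HAL SRNG to
 * @attr: CE attributes for this copy engine
 *
 * Dispatches to the source, destination or status ring setup routine
 * above based on @ring_type.
 *
 * Return: 0
 */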
static int ce_ring_setup_srng(struct hif_softc *scn, uint8_t ring_type,
			uint32_t ce_id, struct CE_ring_state *ring,
			struct CE_attr *attr)
{
	switch (ring_type) {
	case CE_RING_SRC:
		ce_srng_src_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_DEST:
		ce_srng_dest_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_STATUS:
		ce_srng_status_ring_setup(scn, ce_id, ring, attr);
		break;
	default:
		qdf_assert(0);
		break;
	}

	return 0;
}

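/**
 * ce_construct_shadow_config_srng() - set up shadow registers for CE rings
 * @scn: hif context
 *
 * Registers one shadow register configuration per configured source ring
 * and, for each configured destination ring, one for the destination ring
 * and one for its status ring.
 */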
static void ce_construct_shadow_config_srng(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int ce_id;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (hif_state->host_ce_config[ce_id].src_nentries)
			hal_set_one_shadow_config(scn->hal_soc,
						  CE_SRC, ce_id);

		if (hif_state->host_ce_config[ce_id].dest_nentries) {
			hal_set_one_shadow_config(scn->hal_soc,
						  CE_DST, ce_id);

			hal_set_one_shadow_config(scn->hal_soc,
						  CE_DST_STATUS, ce_id);
		}
	}
}

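/**
 * ce_prepare_shadow_register_v2_cfg_srng() - build the v2 shadow config
 * @scn: hif context
 * @shadow_config: filled with the HAL shadow register configuration
 * @num_shadow_registers_configured: filled with the number of entries
 *
 * Returns the existing HAL shadow configuration if one has already been
 * constructed; otherwise constructs the HAL and CE shadow configuration
 * first and then returns the updated configuration.
 */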
static void ce_prepare_shadow_register_v2_cfg_srng(struct hif_softc *scn,
			struct pld_shadow_reg_v2_cfg **shadow_config,
			int *num_shadow_registers_configured)
{
	if (scn->hal_soc == NULL) {
		HIF_ERROR("%s: hal not initialized: not initializing shadow config",
			  __func__);
		return;
	}

	hal_get_shadow_config(scn->hal_soc, shadow_config,
			      num_shadow_registers_configured);

	if (*num_shadow_registers_configured != 0) {
		HIF_ERROR("%s: hal shadow register configuration already constructed",
			  __func__);

		/* return with original configuration */
		return;
	}

	hal_construct_shadow_config(scn->hal_soc);
	ce_construct_shadow_config_srng(scn);

	/* get updated configuration */
	hal_get_shadow_config(scn->hal_soc, shadow_config,
			      num_shadow_registers_configured);
}

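/* SRNG implementation of the copy engine service ops */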
static struct ce_ops ce_service_srng = {
	.ce_get_desc_size = ce_get_desc_size_srng,
	.ce_ring_setup = ce_ring_setup_srng,
	.ce_sendlist_send = ce_sendlist_send_srng,
	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_srng,
	.ce_revoke_recv_next = ce_revoke_recv_next_srng,
	.ce_cancel_send_next = ce_cancel_send_next_srng,
	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_srng,
	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_srng,
	.ce_send_nolock = ce_send_nolock_srng,
	.watermark_int = ce_check_int_watermark_srng,
	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_srng,
	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_srng,
	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_srng,
	.ce_prepare_shadow_register_v2_cfg =
		ce_prepare_shadow_register_v2_cfg_srng,
};

struct ce_ops *ce_services_srng()
{
	return &ce_service_srng;
}
qdf_export_symbol(ce_services_srng);

void ce_service_srng_init(void)
{
	ce_service_register_module(CE_SVC_SRNG, &ce_services_srng);
}