/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "hif_io32.h"
#include "reg_struct.h"
#include "ce_api.h"
#include "ce_main.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "qdf_lock.h"
#include "regtable.h"
#include "hif_main.h"
#include "hif_debug.h"
#include "hal_api.h"
#include "pld_common.h"
#include "qdf_module.h"
#include "hif.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem, than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 * Target TX hash result: toeplitz_hash_result
 */
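
/*
 * Illustrative usage sketch (not part of the driver): callers normally
 * reach this service through the ce_ops table registered at the bottom
 * of this file rather than by calling the static helpers directly.
 * Assuming a CE_handle plus an already DMA-mapped buffer, a send that
 * carries a per-transfer context looks roughly like:
 *
 *	status = ce_send_nolock_srng(copyeng, nbuf, paddr, nbytes,
 *				     transfer_id, 0, 0);
 *
 * where nbuf and paddr are placeholders supplied by the caller. The same
 * nbuf is echoed back to the caller when the completion is reaped via
 * ce_completed_send_next_nolock_srng().
 */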

#define CE_ADDR_COPY(desc, dma_addr) do {\
		(desc)->buffer_addr_lo = (uint32_t)((dma_addr) &\
							  0xFFFFFFFF);\
		(desc)->buffer_addr_hi =\
			(uint32_t)(((dma_addr) >> 32) & 0xFF);\
	} while (0)
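
/*
 * Example (illustrative only): for dma_addr = 0x0000001234567890ULL,
 * CE_ADDR_COPY() yields buffer_addr_lo = 0x34567890 and
 * buffer_addr_hi = 0x12; only the low 40 bits of the DMA address are
 * representable in the SRNG source/destination descriptors.
 */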

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
/**
 * hif_record_ce_srng_desc_event() - record a CE SRNG descriptor event
 * @scn: hif_softc pointer
 * @ce_id: copy engine the event belongs to
 * @type: event type
 * @descriptor: descriptor to record (may be NULL)
 * @memory: virtual address associated with the descriptor
 * @index: index of the descriptor in its ring
 * @len: length of the data associated with the descriptor
 * @hal_ring: HAL SRNG the descriptor belongs to (may be NULL)
 *
 * Records the event in the per-CE descriptor history, if enabled.
 */
void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id,
				   enum hif_ce_event_type type,
				   union ce_srng_desc *descriptor,
				   void *memory, int index,
				   int len, void *hal_ring)
{
	int record_index;
	struct hif_ce_desc_event *event;
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	struct hif_ce_desc_event *hist_ev = NULL;

	if (ce_id >= CE_COUNT_MAX)
		return;

	hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];

	if (!ce_hist->enable[ce_id])
		return;

	if (!hist_ev)
		return;

	record_index = get_next_record_index(
			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);

	event = &hist_ev[record_index];

	qdf_mem_zero(event, sizeof(struct hif_ce_desc_event));

	event->type = type;
	event->time = qdf_get_log_timestamp();

	if (descriptor)
		qdf_mem_copy(&event->descriptor, descriptor,
			     hal_get_entrysize_from_srng(hal_ring));

	if (hal_ring)
		hal_get_sw_hptp(scn->hal_soc, hal_ring, &event->current_tp,
				&event->current_hp);

	event->memory = memory;
	event->index = index;

	if (ce_hist->data_enable[ce_id])
		hif_ce_desc_data_record(event, len);
}
#endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */

static int
ce_send_nolock_srng(struct CE_handle *copyeng,
		    void *per_transfer_context,
		    qdf_dma_addr_t buffer,
		    uint32_t nbytes,
		    uint32_t transfer_id,
		    uint32_t flags,
		    uint32_t user_flags)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index = src_ring->write_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return QDF_STATUS_E_FAILURE;
	if (unlikely(hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx,
					    false) <= 0)) {
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		Q_TARGET_ACCESS_END(scn);
		return QDF_STATUS_E_FAILURE;
	}
	{
		enum hif_ce_event_type event_type = HIF_CE_SRC_RING_BUFFER_POST;
		struct ce_srng_src_desc *src_desc;

		if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
			Q_TARGET_ACCESS_END(scn);
			return QDF_STATUS_E_FAILURE;
		}

		src_desc = hal_srng_src_get_next_reaped(scn->hal_soc,
							src_ring->srng_ctx);
		if (!src_desc) {
			Q_TARGET_ACCESS_END(scn);
			return QDF_STATUS_E_INVAL;
		}

		/* Update low 32 bits source descriptor address */
		src_desc->buffer_addr_lo =
			(uint32_t)(dma_addr & 0xFFFFFFFF);
		src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0xFF);

		src_desc->meta_data = transfer_id;

		/*
		 * Set the swap bit if:
		 * typical sends on this CE are swapped (host is big-endian)
		 * and this send doesn't disable the swapping
		 * (data is not bytestream)
		 */
		src_desc->byte_swap =
			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
		src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
		src_desc->nbytes = nbytes;

		src_ring->per_transfer_context[write_index] =
			per_transfer_context;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);

		/* src_ring->write_index hasn't been updated yet, even though
		 * the register has already been written to.
		 */
		hif_record_ce_srng_desc_event(scn, CE_state->id, event_type,
					      (union ce_srng_desc *)src_desc,
					      per_transfer_context,
					      src_ring->write_index, nbytes,
					      src_ring->srng_ctx);

		src_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	}
	Q_TARGET_ACCESS_END(scn);
	return status;
}

static int
ce_sendlist_send_srng(struct CE_handle *copyeng,
		      void *per_transfer_context,
		      struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	int status = -ENOMEM;
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int num_items = sl->num_items;
	unsigned int sw_index;
	unsigned int write_index;
	struct hif_softc *scn = CE_state->scn;

	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx, false) >=
	    num_items) {
		struct ce_sendlist_item *item;
		int i;

		/* handle all but the last item uniformly */
		for (i = 0; i < num_items - 1; i++) {
			item = &sl->item[i];
			/* TBDXXX: Support extensible sendlist_types? */
			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
			status = ce_send_nolock_srng(copyeng,
					CE_SENDLIST_ITEM_CTXT,
					(qdf_dma_addr_t)item->data,
					item->u.nbytes, transfer_id,
					item->flags | CE_SEND_FLAG_GATHER,
					item->user_flags);
			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		}
		/* provide valid context pointer for final item */
		item = &sl->item[i];
		/* TBDXXX: Support extensible sendlist_types? */
		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
		status = ce_send_nolock_srng(copyeng, per_transfer_context,
					     (qdf_dma_addr_t)item->data,
					     item->u.nbytes,
					     transfer_id, item->flags,
					     item->user_flags);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
					     QDF_NBUF_TX_PKT_CE);
		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
			QDF_TRACE_DEFAULT_PDEV_ID,
			(uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
			sizeof(((qdf_nbuf_t)per_transfer_context)->data), QDF_TX));
	} else {
		/*
		 * Probably not worth the additional complexity to support
		 * partial sends with continuation or notification. We expect
		 * to use large rings and small sendlists. If we can't handle
		 * the entire request at once, punt it back to the caller.
		 */
	}
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

#define SLOTS_PER_DATAPATH_TX 2

#ifndef AH_NEED_TX_DATA_SWAP
#define AH_NEED_TX_DATA_SWAP 0
#endif
/**
 * ce_recv_buf_enqueue_srng() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: QDF_STATUS_SUCCESS if the buffer is enqueued
 */
static int
ce_recv_buf_enqueue_srng(struct CE_handle *copyeng,
			 void *per_recv_context, qdf_dma_addr_t buffer)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_dest_desc *dest_desc = NULL;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return -EIO;
	}

	if (hal_srng_access_start(scn->hal_soc, dest_ring->srng_ctx)) {
		Q_TARGET_ACCESS_END(scn);
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return QDF_STATUS_E_FAILURE;
	}

	if ((hal_srng_src_num_avail(scn->hal_soc,
				    dest_ring->srng_ctx, false) > 0)) {
		dest_desc = hal_srng_src_get_next(scn->hal_soc,
						  dest_ring->srng_ctx);

		if (!dest_desc) {
			status = QDF_STATUS_E_FAILURE;
		} else {

			CE_ADDR_COPY(dest_desc, dma_addr);

			dest_ring->per_transfer_context[write_index] =
				per_recv_context;

			/* Update Destination Ring Write Index */
			write_index = CE_RING_IDX_INCR(nentries_mask,
						       write_index);
			status = QDF_STATUS_SUCCESS;
		}
	} else {
		status = QDF_STATUS_E_FAILURE;
	}

	dest_ring->write_index = write_index;
	hal_srng_access_end(scn->hal_soc, dest_ring->srng_ctx);
	hif_record_ce_srng_desc_event(scn, CE_state->id,
				      HIF_CE_DEST_RING_BUFFER_POST,
				      (union ce_srng_desc *)dest_desc,
				      per_recv_context,
				      dest_ring->write_index, 0,
				      dest_ring->srng_ctx);

	Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
	return status;
}
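
/*
 * Illustrative usage sketch (not part of the driver): the HIF layer keeps
 * the destination ring topped up with anonymous receive buffers that it
 * has already allocated and DMA mapped, roughly:
 *
 *	while (have_free_recv_buffers()) {
 *		if (ce_recv_buf_enqueue_srng(copyeng, nbuf, paddr))
 *			break;
 *	}
 *
 * have_free_recv_buffers(), nbuf and paddr are placeholders for the
 * caller's own buffer management; a non-zero return means the ring is
 * full or HAL access failed. The nbuf passed as per_recv_context is
 * echoed back when the completed receive is reaped through
 * ce_completed_recv_next_nolock_srng().
 */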

/*
 * Guts of ce_recv_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
static unsigned int
ce_recv_entries_done_nolock_srng(struct hif_softc *scn,
				 struct CE_state *CE_state)
{
	struct CE_ring_state *status_ring = CE_state->status_ring;

	return hal_srng_dst_num_valid(scn->hal_soc,
				      status_ring->srng_ctx, false);
}

/*
 * Guts of ce_send_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
static unsigned int
ce_send_entries_done_nolock_srng(struct hif_softc *scn,
				 struct CE_state *CE_state)
{
	struct CE_ring_state *src_ring = CE_state->src_ring;
	int count = 0;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx))
		return 0;

	count = hal_srng_src_done_val(scn->hal_soc, src_ring->srng_ctx);

	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return count;
}

/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static int
ce_completed_recv_next_nolock_srng(struct CE_state *CE_state,
				   void **per_CE_contextp,
				   void **per_transfer_contextp,
				   qdf_dma_addr_t *bufferp,
				   unsigned int *nbytesp,
				   unsigned int *transfer_idp,
				   unsigned int *flagsp)
{
	int status;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	struct CE_ring_state *status_ring = CE_state->status_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_dest_status_desc *dest_status;
	int nbytes;
	struct ce_srng_dest_status_desc dest_status_info;

	if (hal_srng_access_start(scn->hal_soc, status_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	dest_status = hal_srng_dst_get_next(scn->hal_soc,
					    status_ring->srng_ctx);

	if (!dest_status) {
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}
	/*
	 * By copying the dest_status descriptor to local memory, we avoid
	 * an extra read from non-cacheable memory.
	 */
	dest_status_info = *dest_status;
	nbytes = dest_status_info.nbytes;
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	dest_status->nbytes = 0;

	*nbytesp = nbytes;
	*transfer_idp = dest_status_info.meta_data;
	*flagsp = (dest_status_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with the status ring.
	 * Get the per-transfer context from dest_ring.
	 */
	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	dest_ring->per_transfer_context[sw_index] = 0; /* sanity */

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	status = QDF_STATUS_SUCCESS;

done:
	hal_srng_access_end(scn->hal_soc, status_ring->srng_ctx);

	if (status == QDF_STATUS_SUCCESS) {
		hif_record_ce_srng_desc_event(scn, CE_state->id,
					      HIF_CE_DEST_RING_BUFFER_REAP,
					      NULL,
					      dest_ring->
					      per_transfer_context[sw_index],
					      dest_ring->sw_index, nbytes,
					      dest_ring->srng_ctx);

		hif_record_ce_srng_desc_event(scn, CE_state->id,
					      HIF_CE_DEST_STATUS_RING_REAP,
					      (union ce_srng_desc *)dest_status,
					      NULL,
					      -1, 0,
					      status_ring->srng_ctx);
	}

	return status;
}

static QDF_STATUS
ce_revoke_recv_next_srng(struct CE_handle *copyeng,
			 void **per_CE_contextp,
			 void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int sw_index;

	if (!dest_ring)
		return QDF_STATUS_E_FAILURE;

	sw_index = dest_ring->sw_index;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with the status ring.
	 * Get the per-transfer context from dest_ring.
	 */
	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	if (!dest_ring->per_transfer_context[sw_index])
		return QDF_STATUS_E_FAILURE;

	/* provide end condition */
	dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(dest_ring->nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	return QDF_STATUS_SUCCESS;
}

/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static int
ce_completed_send_next_nolock_srng(struct CE_state *CE_state,
				   void **per_CE_contextp,
				   void **per_transfer_contextp,
				   qdf_dma_addr_t *bufferp,
				   unsigned int *nbytesp,
				   unsigned int *transfer_idp,
				   unsigned int *sw_idx,
				   unsigned int *hw_idx,
				   uint32_t *toeplitz_hash_result)
{
	int status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int swi = src_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_src_desc *src_desc;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		return status;
	}

	src_desc = hal_srng_src_reap_next(scn->hal_soc, src_ring->srng_ctx);
	if (src_desc) {
		hif_record_ce_srng_desc_event(scn, CE_state->id,
					      HIF_TX_DESC_COMPLETION,
					      (union ce_srng_desc *)src_desc,
					      src_ring->
					      per_transfer_context[swi],
					      swi, src_desc->nbytes,
					      src_ring->srng_ctx);

		/* Return data from completed source descriptor */
		*bufferp = (qdf_dma_addr_t)
			(((uint64_t)(src_desc)->buffer_addr_lo +
			  ((uint64_t)((src_desc)->buffer_addr_hi &
				      0xFF) << 32)));
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		/* sw_index is used more like a read index */
		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		src_ring->per_transfer_context[sw_index] = 0; /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}
	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return status;
}

/* NB: Modelled after ce_completed_send_next */
static QDF_STATUS
ce_cancel_send_next_srng(struct CE_handle *copyeng,
			 void **per_CE_contextp,
			 void **per_transfer_contextp,
			 qdf_dma_addr_t *bufferp,
			 unsigned int *nbytesp,
			 unsigned int *transfer_idp,
			 uint32_t *toeplitz_hash_result)
{
	struct CE_state *CE_state;
	int status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	struct hif_softc *scn;
	struct ce_srng_src_desc *src_desc;

	CE_state = (struct CE_state *)copyeng;
	src_ring = CE_state->src_ring;
	if (!src_ring)
		return QDF_STATUS_E_FAILURE;

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	scn = CE_state->scn;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		return status;
	}

	src_desc = hal_srng_src_pending_reap_next(scn->hal_soc,
						  src_ring->srng_ctx);
	if (src_desc) {
		/* Return data from completed source descriptor */
		*bufferp = (qdf_dma_addr_t)
			(((uint64_t)(src_desc)->buffer_addr_lo +
			  ((uint64_t)((src_desc)->buffer_addr_hi &
				      0xFF) << 32)));
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		/* sw_index is used more like a read index */
		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		src_ring->per_transfer_context[sw_index] = 0; /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}
	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return status;
}

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with target_lock held.
 */
static void
ce_per_engine_handler_adjust_srng(struct CE_state *CE_state,
				  int disable_copy_compl_intr)
{
}

static bool ce_check_int_watermark_srng(struct CE_state *CE_state,
					unsigned int *flags)
{
	/*TODO*/
	return false;
}

static uint32_t ce_get_desc_size_srng(uint8_t ring_type)
{
	switch (ring_type) {
	case CE_RING_SRC:
		return sizeof(struct ce_srng_src_desc);
	case CE_RING_DEST:
		return sizeof(struct ce_srng_dest_desc);
	case CE_RING_STATUS:
		return sizeof(struct ce_srng_dest_status_desc);
	default:
		return 0;
	}
	return 0;
}

static void ce_srng_msi_ring_params_setup(struct hif_softc *scn, uint32_t ce_id,
					  struct hal_srng_params *ring_params)
{
	uint32_t addr_low;
	uint32_t addr_high;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	int ret;

	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);

	/* msi config not found */
	if (ret)
		return;

	pld_get_msi_address(scn->qdf_dev->dev, &addr_low, &addr_high);

	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (ce_id % msi_data_count) + msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;

	HIF_DBG("%s: ce_id %d, msi_addr %pK, msi_data %d", __func__, ce_id,
		(void *)ring_params->msi_addr, ring_params->msi_data);
}
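
/*
 * Example (illustrative only): if pld_get_user_msi_assignment() reports
 * msi_data_count = 8 and msi_data_start = 1, then CE 5 is assigned
 * msi_data = (5 % 8) + 1 = 6, and the 64-bit msi_addr is composed from
 * the low/high halves returned by pld_get_msi_address().
 */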

static void ce_srng_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				   struct CE_ring_state *src_ring,
				   struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};

	hif_debug("%s: ce_id %d", __func__, ce_id);

	ring_params.ring_base_paddr = src_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = src_ring->base_addr_owner_space;
	ring_params.num_entries = src_ring->nentries;
	/*
	 * The minimum increment for the timer is 8us.
	 * A default value of 0 disables the timer.
	 * A valid default value caused continuous interrupts to
	 * fire with MSI enabled. Need to revisit usage of the timer.
	 */

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);

		ring_params.intr_timer_thres_us = 0;
		ring_params.intr_batch_cntr_thres_entries = 1;
	}

	src_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_SRC, ce_id, 0,
					    &ring_params);
}

/**
 * ce_srng_initialize_dest_timer_interrupt_war() - war initialization
 * @dest_ring: ring being initialized
 * @ring_params: pointer to initialized parameters
 *
 * For Napier & Hawkeye v1, the status ring timer interrupts do not work.
 * As a workaround, the host configures the destination ring to be a proxy
 * for work needing to be done.
 *
 * The interrupts are set up such that if the destination ring is less than
 * fully posted, there is likely undone work for the status ring that the
 * host should process.
 *
 * There is a timing bug in srng based copy engines such that a fully posted
 * srng based copy engine has 2 empty entries instead of just one. The copy
 * engine data structures work with 1 empty entry, but the software
 * frequently fails to post the last entry due to the race condition.
 */
static void ce_srng_initialize_dest_timer_interrupt_war(
		struct CE_ring_state *dest_ring,
		struct hal_srng_params *ring_params)
{
	int num_buffers_when_fully_posted = dest_ring->nentries - 2;

	ring_params->low_threshold = num_buffers_when_fully_posted - 1;
	ring_params->intr_timer_thres_us = 1024;
	ring_params->intr_batch_cntr_thres_entries = 0;
	ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
}
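
/*
 * Example (illustrative only): with a 512-entry destination ring,
 * num_buffers_when_fully_posted = 510, so low_threshold = 509. Together
 * with the 1024 us interrupt timer, the low-threshold interrupt is
 * expected to fire as soon as the ring drops below fully posted, which
 * the host treats as a hint that status ring entries need processing.
 */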

static void ce_srng_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				    struct CE_ring_state *dest_ring,
				    struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};
	bool status_ring_timer_thresh_work_arround = true;

	HIF_INFO("%s: ce_id %d", __func__, ce_id);

	ring_params.ring_base_paddr = dest_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = dest_ring->base_addr_owner_space;
	ring_params.num_entries = dest_ring->nentries;
	ring_params.max_buffer_length = attr->src_sz_max;

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
		if (status_ring_timer_thresh_work_arround) {
			ce_srng_initialize_dest_timer_interrupt_war(
					dest_ring, &ring_params);
		} else {
			/* normal behavior for future chips */
			ring_params.low_threshold = dest_ring->nentries >> 3;
			ring_params.intr_timer_thres_us = 100000;
			ring_params.intr_batch_cntr_thres_entries = 0;
			ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
		}
	}

	/* Dest ring is also source ring */
	dest_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST, ce_id, 0,
					     &ring_params);
}

static void ce_srng_status_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				      struct CE_ring_state *status_ring,
				      struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};

	HIF_INFO("%s: ce_id %d", __func__, ce_id);

	ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);

	ring_params.ring_base_paddr = status_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = status_ring->base_addr_owner_space;
	ring_params.num_entries = status_ring->nentries;

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ring_params.intr_timer_thres_us = 0x1000;
		ring_params.intr_batch_cntr_thres_entries = 0x1;
	}

	status_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST_STATUS,
					       ce_id, 0, &ring_params);
}

static int ce_ring_setup_srng(struct hif_softc *scn, uint8_t ring_type,
			      uint32_t ce_id, struct CE_ring_state *ring,
			      struct CE_attr *attr)
{
	switch (ring_type) {
	case CE_RING_SRC:
		ce_srng_src_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_DEST:
		ce_srng_dest_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_STATUS:
		ce_srng_status_ring_setup(scn, ce_id, ring, attr);
		break;
	default:
		qdf_assert(0);
		break;
	}

	return 0;
}

static void ce_construct_shadow_config_srng(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int ce_id;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (hif_state->host_ce_config[ce_id].src_nentries)
			hal_set_one_shadow_config(scn->hal_soc,
						  CE_SRC, ce_id);

		if (hif_state->host_ce_config[ce_id].dest_nentries) {
			hal_set_one_shadow_config(scn->hal_soc,
						  CE_DST, ce_id);

			hal_set_one_shadow_config(scn->hal_soc,
						  CE_DST_STATUS, ce_id);
		}
	}
}

static void ce_prepare_shadow_register_v2_cfg_srng(struct hif_softc *scn,
		struct pld_shadow_reg_v2_cfg **shadow_config,
		int *num_shadow_registers_configured)
{
	if (!scn->hal_soc) {
		HIF_ERROR("%s: hal not initialized: not initializing shadow config",
			  __func__);
		return;
	}

	hal_get_shadow_config(scn->hal_soc, shadow_config,
			      num_shadow_registers_configured);

	if (*num_shadow_registers_configured != 0) {
		HIF_ERROR("%s: hal shadow register configuration already constructed",
			  __func__);

		/* return with original configuration */
		return;
	}

	hal_construct_shadow_config(scn->hal_soc);
	ce_construct_shadow_config_srng(scn);

	/* get updated configuration */
	hal_get_shadow_config(scn->hal_soc, shadow_config,
			      num_shadow_registers_configured);
}

static struct ce_ops ce_service_srng = {
	.ce_get_desc_size = ce_get_desc_size_srng,
	.ce_ring_setup = ce_ring_setup_srng,
	.ce_sendlist_send = ce_sendlist_send_srng,
	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_srng,
	.ce_revoke_recv_next = ce_revoke_recv_next_srng,
	.ce_cancel_send_next = ce_cancel_send_next_srng,
	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_srng,
	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_srng,
	.ce_send_nolock = ce_send_nolock_srng,
	.watermark_int = ce_check_int_watermark_srng,
	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_srng,
	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_srng,
	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_srng,
	.ce_prepare_shadow_register_v2_cfg =
		ce_prepare_shadow_register_v2_cfg_srng,
};

struct ce_ops *ce_services_srng(void)
{
	return &ce_service_srng;
}
qdf_export_symbol(ce_services_srng);

void ce_service_srng_init(void)
{
	ce_service_register_module(CE_SVC_SRNG, &ce_services_srng);
}