/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "hif_io32.h"
#include "reg_struct.h"
#include "ce_api.h"
#include "ce_main.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "qdf_lock.h"
#include "regtable.h"
#include "hif_main.h"
#include "hif_debug.h"
#include "hal_api.h"
#include "pld_common.h"
#include "qdf_module.h"
#include "hif.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem, than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 * Target TX hash result: toeplitz_hash_result
 */

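/*
 * Minimal usage sketch (illustrative only): how a caller could drive a
 * copy engine through the SRNG ops table registered at the bottom of this
 * file. The helper name, the 64-byte length and the zero transfer_id are
 * assumptions made for this example; real callers normally go through the
 * ce_send()/ce_recv_buf_enqueue() wrappers declared in ce_api.h rather
 * than dispatching on the ops table directly.
 */
#if 0	/* sketch, not built */
static void ce_srng_usage_sketch(struct CE_handle *copyeng,
				 qdf_nbuf_t tx_nbuf, qdf_dma_addr_t tx_paddr,
				 qdf_nbuf_t rx_nbuf, qdf_dma_addr_t rx_paddr)
{
	struct ce_ops *ops = ce_services_srng();

	/* keep the destination ring supplied with anonymous recv buffers */
	ops->ce_recv_buf_enqueue(copyeng, rx_nbuf, rx_paddr);

	/* post a simple (non-gather) 64-byte send with transfer_id 0 */
	ops->ce_send_nolock(copyeng, tx_nbuf, tx_paddr, 64, 0, 0, 0);
}
#endif
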
#define CE_ADDR_COPY(desc, dma_addr) do {\
		(desc)->buffer_addr_lo = (uint32_t)((dma_addr) &\
						    0xFFFFFFFF);\
		(desc)->buffer_addr_hi =\
			(uint32_t)(((dma_addr) >> 32) & 0xFF);\
	} while (0)

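/*
 * Worked example: for dma_addr = 0x1234567890, CE_ADDR_COPY() stores
 * buffer_addr_lo = 0x34567890 and buffer_addr_hi = 0x12. The 0xFF mask on
 * the high word means only 40 bits of DMA address are carried in the
 * descriptor.
 */
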
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id,
				   enum hif_ce_event_type type,
				   union ce_srng_desc *descriptor,
				   void *memory, int index,
				   int len, void *hal_ring)
{
	int record_index;
	struct hif_ce_desc_event *event;
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	struct hif_ce_desc_event *hist_ev = NULL;

	if (ce_id < CE_COUNT_MAX)
		hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];
	else
		return;

	if (!ce_hist->enable[ce_id])
		return;

	if (!hist_ev)
		return;

	record_index = get_next_record_index(
			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);

	event = &hist_ev[record_index];

	hif_clear_ce_desc_debug_data(event);

	event->type = type;
	event->time = qdf_get_log_timestamp();

	if (descriptor)
		qdf_mem_copy(&event->descriptor, descriptor,
			     hal_get_entrysize_from_srng(hal_ring));

	if (hal_ring)
		hal_get_sw_hptp(scn->hal_soc, hal_ring, &event->current_tp,
				&event->current_hp);

	event->memory = memory;
	event->index = index;

	if (ce_hist->data_enable[ce_id])
		hif_ce_desc_data_record(event, len);
}
#endif

static int
ce_send_nolock_srng(struct CE_handle *copyeng,
		    void *per_transfer_context,
		    qdf_dma_addr_t buffer,
		    uint32_t nbytes,
		    uint32_t transfer_id,
		    uint32_t flags,
		    uint32_t user_flags)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index = src_ring->write_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return QDF_STATUS_E_FAILURE;
	if (unlikely(hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx,
					    false) <= 0)) {
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		Q_TARGET_ACCESS_END(scn);
		return QDF_STATUS_E_FAILURE;
	}
	{
		enum hif_ce_event_type event_type = HIF_CE_SRC_RING_BUFFER_POST;
		struct ce_srng_src_desc *src_desc;

		if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
			Q_TARGET_ACCESS_END(scn);
			return QDF_STATUS_E_FAILURE;
		}

		src_desc = hal_srng_src_get_next_reaped(scn->hal_soc,
							src_ring->srng_ctx);
		if (!src_desc) {
			Q_TARGET_ACCESS_END(scn);
			return QDF_STATUS_E_INVAL;
		}

		/* Update low 32 bits source descriptor address */
		src_desc->buffer_addr_lo =
			(uint32_t)(dma_addr & 0xFFFFFFFF);
		src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0xFF);

		src_desc->meta_data = transfer_id;

		/*
		 * Set the swap bit if:
		 * typical sends on this CE are swapped (host is big-endian)
		 * and this send doesn't disable the swapping
		 * (data is not bytestream)
		 */
		src_desc->byte_swap =
			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
		src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
		src_desc->nbytes = nbytes;

		src_ring->per_transfer_context[write_index] =
			per_transfer_context;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);

		/* src_ring->write_index hasn't been updated yet, even though
		 * the register has already been written to.
		 */
		hif_record_ce_srng_desc_event(scn, CE_state->id, event_type,
					      (union ce_srng_desc *)src_desc,
					      per_transfer_context,
					      src_ring->write_index, nbytes,
					      src_ring->srng_ctx);

		src_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	}
	Q_TARGET_ACCESS_END(scn);
	return status;
}

static int
ce_sendlist_send_srng(struct CE_handle *copyeng,
		      void *per_transfer_context,
		      struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	int status = -ENOMEM;
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int num_items = sl->num_items;
	unsigned int sw_index;
	unsigned int write_index;
	struct hif_softc *scn = CE_state->scn;

	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx, false) >=
	    num_items) {
		struct ce_sendlist_item *item;
		int i;

		/* handle all but the last item uniformly */
		for (i = 0; i < num_items - 1; i++) {
			item = &sl->item[i];
			/* TBDXXX: Support extensible sendlist_types? */
			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
			status = ce_send_nolock_srng(copyeng,
						     CE_SENDLIST_ITEM_CTXT,
						     (qdf_dma_addr_t)item->data,
						     item->u.nbytes, transfer_id,
						     item->flags | CE_SEND_FLAG_GATHER,
						     item->user_flags);
			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		}
		/* provide valid context pointer for final item */
		item = &sl->item[i];
		/* TBDXXX: Support extensible sendlist_types? */
		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
		status = ce_send_nolock_srng(copyeng, per_transfer_context,
					     (qdf_dma_addr_t)item->data,
					     item->u.nbytes,
					     transfer_id, item->flags,
					     item->user_flags);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
					     QDF_NBUF_TX_PKT_CE);
		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
			QDF_TRACE_DEFAULT_PDEV_ID,
			(uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
			sizeof(((qdf_nbuf_t)per_transfer_context)->data), QDF_TX));
	} else {
		/*
		 * Probably not worth the additional complexity to support
		 * partial sends with continuation or notification. We expect
		 * to use large rings and small sendlists. If we can't handle
		 * the entire request at once, punt it back to the caller.
		 */
	}
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

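/*
 * Gather example (illustrative only): a two-fragment sendlist that the
 * hardware delivers into a single destination buffer with one interrupt,
 * as described in the header comment above. Populating struct ce_sendlist_s
 * directly and the fragment sizes shown are assumptions for this sketch;
 * callers would normally build the list with the sendlist helpers declared
 * in ce_api.h before calling the .ce_sendlist_send op.
 */
#if 0	/* sketch, not built */
static void ce_srng_gather_sketch(struct CE_handle *copyeng, qdf_nbuf_t nbuf,
				  qdf_dma_addr_t hdr_paddr,
				  qdf_dma_addr_t payload_paddr)
{
	struct ce_sendlist_s sl = { .num_items = 2 };

	sl.item[0].send_type = CE_SIMPLE_BUFFER_TYPE;
	sl.item[0].data = hdr_paddr;
	sl.item[0].u.nbytes = 32;	/* assumed header length */

	sl.item[1].send_type = CE_SIMPLE_BUFFER_TYPE;
	sl.item[1].data = payload_paddr;
	sl.item[1].u.nbytes = 1500;	/* assumed payload length */

	ce_sendlist_send_srng(copyeng, nbuf,
			      (struct ce_sendlist *)&sl, 0 /* transfer_id */);
}
#endif
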
#define SLOTS_PER_DATAPATH_TX 2

#ifndef AH_NEED_TX_DATA_SWAP
#define AH_NEED_TX_DATA_SWAP 0
#endif
/**
 * ce_recv_buf_enqueue_srng() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: QDF_STATUS_SUCCESS if the buffer is enqueued, error code otherwise
 */
static int
ce_recv_buf_enqueue_srng(struct CE_handle *copyeng,
			 void *per_recv_context, qdf_dma_addr_t buffer)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_dest_desc *dest_desc = NULL;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return -EIO;
	}

	if (hal_srng_access_start(scn->hal_soc, dest_ring->srng_ctx)) {
		Q_TARGET_ACCESS_END(scn);
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return QDF_STATUS_E_FAILURE;
	}

	if ((hal_srng_src_num_avail(scn->hal_soc,
				    dest_ring->srng_ctx, false) > 0)) {
		dest_desc = hal_srng_src_get_next(scn->hal_soc,
						  dest_ring->srng_ctx);

		if (!dest_desc) {
			status = QDF_STATUS_E_FAILURE;
		} else {

			CE_ADDR_COPY(dest_desc, dma_addr);

			dest_ring->per_transfer_context[write_index] =
				per_recv_context;

			/* Update Destination Ring Write Index */
			write_index = CE_RING_IDX_INCR(nentries_mask,
						       write_index);
			status = QDF_STATUS_SUCCESS;
		}
	} else {
		dest_desc = NULL;
		status = QDF_STATUS_E_FAILURE;
	}

	dest_ring->write_index = write_index;
	hal_srng_access_end(scn->hal_soc, dest_ring->srng_ctx);
	hif_record_ce_srng_desc_event(scn, CE_state->id,
				      HIF_CE_DEST_RING_BUFFER_POST,
				      (union ce_srng_desc *)dest_desc,
				      per_recv_context,
				      dest_ring->write_index, 0,
				      dest_ring->srng_ctx);

	Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
	return status;
}

/*
 * Guts of ce_recv_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
static unsigned int
ce_recv_entries_done_nolock_srng(struct hif_softc *scn,
				 struct CE_state *CE_state)
{
	struct CE_ring_state *status_ring = CE_state->status_ring;

	return hal_srng_dst_num_valid(scn->hal_soc,
				      status_ring->srng_ctx, false);
}

/*
 * Guts of ce_send_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
static unsigned int
ce_send_entries_done_nolock_srng(struct hif_softc *scn,
				 struct CE_state *CE_state)
{
	struct CE_ring_state *src_ring = CE_state->src_ring;
	int count = 0;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx))
		return 0;

	count = hal_srng_src_done_val(scn->hal_soc, src_ring->srng_ctx);

	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return count;
}

/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static int
ce_completed_recv_next_nolock_srng(struct CE_state *CE_state,
				   void **per_CE_contextp,
				   void **per_transfer_contextp,
				   qdf_dma_addr_t *bufferp,
				   unsigned int *nbytesp,
				   unsigned int *transfer_idp,
				   unsigned int *flagsp)
{
	int status;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	struct CE_ring_state *status_ring = CE_state->status_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_dest_status_desc *dest_status;
	int nbytes;
	struct ce_srng_dest_status_desc dest_status_info;

	if (hal_srng_access_start(scn->hal_soc, status_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	dest_status = hal_srng_dst_get_next(scn->hal_soc,
					    status_ring->srng_ctx);

	if (!dest_status) {
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}
	/*
	 * By copying the dest status descriptor to local memory, we avoid
	 * an extra memory read from non-cacheable memory.
	 */
	dest_status_info = *dest_status;
	nbytes = dest_status_info.nbytes;
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	dest_status->nbytes = 0;

	*nbytesp = nbytes;
	*transfer_idp = dest_status_info.meta_data;
	*flagsp = (dest_status_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with the status ring.
	 * Get the per-transfer context from dest_ring.
	 */
	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	dest_ring->per_transfer_context[sw_index] = 0; /* sanity */

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	status = QDF_STATUS_SUCCESS;

done:
	hal_srng_access_end(scn->hal_soc, status_ring->srng_ctx);

	if (status == QDF_STATUS_SUCCESS) {
		hif_record_ce_srng_desc_event(scn, CE_state->id,
					      HIF_CE_DEST_RING_BUFFER_REAP,
					      NULL,
					      dest_ring->
					      per_transfer_context[sw_index],
					      dest_ring->sw_index, nbytes,
					      dest_ring->srng_ctx);

		hif_record_ce_srng_desc_event(scn, CE_state->id,
					      HIF_CE_DEST_STATUS_RING_REAP,
					      (union ce_srng_desc *)dest_status,
					      NULL,
					      -1, 0,
					      status_ring->srng_ctx);
	}

	return status;
}

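/*
 * Completion-drain sketch (illustrative only): how a receive interrupt
 * handler could reap completed buffers with the nolock variant above.
 * The assumption here is that the caller already holds
 * CE_state->ce_index_lock, which is what the "nolock" suffix implies;
 * the handler name and the processing callback are placeholders.
 */
#if 0	/* sketch, not built */
static void ce_srng_drain_recv_sketch(struct CE_state *CE_state)
{
	void *transfer_ctx;
	qdf_dma_addr_t buf;
	unsigned int nbytes, id, flags;

	while (ce_completed_recv_next_nolock_srng(CE_state, NULL,
						  &transfer_ctx, &buf,
						  &nbytes, &id, &flags) ==
	       QDF_STATUS_SUCCESS) {
		/* hand the filled buffer to the upper layer (placeholder) */
		process_rx_buffer(transfer_ctx, nbytes, flags);
	}
}
#endif
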
static QDF_STATUS
ce_revoke_recv_next_srng(struct CE_handle *copyeng,
			 void **per_CE_contextp,
			 void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int sw_index;

	if (!dest_ring)
		return QDF_STATUS_E_FAILURE;

	sw_index = dest_ring->sw_index;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with the status ring.
	 * Get the per-transfer context from dest_ring.
	 */
	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	if (!dest_ring->per_transfer_context[sw_index])
		return QDF_STATUS_E_FAILURE;

	/* provide end condition */
	dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(dest_ring->nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	return QDF_STATUS_SUCCESS;
}

/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static int
ce_completed_send_next_nolock_srng(struct CE_state *CE_state,
				   void **per_CE_contextp,
				   void **per_transfer_contextp,
				   qdf_dma_addr_t *bufferp,
				   unsigned int *nbytesp,
				   unsigned int *transfer_idp,
				   unsigned int *sw_idx,
				   unsigned int *hw_idx,
				   uint32_t *toeplitz_hash_result)
{
	int status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int swi = src_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_src_desc *src_desc;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		return status;
	}

	src_desc = hal_srng_src_reap_next(scn->hal_soc, src_ring->srng_ctx);
	if (src_desc) {
		hif_record_ce_srng_desc_event(scn, CE_state->id,
					      HIF_TX_DESC_COMPLETION,
					      (union ce_srng_desc *)src_desc,
					      src_ring->
					      per_transfer_context[swi],
					      swi, src_desc->nbytes,
					      src_ring->srng_ctx);

		/* Return data from completed source descriptor */
		*bufferp = (qdf_dma_addr_t)
			(((uint64_t)(src_desc)->buffer_addr_lo +
			  ((uint64_t)((src_desc)->buffer_addr_hi &
				      0xFF) << 32)));
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		/* sw_index is used more like read index */
		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		src_ring->per_transfer_context[sw_index] = 0; /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}
	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return status;
}

/* NB: Modelled after ce_completed_send_next */
static QDF_STATUS
ce_cancel_send_next_srng(struct CE_handle *copyeng,
			 void **per_CE_contextp,
			 void **per_transfer_contextp,
			 qdf_dma_addr_t *bufferp,
			 unsigned int *nbytesp,
			 unsigned int *transfer_idp,
			 uint32_t *toeplitz_hash_result)
{
	struct CE_state *CE_state;
	int status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	struct hif_softc *scn;
	struct ce_srng_src_desc *src_desc;

	CE_state = (struct CE_state *)copyeng;
	src_ring = CE_state->src_ring;
	if (!src_ring)
		return QDF_STATUS_E_FAILURE;

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	scn = CE_state->scn;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		return status;
	}

	src_desc = hal_srng_src_pending_reap_next(scn->hal_soc,
						  src_ring->srng_ctx);
	if (src_desc) {
		/* Return data from completed source descriptor */
		*bufferp = (qdf_dma_addr_t)
			(((uint64_t)(src_desc)->buffer_addr_lo +
			  ((uint64_t)((src_desc)->buffer_addr_hi &
				      0xFF) << 32)));
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		/* sw_index is used more like read index */
		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		src_ring->per_transfer_context[sw_index] = 0; /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}
	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return status;
}

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with target_lock held.
 */
static void
ce_per_engine_handler_adjust_srng(struct CE_state *CE_state,
				  int disable_copy_compl_intr)
{
}

static bool ce_check_int_watermark_srng(struct CE_state *CE_state,
					unsigned int *flags)
{
	/* TODO */
	return false;
}

static uint32_t ce_get_desc_size_srng(uint8_t ring_type)
{
	switch (ring_type) {
	case CE_RING_SRC:
		return sizeof(struct ce_srng_src_desc);
	case CE_RING_DEST:
		return sizeof(struct ce_srng_dest_desc);
	case CE_RING_STATUS:
		return sizeof(struct ce_srng_dest_status_desc);
	default:
		return 0;
	}
	return 0;
}

static void ce_srng_msi_ring_params_setup(struct hif_softc *scn, uint32_t ce_id,
					  struct hal_srng_params *ring_params)
{
	uint32_t addr_low;
	uint32_t addr_high;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	int ret;

	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);

	/* msi config not found */
	if (ret)
		return;

	pld_get_msi_address(scn->qdf_dev->dev, &addr_low, &addr_high);

	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (ce_id % msi_data_count) + msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;

	HIF_DBG("%s: ce_id %d, msi_addr %pK, msi_data %d", __func__, ce_id,
		(void *)ring_params->msi_addr, ring_params->msi_data);
}

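/*
 * Worked example of the modulo assignment above (values assumed): with
 * msi_data_start = 1 and msi_data_count = 8 returned by
 * pld_get_user_msi_assignment(), CE 10 gets msi_data = (10 % 8) + 1 = 3,
 * so copy engines share the available MSI vectors round-robin.
 */
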
static void ce_srng_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				   struct CE_ring_state *src_ring,
				   struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};

	hif_debug("%s: ce_id %d", __func__, ce_id);

	ring_params.ring_base_paddr = src_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = src_ring->base_addr_owner_space;
	ring_params.num_entries = src_ring->nentries;
	/*
	 * The minimum increment for the timer is 8us
	 * A default value of 0 disables the timer
	 * A valid default value caused continuous interrupts to
	 * fire with MSI enabled. Need to revisit usage of the timer
	 */

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);

		ring_params.intr_timer_thres_us = 0;
		ring_params.intr_batch_cntr_thres_entries = 1;
	}

	src_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_SRC, ce_id, 0,
					    &ring_params);
}

/**
 * ce_srng_initialize_dest_timer_interrupt_war() - war initialization
 * @dest_ring: ring being initialized
 * @ring_params: pointer to initialized parameters
 *
 * For Napier & Hawkeye v1, the status ring timer interrupts do not work.
 * As a workaround, the host configures the destination rings to be a proxy
 * for work needing to be done.
 *
 * The interrupts are set up such that if the destination ring is less than
 * fully posted, there is likely undone work for the status ring that the
 * host should process.
 *
 * There is a timing bug in srng based copy engines such that a fully posted
 * srng based copy engine has 2 empty entries instead of just one. The copy
 * engine data structures work with 1 empty entry, but the software
 * frequently fails to post the last entry due to the race condition.
 */
static void ce_srng_initialize_dest_timer_interrupt_war(
		struct CE_ring_state *dest_ring,
		struct hal_srng_params *ring_params)
{
	int num_buffers_when_fully_posted = dest_ring->nentries - 2;

	ring_params->low_threshold = num_buffers_when_fully_posted - 1;
	ring_params->intr_timer_thres_us = 1024;
	ring_params->intr_batch_cntr_thres_entries = 0;
	ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
}

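/*
 * Worked example (ring size assumed): for a 512-entry destination ring the
 * war above yields num_buffers_when_fully_posted = 510 and
 * low_threshold = 509, so the low-threshold interrupt fires as soon as the
 * ring drops below its fully-posted level, with the 1024 us timer as a
 * backstop.
 */
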
static void ce_srng_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				    struct CE_ring_state *dest_ring,
				    struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};
	bool status_ring_timer_thresh_work_arround = true;

	HIF_INFO("%s: ce_id %d", __func__, ce_id);

	ring_params.ring_base_paddr = dest_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = dest_ring->base_addr_owner_space;
	ring_params.num_entries = dest_ring->nentries;
	ring_params.max_buffer_length = attr->src_sz_max;

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
		if (status_ring_timer_thresh_work_arround) {
			ce_srng_initialize_dest_timer_interrupt_war(
					dest_ring, &ring_params);
		} else {
			/* normal behavior for future chips */
			ring_params.low_threshold = dest_ring->nentries >> 3;
			ring_params.intr_timer_thres_us = 100000;
			ring_params.intr_batch_cntr_thres_entries = 0;
			ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
		}
	}

	/* Dest ring is also source ring */
	dest_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST, ce_id, 0,
					     &ring_params);
}

static void ce_srng_status_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				      struct CE_ring_state *status_ring,
				      struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};

	HIF_INFO("%s: ce_id %d", __func__, ce_id);

	ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);

	ring_params.ring_base_paddr = status_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = status_ring->base_addr_owner_space;
	ring_params.num_entries = status_ring->nentries;

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ring_params.intr_timer_thres_us = 0x1000;
		ring_params.intr_batch_cntr_thres_entries = 0x1;
	}

	status_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST_STATUS,
					       ce_id, 0, &ring_params);
}

static int ce_ring_setup_srng(struct hif_softc *scn, uint8_t ring_type,
			      uint32_t ce_id, struct CE_ring_state *ring,
			      struct CE_attr *attr)
{
	switch (ring_type) {
	case CE_RING_SRC:
		ce_srng_src_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_DEST:
		ce_srng_dest_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_STATUS:
		ce_srng_status_ring_setup(scn, ce_id, ring, attr);
		break;
	default:
		qdf_assert(0);
		break;
	}

	return 0;
}

static void ce_construct_shadow_config_srng(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int ce_id;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (hif_state->host_ce_config[ce_id].src_nentries)
			hal_set_one_shadow_config(scn->hal_soc,
						  CE_SRC, ce_id);

		if (hif_state->host_ce_config[ce_id].dest_nentries) {
			hal_set_one_shadow_config(scn->hal_soc,
						  CE_DST, ce_id);

			hal_set_one_shadow_config(scn->hal_soc,
						  CE_DST_STATUS, ce_id);
		}
	}
}

static void ce_prepare_shadow_register_v2_cfg_srng(struct hif_softc *scn,
		struct pld_shadow_reg_v2_cfg **shadow_config,
		int *num_shadow_registers_configured)
{
	if (!scn->hal_soc) {
		HIF_ERROR("%s: hal not initialized: not initializing shadow config",
			  __func__);
		return;
	}

	hal_get_shadow_config(scn->hal_soc, shadow_config,
			      num_shadow_registers_configured);

	if (*num_shadow_registers_configured != 0) {
		HIF_ERROR("%s: hal shadow register configuration already constructed",
			  __func__);

		/* return with original configuration */
		return;
	}

	hal_construct_shadow_config(scn->hal_soc);
	ce_construct_shadow_config_srng(scn);

	/* get updated configuration */
	hal_get_shadow_config(scn->hal_soc, shadow_config,
			      num_shadow_registers_configured);
}

static struct ce_ops ce_service_srng = {
	.ce_get_desc_size = ce_get_desc_size_srng,
	.ce_ring_setup = ce_ring_setup_srng,
	.ce_sendlist_send = ce_sendlist_send_srng,
	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_srng,
	.ce_revoke_recv_next = ce_revoke_recv_next_srng,
	.ce_cancel_send_next = ce_cancel_send_next_srng,
	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_srng,
	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_srng,
	.ce_send_nolock = ce_send_nolock_srng,
	.watermark_int = ce_check_int_watermark_srng,
	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_srng,
	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_srng,
	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_srng,
	.ce_prepare_shadow_register_v2_cfg =
		ce_prepare_shadow_register_v2_cfg_srng,
};

struct ce_ops *ce_services_srng(void)
{
	return &ce_service_srng;
}
qdf_export_symbol(ce_services_srng);

void ce_service_srng_init(void)
{
	ce_service_register_module(CE_SVC_SRNG, &ce_services_srng);
}