/*
 * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "ce_api.h"
#include "ce_internal.h"
#include "ce_main.h"
#include "ce_reg.h"
#include "hif.h"
#include "hif_debug.h"
#include "hif_io32.h"
#include "qdf_lock.h"
#include "hif_main.h"
#include "hif_napi.h"
#include "qdf_module.h"
#include "regtable.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem -- than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 * Target TX hash result toeplitz_hash_result
 */
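
/*
 * Illustrative only, not part of the driver: a minimal sketch of how a
 * host-side client typically drives a CE pair through the entry points
 * declared in ce_api.h. ce_send() and ce_recv_buf_enqueue() are assumed
 * here (see ce_api.h for the exact prototypes), and rx_copyeng/tx_copyeng,
 * nbuf, paddr, nbytes and transfer_id are hypothetical caller state.
 *
 *	// Destination side: keep anonymous receive buffers posted
 *	ce_recv_buf_enqueue(rx_copyeng, (void *)nbuf,
 *			    QDF_NBUF_CB_PADDR(nbuf));
 *
 *	// Source side: post one buffer; the per-transfer context (nbuf)
 *	// is echoed back to the registered send-complete callback
 *	ce_send(tx_copyeng, (void *)nbuf, paddr, nbytes, transfer_id, 0, 0);
 */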

/* NB: Modeled after ce_completed_send_next */
/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
#define CE_WM_SHFT 1

#ifdef WLAN_FEATURE_FASTPATH
#ifdef QCA_WIFI_3_0
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
		      uint64_t dma_addr,
		      uint32_t user_flags)
{
	shadow_src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0x1F);
	user_flags |= shadow_src_desc->buffer_addr_hi;
	memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
	       sizeof(uint32_t));
}
#else
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
		      uint64_t dma_addr,
		      uint32_t user_flags)
{
}
#endif

#define SLOTS_PER_DATAPATH_TX 2

/**
 * ce_send_fast() - CE layer Tx buffer posting function
 * @copyeng: copy engine handle
 * @msdu: msdu to be sent
 * @transfer_id: transfer_id
 * @download_len: packet download length
 *
 * Assumption: called once per msdu; callers typically iterate over an
 * array of MSDUs.
 * Function:
 * For the given msdu
 * 1. Check the number of available entries
 * 2. Create the source ring entries (allocated in consistent memory)
 * 3. Write the updated index to h/w
 *
 * Return: number of packets that could be sent (0 or 1)
 */
int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
		 unsigned int transfer_id, uint32_t download_len)
{
	struct CE_state *ce_state = (struct CE_state *)copyeng;
	struct hif_softc *scn = ce_state->scn;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct CE_ring_state *src_ring = ce_state->src_ring;
	u_int32_t ctrl_addr = ce_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	unsigned int frag_len;
	uint64_t dma_addr;
	uint32_t user_flags;
	enum hif_ce_event_type type = FAST_TX_SOFTWARE_INDEX_UPDATE;
	bool ok_to_send = true;

	/*
	 * Create a log assuming the call will go through, and if not, we would
	 * add an error trace as well.
	 * Please add the same failure log for any additional error paths.
	 */
	DPTRACE(qdf_dp_trace(msdu,
			     QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
			     QDF_TRACE_DEFAULT_PDEV_ID,
			     qdf_nbuf_data_addr(msdu),
			     sizeof(qdf_nbuf_data(msdu)), QDF_TX));

	qdf_spin_lock_bh(&ce_state->ce_index_lock);

	/*
	 * Request a runtime PM resume if the bus has already suspended;
	 * until the resume completes, make sure there is no PCIe link
	 * access.
	 */
	if (hif_pm_runtime_get(hif_hdl,
			       RTPM_ID_CE_SEND_FAST) != 0)
		ok_to_send = false;

	if (ok_to_send) {
		Q_TARGET_ACCESS_BEGIN(scn);
		DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
	}

	write_index = src_ring->write_index;
	sw_index = src_ring->sw_index;
	hif_record_ce_desc_event(scn, ce_state->id,
				 FAST_TX_SOFTWARE_INDEX_UPDATE,
				 NULL, NULL, sw_index, 0);

	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1)
			 < SLOTS_PER_DATAPATH_TX)) {
		hif_err_rl("Source ring full, required %d, available %d",
			   SLOTS_PER_DATAPATH_TX,
			   CE_RING_DELTA(nentries_mask, write_index,
					 sw_index - 1));
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		if (ok_to_send)
			Q_TARGET_ACCESS_END(scn);
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);

		DPTRACE(qdf_dp_trace(NULL,
				     QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD,
				     QDF_TRACE_DEFAULT_PDEV_ID,
				     NULL, 0, QDF_TX));

		return 0;
	}

	{
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		hif_pm_runtime_get_noresume(hif_hdl, RTPM_ID_HTC);

		/*
		 * First fill out the ring descriptor for the HTC HTT frame
		 * header. These are uncached writes. Should we use a local
		 * structure instead?
		 */
		/* HTT/HTC header can be passed as an argument */
		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
		shadow_src_desc->meta_data = transfer_id;
		shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);
		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
		download_len -= shadow_src_desc->nbytes;
		/*
		 * HTC HTT header is a word stream, so byte swap if CE byte
		 * swap enabled
		 */
		shadow_src_desc->byte_swap = ((ce_state->attr_flags &
					CE_ATTR_BYTE_SWAP_DATA) != 0);
		/* For the first slot, the h/w write index is not updated yet */
		shadow_src_desc->gather = 1;
		*src_desc = *shadow_src_desc;
		/* By default we could initialize the transfer context to this
		 * value
		 */
		src_ring->per_transfer_context[write_index] =
			CE_SENDLIST_ITEM_CTXT;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
		/*
		 * Now fill out the ring descriptor for the actual data
		 * packet
		 */
		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		/*
		 * Clear packet offset for all but the first CE desc.
		 */
		user_flags &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
		shadow_src_desc->meta_data = transfer_id;

		/* get actual packet length */
		frag_len = qdf_nbuf_get_frag_len(msdu, 1);

		/* download remaining bytes of payload */
		shadow_src_desc->nbytes = download_len;
		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
		if (shadow_src_desc->nbytes > frag_len)
			shadow_src_desc->nbytes = frag_len;

		/* Data packet is a byte stream, so disable byte swap */
		shadow_src_desc->byte_swap = 0;
		/* For the last one, gather is not set */
		shadow_src_desc->gather = 0;
		*src_desc = *shadow_src_desc;
		src_ring->per_transfer_context[write_index] = msdu;

		hif_record_ce_desc_event(scn, ce_state->id, type,
					 (union ce_desc *)src_desc,
				src_ring->per_transfer_context[write_index],
					 write_index, shadow_src_desc->nbytes);

		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		DPTRACE(qdf_dp_trace(msdu,
				     QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
				     QDF_TRACE_DEFAULT_PDEV_ID,
				     qdf_nbuf_data_addr(msdu),
				     sizeof(qdf_nbuf_data(msdu)), QDF_TX));
	}

	src_ring->write_index = write_index;

	if (ok_to_send) {
		if (qdf_likely(ce_state->state == CE_RUNNING)) {
			type = FAST_TX_WRITE_INDEX_UPDATE;
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);
			Q_TARGET_ACCESS_END(scn);
		} else {
			ce_state->state = CE_PENDING;
		}
		hif_pm_runtime_put(hif_hdl, RTPM_ID_CE_SEND_FAST);
	}

	qdf_spin_unlock_bh(&ce_state->ce_index_lock);

	/* sent 1 packet */
	return 1;
}
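
/*
 * Illustrative only, not part of the driver: a minimal sketch of how a
 * caller might drain an array of MSDUs through ce_send_fast() above,
 * stopping on the first ring-full failure. msdu_array, num_msdus,
 * ce_tx_hdl and download_len are hypothetical caller state.
 *
 *	uint32_t sent = 0;
 *
 *	while (sent < num_msdus) {
 *		if (!ce_send_fast(ce_tx_hdl, msdu_array[sent],
 *				  transfer_id, download_len))
 *			break;	// source ring full, retry later
 *		sent++;
 *	}
 */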

/**
 * ce_fastpath_rx_handle() - Updates write_index and calls fastpath msg handler
 * @ce_state: handle to copy engine state
 * @cmpl_msdus: Rx msdus
 * @num_cmpls: number of Rx msdus
 * @ctrl_addr: CE control address
 *
 * Return: None
 */
static void ce_fastpath_rx_handle(struct CE_state *ce_state,
				  qdf_nbuf_t *cmpl_msdus, uint32_t num_cmpls,
				  uint32_t ctrl_addr)
{
	struct hif_softc *scn = ce_state->scn;
	struct CE_ring_state *dest_ring = ce_state->dest_ring;
	uint32_t nentries_mask = dest_ring->nentries_mask;
	uint32_t write_index;

	qdf_spin_unlock(&ce_state->ce_index_lock);
	ce_state->fastpath_handler(ce_state->context, cmpl_msdus, num_cmpls);
	qdf_spin_lock(&ce_state->ce_index_lock);

	/* Update Destination Ring Write Index */
	write_index = dest_ring->write_index;
	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, num_cmpls);

	hif_record_ce_desc_event(scn, ce_state->id,
				 FAST_RX_WRITE_INDEX_UPDATE,
				 NULL, NULL, write_index, 0);

	CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
	dest_ring->write_index = write_index;
}

/**
 * ce_per_engine_service_fast() - CE handler routine to service fastpath msgs
 * @scn: hif_context
 * @ce_id: Copy engine ID
 * 1) Go through the CE ring, and find the completions
 * 2) For valid completions retrieve context (nbuf) for per_transfer_context[]
 * 3) Unmap buffer & accumulate in an array.
 * 4) Call message handler when array is full or when exiting the handler
 *
 * Return: void
 */

void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
{
	struct CE_state *ce_state = scn->ce_id_to_state[ce_id];
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct CE_ring_state *dest_ring = ce_state->dest_ring;
	struct CE_dest_desc *dest_ring_base =
		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;

	uint32_t nentries_mask = dest_ring->nentries_mask;
	uint32_t sw_index = dest_ring->sw_index;
	uint32_t nbytes;
	qdf_nbuf_t nbuf;
	dma_addr_t paddr;
	struct CE_dest_desc *dest_desc;
	qdf_nbuf_t cmpl_msdus[MSG_FLUSH_NUM];
	uint32_t ctrl_addr = ce_state->ctrl_addr;
	uint32_t nbuf_cmpl_idx = 0;
	unsigned int more_comp_cnt = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct ce_ops *ce_services = hif_state->ce_services;

more_data:
	for (;;) {
		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base,
						 sw_index);

		/*
		 * The following 2 reads are from non-cached memory
		 */
		nbytes = dest_desc->nbytes;

		/* If completion is invalid, break */
		if (qdf_unlikely(nbytes == 0))
			break;

		/*
		 * Build the nbuf list from valid completions
		 */
		nbuf = dest_ring->per_transfer_context[sw_index];

		/*
		 * No lock is needed here, since this is the only thread
		 * that accesses the sw_index
		 */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);

		/*
		 * CAREFUL : Uncached write, but still less expensive,
		 * since most modern caches use "write-combining" to
		 * flush multiple cache-writes all at once.
		 */
		dest_desc->nbytes = 0;

		/*
		 * Per our understanding this is not required on our
		 * platform, since we are doing the same cache
		 * invalidation operation on the same buffer twice in
		 * succession, without any modification to this buffer
		 * by the CPU in between.
		 * However, this code with 2 syncs in succession has
		 * been undergoing some testing at a customer site,
		 * and seemed to be showing no problems so far. Would
		 * like to validate from the customer that this line
		 * is really not required, before we remove this line
		 * completely.
		 */
		paddr = QDF_NBUF_CB_PADDR(nbuf);

		qdf_mem_dma_sync_single_for_cpu(scn->qdf_dev, paddr,
						(skb_end_pointer(nbuf) -
						 (nbuf)->data),
						DMA_FROM_DEVICE);

		qdf_nbuf_put_tail(nbuf, nbytes);

		qdf_assert_always(nbuf->data);

		QDF_NBUF_CB_RX_CTX_ID(nbuf) =
			hif_get_rx_ctx_id(ce_state->id, hif_hdl);
		cmpl_msdus[nbuf_cmpl_idx++] = nbuf;

		/*
		 * We are not posting the buffers back; instead we
		 * reuse the buffers.
		 */
		if (nbuf_cmpl_idx == scn->ce_service_max_rx_ind_flush) {
			hif_record_ce_desc_event(scn, ce_state->id,
						 FAST_RX_SOFTWARE_INDEX_UPDATE,
						 NULL, NULL, sw_index, 0);
			dest_ring->sw_index = sw_index;
			ce_fastpath_rx_handle(ce_state, cmpl_msdus,
					      nbuf_cmpl_idx, ctrl_addr);

			ce_state->receive_count += nbuf_cmpl_idx;
			if (qdf_unlikely(hif_ce_service_should_yield(
						scn, ce_state))) {
				ce_state->force_break = 1;
				qdf_atomic_set(&ce_state->rx_pending, 1);
				return;
			}

			nbuf_cmpl_idx = 0;
			more_comp_cnt = 0;
		}
	}

	hif_record_ce_desc_event(scn, ce_state->id,
				 FAST_RX_SOFTWARE_INDEX_UPDATE,
				 NULL, NULL, sw_index, 0);

	dest_ring->sw_index = sw_index;

	/*
	 * If there are not enough completions to fill the array,
	 * just call the message handler here
	 */
	if (nbuf_cmpl_idx) {
		ce_fastpath_rx_handle(ce_state, cmpl_msdus,
				      nbuf_cmpl_idx, ctrl_addr);

		ce_state->receive_count += nbuf_cmpl_idx;
		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
			ce_state->force_break = 1;
			qdf_atomic_set(&ce_state->rx_pending, 1);
			return;
		}

		/* check for more packets after upper layer processing */
		nbuf_cmpl_idx = 0;
		more_comp_cnt = 0;
		goto more_data;
	}

	hif_update_napi_max_poll_time(ce_state, ce_id, qdf_get_cpu());

	qdf_atomic_set(&ce_state->rx_pending, 0);
	if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
		CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
					   HOST_IS_COPY_COMPLETE_MASK);
	} else {
		hif_err_rl("%s: target access is not allowed", __func__);
		return;
	}

	if (ce_services->ce_recv_entries_done_nolock(scn, ce_state)) {
		if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
			goto more_data;
		} else {
			HIF_ERROR("%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
				  __func__, nentries_mask,
				  ce_state->dest_ring->sw_index,
				  CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr));
		}
	}
#ifdef NAPI_YIELD_BUDGET_BASED
	/*
	 * Caution: Before you modify this code, please refer to the
	 * hif_napi_poll function to understand how napi_complete gets
	 * called and make the necessary changes. Force break has to be
	 * done till WIN disables the interrupt at source.
	 */
	ce_state->force_break = 1;
#endif
}

/**
 * ce_is_fastpath_enabled() - returns true if fastpath mode is enabled
 * @scn: Handle to HIF context
 *
 * Return: true if fastpath is enabled else false.
 */
static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return scn->fastpath_mode_on;
}
#else
void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
{
}

static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return false;
}
#endif /* WLAN_FEATURE_FASTPATH */

static int
ce_send_nolock_legacy(struct CE_handle *copyeng,
		      void *per_transfer_context,
		      qdf_dma_addr_t buffer,
		      uint32_t nbytes,
		      uint32_t transfer_id,
		      uint32_t flags,
		      uint32_t user_flags)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return QDF_STATUS_E_FAILURE;
	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		Q_TARGET_ACCESS_END(scn);
		return QDF_STATUS_E_FAILURE;
	}
	{
		enum hif_ce_event_type event_type;
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		/* Update low 32 bits source descriptor address */
		shadow_src_desc->buffer_addr =
			(uint32_t)(dma_addr & 0xFFFFFFFF);
#ifdef QCA_WIFI_3_0
		shadow_src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0x1F);
		user_flags |= shadow_src_desc->buffer_addr_hi;
		memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
		       sizeof(uint32_t));
#endif
		shadow_src_desc->target_int_disable = 0;
		shadow_src_desc->host_int_disable = 0;

		shadow_src_desc->meta_data = transfer_id;

		/*
		 * Set the swap bit if:
		 * typical sends on this CE are swapped (host is big-endian)
		 * and this send doesn't disable the swapping
		 * (data is not bytestream)
		 */
		shadow_src_desc->byte_swap =
			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
		shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
		shadow_src_desc->nbytes = nbytes;
		ce_validate_nbytes(nbytes, CE_state);

		*src_desc = *shadow_src_desc;

		src_ring->per_transfer_context[write_index] =
			per_transfer_context;

		/* Update Source Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		/* WORKAROUND */
		if (shadow_src_desc->gather) {
			event_type = HIF_TX_GATHER_DESC_POST;
		} else if (qdf_unlikely(CE_state->state != CE_RUNNING)) {
			event_type = HIF_TX_DESC_SOFTWARE_POST;
			CE_state->state = CE_PENDING;
		} else {
			event_type = HIF_TX_DESC_POST;
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);
		}

		/* src_ring->write_index hasn't been updated even though
		 * the register has already been written to.
		 */
		hif_record_ce_desc_event(scn, CE_state->id, event_type,
			(union ce_desc *)shadow_src_desc, per_transfer_context,
			src_ring->write_index, nbytes);

		src_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	}
	Q_TARGET_ACCESS_END(scn);
	return status;
}

static int
ce_sendlist_send_legacy(struct CE_handle *copyeng,
			void *per_transfer_context,
			struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	int status = -ENOMEM;
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int num_items = sl->num_items;
	unsigned int sw_index;
	unsigned int write_index;
	struct hif_softc *scn = CE_state->scn;

	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

	qdf_spin_lock_bh(&CE_state->ce_index_lock);

	if (CE_state->scn->fastpath_mode_on && CE_state->htt_tx_data &&
	    Q_TARGET_ACCESS_BEGIN(scn) == 0) {
		src_ring->sw_index = CE_SRC_RING_READ_IDX_GET_FROM_DDR(
			scn, CE_state->ctrl_addr);
		Q_TARGET_ACCESS_END(scn);
	}

	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
	    num_items) {
		struct ce_sendlist_item *item;
		int i;

		/* handle all but the last item uniformly */
		for (i = 0; i < num_items - 1; i++) {
			item = &sl->item[i];
			/* TBDXXX: Support extensible sendlist_types? */
			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
			status = ce_send_nolock_legacy(copyeng,
				CE_SENDLIST_ITEM_CTXT,
				(qdf_dma_addr_t)item->data,
				item->u.nbytes, transfer_id,
				item->flags | CE_SEND_FLAG_GATHER,
				item->user_flags);
			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		}
		/* provide valid context pointer for final item */
		item = &sl->item[i];
		/* TBDXXX: Support extensible sendlist_types? */
		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
		status = ce_send_nolock_legacy(copyeng, per_transfer_context,
					       (qdf_dma_addr_t) item->data,
					       item->u.nbytes,
					       transfer_id, item->flags,
					       item->user_flags);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
					     QDF_NBUF_TX_PKT_CE);
		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
			QDF_TRACE_DEFAULT_PDEV_ID,
			(uint8_t *)&(((qdf_nbuf_t)per_transfer_context)->data),
			sizeof(((qdf_nbuf_t)per_transfer_context)->data),
			QDF_TX));
	} else {
		/*
		 * Probably not worth the additional complexity to support
		 * partial sends with continuation or notification. We expect
		 * to use large rings and small sendlists. If we can't handle
		 * the entire request at once, punt it back to the caller.
		 */
	}
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

/**
 * ce_recv_buf_enqueue_legacy() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: 0 if the buffer is enqueued
 */
static int
ce_recv_buf_enqueue_legacy(struct CE_handle *copyeng,
			   void *per_recv_context, qdf_dma_addr_t buffer)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return -EIO;
	}

	if ((CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) ||
	    (ce_is_fastpath_enabled(scn) && CE_state->htt_rx_data)) {
		struct CE_dest_desc *dest_ring_base =
			(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
		struct CE_dest_desc *dest_desc =
			CE_DEST_RING_TO_DESC(dest_ring_base, write_index);

		/* Update low 32 bit destination descriptor */
		dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
#ifdef QCA_WIFI_3_0
		dest_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0x1F);
#endif
		dest_desc->nbytes = 0;

		dest_ring->per_transfer_context[write_index] =
			per_recv_context;

		hif_record_ce_desc_event(scn, CE_state->id,
					 HIF_RX_DESC_POST,
					 (union ce_desc *)dest_desc,
					 per_recv_context,
					 write_index, 0);

		/* Update Destination Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
		if (write_index != sw_index) {
			CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
			dest_ring->write_index = write_index;
		}
		status = QDF_STATUS_SUCCESS;
	} else
		status = QDF_STATUS_E_FAILURE;

	Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
	return status;
}

static unsigned int
ce_send_entries_done_nolock_legacy(struct hif_softc *scn,
				   struct CE_state *CE_state)
{
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int read_index;

	sw_index = src_ring->sw_index;
	read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);

	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
}

static unsigned int
ce_recv_entries_done_nolock_legacy(struct hif_softc *scn,
				   struct CE_state *CE_state)
{
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int read_index;

	sw_index = dest_ring->sw_index;
	read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);

	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
}

static int
ce_completed_recv_next_nolock_legacy(struct CE_state *CE_state,
				     void **per_CE_contextp,
				     void **per_transfer_contextp,
				     qdf_dma_addr_t *bufferp,
				     unsigned int *nbytesp,
				     unsigned int *transfer_idp,
				     unsigned int *flagsp)
{
	int status;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct CE_dest_desc *dest_ring_base =
		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
	struct CE_dest_desc *dest_desc =
		CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
	int nbytes;
	struct CE_dest_desc dest_desc_info;
	/*
	 * By copying the dest_desc_info element to local memory, we could
	 * avoid an extra memory read from non-cacheable memory.
	 */
	dest_desc_info = *dest_desc;
	nbytes = dest_desc_info.nbytes;
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_COMPLETION,
				 (union ce_desc *)dest_desc,
				 dest_ring->per_transfer_context[sw_index],
				 sw_index, 0);

	dest_desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
	*nbytesp = nbytes;
	*transfer_idp = dest_desc_info.meta_data;
	*flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	if (per_transfer_contextp) {
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];
	}
	dest_ring->per_transfer_context[sw_index] = 0; /* sanity */

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	status = QDF_STATUS_SUCCESS;

done:
	return status;
}

/* NB: Modeled after ce_completed_recv_next_nolock */
static QDF_STATUS
ce_revoke_recv_next_legacy(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   qdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state;
	struct CE_ring_state *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	QDF_STATUS status;
	struct hif_softc *scn;

	CE_state = (struct CE_state *)copyeng;
	dest_ring = CE_state->dest_ring;
	if (!dest_ring)
		return QDF_STATUS_E_FAILURE;

	scn = CE_state->scn;
	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct CE_dest_desc *dest_ring_base =
			(struct CE_dest_desc *)dest_ring->
				base_addr_owner_space;
		struct CE_dest_desc *dest_desc =
			CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->recv_context;

		if (per_transfer_contextp) {
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];
		}
		dest_ring->per_transfer_context[sw_index] = 0; /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	} else {
		status = QDF_STATUS_E_FAILURE;
	}
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return status;
}

/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static int
ce_completed_send_next_nolock_legacy(struct CE_state *CE_state,
				     void **per_CE_contextp,
				     void **per_transfer_contextp,
				     qdf_dma_addr_t *bufferp,
				     unsigned int *nbytesp,
				     unsigned int *transfer_idp,
				     unsigned int *sw_idx,
				     unsigned int *hw_idx,
				     uint32_t *toeplitz_hash_result)
{
	int status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int read_index;
	struct hif_softc *scn = CE_state->scn;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return QDF_STATUS_E_FAILURE;
		src_ring->hw_index =
			CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return QDF_STATUS_E_FAILURE;
	}
	read_index = src_ring->hw_index;

	if (sw_idx)
		*sw_idx = sw_index;

	if (hw_idx)
		*hw_idx = read_index;

	if ((read_index != sw_index) && (read_index != 0xffffffff)) {
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, sw_index);
#ifdef QCA_WIFI_3_0
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
#endif
		hif_record_ce_desc_event(scn, CE_state->id,
				HIF_TX_DESC_COMPLETION,
				(union ce_desc *)shadow_src_desc,
				src_ring->per_transfer_context[sw_index],
				sw_index, shadow_src_desc->nbytes);

		/* Return data from completed source descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
		*nbytesp = shadow_src_desc->nbytes;
		*transfer_idp = shadow_src_desc->meta_data;
#ifdef QCA_WIFI_3_0
		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
#else
		*toeplitz_hash_result = 0;
#endif
		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		if (per_transfer_contextp) {
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];
		}
		src_ring->per_transfer_context[sw_index] = 0; /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}

	return status;
}

static QDF_STATUS
ce_cancel_send_next_legacy(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   qdf_dma_addr_t *bufferp,
			   unsigned int *nbytesp,
			   unsigned int *transfer_idp,
			   uint32_t *toeplitz_hash_result)
{
	struct CE_state *CE_state;
	struct CE_ring_state *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	QDF_STATUS status;
	struct hif_softc *scn;

	CE_state = (struct CE_state *)copyeng;
	src_ring = CE_state->src_ring;
	if (!src_ring)
		return QDF_STATUS_E_FAILURE;

	scn = CE_state->scn;
	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);

		/* Return data from completed source descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
#ifdef QCA_WIFI_3_0
		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
#else
		*toeplitz_hash_result = 0;
#endif

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		if (per_transfer_contextp) {
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];
		}
		src_ring->per_transfer_context[sw_index] = 0; /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	} else {
		status = QDF_STATUS_E_FAILURE;
	}
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return status;
}

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with target_lock held.
 */
static void
ce_per_engine_handler_adjust_legacy(struct CE_state *CE_state,
				    int disable_copy_compl_intr)
{
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	CE_state->disable_copy_compl_intr = disable_copy_compl_intr;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
		hif_err_rl("%s: target access is not allowed", __func__);
		return;
	}

	if ((!disable_copy_compl_intr) &&
	    (CE_state->send_cb || CE_state->recv_cb))
		CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
	else
		CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);

	if (CE_state->watermark_cb)
		CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
	else
		CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
	Q_TARGET_ACCESS_END(scn);
}

static void ce_legacy_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				     struct CE_ring_state *src_ring,
				     struct CE_attr *attr)
{
	uint32_t ctrl_addr;
	uint64_t dma_addr;

	QDF_ASSERT(ce_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(ce_id);

	src_ring->hw_index =
		CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
	src_ring->sw_index = src_ring->hw_index;
	src_ring->write_index =
		CE_SRC_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
	dma_addr = src_ring->base_addr_CE_space;
	CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
				  (uint32_t)(dma_addr & 0xFFFFFFFF));

	/* if SR_BA_ADDRESS_HIGH register exists */
	if (is_register_supported(SR_BA_ADDRESS_HIGH)) {
		uint32_t tmp;

		tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(
				scn, ctrl_addr);
		tmp &= ~0x1F;
		dma_addr = ((dma_addr >> 32) & 0x1F) | tmp;
		CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
					       ctrl_addr, (uint32_t)dma_addr);
	}
	CE_SRC_RING_SZ_SET(scn, ctrl_addr, src_ring->nentries);
	CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
#ifdef BIG_ENDIAN_HOST
	/* Enable source ring byte swap for big endian host */
	CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, src_ring->nentries);
}

static void ce_legacy_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				      struct CE_ring_state *dest_ring,
				      struct CE_attr *attr)
{
	uint32_t ctrl_addr;
	uint64_t dma_addr;

	QDF_ASSERT(ce_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(ce_id);
	dest_ring->sw_index =
		CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
	dest_ring->write_index =
		CE_DEST_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
	dma_addr = dest_ring->base_addr_CE_space;
	CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
				   (uint32_t)(dma_addr & 0xFFFFFFFF));

	/* if DR_BA_ADDRESS_HIGH exists */
	if (is_register_supported(DR_BA_ADDRESS_HIGH)) {
		uint32_t tmp;

		tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn,
						      ctrl_addr);
		tmp &= ~0x1F;
		dma_addr = ((dma_addr >> 32) & 0x1F) | tmp;
		CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
						ctrl_addr, (uint32_t)dma_addr);
	}

	CE_DEST_RING_SZ_SET(scn, ctrl_addr, dest_ring->nentries);
#ifdef BIG_ENDIAN_HOST
	/* Enable Dest ring byte swap for big endian host */
	CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, dest_ring->nentries);
}

static uint32_t ce_get_desc_size_legacy(uint8_t ring_type)
{
	switch (ring_type) {
	case CE_RING_SRC:
		return sizeof(struct CE_src_desc);
	case CE_RING_DEST:
		return sizeof(struct CE_dest_desc);
	case CE_RING_STATUS:
		qdf_assert(0);
		return 0;
	default:
		return 0;
	}

	return 0;
}

static int ce_ring_setup_legacy(struct hif_softc *scn, uint8_t ring_type,
				uint32_t ce_id, struct CE_ring_state *ring,
				struct CE_attr *attr)
{
	int status = Q_TARGET_ACCESS_BEGIN(scn);

	if (status < 0)
		goto out;

	switch (ring_type) {
	case CE_RING_SRC:
		ce_legacy_src_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_DEST:
		ce_legacy_dest_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_STATUS:
	default:
		qdf_assert(0);
		break;
	}

	Q_TARGET_ACCESS_END(scn);
out:
	return status;
}

static void ce_prepare_shadow_register_v2_cfg_legacy(struct hif_softc *scn,
			struct pld_shadow_reg_v2_cfg **shadow_config,
			int *num_shadow_registers_configured)
{
	*num_shadow_registers_configured = 0;
	*shadow_config = NULL;
}

static bool ce_check_int_watermark(struct CE_state *CE_state,
				   unsigned int *flags)
{
	uint32_t ce_int_status;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	ce_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
	if (ce_int_status & CE_WATERMARK_MASK) {
		/* Convert HW IS bits to software flags */
		*flags =
			(ce_int_status & CE_WATERMARK_MASK) >>
			CE_WM_SHFT;
		return true;
	}

	return false;
}

#ifdef HIF_CE_LOG_INFO
/**
 * ce_get_index_info_legacy(): Get CE index info
 * @scn: HIF Context
 * @ce_state: CE opaque handle
 * @info: CE info
 *
 * Return: 0 for success and non zero for failure
 */
static
int ce_get_index_info_legacy(struct hif_softc *scn, void *ce_state,
			     struct ce_index *info)
{
	struct CE_state *state = (struct CE_state *)ce_state;

	info->id = state->id;
	if (state->src_ring) {
		info->u.legacy_info.sw_index = state->src_ring->sw_index;
		info->u.legacy_info.write_index = state->src_ring->write_index;
	} else if (state->dest_ring) {
		info->u.legacy_info.sw_index = state->dest_ring->sw_index;
		info->u.legacy_info.write_index = state->dest_ring->write_index;
	}

	return 0;
}
#endif

struct ce_ops ce_service_legacy = {
	.ce_get_desc_size = ce_get_desc_size_legacy,
	.ce_ring_setup = ce_ring_setup_legacy,
	.ce_sendlist_send = ce_sendlist_send_legacy,
	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_legacy,
	.ce_revoke_recv_next = ce_revoke_recv_next_legacy,
	.ce_cancel_send_next = ce_cancel_send_next_legacy,
	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_legacy,
	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_legacy,
	.ce_send_nolock = ce_send_nolock_legacy,
	.watermark_int = ce_check_int_watermark,
	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_legacy,
	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_legacy,
	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_legacy,
	.ce_prepare_shadow_register_v2_cfg =
		ce_prepare_shadow_register_v2_cfg_legacy,
#ifdef HIF_CE_LOG_INFO
	.ce_get_index_info =
		ce_get_index_info_legacy,
#endif
};

struct ce_ops *ce_services_legacy(void)
{
	return &ce_service_legacy;
}

qdf_export_symbol(ce_services_legacy);

void ce_service_legacy_init(void)
{
	ce_service_register_module(CE_SVC_LEGACY, &ce_services_legacy);
}