/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */
27
28#include <osdep.h>
29#include "a_types.h"
30#include <athdefs.h>
31#include "osapi_linux.h"
32#include "hif.h"
33#include "hif_io32.h"
34#include "ce_api.h"
35#include "ce_main.h"
36#include "ce_internal.h"
37#include "ce_reg.h"
38#include "cdf_lock.h"
39#include "regtable.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080040#include "epping_main.h"
41#include "hif_main.h"
42#include "hif_debug.h"
Chandrasekaran, Manishekar681d1372015-11-05 10:42:48 +053043#include "cds_concurrency.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080044
#ifdef IPA_OFFLOAD
#ifdef QCA_WIFI_3_0
#define CE_IPA_RING_INIT(ce_desc) \
	do { \
		ce_desc->gather = 0; \
		ce_desc->enable_11h = 0; \
		ce_desc->meta_data_low = 0; \
		ce_desc->packet_result_offset = 64; \
		ce_desc->toeplitz_hash_enable = 0; \
		ce_desc->addr_y_search_disable = 0; \
		ce_desc->addr_x_search_disable = 0; \
		ce_desc->misc_int_disable = 0; \
		ce_desc->target_int_disable = 0; \
		ce_desc->host_int_disable = 0; \
		ce_desc->dest_byte_swap = 0; \
		ce_desc->byte_swap = 0; \
		ce_desc->type = 2; \
		ce_desc->tx_classify = 1; \
		ce_desc->buffer_addr_hi = 0; \
		ce_desc->meta_data = 0; \
		ce_desc->nbytes = 128; \
	} while (0)
#else
#define CE_IPA_RING_INIT(ce_desc) \
	do { \
		ce_desc->byte_swap = 0; \
		ce_desc->nbytes = 60; \
		ce_desc->gather = 0; \
	} while (0)
#endif /* QCA_WIFI_3_0 */
#endif /* IPA_OFFLOAD */

static int war1_allow_sleep;
/* io32 write workaround */
static int hif_ce_war1;

#ifdef CONFIG_SLUB_DEBUG_ON

/**
 * struct hif_ce_desc_event - structure for detailing a ce event
 * @type: what the event was
 * @time: when it happened
 * @descriptor: descriptor enqueued or dequeued
 * @memory: virtual address that was used
 * @index: location of the descriptor in the ce ring
 */
struct hif_ce_desc_event {
	uint16_t index;
	enum hif_ce_event_type type;
	uint64_t time;
	union ce_desc descriptor;
	void *memory;
};

/* max history to record per copy engine */
#define HIF_CE_HISTORY_MAX 512
cdf_atomic_t hif_ce_desc_history_index[CE_COUNT_MAX];
struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];

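/*
 * Illustrative sketch (not part of the driver): with the circular history
 * above, the most recently recorded event for a copy engine can be peeked
 * at from a debugger or a hypothetical dump helper roughly as follows.
 * The helper name is made up; only the arrays and the atomic index above
 * are real.
 *
 *	static struct hif_ce_desc_event *hif_ce_latest_event(int ce_id)
 *	{
 *		int idx = cdf_atomic_read(&hif_ce_desc_history_index[ce_id]) %
 *			  HIF_CE_HISTORY_MAX;
 *
 *		return &hif_ce_desc_history[ce_id][idx];
 *	}
 */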

/**
 * get_next_record_index() - get the next record index
 * @table_index: atomic index variable to increment
 * @array_size: array size of the circular buffer
 *
 * Increment the atomic index and reserve the value.
 * Takes care of buffer wrap.
 * Guaranteed to be thread safe as long as fewer than array_size contexts
 * try to access the array. If there are more than array_size contexts
 * trying to access the array, full locking of the recording process would
 * be needed to have sane logging.
 */
static int get_next_record_index(cdf_atomic_t *table_index, int array_size)
{
	int record_index = cdf_atomic_inc_return(table_index);
	if (record_index == array_size)
		cdf_atomic_sub(array_size, table_index);

	while (record_index >= array_size)
		record_index -= array_size;
	return record_index;
}

/**
 * hif_record_ce_desc_event() - record ce descriptor events
 * @ce_id: which ce is the event occurring on
 * @type: what happened
 * @descriptor: pointer to the descriptor posted/completed
 * @memory: virtual address of buffer related to the descriptor
 * @index: index that the descriptor was/will be at.
 */
void hif_record_ce_desc_event(int ce_id, enum hif_ce_event_type type,
				union ce_desc *descriptor, void *memory, int index)
{
	int record_index = get_next_record_index(
			&hif_ce_desc_history_index[ce_id], HIF_CE_HISTORY_MAX);

	struct hif_ce_desc_event *event =
		&hif_ce_desc_history[ce_id][record_index];
	event->type = type;
	event->time = cds_get_monotonic_boottime();
	if (descriptor != NULL)
		event->descriptor = *descriptor;
	else
		memset(&event->descriptor, 0, sizeof(union ce_desc));
	event->memory = memory;
	event->index = index;
}

/**
 * ce_init_ce_desc_event_log() - initialize the ce event log
 * @ce_id: copy engine id for which we are initializing the log
 * @size: size of array to dedicate
 *
 * Currently the passed size is ignored in favor of a precompiled value.
 */
void ce_init_ce_desc_event_log(int ce_id, int size)
{
	cdf_atomic_init(&hif_ce_desc_history_index[ce_id]);
}
#else
void hif_record_ce_desc_event(
	int ce_id, enum hif_ce_event_type type,
	union ce_desc *descriptor, void *memory,
	int index)
{
}

inline void ce_init_ce_desc_event_log(int ce_id, int size)
{
}
#endif

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem -- than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 * The Target TX hash result is returned via toeplitz_hash_result.
 */
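
/*
 * Illustrative sketch (not part of the driver): a typical send-side use of
 * this layer posts a buffer with a per-transfer context (here an nbuf) and
 * gets that same context echoed back when the completion callback fires.
 * The handle name and callback are hypothetical; only ce_send() and the
 * send-callback signature used elsewhere in this file are real.
 *
 *	// post: the nbuf itself is used as the per-transfer context
 *	ce_send(ce_tx_handle, msdu, cdf_nbuf_get_frag_paddr_lo(msdu, 0),
 *		len, transfer_id, 0, 0);
 *
 *	// completion callback registered on the same CE
 *	static void example_tx_done(struct CE_handle *ce, void *ce_ctx,
 *				    void *xfer_ctx, cdf_dma_addr_t buf,
 *				    unsigned int nbytes, unsigned int id,
 *				    unsigned int sw_idx, unsigned int hw_idx,
 *				    uint32_t toeplitz_hash_result)
 *	{
 *		cdf_nbuf_t msdu = (cdf_nbuf_t)xfer_ctx;	// echoed back
 *		// unmap and free msdu here
 *	}
 */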

/*
 * Guts of ce_send, used by both ce_send and ce_sendlist_send.
 * The caller takes responsibility for any needed locking.
 */
int
ce_completed_send_next_nolock(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      cdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *sw_idx, unsigned int *hw_idx,
			      uint32_t *toeplitz_hash_result);

void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
				   u32 ctrl_addr, unsigned int write_index)
{
	if (hif_ce_war1) {
		void __iomem *indicator_addr;

		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;

		if (!war1_allow_sleep
		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
			hif_write32_mb(indicator_addr,
				       (CDC_WAR_MAGIC_STR | write_index));
		} else {
			unsigned long irq_flags;
			local_irq_save(irq_flags);
			hif_write32_mb(indicator_addr, 1);

			/*
			 * PCIE write waits for ACK in IPQ8K, there is no
			 * need to read back value.
			 */
			(void)hif_read32_mb(indicator_addr);
			(void)hif_read32_mb(indicator_addr); /* conservative */

			CE_SRC_RING_WRITE_IDX_SET(scn,
						  ctrl_addr, write_index);

			hif_write32_mb(indicator_addr, 0);
			local_irq_restore(irq_flags);
		}
	} else
		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
}

int
ce_send_nolock(struct CE_handle *copyeng,
	       void *per_transfer_context,
	       cdf_dma_addr_t buffer,
	       uint32_t nbytes,
	       uint32_t transfer_id,
	       uint32_t flags,
	       uint32_t user_flags)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	A_TARGET_ACCESS_BEGIN_RET(scn);
	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		status = CDF_STATUS_E_FAILURE;
		A_TARGET_ACCESS_END_RET(scn);
		return status;
	}
	{
		enum hif_ce_event_type event_type = HIF_TX_GATHER_DESC_POST;
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		/* Update low 32 bits source descriptor address */
		shadow_src_desc->buffer_addr =
			(uint32_t)(dma_addr & 0xFFFFFFFF);
#ifdef QCA_WIFI_3_0
		shadow_src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0x1F);
		user_flags |= shadow_src_desc->buffer_addr_hi;
		memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
		       sizeof(uint32_t));
#endif
		shadow_src_desc->meta_data = transfer_id;

		/*
		 * Set the swap bit if:
		 * typical sends on this CE are swapped (host is big-endian)
		 * and this send doesn't disable the swapping
		 * (data is not bytestream)
		 */
		shadow_src_desc->byte_swap =
			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
		shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
		shadow_src_desc->nbytes = nbytes;

		*src_desc = *shadow_src_desc;

		src_ring->per_transfer_context[write_index] =
			per_transfer_context;

		/* Update Source Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		/* WORKAROUND */
		if (!shadow_src_desc->gather) {
			event_type = HIF_TX_DESC_POST;
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);
		}

		/* src_ring->write_index hasn't been updated even though
		 * the register has already been written to.
		 */
		hif_record_ce_desc_event(CE_state->id, event_type,
			(union ce_desc *) shadow_src_desc, per_transfer_context,
			src_ring->write_index);

		src_ring->write_index = write_index;
		status = CDF_STATUS_SUCCESS;
	}
	A_TARGET_ACCESS_END_RET(scn);

	return status;
}

int
ce_send(struct CE_handle *copyeng,
	void *per_transfer_context,
	cdf_dma_addr_t buffer,
	uint32_t nbytes,
	uint32_t transfer_id,
	uint32_t flags,
	uint32_t user_flag)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	int status;

	cdf_spin_lock_bh(&CE_state->ce_index_lock);
	status = ce_send_nolock(copyeng, per_transfer_context, buffer, nbytes,
				transfer_id, flags, user_flag);
	cdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

unsigned int ce_sendlist_sizeof(void)
{
	return sizeof(struct ce_sendlist);
}

void ce_sendlist_init(struct ce_sendlist *sendlist)
{
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	sl->num_items = 0;
}

int
ce_sendlist_buf_add(struct ce_sendlist *sendlist,
		    cdf_dma_addr_t buffer,
		    uint32_t nbytes,
		    uint32_t flags,
		    uint32_t user_flags)
{
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	unsigned int num_items = sl->num_items;
	struct ce_sendlist_item *item;

	if (num_items >= CE_SENDLIST_ITEMS_MAX) {
		CDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
		return CDF_STATUS_E_RESOURCES;
	}

	item = &sl->item[num_items];
	item->send_type = CE_SIMPLE_BUFFER_TYPE;
	item->data = buffer;
	item->u.nbytes = nbytes;
	item->flags = flags;
	item->user_flags = user_flags;
	sl->num_items = num_items + 1;
	return CDF_STATUS_SUCCESS;
}

int
ce_sendlist_send(struct CE_handle *copyeng,
		 void *per_transfer_context,
		 struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	int status = -ENOMEM;
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int num_items = sl->num_items;
	unsigned int sw_index;
	unsigned int write_index;

	CDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

	cdf_spin_lock_bh(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
	    num_items) {
		struct ce_sendlist_item *item;
		int i;

		/* handle all but the last item uniformly */
		for (i = 0; i < num_items - 1; i++) {
			item = &sl->item[i];
			/* TBDXXX: Support extensible sendlist_types? */
			CDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
			status = ce_send_nolock(copyeng, CE_SENDLIST_ITEM_CTXT,
				(cdf_dma_addr_t) item->data,
				item->u.nbytes, transfer_id,
				item->flags | CE_SEND_FLAG_GATHER,
				item->user_flags);
			CDF_ASSERT(status == CDF_STATUS_SUCCESS);
		}
		/* provide valid context pointer for final item */
		item = &sl->item[i];
		/* TBDXXX: Support extensible sendlist_types? */
		CDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
		status = ce_send_nolock(copyeng, per_transfer_context,
					(cdf_dma_addr_t) item->data,
					item->u.nbytes,
					transfer_id, item->flags,
					item->user_flags);
		CDF_ASSERT(status == CDF_STATUS_SUCCESS);
		NBUF_UPDATE_TX_PKT_COUNT((cdf_nbuf_t)per_transfer_context,
					 NBUF_TX_PKT_CE);
		DPTRACE(cdf_dp_trace((cdf_nbuf_t)per_transfer_context,
			CDF_DP_TRACE_CE_PACKET_PTR_RECORD,
			(uint8_t *)(((cdf_nbuf_t)per_transfer_context)->data),
			sizeof(((cdf_nbuf_t)per_transfer_context)->data)));
	} else {
		/*
		 * Probably not worth the additional complexity to support
		 * partial sends with continuation or notification. We expect
		 * to use large rings and small sendlists. If we can't handle
		 * the entire request at once, punt it back to the caller.
		 */
	}
	cdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

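/*
 * Illustrative sketch (not part of the driver): the sendlist API above is a
 * three-step sequence -- init, add one or more fragments, then post the whole
 * list as a single gather transfer. The handle, addresses and lengths below
 * are placeholders.
 *
 *	struct ce_sendlist sendlist;
 *
 *	ce_sendlist_init(&sendlist);
 *	ce_sendlist_buf_add(&sendlist, hdr_paddr, hdr_len, 0, 0);
 *	ce_sendlist_buf_add(&sendlist, payload_paddr, payload_len, 0, 0);
 *	ce_sendlist_send(ce_tx_handle, msdu, &sendlist, transfer_id);
 *
 * All items except the last are posted with CE_SEND_FLAG_GATHER, so the
 * target sees them as one logical transfer, and only the final descriptor
 * carries the caller's per-transfer context (msdu here).
 */
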
#ifdef WLAN_FEATURE_FASTPATH
#ifdef QCA_WIFI_3_0
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
		      uint64_t dma_addr,
		      uint32_t user_flags)
{
	shadow_src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0x1F);
	user_flags |= shadow_src_desc->buffer_addr_hi;
	memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
	       sizeof(uint32_t));
}
#else
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
		      uint64_t dma_addr,
		      uint32_t user_flags)
{
}
#endif

/**
 * ce_send_fast() - CE layer Tx buffer posting function
 * @copyeng: copy engine handle
 * @msdus: array of msdus to be sent
 * @num_msdus: number of msdus in the array
 * @transfer_id: transfer_id
 *
 * Assumption: Called with an array of MSDUs.
 * Function:
 * For each msdu in the array
 * 1. Check the number of available entries
 * 2. Create src ring entries (allocated in consistent memory)
 * 3. Write the index to h/w
 *
 * Return: No. of packets that could be sent
 */

int ce_send_fast(struct CE_handle *copyeng, cdf_nbuf_t *msdus,
		 unsigned int num_msdus, unsigned int transfer_id)
{
	struct CE_state *ce_state = (struct CE_state *)copyeng;
	struct hif_softc *scn = ce_state->scn;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct CE_ring_state *src_ring = ce_state->src_ring;
	u_int32_t ctrl_addr = ce_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	unsigned int frag_len;
	cdf_nbuf_t msdu;
	int i;
	uint64_t dma_addr;
	uint32_t user_flags = 0;

	cdf_spin_lock_bh(&ce_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	/* 2 msdus per packet */
	for (i = 0; i < num_msdus; i++) {
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		hif_pm_runtime_get_noresume(hif_hdl);
		msdu = msdus[i];

		/*
		 * First fill out the ring descriptor for the HTC HTT frame
		 * header. These are uncached writes. Should we use a local
		 * structure instead?
		 */
		/* HTT/HTC header can be passed as an argument */
		dma_addr = cdf_nbuf_get_frag_paddr_lo(msdu, 0);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		user_flags = cdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);

		shadow_src_desc->meta_data = transfer_id;
		shadow_src_desc->nbytes = cdf_nbuf_get_frag_len(msdu, 0);

		/*
		 * HTC HTT header is a word stream, so byte swap if CE byte
		 * swap enabled
		 */
		shadow_src_desc->byte_swap = ((ce_state->attr_flags &
					CE_ATTR_BYTE_SWAP_DATA) != 0);
		/* For the first one, it still does not need to write */
		shadow_src_desc->gather = 1;
		*src_desc = *shadow_src_desc;

		/* By default we could initialize the transfer context to this
		 * value
		 */
		src_ring->per_transfer_context[write_index] =
			CE_SENDLIST_ITEM_CTXT;

		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
		/*
		 * Now fill out the ring descriptor for the actual data
		 * packet
		 */
		dma_addr = cdf_nbuf_get_frag_paddr_lo(msdu, 1);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		/*
		 * Clear packet offset for all but the first CE desc.
		 */
		user_flags &= ~CDF_CE_TX_PKT_OFFSET_BIT_M;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
		shadow_src_desc->meta_data = transfer_id;

		/* get actual packet length */
		frag_len = cdf_nbuf_get_frag_len(msdu, 1);

		/* only read download_len once */
		shadow_src_desc->nbytes = ce_state->download_len;
		if (shadow_src_desc->nbytes > frag_len)
			shadow_src_desc->nbytes = frag_len;

		/* Data packet is a byte stream, so disable byte swap */
		shadow_src_desc->byte_swap = 0;
		/* For the last one, gather is not set */
		shadow_src_desc->gather = 0;
		*src_desc = *shadow_src_desc;
		src_ring->per_transfer_context[write_index] = msdu;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	}

	/* Write the final index to h/w one-shot */
	if (i) {
		src_ring->write_index = write_index;

		if (hif_pm_runtime_get(hif_hdl) == 0) {
			/* Don't call WAR_XXX from here
			 * Just call XXX instead, that has the required
			 * intelligence.
			 */
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);
			hif_pm_runtime_put(hif_hdl);
		}
	}

	cdf_spin_unlock_bh(&ce_state->ce_index_lock);

	/*
	 * If all packets in the array are transmitted,
	 * i = num_msdus
	 * Temporarily add an ASSERT
	 */
	ASSERT(i == num_msdus);
	return i;
}
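
/*
 * Illustrative sketch (not part of the driver): ce_send_fast() expects each
 * nbuf to carry two mapped fragments -- fragment 0 holds the HTC/HTT header,
 * fragment 1 the data payload -- and posts both in one pass, writing the ring
 * index to hardware only once for the whole batch. A hypothetical caller:
 *
 *	cdf_nbuf_t msdus[2] = { msdu_a, msdu_b };	// already DMA-mapped
 *	int sent = ce_send_fast(ce_tx_handle, msdus, 2, transfer_id);
 *
 *	if (sent != 2) {
 *		// with the ASSERT above this should not happen; a production
 *		// caller would requeue or drop the unsent tail here
 *	}
 */
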
#endif /* WLAN_FEATURE_FASTPATH */

int
ce_recv_buf_enqueue(struct CE_handle *copyeng,
		    void *per_recv_context, cdf_dma_addr_t buffer)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	int val = 0;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	cdf_spin_lock_bh(&CE_state->ce_index_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	A_TARGET_ACCESS_BEGIN_RET_EXT(scn, val);
	if (val == -1) {
		cdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return val;
	}

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
		struct CE_dest_desc *dest_ring_base =
			(struct CE_dest_desc *)dest_ring->
			    base_addr_owner_space;
		struct CE_dest_desc *dest_desc =
			CE_DEST_RING_TO_DESC(dest_ring_base, write_index);

		/* Update low 32 bit destination descriptor */
		dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
#ifdef QCA_WIFI_3_0
		dest_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0x1F);
#endif
		dest_desc->nbytes = 0;

		dest_ring->per_transfer_context[write_index] =
			per_recv_context;

		hif_record_ce_desc_event(CE_state->id, HIF_RX_DESC_POST,
				(union ce_desc *) dest_desc, per_recv_context,
				write_index);

		/* Update Destination Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
		CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
		dest_ring->write_index = write_index;
		status = CDF_STATUS_SUCCESS;
	} else {
		status = CDF_STATUS_E_FAILURE;
	}
	A_TARGET_ACCESS_END_RET_EXT(scn, val);
	if (val == -1) {
		cdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return val;
	}

	cdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

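/*
 * Illustrative sketch (not part of the driver): receive buffers are posted
 * one at a time with ce_recv_buf_enqueue(), typically from a refill loop
 * that keeps the destination ring full. The allocation helper and handle
 * below are placeholders; only ce_recv_buf_enqueue() and
 * ce_recv_entries_avail() are real.
 *
 *	while (ce_recv_entries_avail(ce_rx_handle) > 0) {
 *		cdf_nbuf_t nbuf = alloc_and_map_rx_buffer();	// hypothetical
 *
 *		if (nbuf == NULL)
 *			break;
 *		if (ce_recv_buf_enqueue(ce_rx_handle, nbuf,
 *					cdf_nbuf_get_frag_paddr_lo(nbuf, 0)) !=
 *		    CDF_STATUS_SUCCESS) {
 *			// ring full or target access failed; unmap and free
 *			break;
 *		}
 *	}
 */
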
void
ce_send_watermarks_set(struct CE_handle *copyeng,
		       unsigned int low_alert_nentries,
		       unsigned int high_alert_nentries)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
}

void
ce_recv_watermarks_set(struct CE_handle *copyeng,
		       unsigned int low_alert_nentries,
		       unsigned int high_alert_nentries)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
				 low_alert_nentries);
	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
				  high_alert_nentries);
}

unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;

	cdf_spin_lock(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;
	cdf_spin_unlock(&CE_state->ce_index_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}

unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;

	cdf_spin_lock(&CE_state->ce_index_lock);
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	cdf_spin_unlock(&CE_state->ce_index_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}

/*
 * Guts of ce_send_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
unsigned int
ce_send_entries_done_nolock(struct hif_softc *scn,
			    struct CE_state *CE_state)
{
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int read_index;

	sw_index = src_ring->sw_index;
	read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);

	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
}

unsigned int ce_send_entries_done(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int nentries;

	cdf_spin_lock(&CE_state->ce_index_lock);
	nentries = ce_send_entries_done_nolock(CE_state->scn, CE_state);
	cdf_spin_unlock(&CE_state->ce_index_lock);

	return nentries;
}

/*
 * Guts of ce_recv_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
unsigned int
ce_recv_entries_done_nolock(struct hif_softc *scn,
			    struct CE_state *CE_state)
{
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int read_index;

	sw_index = dest_ring->sw_index;
	read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);

	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
}

unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int nentries;

	cdf_spin_lock(&CE_state->ce_index_lock);
	nentries = ce_recv_entries_done_nolock(CE_state->scn, CE_state);
	cdf_spin_unlock(&CE_state->ce_index_lock);

	return nentries;
}

/* Debug support */
void *ce_debug_cmplrn_context;  /* completed recv next context */
void *ce_debug_cnclsn_context;  /* cancel send next context */
void *ce_debug_rvkrn_context;   /* revoke receive next context */
void *ce_debug_cmplsn_context;  /* completed send next context */

/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
int
ce_completed_recv_next_nolock(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      cdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *flagsp)
{
	int status;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;

	struct CE_dest_desc *dest_ring_base =
		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
	struct CE_dest_desc *dest_desc =
		CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
	int nbytes;
	struct CE_dest_desc dest_desc_info;
	/*
	 * By copying the dest_desc_info element to local memory, we could
	 * avoid extra memory reads from non-cacheable memory.
	 */
	dest_desc_info = *dest_desc;
	nbytes = dest_desc_info.nbytes;
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		status = CDF_STATUS_E_FAILURE;
		goto done;
	}

	hif_record_ce_desc_event(CE_state->id, HIF_RX_DESC_COMPLETION,
			(union ce_desc *) dest_desc,
			dest_ring->per_transfer_context[sw_index],
			sw_index);

	dest_desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
	*nbytesp = nbytes;
	*transfer_idp = dest_desc_info.meta_data;
	*flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

	if (per_CE_contextp) {
		*per_CE_contextp = CE_state->recv_context;
	}

	ce_debug_cmplrn_context = dest_ring->per_transfer_context[sw_index];
	if (per_transfer_contextp) {
		*per_transfer_contextp = ce_debug_cmplrn_context;
	}
	dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	status = CDF_STATUS_SUCCESS;

done:
	return status;
}

int
ce_completed_recv_next(struct CE_handle *copyeng,
		       void **per_CE_contextp,
		       void **per_transfer_contextp,
		       cdf_dma_addr_t *bufferp,
		       unsigned int *nbytesp,
		       unsigned int *transfer_idp, unsigned int *flagsp)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	int status;

	cdf_spin_lock_bh(&CE_state->ce_index_lock);
	status =
		ce_completed_recv_next_nolock(CE_state, per_CE_contextp,
					      per_transfer_contextp, bufferp,
					      nbytesp, transfer_idp, flagsp);
	cdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

/* NB: Modeled after ce_completed_recv_next_nolock */
CDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp, cdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state;
	struct CE_ring_state *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	CDF_STATUS status;
	struct hif_softc *scn;

	CE_state = (struct CE_state *)copyeng;
	dest_ring = CE_state->dest_ring;
	if (!dest_ring) {
		return CDF_STATUS_E_FAILURE;
	}

	scn = CE_state->scn;
	cdf_spin_lock(&CE_state->ce_index_lock);
	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct CE_dest_desc *dest_ring_base =
			(struct CE_dest_desc *)dest_ring->
			    base_addr_owner_space;
		struct CE_dest_desc *dest_desc =
			CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);

		if (per_CE_contextp) {
			*per_CE_contextp = CE_state->recv_context;
		}

		ce_debug_rvkrn_context =
			dest_ring->per_transfer_context[sw_index];
		if (per_transfer_contextp) {
			*per_transfer_contextp = ce_debug_rvkrn_context;
		}
		dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		status = CDF_STATUS_SUCCESS;
	} else {
		status = CDF_STATUS_E_FAILURE;
	}
	cdf_spin_unlock(&CE_state->ce_index_lock);

	return status;
}

/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
int
ce_completed_send_next_nolock(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      cdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *sw_idx,
			      unsigned int *hw_idx,
			      uint32_t *toeplitz_hash_result)
{
	int status = CDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int read_index;
	struct hif_softc *scn = CE_state->scn;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */
		A_TARGET_ACCESS_BEGIN_RET(scn);
		src_ring->hw_index =
			CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
		A_TARGET_ACCESS_END_RET(scn);
	}
	read_index = src_ring->hw_index;

	if (sw_idx)
		*sw_idx = sw_index;

	if (hw_idx)
		*hw_idx = read_index;

	if ((read_index != sw_index) && (read_index != 0xffffffff)) {
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, sw_index);
#ifdef QCA_WIFI_3_0
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
#endif
		hif_record_ce_desc_event(CE_state->id, HIF_TX_DESC_COMPLETION,
				(union ce_desc *) shadow_src_desc,
				src_ring->per_transfer_context[sw_index],
				sw_index);

		/* Return data from completed source descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
		*nbytesp = shadow_src_desc->nbytes;
		*transfer_idp = shadow_src_desc->meta_data;
#ifdef QCA_WIFI_3_0
		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
#else
		*toeplitz_hash_result = 0;
#endif
		if (per_CE_contextp) {
			*per_CE_contextp = CE_state->send_context;
		}

		ce_debug_cmplsn_context =
			src_ring->per_transfer_context[sw_index];
		if (per_transfer_contextp) {
			*per_transfer_contextp = ce_debug_cmplsn_context;
		}
		src_ring->per_transfer_context[sw_index] = 0;  /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = CDF_STATUS_SUCCESS;
	}

	return status;
}

/* NB: Modeled after ce_completed_send_next */
CDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    cdf_dma_addr_t *bufferp,
		    unsigned int *nbytesp,
		    unsigned int *transfer_idp,
		    uint32_t *toeplitz_hash_result)
{
	struct CE_state *CE_state;
	struct CE_ring_state *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	CDF_STATUS status;
	struct hif_softc *scn;

	CE_state = (struct CE_state *)copyeng;
	src_ring = CE_state->src_ring;
	if (!src_ring) {
		return CDF_STATUS_E_FAILURE;
	}

	scn = CE_state->scn;
	cdf_spin_lock(&CE_state->ce_index_lock);
	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);

		/* Return data from completed source descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
#ifdef QCA_WIFI_3_0
		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
#else
		*toeplitz_hash_result = 0;
#endif

		if (per_CE_contextp) {
			*per_CE_contextp = CE_state->send_context;
		}

		ce_debug_cnclsn_context =
			src_ring->per_transfer_context[sw_index];
		if (per_transfer_contextp) {
			*per_transfer_contextp = ce_debug_cnclsn_context;
		}
		src_ring->per_transfer_context[sw_index] = 0;  /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = CDF_STATUS_SUCCESS;
	} else {
		status = CDF_STATUS_E_FAILURE;
	}
	cdf_spin_unlock(&CE_state->ce_index_lock);

	return status;
}

/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
#define CE_WM_SHFT 1

int
ce_completed_send_next(struct CE_handle *copyeng,
		       void **per_CE_contextp,
		       void **per_transfer_contextp,
		       cdf_dma_addr_t *bufferp,
		       unsigned int *nbytesp,
		       unsigned int *transfer_idp,
		       unsigned int *sw_idx,
		       unsigned int *hw_idx,
		       unsigned int *toeplitz_hash_result)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	int status;

	cdf_spin_lock_bh(&CE_state->ce_index_lock);
	status =
		ce_completed_send_next_nolock(CE_state, per_CE_contextp,
					      per_transfer_contextp, bufferp,
					      nbytesp, transfer_idp, sw_idx,
					      hw_idx, toeplitz_hash_result);
	cdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

#ifdef ATH_11AC_TXCOMPACT
/* CE engine descriptor reap
 * Similar to ce_per_engine_service(); the only difference is that
 * ce_per_engine_service() handles both receive processing and reaping of
 * completed descriptors, while this function only handles reaping of Tx
 * completed descriptors. It is called from the threshold reap poll routine
 * hif_send_complete_check(), so it must not contain receive functionality.
 */

void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
{
	void *CE_context;
	void *transfer_context;
	cdf_dma_addr_t buf;
	unsigned int nbytes;
	unsigned int id;
	unsigned int sw_idx, hw_idx;
	uint32_t toeplitz_hash_result;
	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];

	A_TARGET_ACCESS_BEGIN(scn);
	hif_record_ce_desc_event(ce_id, HIF_CE_REAP_ENTRY,
			NULL, NULL, 0);

	/* Since this function is called from both user context and
	 * tasklet context the spinlock has to lock the bottom halves.
	 * This fix assumes that ATH_11AC_TXCOMPACT flag is always
	 * enabled in TX polling mode. If this is not the case, more
	 * bottom half spinlock changes are needed. Due to data path
	 * performance concerns, after internal discussion we've decided
	 * to make the minimum change, i.e., only address the issue that
	 * occurs in this function. The possible negative effect of this
	 * minimum change is that, in the future, if some other function
	 * is also opened up to the user context, those cases need to be
	 * addressed by changing spin_lock to spin_lock_bh as well.
	 */

	cdf_spin_lock_bh(&CE_state->ce_index_lock);

	if (CE_state->send_cb) {
		{
			/* Pop completed send buffers and call the
			 * registered send callback for each
			 */
			while (ce_completed_send_next_nolock
				 (CE_state, &CE_context,
				  &transfer_context, &buf,
				  &nbytes, &id, &sw_idx, &hw_idx,
				  &toeplitz_hash_result) ==
				 CDF_STATUS_SUCCESS) {
				if (ce_id != CE_HTT_H2T_MSG) {
					cdf_spin_unlock_bh(
						&CE_state->ce_index_lock);
					CE_state->send_cb(
						(struct CE_handle *)
						CE_state, CE_context,
						transfer_context, buf,
						nbytes, id, sw_idx, hw_idx,
						toeplitz_hash_result);
					cdf_spin_lock_bh(
						&CE_state->ce_index_lock);
				} else {
					struct HIF_CE_pipe_info *pipe_info =
						(struct HIF_CE_pipe_info *)
						CE_context;

					cdf_spin_lock_bh(&pipe_info->
						 completion_freeq_lock);
					pipe_info->num_sends_allowed++;
					cdf_spin_unlock_bh(&pipe_info->
						   completion_freeq_lock);
				}
			}
		}
	}

	cdf_spin_unlock_bh(&CE_state->ce_index_lock);

	hif_record_ce_desc_event(ce_id, HIF_CE_REAP_EXIT,
			NULL, NULL, 0);
	A_TARGET_ACCESS_END(scn);
}

#endif /*ATH_11AC_TXCOMPACT */

/*
 * Number of times to check for any pending tx/rx completion on
 * a copy engine; this count should be big enough. Once we hit
 * this threshold we will not check for any Tx/Rx completion in the
 * same interrupt handling. Note that this threshold is only used for
 * Rx interrupt processing; it can be used for Tx as well if we
 * suspect any infinite loop in checking for pending Tx completion.
 */
#define CE_TXRX_COMP_CHECK_THRESHOLD 20

/*
 * Guts of interrupt handler for per-engine interrupts on a particular CE.
 *
 * Invokes registered callbacks for recv_complete,
 * send_complete, and watermarks.
 *
 * Returns: number of messages processed
 */

int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
{
	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	void *CE_context;
	void *transfer_context;
	cdf_dma_addr_t buf;
	unsigned int nbytes;
	unsigned int id;
	unsigned int flags;
	uint32_t CE_int_status;
	unsigned int more_comp_cnt = 0;
	unsigned int more_snd_comp_cnt = 0;
	unsigned int sw_idx, hw_idx;
	uint32_t toeplitz_hash_result;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		HIF_ERROR("[premature rc=0]\n");
		return 0; /* no work done */
	}

	cdf_spin_lock(&CE_state->ce_index_lock);

	/* Clear force_break flag and re-initialize receive_count to 0 */

	/* NAPI: scn variables- thread/multi-processing safety? */
	CE_state->receive_count = 0;
	CE_state->force_break = 0;
more_completions:
	if (CE_state->recv_cb) {

		/* Pop completed recv buffers and call
		 * the registered recv callback for each
		 */
		while (ce_completed_recv_next_nolock
				(CE_state, &CE_context, &transfer_context,
				&buf, &nbytes, &id, &flags) ==
				CDF_STATUS_SUCCESS) {
			cdf_spin_unlock(&CE_state->ce_index_lock);
			CE_state->recv_cb((struct CE_handle *)CE_state,
					  CE_context, transfer_context, buf,
					  nbytes, id, flags);

			/*
			 * EV #112693 -
			 * [Peregrine][ES1][WB342][Win8x86][Performance]
			 * BSoD_0x133 occurred in VHT80 UDP_DL.
			 * Break out of the DPC by force if the number of
			 * loops in hif_pci_ce_recv_data reaches
			 * MAX_NUM_OF_RECEIVES, to avoid spending too long in
			 * the DPC for each interrupt handling. Schedule
			 * another DPC to avoid data loss if the force-break
			 * action was taken. This currently applies to the
			 * Windows OS only; Linux/MAC OS can expand it to
			 * their platforms if necessary.
			 */

			/* Break the receive processing by
			 * force if force_break is set up
			 */
			if (cdf_unlikely(CE_state->force_break)) {
				cdf_atomic_set(&CE_state->rx_pending, 1);
				CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
					HOST_IS_COPY_COMPLETE_MASK);
				if (Q_TARGET_ACCESS_END(scn) < 0)
					HIF_ERROR("<--[premature rc=%d]\n",
						  CE_state->receive_count);
				return CE_state->receive_count;
			}
			cdf_spin_lock(&CE_state->ce_index_lock);
		}
	}

	/*
	 * Attention: We may experience a potential infinite loop in the
	 * while loop below during a send stress test.
	 * Resolve it the same way as the receive case (refer to EV #112693).
	 */

	if (CE_state->send_cb) {
		/* Pop completed send buffers and call
		 * the registered send callback for each
		 */

#ifdef ATH_11AC_TXCOMPACT
		while (ce_completed_send_next_nolock
			 (CE_state, &CE_context,
			  &transfer_context, &buf, &nbytes,
			  &id, &sw_idx, &hw_idx,
			  &toeplitz_hash_result) == CDF_STATUS_SUCCESS) {

			if (CE_id != CE_HTT_H2T_MSG ||
			    WLAN_IS_EPPING_ENABLED(cds_get_conparam())) {
				cdf_spin_unlock(&CE_state->ce_index_lock);
				CE_state->send_cb((struct CE_handle *)CE_state,
						  CE_context, transfer_context,
						  buf, nbytes, id, sw_idx,
						  hw_idx, toeplitz_hash_result);
				cdf_spin_lock(&CE_state->ce_index_lock);
			} else {
				struct HIF_CE_pipe_info *pipe_info =
					(struct HIF_CE_pipe_info *)CE_context;

				cdf_spin_lock(&pipe_info->
					      completion_freeq_lock);
				pipe_info->num_sends_allowed++;
				cdf_spin_unlock(&pipe_info->
						completion_freeq_lock);
			}
		}
#else /*ATH_11AC_TXCOMPACT */
		while (ce_completed_send_next_nolock
			 (CE_state, &CE_context,
			  &transfer_context, &buf, &nbytes,
			  &id, &sw_idx, &hw_idx,
			  &toeplitz_hash_result) == CDF_STATUS_SUCCESS) {
			cdf_spin_unlock(&CE_state->ce_index_lock);
			CE_state->send_cb((struct CE_handle *)CE_state,
				  CE_context, transfer_context, buf,
				  nbytes, id, sw_idx, hw_idx,
				  toeplitz_hash_result);
			cdf_spin_lock(&CE_state->ce_index_lock);
		}
#endif /*ATH_11AC_TXCOMPACT */
	}

more_watermarks:
	if (CE_state->misc_cbs) {
		CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
		if (CE_int_status & CE_WATERMARK_MASK) {
			if (CE_state->watermark_cb) {
				cdf_spin_unlock(&CE_state->ce_index_lock);
				/* Convert HW IS bits to software flags */
				flags =
					(CE_int_status & CE_WATERMARK_MASK) >>
					CE_WM_SHFT;

				CE_state->
				watermark_cb((struct CE_handle *)CE_state,
					     CE_state->wm_context, flags);
				cdf_spin_lock(&CE_state->ce_index_lock);
			}
		}
	}

	/*
	 * Clear the misc interrupts (watermark) that were handled above,
	 * and that will be checked again below.
	 * Clear and check for copy-complete interrupts again, just in case
	 * more copy completions happened while the misc interrupts were being
	 * handled.
	 */
	CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
				   CE_WATERMARK_MASK |
				   HOST_IS_COPY_COMPLETE_MASK);

	/*
	 * Now that per-engine interrupts are cleared, verify that
	 * no recv interrupts arrive while processing send interrupts,
	 * and no recv or send interrupts happened while processing
	 * misc interrupts. Go back and check again. Keep checking until
	 * we find no more events to process.
	 */
	if (CE_state->recv_cb && ce_recv_entries_done_nolock(scn, CE_state)) {
		if (WLAN_IS_EPPING_ENABLED(cds_get_conparam()) ||
		    more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
			goto more_completions;
		} else {
			HIF_ERROR(
				"%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
				__func__, CE_state->dest_ring->nentries_mask,
				CE_state->dest_ring->sw_index,
				CE_DEST_RING_READ_IDX_GET(scn,
							  CE_state->ctrl_addr));
		}
	}

	if (CE_state->send_cb && ce_send_entries_done_nolock(scn, CE_state)) {
		if (WLAN_IS_EPPING_ENABLED(cds_get_conparam()) ||
		    more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
			goto more_completions;
		} else {
			HIF_ERROR(
				"%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
				__func__, CE_state->src_ring->nentries_mask,
				CE_state->src_ring->sw_index,
				CE_SRC_RING_READ_IDX_GET(scn,
							 CE_state->ctrl_addr));
		}
	}

	if (CE_state->misc_cbs) {
		CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
		if (CE_int_status & CE_WATERMARK_MASK) {
			if (CE_state->watermark_cb) {
				goto more_watermarks;
			}
		}
	}

	cdf_spin_unlock(&CE_state->ce_index_lock);
	cdf_atomic_set(&CE_state->rx_pending, 0);

	if (Q_TARGET_ACCESS_END(scn) < 0)
		HIF_ERROR("<--[premature rc=%d]\n", CE_state->receive_count);
	return CE_state->receive_count;
}

1494/*
1495 * Handler for per-engine interrupts on ALL active CEs.
1496 * This is used in cases where the system is sharing a
1497 * single interrput for all CEs
1498 */
1499
Komal Seelam644263d2016-02-22 20:45:49 +05301500void ce_per_engine_service_any(int irq, struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001501{
1502 int CE_id;
1503 uint32_t intr_summary;
1504
1505 A_TARGET_ACCESS_BEGIN(scn);
1506 if (!cdf_atomic_read(&scn->tasklet_from_intr)) {
1507 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1508 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1509 if (cdf_atomic_read(&CE_state->rx_pending)) {
1510 cdf_atomic_set(&CE_state->rx_pending, 0);
1511 ce_per_engine_service(scn, CE_id);
1512 }
1513 }
1514
1515 A_TARGET_ACCESS_END(scn);
1516 return;
1517 }
1518
1519 intr_summary = CE_INTERRUPT_SUMMARY(scn);
1520
1521 for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
1522 if (intr_summary & (1 << CE_id)) {
1523 intr_summary &= ~(1 << CE_id);
1524 } else {
1525 continue; /* no intr pending on this CE */
1526 }
1527
1528 ce_per_engine_service(scn, CE_id);
1529 }
1530
1531 A_TARGET_ACCESS_END(scn);
1532}
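/*
 * Example (illustrative sketch): on platforms that share one interrupt line
 * across all copy engines, the bottom half can hand the whole hif_softc to
 * ce_per_engine_service_any().  The tasklet wiring and the 0 passed for the
 * (unused here) irq argument are hypothetical placeholders.
 *
 *	static void my_ce_tasklet(unsigned long data)
 *	{
 *		struct hif_softc *scn = (struct hif_softc *)data;
 *
 *		ce_per_engine_service_any(0, scn);
 *	}
 */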
1533
1534/*
1535 * Adjust interrupts for the copy complete handler.
1536 * If it's needed for either send or recv, then unmask
1537 * this interrupt; otherwise, mask it.
1538 *
1539 * Called with target_lock held.
1540 */
1541static void
1542ce_per_engine_handler_adjust(struct CE_state *CE_state,
1543 int disable_copy_compl_intr)
1544{
1545 uint32_t ctrl_addr = CE_state->ctrl_addr;
Komal Seelam644263d2016-02-22 20:45:49 +05301546 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001547
1548 CE_state->disable_copy_compl_intr = disable_copy_compl_intr;
1549 A_TARGET_ACCESS_BEGIN(scn);
1550 if ((!disable_copy_compl_intr) &&
1551 (CE_state->send_cb || CE_state->recv_cb)) {
1552 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1553 } else {
1554 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1555 }
1556
1557 if (CE_state->watermark_cb) {
1558 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1559 } else {
1560 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1561 }
1562 A_TARGET_ACCESS_END(scn);
1563
1564}
1565
1566/* Iterate the CE_state list and disable the copy complete interrupt
1567 * on each CE for which a send or recv callback has been registered.
1568 */
Komal Seelam644263d2016-02-22 20:45:49 +05301569void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001570{
1571 int CE_id;
1572
1573 A_TARGET_ACCESS_BEGIN(scn);
1574 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1575 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1576 uint32_t ctrl_addr = CE_state->ctrl_addr;
1577
1578 /* if the interrupt is currently enabled, disable it */
1579 if (!CE_state->disable_copy_compl_intr
1580 && (CE_state->send_cb || CE_state->recv_cb)) {
1581 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1582 }
1583
1584 if (CE_state->watermark_cb) {
1585 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1586 }
1587 }
1588 A_TARGET_ACCESS_END(scn);
1589}
1590
Komal Seelam644263d2016-02-22 20:45:49 +05301591void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001592{
1593 int CE_id;
1594
1595 A_TARGET_ACCESS_BEGIN(scn);
1596 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1597 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1598 uint32_t ctrl_addr = CE_state->ctrl_addr;
1599
1600 /*
1601 * If the CE is supposed to have copy complete interrupts
1602 * enabled (i.e. there a callback registered, and the
1603 * "disable" flag is not set), then re-enable the interrupt.
1604 */
1605 if (!CE_state->disable_copy_compl_intr
1606 && (CE_state->send_cb || CE_state->recv_cb)) {
1607 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1608 }
1609
1610 if (CE_state->watermark_cb) {
1611 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1612 }
1613 }
1614 A_TARGET_ACCESS_END(scn);
1615}
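/*
 * Example (illustrative sketch): a caller that drains the copy engines by
 * polling can bracket the polling window with the two helpers above.  As the
 * _nolock suffix indicates, any locking around the sequence is the caller's
 * responsibility; the polling step itself is hypothetical.
 *
 *	ce_disable_any_copy_compl_intr_nolock(scn);
 *	... poll/drain the copy engines ...
 *	ce_enable_any_copy_compl_intr_nolock(scn);
 */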
1616
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001617/**
1618 * ce_send_cb_register(): register completion handler
1619 * @copyeng: CE_state representing the ce we are adding the behavior to
1620 * @fn_ptr: callback that the ce should use when processing tx completions
 * @ce_send_context: context supplied back to fn_ptr on each tx completion
1621 * @disable_interrupts: if set, keep the copy complete interrupt masked
1622 *
1623 * Caller should guarantee that no transactions are in progress before
1624 * switching the callback function.
1625 *
1626 * The send context is registered before the function pointer so that, once
1627 * the callback is seen as valid, its context is guaranteed to be valid too.
1628 *
1629 * Beware that currently this function will enable completion interrupts.
1630 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001631void
1632ce_send_cb_register(struct CE_handle *copyeng,
1633 ce_send_cb fn_ptr,
1634 void *ce_send_context, int disable_interrupts)
1635{
1636 struct CE_state *CE_state = (struct CE_state *)copyeng;
1637
Sanjay Devnani9ce15772015-11-12 14:08:57 -08001638 if (CE_state == NULL) {
1639 pr_err("%s: Error CE state = NULL\n", __func__);
1640 return;
1641 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001642 CE_state->send_context = ce_send_context;
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001643 CE_state->send_cb = fn_ptr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001644 ce_per_engine_handler_adjust(CE_state, disable_interrupts);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001645}
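/*
 * Example (illustrative sketch): my_send_done and my_tx_ctx are hypothetical
 * and are assumed to match the ce_send_cb prototype from ce_api.h.  Passing
 * 0 for disable_interrupts leaves the copy complete interrupt enabled.
 *
 *	ce_send_cb_register(ce_hdl, my_send_done, my_tx_ctx, 0);
 */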
1646
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001647/**
1648 * ce_recv_cb_register(): register completion handler
1649 * @copyeng: CE_state representing the ce we are adding the behavior to
1650 * @fn_ptr: callback that the ce should use when processing rx completions
 * @CE_recv_context: context supplied back to fn_ptr on each rx completion
1651 * @disable_interrupts: if set, keep the copy complete interrupt masked
1652 *
1653 * The receive context is registered before the function pointer so that,
1654 * once the callback is seen as valid, its context is guaranteed to be valid too.
1655 *
1656 * Caller should guarantee that no transactions are in progress before
1657 * switching the callback function.
1658 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001659void
1660ce_recv_cb_register(struct CE_handle *copyeng,
1661 CE_recv_cb fn_ptr,
1662 void *CE_recv_context, int disable_interrupts)
1663{
1664 struct CE_state *CE_state = (struct CE_state *)copyeng;
1665
Sanjay Devnani9ce15772015-11-12 14:08:57 -08001666 if (CE_state == NULL) {
1667 pr_err("%s: ERROR CE state = NULL\n", __func__);
1668 return;
1669 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001670 CE_state->recv_context = CE_recv_context;
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001671 CE_state->recv_cb = fn_ptr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001672 ce_per_engine_handler_adjust(CE_state, disable_interrupts);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001673}
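/*
 * Example (illustrative sketch): my_recv_done and my_rx_ctx are hypothetical
 * and are assumed to match the CE_recv_cb prototype from ce_api.h.
 *
 *	ce_recv_cb_register(ce_hdl, my_recv_done, my_rx_ctx, 0);
 */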
1674
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001675/**
1676 * ce_watermark_cb_register(): register completion handler
1677 * @copyeng: CE_state representing the ce we are adding the behavior to
1678 * @fn_ptr: callback that the ce should use when processing watermark events
 * @CE_wm_context: context supplied back to fn_ptr on each watermark event
1679 *
1680 * Caller should guarantee that no watermark events are being processed before
1681 * switching the callback function.
1682 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001683void
1684ce_watermark_cb_register(struct CE_handle *copyeng,
1685 CE_watermark_cb fn_ptr, void *CE_wm_context)
1686{
1687 struct CE_state *CE_state = (struct CE_state *)copyeng;
1688
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001689 CE_state->watermark_cb = fn_ptr;
1690 CE_state->wm_context = CE_wm_context;
1691 ce_per_engine_handler_adjust(CE_state, 0);
1692 if (fn_ptr) {
1693 CE_state->misc_cbs = 1;
1694 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001695}
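/*
 * Example (illustrative sketch): my_wm_cb and my_wm_ctx are hypothetical and
 * are assumed to match the CE_watermark_cb prototype from ce_api.h.  Passing
 * a NULL fn_ptr leaves watermark interrupts disabled for this CE.
 *
 *	ce_watermark_cb_register(ce_hdl, my_wm_cb, my_wm_ctx);
 */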
1696
1697#ifdef WLAN_FEATURE_FASTPATH
1698/**
1699 * ce_pkt_dl_len_set(): set the HTT packet download length
1700 * @hif_sc: HIF context
1701 * @pkt_download_len: download length
1702 *
1703 * Return: None
1704 */
1705void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
1706{
Komal Seelam644263d2016-02-22 20:45:49 +05301707 struct hif_softc *sc = (struct hif_softc *)(hif_sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001708 struct CE_state *ce_state = sc->ce_id_to_state[CE_HTT_H2T_MSG];
1709
1710 cdf_assert_always(ce_state);
1711
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001712 ce_state->download_len = pkt_download_len;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001713
1714 cdf_print("%s CE %d Pkt download length %d\n", __func__,
1715 ce_state->id, ce_state->download_len);
1716}
1717#else
1718void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
1719{
1720}
1721#endif /* WLAN_FEATURE_FASTPATH */
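/*
 * Example (illustrative sketch; the 1024-byte download length is a
 * hypothetical value):
 *
 *	ce_pkt_dl_len_set(hif_sc, 1024);
 *
 * With WLAN_FEATURE_FASTPATH disabled the call resolves to the empty stub
 * above and has no effect.
 */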
1722
Komal Seelam644263d2016-02-22 20:45:49 +05301723bool ce_get_rx_pending(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001724{
1725 int CE_id;
1726
1727 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1728 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1729 if (cdf_atomic_read(&CE_state->rx_pending))
1730 return true;
1731 }
1732
1733 return false;
1734}
1735
1736/**
1737 * ce_check_rx_pending() - check whether an rx completion is pending on a CE
Komal Seelam644263d2016-02-22 20:45:49 +05301738 * @scn: hif context
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001739 * @ce_id: copy engine id to check
1740 *
1741 * Return: true if the given copy engine has rx work pending, false otherwise
1742 */
Komal Seelam644263d2016-02-22 20:45:49 +05301743bool ce_check_rx_pending(struct hif_softc *scn, int ce_id)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001744{
1745 struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
1746 if (cdf_atomic_read(&CE_state->rx_pending))
1747 return true;
1748 else
1749 return false;
1750}
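/*
 * Example (illustrative sketch): a bus-sleep or suspend policy might poll
 * these helpers before letting the target sleep; returning -EBUSY here to
 * defer the sleep is a hypothetical policy, not driver behaviour.
 *
 *	if (ce_get_rx_pending(scn))
 *		return -EBUSY;
 */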
Houston Hoffman8ed92e52015-09-02 14:49:48 -07001751
1752/**
1753 * ce_enable_msi(): write the msi configuration to the target
1754 * @scn: hif context
1755 * @CE_id: which copy engine will be configured for msi interrupts
1756 * @msi_addr_lo: Hardware will write to this address to generate an interrupt
1757 * @msi_addr_hi: Hardware will write to this address to generate an interrupt
1758 * @msi_data: Hardware will write this data to generate an interrupt
1759 *
1760 * Should be called during the initialization sequence, so no locking is needed.
1761 */
Komal Seelam644263d2016-02-22 20:45:49 +05301762void ce_enable_msi(struct hif_softc *scn, unsigned int CE_id,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001763 uint32_t msi_addr_lo, uint32_t msi_addr_hi,
1764 uint32_t msi_data)
1765{
1766#ifdef WLAN_ENABLE_QCA6180
1767 struct CE_state *CE_state;
1768 A_target_id_t targid;
1769 u_int32_t ctrl_addr;
1770 uint32_t tmp;
1771
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001772 CE_state = scn->ce_id_to_state[CE_id];
1773 if (!CE_state) {
1774 HIF_ERROR("%s: error - CE_state = NULL", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001775 return;
1776 }
1777	targid = TARGID(scn);
1778 ctrl_addr = CE_state->ctrl_addr;
1779 CE_MSI_ADDR_LOW_SET(scn, ctrl_addr, msi_addr_lo);
1780 CE_MSI_ADDR_HIGH_SET(scn, ctrl_addr, msi_addr_hi);
1781 CE_MSI_DATA_SET(scn, ctrl_addr, msi_data);
1782 tmp = CE_CTRL_REGISTER1_GET(scn, ctrl_addr);
1783 tmp |= (1 << CE_MSI_ENABLE_BIT);
1784 CE_CTRL_REGISTER1_SET(scn, ctrl_addr, tmp);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001785#endif
1786}
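/*
 * Example (illustrative sketch): the MSI address and data values are
 * hypothetical and would normally come from the platform's MSI allocation.
 * On builds without WLAN_ENABLE_QCA6180 the body above compiles out and the
 * call is a no-op.
 *
 *	ce_enable_msi(scn, CE_id, msi_addr_lo, msi_addr_hi, msi_data);
 */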
1787
1788#ifdef IPA_OFFLOAD
Leo Changd85f78d2015-11-13 10:55:34 -08001789/**
1790 * ce_ipa_get_resource() - get uc resource on copyengine
1791 * @ce: copyengine context
1792 * @ce_sr_base_paddr: copyengine source ring base physical address
1793 * @ce_sr_ring_size: copyengine source ring size
1794 * @ce_reg_paddr: copyengine register physical address
1795 *
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001796 * The copy engine releases these resources to the micro controller.
1797 * The micro controller needs:
Leo Changd85f78d2015-11-13 10:55:34 -08001798 * - Copy engine source descriptor base address
1799 * - Copy engine source descriptor size
1800 *  - PCI BAR address to access the copy engine register
1801 *
1802 * Return: None
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001803 */
1804void ce_ipa_get_resource(struct CE_handle *ce,
Leo Changd85f78d2015-11-13 10:55:34 -08001805 cdf_dma_addr_t *ce_sr_base_paddr,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001806 uint32_t *ce_sr_ring_size,
1807 cdf_dma_addr_t *ce_reg_paddr)
1808{
1809 struct CE_state *CE_state = (struct CE_state *)ce;
1810 uint32_t ring_loop;
1811 struct CE_src_desc *ce_desc;
1812 cdf_dma_addr_t phy_mem_base;
Komal Seelam644263d2016-02-22 20:45:49 +05301813 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001814
1815 if (CE_RUNNING != CE_state->state) {
1816 *ce_sr_base_paddr = 0;
1817 *ce_sr_ring_size = 0;
1818 return;
1819 }
1820
1821 /* Update default value for descriptor */
1822 for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
1823 ring_loop++) {
1824 ce_desc = (struct CE_src_desc *)
1825 ((char *)CE_state->src_ring->base_addr_owner_space +
1826 ring_loop * (sizeof(struct CE_src_desc)));
1827 CE_IPA_RING_INIT(ce_desc);
1828 }
1829
1830 /* Get BAR address */
1831 hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
1832
Leo Changd85f78d2015-11-13 10:55:34 -08001833 *ce_sr_base_paddr = CE_state->src_ring->base_addr_CE_space;
1834 *ce_sr_ring_size = (uint32_t) (CE_state->src_ring->nentries *
1835 sizeof(struct CE_src_desc));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001836 *ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
1837 SR_WR_INDEX_ADDRESS;
1838 return;
1839}
1840#endif /* IPA_OFFLOAD */
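/*
 * Example (illustrative sketch, IPA_OFFLOAD builds only; the local variable
 * names are hypothetical):
 *
 *	cdf_dma_addr_t sr_base, reg_base;
 *	uint32_t sr_size;
 *
 *	ce_ipa_get_resource(ce_hdl, &sr_base, &sr_size, &reg_base);
 *
 * The outputs are exactly what the micro controller needs: the source ring
 * base address, the source ring size in bytes, and the physical address of
 * this CE's source ring write index register.
 */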
1841