/*
 * Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <osdep.h>
#include "a_types.h"
#include <athdefs.h>
#include "osapi_linux.h"
#include "hif.h"
#include "hif_io32.h"
#include "ce_api.h"
#include "ce_main.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "cdf_lock.h"
#include "regtable.h"
#include "epping_main.h"
#include "hif_main.h"
#include "hif_debug.h"
#include "cds_concurrency.h"

45#ifdef IPA_OFFLOAD
46#ifdef QCA_WIFI_3_0
47#define CE_IPA_RING_INIT(ce_desc) \
48 do { \
49 ce_desc->gather = 0; \
50 ce_desc->enable_11h = 0; \
51 ce_desc->meta_data_low = 0; \
52 ce_desc->packet_result_offset = 64; \
53 ce_desc->toeplitz_hash_enable = 0; \
54 ce_desc->addr_y_search_disable = 0; \
55 ce_desc->addr_x_search_disable = 0; \
56 ce_desc->misc_int_disable = 0; \
57 ce_desc->target_int_disable = 0; \
58 ce_desc->host_int_disable = 0; \
59 ce_desc->dest_byte_swap = 0; \
60 ce_desc->byte_swap = 0; \
61 ce_desc->type = 2; \
62 ce_desc->tx_classify = 1; \
63 ce_desc->buffer_addr_hi = 0; \
64 ce_desc->meta_data = 0; \
65 ce_desc->nbytes = 128; \
66 } while (0)
67#else
68#define CE_IPA_RING_INIT(ce_desc) \
69 do { \
70 ce_desc->byte_swap = 0; \
71 ce_desc->nbytes = 60; \
72 ce_desc->gather = 0; \
73 } while (0)
74#endif /* QCA_WIFI_3_0 */
75#endif /* IPA_OFFLOAD */
76
77static int war1_allow_sleep;
78/* io32 write workaround */
79static int hif_ce_war1;
80
/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem -- than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 * For each completed send, the Target also reports the TX hash
 * result (toeplitz_hash_result).
 */
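
/*
 * Illustrative sketch only (not taken from a real caller; names such as
 * my_ce_handle, my_pkt_ctx, my_tx_done_cb and MY_XFER_ID are hypothetical):
 * a typical send-side user of this layer registers a completion callback
 * and then posts buffers:
 *
 *	ce_send_cb_register(my_ce_handle, my_tx_done_cb, my_cb_ctx, 0);
 *	status = ce_send(my_ce_handle, my_pkt_ctx, my_dma_addr,
 *			 my_nbytes, MY_XFER_ID, 0, 0);
 *
 * my_tx_done_cb() is later invoked with my_cb_ctx (the per-CE send
 * context) and my_pkt_ctx (the per-transfer context) once the Target
 * has consumed the descriptor.
 */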
120
121/*
122 * Guts of ce_send, used by both ce_send and ce_sendlist_send.
123 * The caller takes responsibility for any needed locking.
124 */
125int
126ce_completed_send_next_nolock(struct CE_state *CE_state,
127 void **per_CE_contextp,
128 void **per_transfer_contextp,
129 cdf_dma_addr_t *bufferp,
130 unsigned int *nbytesp,
131 unsigned int *transfer_idp,
132 unsigned int *sw_idx, unsigned int *hw_idx,
133 uint32_t *toeplitz_hash_result);
134
135void war_ce_src_ring_write_idx_set(struct ol_softc *scn,
136 u32 ctrl_addr, unsigned int write_index)
137{
138 if (hif_ce_war1) {
139 void __iomem *indicator_addr;
140
141 indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;
142
143 if (!war1_allow_sleep
144 && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
145 hif_write32_mb(indicator_addr,
146 (CDC_WAR_MAGIC_STR | write_index));
147 } else {
148 unsigned long irq_flags;
149 local_irq_save(irq_flags);
150 hif_write32_mb(indicator_addr, 1);
151
152 /*
153 * PCIE write waits for ACK in IPQ8K, there is no
154 * need to read back value.
155 */
156 (void)hif_read32_mb(indicator_addr);
157 (void)hif_read32_mb(indicator_addr); /* conservative */
158
159 CE_SRC_RING_WRITE_IDX_SET(scn,
160 ctrl_addr, write_index);
161
162 hif_write32_mb(indicator_addr, 0);
163 local_irq_restore(irq_flags);
164 }
165 } else
166 CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
167}
168
169int
170ce_send_nolock(struct CE_handle *copyeng,
171 void *per_transfer_context,
172 cdf_dma_addr_t buffer,
173 uint32_t nbytes,
174 uint32_t transfer_id,
175 uint32_t flags,
176 uint32_t user_flags)
177{
178 int status;
179 struct CE_state *CE_state = (struct CE_state *)copyeng;
180 struct CE_ring_state *src_ring = CE_state->src_ring;
181 uint32_t ctrl_addr = CE_state->ctrl_addr;
182 unsigned int nentries_mask = src_ring->nentries_mask;
183 unsigned int sw_index = src_ring->sw_index;
184 unsigned int write_index = src_ring->write_index;
185 uint64_t dma_addr = buffer;
186 struct ol_softc *scn = CE_state->scn;
187
188 A_TARGET_ACCESS_BEGIN_RET(scn);
189 if (unlikely(CE_RING_DELTA(nentries_mask,
190 write_index, sw_index - 1) <= 0)) {
191 OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
192 status = CDF_STATUS_E_FAILURE;
193 A_TARGET_ACCESS_END_RET(scn);
194 return status;
195 }
196 {
197 struct CE_src_desc *src_ring_base =
198 (struct CE_src_desc *)src_ring->base_addr_owner_space;
199 struct CE_src_desc *shadow_base =
200 (struct CE_src_desc *)src_ring->shadow_base;
201 struct CE_src_desc *src_desc =
202 CE_SRC_RING_TO_DESC(src_ring_base, write_index);
203 struct CE_src_desc *shadow_src_desc =
204 CE_SRC_RING_TO_DESC(shadow_base, write_index);
205
206 /* Update low 32 bits source descriptor address */
207 shadow_src_desc->buffer_addr =
208 (uint32_t)(dma_addr & 0xFFFFFFFF);
209#ifdef QCA_WIFI_3_0
210 shadow_src_desc->buffer_addr_hi =
211 (uint32_t)((dma_addr >> 32) & 0x1F);
212 user_flags |= shadow_src_desc->buffer_addr_hi;
213 memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
214 sizeof(uint32_t));
215#endif
216 shadow_src_desc->meta_data = transfer_id;
217
218 /*
219 * Set the swap bit if:
220 * typical sends on this CE are swapped (host is big-endian)
221 * and this send doesn't disable the swapping
222 * (data is not bytestream)
223 */
224 shadow_src_desc->byte_swap =
225 (((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
226 != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
227 shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
228 shadow_src_desc->nbytes = nbytes;
229
230 *src_desc = *shadow_src_desc;
231
232 src_ring->per_transfer_context[write_index] =
233 per_transfer_context;
234
235 /* Update Source Ring Write Index */
236 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
237
238 /* WORKAROUND */
239 if (!shadow_src_desc->gather) {
240 war_ce_src_ring_write_idx_set(scn, ctrl_addr,
241 write_index);
242 }
243
244 src_ring->write_index = write_index;
245 status = CDF_STATUS_SUCCESS;
246 }
247 A_TARGET_ACCESS_END_RET(scn);
248
249 return status;
250}
251
252int
253ce_send(struct CE_handle *copyeng,
254 void *per_transfer_context,
255 cdf_dma_addr_t buffer,
256 uint32_t nbytes,
257 uint32_t transfer_id,
258 uint32_t flags,
259 uint32_t user_flag)
260{
261 struct CE_state *CE_state = (struct CE_state *)copyeng;
262 int status;
263
	cdf_spin_lock_bh(&CE_state->ce_index_lock);
	status = ce_send_nolock(copyeng, per_transfer_context, buffer, nbytes,
			transfer_id, flags, user_flag);
	cdf_spin_unlock_bh(&CE_state->ce_index_lock);

269 return status;
270}
271
272unsigned int ce_sendlist_sizeof(void)
273{
274 return sizeof(struct ce_sendlist);
275}
276
277void ce_sendlist_init(struct ce_sendlist *sendlist)
278{
279 struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
280 sl->num_items = 0;
281}
282
283int
284ce_sendlist_buf_add(struct ce_sendlist *sendlist,
285 cdf_dma_addr_t buffer,
286 uint32_t nbytes,
287 uint32_t flags,
288 uint32_t user_flags)
289{
290 struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
291 unsigned int num_items = sl->num_items;
292 struct ce_sendlist_item *item;
293
294 if (num_items >= CE_SENDLIST_ITEMS_MAX) {
295 CDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
296 return CDF_STATUS_E_RESOURCES;
297 }
298
299 item = &sl->item[num_items];
300 item->send_type = CE_SIMPLE_BUFFER_TYPE;
301 item->data = buffer;
302 item->u.nbytes = nbytes;
303 item->flags = flags;
304 item->user_flags = user_flags;
305 sl->num_items = num_items + 1;
306 return CDF_STATUS_SUCCESS;
307}
308
309int
310ce_sendlist_send(struct CE_handle *copyeng,
311 void *per_transfer_context,
312 struct ce_sendlist *sendlist, unsigned int transfer_id)
313{
314 int status = -ENOMEM;
315 struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
316 struct CE_state *CE_state = (struct CE_state *)copyeng;
317 struct CE_ring_state *src_ring = CE_state->src_ring;
318 unsigned int nentries_mask = src_ring->nentries_mask;
319 unsigned int num_items = sl->num_items;
320 unsigned int sw_index;
321 unsigned int write_index;
322
323 CDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));
324
	cdf_spin_lock_bh(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
327 write_index = src_ring->write_index;
328
329 if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
330 num_items) {
331 struct ce_sendlist_item *item;
332 int i;
333
334 /* handle all but the last item uniformly */
335 for (i = 0; i < num_items - 1; i++) {
336 item = &sl->item[i];
337 /* TBDXXX: Support extensible sendlist_types? */
338 CDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
339 status = ce_send_nolock(copyeng, CE_SENDLIST_ITEM_CTXT,
340 (cdf_dma_addr_t) item->data,
341 item->u.nbytes, transfer_id,
342 item->flags | CE_SEND_FLAG_GATHER,
343 item->user_flags);
344 CDF_ASSERT(status == CDF_STATUS_SUCCESS);
345 }
346 /* provide valid context pointer for final item */
347 item = &sl->item[i];
348 /* TBDXXX: Support extensible sendlist_types? */
349 CDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
350 status = ce_send_nolock(copyeng, per_transfer_context,
351 (cdf_dma_addr_t) item->data,
352 item->u.nbytes,
353 transfer_id, item->flags,
354 item->user_flags);
355 CDF_ASSERT(status == CDF_STATUS_SUCCESS);
356 NBUF_UPDATE_TX_PKT_COUNT((cdf_nbuf_t)per_transfer_context,
357 NBUF_TX_PKT_CE);
358 DPTRACE(cdf_dp_trace((cdf_nbuf_t)per_transfer_context,
359 CDF_DP_TRACE_CE_PACKET_PTR_RECORD,
360 (uint8_t *)(((cdf_nbuf_t)per_transfer_context)->data),
361 sizeof(((cdf_nbuf_t)per_transfer_context)->data)));
362 } else {
363 /*
364 * Probably not worth the additional complexity to support
365 * partial sends with continuation or notification. We expect
366 * to use large rings and small sendlists. If we can't handle
367 * the entire request at once, punt it back to the caller.
368 */
369 }
	cdf_spin_unlock_bh(&CE_state->ce_index_lock);

372 return status;
373}
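
/*
 * Illustrative sketch of the sendlist path (hypothetical names such as
 * hdr_paddr/payload_paddr; not taken from a real caller): a two-fragment
 * send that the hardware gathers into one destination buffer:
 *
 *	struct ce_sendlist sl;
 *
 *	ce_sendlist_init(&sl);
 *	ce_sendlist_buf_add(&sl, hdr_paddr, hdr_len, 0, 0);
 *	ce_sendlist_buf_add(&sl, payload_paddr, payload_len, 0, 0);
 *	status = ce_sendlist_send(ce_hdl, my_pkt_ctx, &sl, MY_XFER_ID);
 *
 * All items except the last are posted with CE_SEND_FLAG_GATHER by
 * ce_sendlist_send() itself, so the caller does not need to set it.
 */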
374
375#ifdef WLAN_FEATURE_FASTPATH
376#ifdef QCA_WIFI_3_0
377static inline void
378ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
379 uint64_t dma_addr,
380 uint32_t user_flags)
381{
382 shadow_src_desc->buffer_addr_hi =
383 (uint32_t)((dma_addr >> 32) & 0x1F);
384 user_flags |= shadow_src_desc->buffer_addr_hi;
385 memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
386 sizeof(uint32_t));
387}
388#else
389static inline void
390ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
391 uint64_t dma_addr,
392 uint32_t user_flags)
393{
394}
395#endif
396
/**
 * ce_send_fast() - CE layer Tx buffer posting function
 * @copyeng: copy engine handle
 * @msdus: array of msdus to be sent
 * @num_msdus: number of msdus in the array
 * @transfer_id: transfer_id
 *
 * Assumption: called with an array of MSDUs.
 * Function:
 * For each msdu in the array
 * 1. Check the number of available entries
 * 2. Create source ring entries (allocated in consistent memory)
 * 3. Write the index to h/w
 *
 * Return: number of packets that could be sent
 */
413
414int ce_send_fast(struct CE_handle *copyeng, cdf_nbuf_t *msdus,
415 unsigned int num_msdus, unsigned int transfer_id)
416{
417 struct CE_state *ce_state = (struct CE_state *)copyeng;
418 struct ol_softc *scn = ce_state->scn;
419 struct CE_ring_state *src_ring = ce_state->src_ring;
420 u_int32_t ctrl_addr = ce_state->ctrl_addr;
421 unsigned int nentries_mask = src_ring->nentries_mask;
422 unsigned int write_index;
423 unsigned int sw_index;
424 unsigned int frag_len;
425 cdf_nbuf_t msdu;
426 int i;
427 uint64_t dma_addr;
428 uint32_t user_flags = 0;
429
	cdf_spin_lock_bh(&ce_state->ce_index_lock);
	sw_index = src_ring->sw_index;
432 write_index = src_ring->write_index;
433
434 /* 2 msdus per packet */
435 for (i = 0; i < num_msdus; i++) {
436 struct CE_src_desc *src_ring_base =
437 (struct CE_src_desc *)src_ring->base_addr_owner_space;
438 struct CE_src_desc *shadow_base =
439 (struct CE_src_desc *)src_ring->shadow_base;
440 struct CE_src_desc *src_desc =
441 CE_SRC_RING_TO_DESC(src_ring_base, write_index);
442 struct CE_src_desc *shadow_src_desc =
443 CE_SRC_RING_TO_DESC(shadow_base, write_index);
444
445 msdu = msdus[i];
446
447 /*
448 * First fill out the ring descriptor for the HTC HTT frame
449 * header. These are uncached writes. Should we use a local
450 * structure instead?
451 */
		/* HTT/HTC header can be passed as an argument */
453 dma_addr = cdf_nbuf_get_frag_paddr_lo(msdu, 0);
454 shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
455 0xFFFFFFFF);
456 user_flags = cdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
457 ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
458
459 shadow_src_desc->meta_data = transfer_id;
460 shadow_src_desc->nbytes = cdf_nbuf_get_frag_len(msdu, 0);
461
462 /*
463 * HTC HTT header is a word stream, so byte swap if CE byte
464 * swap enabled
465 */
466 shadow_src_desc->byte_swap = ((ce_state->attr_flags &
467 CE_ATTR_BYTE_SWAP_DATA) != 0);
		/* For the first (gather) fragment, no write index update is needed yet */
469 shadow_src_desc->gather = 1;
470 *src_desc = *shadow_src_desc;
471
472 /* By default we could initialize the transfer context to this
473 * value
474 */
475 src_ring->per_transfer_context[write_index] =
476 CE_SENDLIST_ITEM_CTXT;
477
478 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
479
480 src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
481 shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
482 /*
483 * Now fill out the ring descriptor for the actual data
484 * packet
485 */
486 dma_addr = cdf_nbuf_get_frag_paddr_lo(msdu, 1);
487 shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
488 0xFFFFFFFF);
489 /*
490 * Clear packet offset for all but the first CE desc.
491 */
492 user_flags &= ~CDF_CE_TX_PKT_OFFSET_BIT_M;
493 ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
494 shadow_src_desc->meta_data = transfer_id;
495
496 /* get actual packet length */
497 frag_len = cdf_nbuf_get_frag_len(msdu, 1);

		/* only read download_len once */
		shadow_src_desc->nbytes = ce_state->download_len;
		if (shadow_src_desc->nbytes > frag_len)
			shadow_src_desc->nbytes = frag_len;

504 /* Data packet is a byte stream, so disable byte swap */
505 shadow_src_desc->byte_swap = 0;
506 /* For the last one, gather is not set */
507 shadow_src_desc->gather = 0;
508 *src_desc = *shadow_src_desc;
509 src_ring->per_transfer_context[write_index] = msdu;
510 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
511 }
512
513 /* Write the final index to h/w one-shot */
514 if (i) {
515 src_ring->write_index = write_index;
		/* Don't call WAR_XXX from here
		 * Just call XXX instead, that has the required logic
		 */
519 war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
520 }
521
	cdf_spin_unlock_bh(&ce_state->ce_index_lock);

524 /*
525 * If all packets in the array are transmitted,
526 * i = num_msdus
527 * Temporarily add an ASSERT
528 */
529 ASSERT(i == num_msdus);
530 return i;
531}
532#endif /* WLAN_FEATURE_FASTPATH */
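
/*
 * Illustrative sketch for the fastpath Tx API above (hypothetical names;
 * the real callers live in the HTT/HTC layers): each nbuf is expected to
 * carry two fragments, the HTC/HTT header at fragment 0 and the data
 * payload at fragment 1:
 *
 *	cdf_nbuf_t pkts[MY_BURST];
 *	unsigned int num_pkts;
 *
 *	... map fragments and fill pkts[] ...
 *	sent = ce_send_fast(ce_hdl, pkts, num_pkts, MY_XFER_ID);
 *
 * ce_send_fast() returns the number of packets actually posted; with the
 * temporary ASSERT above this is expected to equal num_pkts.
 */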
533
534int
535ce_recv_buf_enqueue(struct CE_handle *copyeng,
536 void *per_recv_context, cdf_dma_addr_t buffer)
537{
538 int status;
539 struct CE_state *CE_state = (struct CE_state *)copyeng;
540 struct CE_ring_state *dest_ring = CE_state->dest_ring;
541 uint32_t ctrl_addr = CE_state->ctrl_addr;
542 unsigned int nentries_mask = dest_ring->nentries_mask;
543 unsigned int write_index;
544 unsigned int sw_index;
545 int val = 0;
546 uint64_t dma_addr = buffer;
547 struct ol_softc *scn = CE_state->scn;
548
	cdf_spin_lock_bh(&CE_state->ce_index_lock);
	write_index = dest_ring->write_index;
551 sw_index = dest_ring->sw_index;
552
553 A_TARGET_ACCESS_BEGIN_RET_EXT(scn, val);
554 if (val == -1) {
		cdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return val;
557 }
558
559 if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
560 struct CE_dest_desc *dest_ring_base =
561 (struct CE_dest_desc *)dest_ring->
562 base_addr_owner_space;
563 struct CE_dest_desc *dest_desc =
564 CE_DEST_RING_TO_DESC(dest_ring_base, write_index);
565
566 /* Update low 32 bit destination descriptor */
567 dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
568#ifdef QCA_WIFI_3_0
569 dest_desc->buffer_addr_hi =
570 (uint32_t)((dma_addr >> 32) & 0x1F);
571#endif
572 dest_desc->nbytes = 0;
573
574 dest_ring->per_transfer_context[write_index] =
575 per_recv_context;
576
577 /* Update Destination Ring Write Index */
578 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
579 CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
580 dest_ring->write_index = write_index;
581 status = CDF_STATUS_SUCCESS;
582 } else {
583 status = CDF_STATUS_E_FAILURE;
584 }
585 A_TARGET_ACCESS_END_RET_EXT(scn, val);
586 if (val == -1) {
		cdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return val;
589 }
590
	cdf_spin_unlock_bh(&CE_state->ce_index_lock);

593 return status;
594}
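
/*
 * Illustrative sketch of the receive side (hypothetical names; the real
 * users are in the HIF pipe layer): the destination ring is kept filled
 * with anonymous receive buffers, and completions are drained via the
 * registered recv callback or by polling:
 *
 *	while (ce_recv_entries_avail(ce_hdl) > 0 &&
 *	       (nbuf = my_alloc_rx_nbuf()) != NULL)
 *		ce_recv_buf_enqueue(ce_hdl, nbuf, my_nbuf_paddr(nbuf));
 *
 * Each completed buffer is later returned through ce_completed_recv_next()
 * together with the per-transfer context (here, the nbuf pointer).
 */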
595
596void
597ce_send_watermarks_set(struct CE_handle *copyeng,
598 unsigned int low_alert_nentries,
599 unsigned int high_alert_nentries)
600{
601 struct CE_state *CE_state = (struct CE_state *)copyeng;
602 uint32_t ctrl_addr = CE_state->ctrl_addr;
603 struct ol_softc *scn = CE_state->scn;
604
	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
}
608
609void
610ce_recv_watermarks_set(struct CE_handle *copyeng,
611 unsigned int low_alert_nentries,
612 unsigned int high_alert_nentries)
613{
614 struct CE_state *CE_state = (struct CE_state *)copyeng;
615 uint32_t ctrl_addr = CE_state->ctrl_addr;
616 struct ol_softc *scn = CE_state->scn;
617
	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
				low_alert_nentries);
	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
				high_alert_nentries);
}
623
624unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
625{
626 struct CE_state *CE_state = (struct CE_state *)copyeng;
627 struct CE_ring_state *src_ring = CE_state->src_ring;
628 unsigned int nentries_mask = src_ring->nentries_mask;
629 unsigned int sw_index;
630 unsigned int write_index;
631
	cdf_spin_lock(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;
	cdf_spin_unlock(&CE_state->ce_index_lock);

637 return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
638}
639
640unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
641{
642 struct CE_state *CE_state = (struct CE_state *)copyeng;
643 struct CE_ring_state *dest_ring = CE_state->dest_ring;
644 unsigned int nentries_mask = dest_ring->nentries_mask;
645 unsigned int sw_index;
646 unsigned int write_index;
647
	cdf_spin_lock(&CE_state->ce_index_lock);
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	cdf_spin_unlock(&CE_state->ce_index_lock);

653 return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
654}
655
656/*
657 * Guts of ce_send_entries_done.
658 * The caller takes responsibility for any necessary locking.
659 */
660unsigned int
661ce_send_entries_done_nolock(struct ol_softc *scn,
662 struct CE_state *CE_state)
663{
664 struct CE_ring_state *src_ring = CE_state->src_ring;
665 uint32_t ctrl_addr = CE_state->ctrl_addr;
666 unsigned int nentries_mask = src_ring->nentries_mask;
667 unsigned int sw_index;
668 unsigned int read_index;
669
670 sw_index = src_ring->sw_index;
671 read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
672
673 return CE_RING_DELTA(nentries_mask, sw_index, read_index);
674}
675
676unsigned int ce_send_entries_done(struct CE_handle *copyeng)
677{
678 struct CE_state *CE_state = (struct CE_state *)copyeng;
679 unsigned int nentries;
680
	cdf_spin_lock(&CE_state->ce_index_lock);
	nentries = ce_send_entries_done_nolock(CE_state->scn, CE_state);
	cdf_spin_unlock(&CE_state->ce_index_lock);

685 return nentries;
686}
687
688/*
689 * Guts of ce_recv_entries_done.
690 * The caller takes responsibility for any necessary locking.
691 */
692unsigned int
693ce_recv_entries_done_nolock(struct ol_softc *scn,
694 struct CE_state *CE_state)
695{
696 struct CE_ring_state *dest_ring = CE_state->dest_ring;
697 uint32_t ctrl_addr = CE_state->ctrl_addr;
698 unsigned int nentries_mask = dest_ring->nentries_mask;
699 unsigned int sw_index;
700 unsigned int read_index;
701
702 sw_index = dest_ring->sw_index;
703 read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
704
705 return CE_RING_DELTA(nentries_mask, sw_index, read_index);
706}
707
708unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
709{
710 struct CE_state *CE_state = (struct CE_state *)copyeng;
711 unsigned int nentries;
712
	cdf_spin_lock(&CE_state->ce_index_lock);
	nentries = ce_recv_entries_done_nolock(CE_state->scn, CE_state);
	cdf_spin_unlock(&CE_state->ce_index_lock);

717 return nentries;
718}
719
720/* Debug support */
721void *ce_debug_cmplrn_context; /* completed recv next context */
722void *ce_debug_cnclsn_context; /* cancel send next context */
723void *ce_debug_rvkrn_context; /* revoke receive next context */
724void *ce_debug_cmplsn_context; /* completed send next context */
725
726/*
727 * Guts of ce_completed_recv_next.
728 * The caller takes responsibility for any necessary locking.
729 */
730int
731ce_completed_recv_next_nolock(struct CE_state *CE_state,
732 void **per_CE_contextp,
733 void **per_transfer_contextp,
734 cdf_dma_addr_t *bufferp,
735 unsigned int *nbytesp,
736 unsigned int *transfer_idp,
737 unsigned int *flagsp)
738{
739 int status;
740 struct CE_ring_state *dest_ring = CE_state->dest_ring;
741 unsigned int nentries_mask = dest_ring->nentries_mask;
742 unsigned int sw_index = dest_ring->sw_index;
743
744 struct CE_dest_desc *dest_ring_base =
745 (struct CE_dest_desc *)dest_ring->base_addr_owner_space;
746 struct CE_dest_desc *dest_desc =
747 CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
748 int nbytes;
749 struct CE_dest_desc dest_desc_info;
	/*
	 * By copying the dest_desc_info element to local memory, we can
	 * avoid an extra read from non-cacheable memory.
	 */
754 dest_desc_info = *dest_desc;
755 nbytes = dest_desc_info.nbytes;
756 if (nbytes == 0) {
757 /*
758 * This closes a relatively unusual race where the Host
759 * sees the updated DRRI before the update to the
760 * corresponding descriptor has completed. We treat this
761 * as a descriptor that is not yet done.
762 */
763 status = CDF_STATUS_E_FAILURE;
764 goto done;
765 }
766
767 dest_desc->nbytes = 0;
768
769 /* Return data from completed destination descriptor */
770 *bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
771 *nbytesp = nbytes;
772 *transfer_idp = dest_desc_info.meta_data;
773 *flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;
774
775 if (per_CE_contextp) {
776 *per_CE_contextp = CE_state->recv_context;
777 }
778
779 ce_debug_cmplrn_context = dest_ring->per_transfer_context[sw_index];
780 if (per_transfer_contextp) {
781 *per_transfer_contextp = ce_debug_cmplrn_context;
782 }
783 dest_ring->per_transfer_context[sw_index] = 0; /* sanity */
784
785 /* Update sw_index */
786 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
787 dest_ring->sw_index = sw_index;
788 status = CDF_STATUS_SUCCESS;
789
790done:
791 return status;
792}
793
794int
795ce_completed_recv_next(struct CE_handle *copyeng,
796 void **per_CE_contextp,
797 void **per_transfer_contextp,
798 cdf_dma_addr_t *bufferp,
799 unsigned int *nbytesp,
800 unsigned int *transfer_idp, unsigned int *flagsp)
801{
802 struct CE_state *CE_state = (struct CE_state *)copyeng;
803 int status;
804
	cdf_spin_lock_bh(&CE_state->ce_index_lock);
	status =
		ce_completed_recv_next_nolock(CE_state, per_CE_contextp,
					      per_transfer_contextp, bufferp,
					      nbytesp, transfer_idp, flagsp);
	cdf_spin_unlock_bh(&CE_state->ce_index_lock);

812 return status;
813}
814
815/* NB: Modeled after ce_completed_recv_next_nolock */
816CDF_STATUS
817ce_revoke_recv_next(struct CE_handle *copyeng,
818 void **per_CE_contextp,
819 void **per_transfer_contextp, cdf_dma_addr_t *bufferp)
820{
821 struct CE_state *CE_state;
822 struct CE_ring_state *dest_ring;
823 unsigned int nentries_mask;
824 unsigned int sw_index;
825 unsigned int write_index;
826 CDF_STATUS status;
827 struct ol_softc *scn;
828
829 CE_state = (struct CE_state *)copyeng;
830 dest_ring = CE_state->dest_ring;
831 if (!dest_ring) {
832 return CDF_STATUS_E_FAILURE;
833 }
834
835 scn = CE_state->scn;
	cdf_spin_lock(&CE_state->ce_index_lock);
	nentries_mask = dest_ring->nentries_mask;
838 sw_index = dest_ring->sw_index;
839 write_index = dest_ring->write_index;
840 if (write_index != sw_index) {
841 struct CE_dest_desc *dest_ring_base =
842 (struct CE_dest_desc *)dest_ring->
843 base_addr_owner_space;
844 struct CE_dest_desc *dest_desc =
845 CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
846
847 /* Return data from completed destination descriptor */
848 *bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);
849
850 if (per_CE_contextp) {
851 *per_CE_contextp = CE_state->recv_context;
852 }
853
854 ce_debug_rvkrn_context =
855 dest_ring->per_transfer_context[sw_index];
856 if (per_transfer_contextp) {
857 *per_transfer_contextp = ce_debug_rvkrn_context;
858 }
859 dest_ring->per_transfer_context[sw_index] = 0; /* sanity */
860
861 /* Update sw_index */
862 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
863 dest_ring->sw_index = sw_index;
864 status = CDF_STATUS_SUCCESS;
865 } else {
866 status = CDF_STATUS_E_FAILURE;
867 }
	cdf_spin_unlock(&CE_state->ce_index_lock);

870 return status;
871}
872
873/*
874 * Guts of ce_completed_send_next.
875 * The caller takes responsibility for any necessary locking.
876 */
877int
878ce_completed_send_next_nolock(struct CE_state *CE_state,
879 void **per_CE_contextp,
880 void **per_transfer_contextp,
881 cdf_dma_addr_t *bufferp,
882 unsigned int *nbytesp,
883 unsigned int *transfer_idp,
884 unsigned int *sw_idx,
885 unsigned int *hw_idx,
886 uint32_t *toeplitz_hash_result)
887{
888 int status = CDF_STATUS_E_FAILURE;
889 struct CE_ring_state *src_ring = CE_state->src_ring;
890 uint32_t ctrl_addr = CE_state->ctrl_addr;
891 unsigned int nentries_mask = src_ring->nentries_mask;
892 unsigned int sw_index = src_ring->sw_index;
893 unsigned int read_index;
894 struct ol_softc *scn = CE_state->scn;
895
896 if (src_ring->hw_index == sw_index) {
897 /*
898 * The SW completion index has caught up with the cached
899 * version of the HW completion index.
900 * Update the cached HW completion index to see whether
901 * the SW has really caught up to the HW, or if the cached
902 * value of the HW index has become stale.
903 */
904 A_TARGET_ACCESS_BEGIN_RET(scn);
905 src_ring->hw_index =
906 CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
907 A_TARGET_ACCESS_END_RET(scn);
908 }
909 read_index = src_ring->hw_index;
910
911 if (sw_idx)
912 *sw_idx = sw_index;
913
914 if (hw_idx)
915 *hw_idx = read_index;
916
917 if ((read_index != sw_index) && (read_index != 0xffffffff)) {
918 struct CE_src_desc *shadow_base =
919 (struct CE_src_desc *)src_ring->shadow_base;
920 struct CE_src_desc *shadow_src_desc =
921 CE_SRC_RING_TO_DESC(shadow_base, sw_index);
922#ifdef QCA_WIFI_3_0
923 struct CE_src_desc *src_ring_base =
924 (struct CE_src_desc *)src_ring->base_addr_owner_space;
925 struct CE_src_desc *src_desc =
926 CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
927#endif
928 /* Return data from completed source descriptor */
929 *bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
930 *nbytesp = shadow_src_desc->nbytes;
931 *transfer_idp = shadow_src_desc->meta_data;
932#ifdef QCA_WIFI_3_0
933 *toeplitz_hash_result = src_desc->toeplitz_hash_result;
934#else
935 *toeplitz_hash_result = 0;
936#endif
937 if (per_CE_contextp) {
938 *per_CE_contextp = CE_state->send_context;
939 }
940
941 ce_debug_cmplsn_context =
942 src_ring->per_transfer_context[sw_index];
943 if (per_transfer_contextp) {
944 *per_transfer_contextp = ce_debug_cmplsn_context;
945 }
946 src_ring->per_transfer_context[sw_index] = 0; /* sanity */
947
948 /* Update sw_index */
949 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
950 src_ring->sw_index = sw_index;
951 status = CDF_STATUS_SUCCESS;
952 }
953
954 return status;
955}
956
957/* NB: Modeled after ce_completed_send_next */
958CDF_STATUS
959ce_cancel_send_next(struct CE_handle *copyeng,
960 void **per_CE_contextp,
961 void **per_transfer_contextp,
962 cdf_dma_addr_t *bufferp,
963 unsigned int *nbytesp,
964 unsigned int *transfer_idp,
965 uint32_t *toeplitz_hash_result)
966{
967 struct CE_state *CE_state;
968 struct CE_ring_state *src_ring;
969 unsigned int nentries_mask;
970 unsigned int sw_index;
971 unsigned int write_index;
972 CDF_STATUS status;
973 struct ol_softc *scn;
974
975 CE_state = (struct CE_state *)copyeng;
976 src_ring = CE_state->src_ring;
977 if (!src_ring) {
978 return CDF_STATUS_E_FAILURE;
979 }
980
981 scn = CE_state->scn;
	cdf_spin_lock(&CE_state->ce_index_lock);
	nentries_mask = src_ring->nentries_mask;
984 sw_index = src_ring->sw_index;
985 write_index = src_ring->write_index;
986
987 if (write_index != sw_index) {
988 struct CE_src_desc *src_ring_base =
989 (struct CE_src_desc *)src_ring->base_addr_owner_space;
990 struct CE_src_desc *src_desc =
991 CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
992
993 /* Return data from completed source descriptor */
994 *bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
995 *nbytesp = src_desc->nbytes;
996 *transfer_idp = src_desc->meta_data;
997#ifdef QCA_WIFI_3_0
998 *toeplitz_hash_result = src_desc->toeplitz_hash_result;
999#else
1000 *toeplitz_hash_result = 0;
1001#endif
1002
1003 if (per_CE_contextp) {
1004 *per_CE_contextp = CE_state->send_context;
1005 }
1006
1007 ce_debug_cnclsn_context =
1008 src_ring->per_transfer_context[sw_index];
1009 if (per_transfer_contextp) {
1010 *per_transfer_contextp = ce_debug_cnclsn_context;
1011 }
1012 src_ring->per_transfer_context[sw_index] = 0; /* sanity */
1013
1014 /* Update sw_index */
1015 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1016 src_ring->sw_index = sw_index;
1017 status = CDF_STATUS_SUCCESS;
1018 } else {
1019 status = CDF_STATUS_E_FAILURE;
1020 }
	cdf_spin_unlock(&CE_state->ce_index_lock);

1023 return status;
1024}
1025
1026/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
1027#define CE_WM_SHFT 1
1028
1029int
1030ce_completed_send_next(struct CE_handle *copyeng,
1031 void **per_CE_contextp,
1032 void **per_transfer_contextp,
1033 cdf_dma_addr_t *bufferp,
1034 unsigned int *nbytesp,
1035 unsigned int *transfer_idp,
1036 unsigned int *sw_idx,
1037 unsigned int *hw_idx,
1038 unsigned int *toeplitz_hash_result)
1039{
1040 struct CE_state *CE_state = (struct CE_state *)copyeng;
1041 int status;
1042
	cdf_spin_lock_bh(&CE_state->ce_index_lock);
	status =
1045 ce_completed_send_next_nolock(CE_state, per_CE_contextp,
1046 per_transfer_contextp, bufferp,
1047 nbytesp, transfer_idp, sw_idx,
1048 hw_idx, toeplitz_hash_result);
	cdf_spin_unlock_bh(&CE_state->ce_index_lock);

1051 return status;
1052}
1053
#ifdef ATH_11AC_TXCOMPACT
/* CE engine descriptor reap
 * Similar to ce_per_engine_service; the only difference is that
 * ce_per_engine_service handles both receive and reaping of completed
 * descriptors, while this function only reaps Tx completion descriptors.
 * It is called from the threshold reap poll routine
 * hif_send_complete_check, so it must not contain any receive
 * functionality.
 */
1063
1064void ce_per_engine_servicereap(struct ol_softc *scn, unsigned int CE_id)
1065{
1066 void *CE_context;
1067 void *transfer_context;
1068 cdf_dma_addr_t buf;
1069 unsigned int nbytes;
1070 unsigned int id;
1071 unsigned int sw_idx, hw_idx;
1072 uint32_t toeplitz_hash_result;
1073 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1074
1075 A_TARGET_ACCESS_BEGIN(scn);
1076
	/* Since this function is called from both user context and
	 * tasklet context the spinlock has to lock the bottom halves.
	 * This fix assumes that ATH_11AC_TXCOMPACT flag is always
	 * enabled in TX polling mode. If this is not the case, more
	 * bottom-half spin lock changes are needed. Due to data path
	 * performance concerns, after internal discussion we've decided
	 * to make the minimum change, i.e., only address the issue that
	 * occurred in this function. The possible negative effect of this
	 * minimum change is that, in the future, if some other function is
	 * also opened up to user context, those cases will need to be
	 * addressed by changing spin_lock to spin_lock_bh as well.
	 */

	cdf_spin_lock_bh(&CE_state->ce_index_lock);

1092 if (CE_state->send_cb) {
1093 {
1094 /* Pop completed send buffers and call the
1095 * registered send callback for each
1096 */
1097 while (ce_completed_send_next_nolock
1098 (CE_state, &CE_context,
1099 &transfer_context, &buf,
1100 &nbytes, &id, &sw_idx, &hw_idx,
1101 &toeplitz_hash_result) ==
1102 CDF_STATUS_SUCCESS) {
1103 if (CE_id != CE_HTT_H2T_MSG) {
					cdf_spin_unlock_bh(
						&CE_state->ce_index_lock);
					CE_state->send_cb(
						(struct CE_handle *)
						CE_state, CE_context,
						transfer_context, buf,
						nbytes, id, sw_idx, hw_idx,
						toeplitz_hash_result);
					cdf_spin_lock_bh(
						&CE_state->ce_index_lock);
				} else {
1115 struct HIF_CE_pipe_info *pipe_info =
1116 (struct HIF_CE_pipe_info *)
1117 CE_context;
1118
1119 cdf_spin_lock_bh(&pipe_info->
1120 completion_freeq_lock);
1121 pipe_info->num_sends_allowed++;
1122 cdf_spin_unlock_bh(&pipe_info->
1123 completion_freeq_lock);
1124 }
1125 }
1126 }
1127 }
1128
	cdf_spin_unlock_bh(&CE_state->ce_index_lock);
	A_TARGET_ACCESS_END(scn);
1131}
1132
1133#endif /*ATH_11AC_TXCOMPACT */
1134
/*
 * Number of times to check for any pending tx/rx completion on
 * a copy engine; this count should be big enough. Once we hit
 * this threshold we stop checking for any Tx/Rx completion in the same
 * interrupt handling pass. Note that this threshold is currently only
 * used for Rx interrupt processing; it can be used for Tx as well if we
 * suspect an infinite loop in checking for pending Tx completions.
 */
#define CE_TXRX_COMP_CHECK_THRESHOLD 20
1144
1145/*
1146 * Guts of interrupt handler for per-engine interrupts on a particular CE.
1147 *
1148 * Invokes registered callbacks for recv_complete,
1149 * send_complete, and watermarks.
1150 *
1151 * Returns: number of messages processed
1152 */
1153
1154int ce_per_engine_service(struct ol_softc *scn, unsigned int CE_id)
1155{
1156 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1157 uint32_t ctrl_addr = CE_state->ctrl_addr;
1158 void *CE_context;
1159 void *transfer_context;
1160 cdf_dma_addr_t buf;
1161 unsigned int nbytes;
1162 unsigned int id;
1163 unsigned int flags;
1164 uint32_t CE_int_status;
1165 unsigned int more_comp_cnt = 0;
1166 unsigned int more_snd_comp_cnt = 0;
1167 unsigned int sw_idx, hw_idx;
1168 uint32_t toeplitz_hash_result;
1169
1170 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1171 HIF_ERROR("[premature rc=0]\n");
1172 return 0; /* no work done */
1173 }
1174
	cdf_spin_lock(&CE_state->ce_index_lock);

	/* Clear force_break flag and re-initialize receive_count to 0 */

	/* NAPI: scn variables - thread/multi-processing safety? */
	CE_state->receive_count = 0;
	CE_state->force_break = 0;
more_completions:
1183 if (CE_state->recv_cb) {
1184
1185 /* Pop completed recv buffers and call
1186 * the registered recv callback for each
1187 */
1188 while (ce_completed_recv_next_nolock
1189 (CE_state, &CE_context, &transfer_context,
1190 &buf, &nbytes, &id, &flags) ==
1191 CDF_STATUS_SUCCESS) {
			cdf_spin_unlock(&CE_state->ce_index_lock);
			CE_state->recv_cb((struct CE_handle *)CE_state,
1194 CE_context, transfer_context, buf,
1195 nbytes, id, flags);
1196
1197 /*
1198 * EV #112693 -
1199 * [Peregrine][ES1][WB342][Win8x86][Performance]
1200 * BSoD_0x133 occurred in VHT80 UDP_DL
1201 * Break out DPC by force if number of loops in
1202 * hif_pci_ce_recv_data reaches MAX_NUM_OF_RECEIVES
1203 * to avoid spending too long time in
1204 * DPC for each interrupt handling. Schedule another
1205 * DPC to avoid data loss if we had taken
1206 * force-break action before apply to Windows OS
1207 * only currently, Linux/MAC os can expand to their
1208 * platform if necessary
1209 */
1210
1211 /* Break the receive processes by
1212 * force if force_break set up
1213 */
			if (cdf_unlikely(CE_state->force_break)) {
				cdf_atomic_set(&CE_state->rx_pending, 1);
				CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
					HOST_IS_COPY_COMPLETE_MASK);
				if (Q_TARGET_ACCESS_END(scn) < 0)
					HIF_ERROR("<--[premature rc=%d]\n",
						  CE_state->receive_count);
				return CE_state->receive_count;
			}
			cdf_spin_lock(&CE_state->ce_index_lock);
		}
1225 }
1226
1227 /*
1228 * Attention: We may experience potential infinite loop for below
1229 * While Loop during Sending Stress test.
1230 * Resolve the same way as Receive Case (Refer to EV #112693)
1231 */
1232
1233 if (CE_state->send_cb) {
1234 /* Pop completed send buffers and call
1235 * the registered send callback for each
1236 */
1237
1238#ifdef ATH_11AC_TXCOMPACT
1239 while (ce_completed_send_next_nolock
1240 (CE_state, &CE_context,
1241 &transfer_context, &buf, &nbytes,
1242 &id, &sw_idx, &hw_idx,
1243 &toeplitz_hash_result) == CDF_STATUS_SUCCESS) {
1244
1245 if (CE_id != CE_HTT_H2T_MSG ||
1246 WLAN_IS_EPPING_ENABLED(cds_get_conparam())) {
				cdf_spin_unlock(&CE_state->ce_index_lock);
				CE_state->send_cb((struct CE_handle *)CE_state,
						  CE_context, transfer_context,
						  buf, nbytes, id, sw_idx,
						  hw_idx, toeplitz_hash_result);
				cdf_spin_lock(&CE_state->ce_index_lock);
			} else {
1254 struct HIF_CE_pipe_info *pipe_info =
1255 (struct HIF_CE_pipe_info *)CE_context;
1256
1257 cdf_spin_lock(&pipe_info->
1258 completion_freeq_lock);
1259 pipe_info->num_sends_allowed++;
1260 cdf_spin_unlock(&pipe_info->
1261 completion_freeq_lock);
1262 }
1263 }
1264#else /*ATH_11AC_TXCOMPACT */
1265 while (ce_completed_send_next_nolock
1266 (CE_state, &CE_context,
1267 &transfer_context, &buf, &nbytes,
1268 &id, &sw_idx, &hw_idx,
1269 &toeplitz_hash_result) == CDF_STATUS_SUCCESS) {
			cdf_spin_unlock(&CE_state->ce_index_lock);
			CE_state->send_cb((struct CE_handle *)CE_state,
					  CE_context, transfer_context, buf,
					  nbytes, id, sw_idx, hw_idx,
					  toeplitz_hash_result);
			cdf_spin_lock(&CE_state->ce_index_lock);
		}
1277#endif /*ATH_11AC_TXCOMPACT */
1278 }
1279
1280more_watermarks:
1281 if (CE_state->misc_cbs) {
1282 CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
1283 if (CE_int_status & CE_WATERMARK_MASK) {
1284 if (CE_state->watermark_cb) {
				cdf_spin_unlock(&CE_state->ce_index_lock);
				/* Convert HW IS bits to software flags */
				flags =
					(CE_int_status & CE_WATERMARK_MASK) >>
					CE_WM_SHFT;

				CE_state->
				watermark_cb((struct CE_handle *)CE_state,
					     CE_state->wm_context, flags);
				cdf_spin_lock(&CE_state->ce_index_lock);
			}
1296 }
1297 }
1298
1299 /*
1300 * Clear the misc interrupts (watermark) that were handled above,
1301 * and that will be checked again below.
1302 * Clear and check for copy-complete interrupts again, just in case
1303 * more copy completions happened while the misc interrupts were being
1304 * handled.
1305 */
1306 CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1307 CE_WATERMARK_MASK |
1308 HOST_IS_COPY_COMPLETE_MASK);
1309
	/*
	 * Now that per-engine interrupts are cleared, verify that
	 * no recv interrupts arrive while processing send interrupts,
	 * and no recv or send interrupts happened while processing
	 * misc interrupts. Go back and check again. Keep checking until
	 * we find no more events to process.
	 */
1317 if (CE_state->recv_cb && ce_recv_entries_done_nolock(scn, CE_state)) {
1318 if (WLAN_IS_EPPING_ENABLED(cds_get_conparam()) ||
1319 more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1320 goto more_completions;
1321 } else {
1322 HIF_ERROR(
1323 "%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1324 __func__, CE_state->dest_ring->nentries_mask,
1325 CE_state->dest_ring->sw_index,
1326 CE_DEST_RING_READ_IDX_GET(scn,
1327 CE_state->ctrl_addr));
1328 }
1329 }
1330
1331 if (CE_state->send_cb && ce_send_entries_done_nolock(scn, CE_state)) {
1332 if (WLAN_IS_EPPING_ENABLED(cds_get_conparam()) ||
1333 more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1334 goto more_completions;
1335 } else {
1336 HIF_ERROR(
1337 "%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1338 __func__, CE_state->src_ring->nentries_mask,
1339 CE_state->src_ring->sw_index,
1340 CE_SRC_RING_READ_IDX_GET(scn,
1341 CE_state->ctrl_addr));
1342 }
1343 }
1344
1345 if (CE_state->misc_cbs) {
1346 CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
1347 if (CE_int_status & CE_WATERMARK_MASK) {
1348 if (CE_state->watermark_cb) {
1349 goto more_watermarks;
1350 }
1351 }
1352 }
1353
	cdf_spin_unlock(&CE_state->ce_index_lock);
	cdf_atomic_set(&CE_state->rx_pending, 0);

	if (Q_TARGET_ACCESS_END(scn) < 0)
		HIF_ERROR("<--[premature rc=%d]\n", CE_state->receive_count);
	return CE_state->receive_count;
}
1361
/*
 * Handler for per-engine interrupts on ALL active CEs.
 * This is used in cases where the system is sharing a
 * single interrupt for all CEs.
 */
1367
1368void ce_per_engine_service_any(int irq, struct ol_softc *scn)
1369{
1370 int CE_id;
1371 uint32_t intr_summary;
1372
1373 A_TARGET_ACCESS_BEGIN(scn);
1374 if (!cdf_atomic_read(&scn->tasklet_from_intr)) {
1375 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1376 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1377 if (cdf_atomic_read(&CE_state->rx_pending)) {
1378 cdf_atomic_set(&CE_state->rx_pending, 0);
1379 ce_per_engine_service(scn, CE_id);
1380 }
1381 }
1382
1383 A_TARGET_ACCESS_END(scn);
1384 return;
1385 }
1386
1387 intr_summary = CE_INTERRUPT_SUMMARY(scn);
1388
1389 for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
1390 if (intr_summary & (1 << CE_id)) {
1391 intr_summary &= ~(1 << CE_id);
1392 } else {
1393 continue; /* no intr pending on this CE */
1394 }
1395
1396 ce_per_engine_service(scn, CE_id);
1397 }
1398
1399 A_TARGET_ACCESS_END(scn);
1400}
1401
1402/*
1403 * Adjust interrupts for the copy complete handler.
1404 * If it's needed for either send or recv, then unmask
1405 * this interrupt; otherwise, mask it.
1406 *
1407 * Called with target_lock held.
1408 */
1409static void
1410ce_per_engine_handler_adjust(struct CE_state *CE_state,
1411 int disable_copy_compl_intr)
1412{
1413 uint32_t ctrl_addr = CE_state->ctrl_addr;
1414 struct ol_softc *scn = CE_state->scn;
1415
1416 CE_state->disable_copy_compl_intr = disable_copy_compl_intr;
1417 A_TARGET_ACCESS_BEGIN(scn);
1418 if ((!disable_copy_compl_intr) &&
1419 (CE_state->send_cb || CE_state->recv_cb)) {
1420 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1421 } else {
1422 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1423 }
1424
1425 if (CE_state->watermark_cb) {
1426 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1427 } else {
1428 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1429 }
1430 A_TARGET_ACCESS_END(scn);
1431
1432}
1433
/* Iterate the CE_state list and disable the copy-complete interrupt
 * if it has been registered already.
 */
1437void ce_disable_any_copy_compl_intr_nolock(struct ol_softc *scn)
1438{
1439 int CE_id;
1440
1441 A_TARGET_ACCESS_BEGIN(scn);
1442 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1443 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1444 uint32_t ctrl_addr = CE_state->ctrl_addr;
1445
1446 /* if the interrupt is currently enabled, disable it */
1447 if (!CE_state->disable_copy_compl_intr
1448 && (CE_state->send_cb || CE_state->recv_cb)) {
1449 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1450 }
1451
1452 if (CE_state->watermark_cb) {
1453 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1454 }
1455 }
1456 A_TARGET_ACCESS_END(scn);
1457}
1458
1459void ce_enable_any_copy_compl_intr_nolock(struct ol_softc *scn)
1460{
1461 int CE_id;
1462
1463 A_TARGET_ACCESS_BEGIN(scn);
1464 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1465 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1466 uint32_t ctrl_addr = CE_state->ctrl_addr;
1467
1468 /*
1469 * If the CE is supposed to have copy complete interrupts
1470 * enabled (i.e. there a callback registered, and the
1471 * "disable" flag is not set), then re-enable the interrupt.
1472 */
1473 if (!CE_state->disable_copy_compl_intr
1474 && (CE_state->send_cb || CE_state->recv_cb)) {
1475 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1476 }
1477
1478 if (CE_state->watermark_cb) {
1479 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1480 }
1481 }
1482 A_TARGET_ACCESS_END(scn);
1483}
1484
/**
 * ce_send_cb_register(): register completion handler
 * @copyeng: CE_state representing the ce we are adding the behavior to
 * @fn_ptr: callback that the ce should use when processing tx completions
 * @disable_interrupts: if the interrupts should be enabled or not.
 *
 * Caller should guarantee that no transactions are in progress before
 * switching the callback function.
 *
 * Registers the send context before the fn pointer so that if the cb is valid
 * the context should be valid.
 *
 * Beware that currently this function will enable completion interrupts.
 */
void
ce_send_cb_register(struct CE_handle *copyeng,
		    ce_send_cb fn_ptr,
		    void *ce_send_context, int disable_interrupts)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;

	if (CE_state == NULL) {
		pr_err("%s: Error CE state = NULL\n", __func__);
		return;
	}
	CE_state->send_context = ce_send_context;
	CE_state->send_cb = fn_ptr;
	ce_per_engine_handler_adjust(CE_state, disable_interrupts);
}
1514
/**
 * ce_recv_cb_register(): register completion handler
 * @copyeng: CE_state representing the ce we are adding the behavior to
 * @fn_ptr: callback that the ce should use when processing rx completions
 * @disable_interrupts: if the interrupts should be enabled or not.
 *
 * Registers the recv context before the fn pointer so that if the cb is valid
 * the context should be valid.
 *
 * Caller should guarantee that no transactions are in progress before
 * switching the callback function.
 */
void
ce_recv_cb_register(struct CE_handle *copyeng,
		    CE_recv_cb fn_ptr,
		    void *CE_recv_context, int disable_interrupts)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;

	if (CE_state == NULL) {
		pr_err("%s: ERROR CE state = NULL\n", __func__);
		return;
	}
	CE_state->recv_context = CE_recv_context;
	CE_state->recv_cb = fn_ptr;
	ce_per_engine_handler_adjust(CE_state, disable_interrupts);
}
1542
/**
 * ce_watermark_cb_register(): register completion handler
 * @copyeng: CE_state representing the ce we are adding the behavior to
 * @fn_ptr: callback that the ce should use when processing watermark events
 *
 * Caller should guarantee that no watermark events are being processed before
 * switching the callback function.
 */
void
ce_watermark_cb_register(struct CE_handle *copyeng,
			 CE_watermark_cb fn_ptr, void *CE_wm_context)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;

	CE_state->watermark_cb = fn_ptr;
	CE_state->wm_context = CE_wm_context;
	ce_per_engine_handler_adjust(CE_state, 0);
	if (fn_ptr) {
		CE_state->misc_cbs = 1;
	}
}
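
/*
 * Illustrative sketch (hypothetical callback and context names): watermark
 * handling is enabled by registering a callback and then programming the
 * thresholds:
 *
 *	ce_watermark_cb_register(ce_hdl, my_wm_cb, my_wm_ctx);
 *	ce_send_watermarks_set(ce_hdl, low_entries, high_entries);
 *
 * my_wm_cb() is later invoked with CE_WM_FLAG_* flags derived from the
 * interrupt status by ce_per_engine_service().
 */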
1564
1565#ifdef WLAN_FEATURE_FASTPATH
/**
 * ce_pkt_dl_len_set() - set the HTT packet download length
 * @hif_sc: HIF context
 * @pkt_download_len: download length
 *
 * Return: None
 */
1573void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
1574{
1575 struct ol_softc *sc = (struct ol_softc *)(hif_sc);
1576 struct CE_state *ce_state = sc->ce_id_to_state[CE_HTT_H2T_MSG];
1577
1578 cdf_assert_always(ce_state);
1579
	ce_state->download_len = pkt_download_len;

1582 cdf_print("%s CE %d Pkt download length %d\n", __func__,
1583 ce_state->id, ce_state->download_len);
1584}
1585#else
1586void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
1587{
1588}
1589#endif /* WLAN_FEATURE_FASTPATH */
1590
1591bool ce_get_rx_pending(struct ol_softc *scn)
1592{
1593 int CE_id;
1594
1595 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1596 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1597 if (cdf_atomic_read(&CE_state->rx_pending))
1598 return true;
1599 }
1600
1601 return false;
1602}
1603
1604/**
1605 * ce_check_rx_pending() - ce_check_rx_pending
1606 * @scn: ol_softc
1607 * @ce_id: ce_id
1608 *
1609 * Return: bool
1610 */
1611bool ce_check_rx_pending(struct ol_softc *scn, int ce_id)
1612{
1613 struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
1614 if (cdf_atomic_read(&CE_state->rx_pending))
1615 return true;
1616 else
1617 return false;
1618}

/**
 * ce_enable_msi(): write the msi configuration to the target
 * @scn: hif context
 * @CE_id: which copy engine will be configured for msi interrupts
 * @msi_addr_lo: Hardware will write to this address to generate an interrupt
 * @msi_addr_hi: Hardware will write to this address to generate an interrupt
 * @msi_data: Hardware will write this data to generate an interrupt
 *
 * Should be done in the initialization sequence so no locking would be needed
 */
void ce_enable_msi(struct ol_softc *scn, unsigned int CE_id,
1631 uint32_t msi_addr_lo, uint32_t msi_addr_hi,
1632 uint32_t msi_data)
1633{
1634#ifdef WLAN_ENABLE_QCA6180
1635 struct CE_state *CE_state;
1636 A_target_id_t targid;
1637 u_int32_t ctrl_addr;
1638 uint32_t tmp;
1639
	CE_state = scn->ce_id_to_state[CE_id];
	if (!CE_state) {
		HIF_ERROR("%s: error - CE_state = NULL", __func__);
		return;
	}
1645 targid = TARGID(sc);
1646 ctrl_addr = CE_state->ctrl_addr;
1647 CE_MSI_ADDR_LOW_SET(scn, ctrl_addr, msi_addr_lo);
1648 CE_MSI_ADDR_HIGH_SET(scn, ctrl_addr, msi_addr_hi);
1649 CE_MSI_DATA_SET(scn, ctrl_addr, msi_data);
1650 tmp = CE_CTRL_REGISTER1_GET(scn, ctrl_addr);
1651 tmp |= (1 << CE_MSI_ENABLE_BIT);
1652 CE_CTRL_REGISTER1_SET(scn, ctrl_addr, tmp);
#endif
1654}
1655
1656#ifdef IPA_OFFLOAD
/*
 * Copy engine should release resources to the micro controller.
 * The micro controller needs
 *  - Copy engine source descriptor base address
 *  - Copy engine source descriptor size
 *  - PCI BAR address to access copy engine registers
 */
1664void ce_ipa_get_resource(struct CE_handle *ce,
1665 uint32_t *ce_sr_base_paddr,
1666 uint32_t *ce_sr_ring_size,
1667 cdf_dma_addr_t *ce_reg_paddr)
1668{
1669 struct CE_state *CE_state = (struct CE_state *)ce;
1670 uint32_t ring_loop;
1671 struct CE_src_desc *ce_desc;
1672 cdf_dma_addr_t phy_mem_base;
1673 struct ol_softc *scn = CE_state->scn;
1674
1675 if (CE_RUNNING != CE_state->state) {
1676 *ce_sr_base_paddr = 0;
1677 *ce_sr_ring_size = 0;
1678 return;
1679 }
1680
1681 /* Update default value for descriptor */
1682 for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
1683 ring_loop++) {
1684 ce_desc = (struct CE_src_desc *)
1685 ((char *)CE_state->src_ring->base_addr_owner_space +
1686 ring_loop * (sizeof(struct CE_src_desc)));
1687 CE_IPA_RING_INIT(ce_desc);
1688 }
1689
1690 /* Get BAR address */
1691 hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
1692
1693 *ce_sr_base_paddr = (uint32_t) CE_state->src_ring->base_addr_CE_space;
1694 *ce_sr_ring_size = (uint32_t) CE_state->src_ring->nentries;
1695 *ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
1696 SR_WR_INDEX_ADDRESS;
1697 return;
1698}
1699#endif /* IPA_OFFLOAD */
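
/*
 * Illustrative sketch (hypothetical variable names): the IPA/uC glue is
 * expected to query the copy engine resources roughly as follows:
 *
 *	uint32_t sr_base_paddr, sr_ring_size;
 *	cdf_dma_addr_t reg_paddr;
 *
 *	ce_ipa_get_resource(ce_hdl, &sr_base_paddr, &sr_ring_size,
 *			    &reg_paddr);
 *
 * The returned values are only meaningful when the CE is in the CE_RUNNING
 * state; otherwise the base address and ring size are reported as 0.
 */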
1700