blob: 4b6c37bc7cccddbc014b3a71eb012d86f36d7012 [file] [log] [blame]
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001/*
2 * Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
3 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28#include <osdep.h>
29#include "a_types.h"
30#include <athdefs.h>
31#include "osapi_linux.h"
32#include "hif.h"
33#include "hif_io32.h"
34#include "ce_api.h"
35#include "ce_main.h"
36#include "ce_internal.h"
37#include "ce_reg.h"
38#include "cdf_lock.h"
39#include "regtable.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080040#include "epping_main.h"
41#include "hif_main.h"
42#include "hif_debug.h"
Chandrasekaran, Manishekar681d1372015-11-05 10:42:48 +053043#include "cds_concurrency.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080044
#ifdef IPA_OFFLOAD
#ifdef QCA_WIFI_3_0
/*
 * CE_IPA_RING_INIT() - one-time initialization of a CE source descriptor
 * used for the IPA offload path on 3.0-family hardware.
 *
 * Presets every control field of @ce_desc: swapping, interrupt masking and
 * search options are disabled; type/tx_classify and the fixed
 * packet_result_offset (64) and nbytes (128) values are what this hardware
 * generation expects for IPA traffic.
 * NOTE(review): the field meanings come from the CE_src_desc layout in
 * ce_internal.h — confirm the constants against that header when changing.
 */
#define CE_IPA_RING_INIT(ce_desc) \
	do { \
		ce_desc->gather = 0; \
		ce_desc->enable_11h = 0; \
		ce_desc->meta_data_low = 0; \
		ce_desc->packet_result_offset = 64; \
		ce_desc->toeplitz_hash_enable = 0; \
		ce_desc->addr_y_search_disable = 0; \
		ce_desc->addr_x_search_disable = 0; \
		ce_desc->misc_int_disable = 0; \
		ce_desc->target_int_disable = 0; \
		ce_desc->host_int_disable = 0; \
		ce_desc->dest_byte_swap = 0; \
		ce_desc->byte_swap = 0; \
		ce_desc->type = 2; \
		ce_desc->tx_classify = 1; \
		ce_desc->buffer_addr_hi = 0; \
		ce_desc->meta_data = 0; \
		ce_desc->nbytes = 128; \
	} while (0)
#else
/*
 * Pre-3.0 hardware variant: the descriptor has far fewer control fields;
 * only byte swap, length (60) and gather need presetting for IPA.
 */
#define CE_IPA_RING_INIT(ce_desc) \
	do { \
		ce_desc->byte_swap = 0; \
		ce_desc->nbytes = 60; \
		ce_desc->gather = 0; \
	} while (0)
#endif /* QCA_WIFI_3_0 */
#endif /* IPA_OFFLOAD */
76
/* Nonzero when the WAR1 slow path may sleep/spin with IRQs disabled
 * (see war_ce_src_ring_write_idx_set below). */
static int war1_allow_sleep;
/* io32 write workaround: when set, source-ring write-index updates go
 * through the indicator-flag sequence instead of a plain register write. */
static int hif_ce_war1;
80
81/*
82 * Support for Copy Engine hardware, which is mainly used for
83 * communication between Host and Target over a PCIe interconnect.
84 */
85
86/*
87 * A single CopyEngine (CE) comprises two "rings":
88 * a source ring
89 * a destination ring
90 *
91 * Each ring consists of a number of descriptors which specify
92 * an address, length, and meta-data.
93 *
94 * Typically, one side of the PCIe interconnect (Host or Target)
95 * controls one ring and the other side controls the other ring.
96 * The source side chooses when to initiate a transfer and it
97 * chooses what to send (buffer address, length). The destination
98 * side keeps a supply of "anonymous receive buffers" available and
99 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
101 *
102 * The sender may send a simple buffer (address/length) or it may
103 * send a small list of buffers. When a small list is sent, hardware
104 * "gathers" these and they end up in a single destination buffer
105 * with a single interrupt.
106 *
107 * There are several "contexts" managed by this layer -- more, it
108 * may seem -- than should be needed. These are provided mainly for
109 * maximum flexibility and especially to facilitate a simpler HIF
110 * implementation. There are per-CopyEngine recv, send, and watermark
111 * contexts. These are supplied by the caller when a recv, send,
112 * or watermark handler is established and they are echoed back to
113 * the caller when the respective callbacks are invoked. There is
114 * also a per-transfer context supplied by the caller when a buffer
115 * (or sendlist) is sent and when a buffer is enqueued for recv.
116 * These per-transfer contexts are echoed back to the caller when
117 * the buffer is sent/received.
 * Target TX hash result: toeplitz_hash_result
119 */
120
121/*
122 * Guts of ce_send, used by both ce_send and ce_sendlist_send.
123 * The caller takes responsibility for any needed locking.
124 */
125int
126ce_completed_send_next_nolock(struct CE_state *CE_state,
127 void **per_CE_contextp,
128 void **per_transfer_contextp,
129 cdf_dma_addr_t *bufferp,
130 unsigned int *nbytesp,
131 unsigned int *transfer_idp,
132 unsigned int *sw_idx, unsigned int *hw_idx,
133 uint32_t *toeplitz_hash_result);
134
/**
 * war_ce_src_ring_write_idx_set() - update a CE source ring's write index,
 * applying the io32-write workaround (WAR1) when it is enabled.
 * @scn: HIF software context (provides the mapped register base scn->mem)
 * @ctrl_addr: register-block offset of the copy engine being updated
 * @write_index: new source-ring write index to publish to hardware
 *
 * When hif_ce_war1 is clear this is a plain CE_SRC_RING_WRITE_IDX_SET.
 * When set, an "indicator" register (the CE's DST_WATERMARK register) is
 * used to flag the write in progress: either a one-shot magic write for
 * the CDC data CE, or a guarded sequence with interrupts disabled.
 * The exact write/read-back ordering below is the workaround itself —
 * do not reorder these accesses.
 */
void war_ce_src_ring_write_idx_set(struct ol_softc *scn,
				   u32 ctrl_addr, unsigned int write_index)
{
	if (hif_ce_war1) {
		void __iomem *indicator_addr;

		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;

		if (!war1_allow_sleep
		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
			/* Fast variant: fold the index into a magic write. */
			hif_write32_mb(indicator_addr,
				       (CDC_WAR_MAGIC_STR | write_index));
		} else {
			unsigned long irq_flags;
			local_irq_save(irq_flags);
			/* Raise the indicator before touching the index. */
			hif_write32_mb(indicator_addr, 1);

			/*
			 * PCIE write waits for ACK in IPQ8K, there is no
			 * need to read back value.
			 */
			(void)hif_read32_mb(indicator_addr);
			(void)hif_read32_mb(indicator_addr); /* conservative */

			CE_SRC_RING_WRITE_IDX_SET(scn,
						  ctrl_addr, write_index);

			/* Lower the indicator once the index has landed. */
			hif_write32_mb(indicator_addr, 0);
			local_irq_restore(irq_flags);
		}
	} else
		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
}
168
/**
 * ce_send_nolock() - post one source buffer to a copy engine.
 * @copyeng: opaque CE handle (really a struct CE_state *)
 * @per_transfer_context: caller cookie echoed back on send completion
 * @buffer: DMA address of the payload
 * @nbytes: payload length in bytes
 * @transfer_id: ID stored in the descriptor meta_data field
 * @flags: CE_SEND_FLAG_* (gather / swap-disable)
 * @user_flags: extra per-descriptor flags (consumed on QCA_WIFI_3_0 only)
 *
 * Caller must hold scn->target_lock (see ce_send for the locking wrapper).
 * The descriptor is composed in the cached shadow ring first and then
 * copied to the uncached ring in one struct assignment, so the hardware
 * never observes a half-written descriptor.
 *
 * Return: CDF_STATUS_SUCCESS, or CDF_STATUS_E_FAILURE when the ring is
 * full (the A_TARGET_ACCESS_*_RET macros may also return early on their
 * own error paths).
 */
int
ce_send_nolock(struct CE_handle *copyeng,
	       void *per_transfer_context,
	       cdf_dma_addr_t buffer,
	       uint32_t nbytes,
	       uint32_t transfer_id,
	       uint32_t flags,
	       uint32_t user_flags)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	uint64_t dma_addr = buffer;
	struct ol_softc *scn = CE_state->scn;

	A_TARGET_ACCESS_BEGIN_RET(scn);
	/* Need at least one free entry (one slot is kept empty to
	 * distinguish full from empty). */
	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		status = CDF_STATUS_E_FAILURE;
		A_TARGET_ACCESS_END_RET(scn);
		return status;
	}
	{
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		/* Update low 32 bits source descriptor address */
		shadow_src_desc->buffer_addr =
			(uint32_t)(dma_addr & 0xFFFFFFFF);
#ifdef QCA_WIFI_3_0
		/* 3.0 parts carry 5 extra address bits plus user flags in
		 * the descriptor's second word. */
		shadow_src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0x1F);
		user_flags |= shadow_src_desc->buffer_addr_hi;
		memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
		       sizeof(uint32_t));
#endif
		shadow_src_desc->meta_data = transfer_id;

		/*
		 * Set the swap bit if:
		 * typical sends on this CE are swapped (host is big-endian)
		 * and this send doesn't disable the swapping
		 * (data is not bytestream)
		 */
		shadow_src_desc->byte_swap =
			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
		shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
		shadow_src_desc->nbytes = nbytes;

		/* Publish the fully composed descriptor to the real ring. */
		*src_desc = *shadow_src_desc;

		src_ring->per_transfer_context[write_index] =
			per_transfer_context;

		/* Update Source Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		/* WORKAROUND: for gather fragments the index is only
		 * pushed to HW with the final (non-gather) fragment. */
		if (!shadow_src_desc->gather) {
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);
		}

		src_ring->write_index = write_index;
		status = CDF_STATUS_SUCCESS;
	}
	A_TARGET_ACCESS_END_RET(scn);

	return status;
}
251
252int
253ce_send(struct CE_handle *copyeng,
254 void *per_transfer_context,
255 cdf_dma_addr_t buffer,
256 uint32_t nbytes,
257 uint32_t transfer_id,
258 uint32_t flags,
259 uint32_t user_flag)
260{
261 struct CE_state *CE_state = (struct CE_state *)copyeng;
262 int status;
263
264 cdf_spin_lock_bh(&CE_state->scn->target_lock);
265 status = ce_send_nolock(copyeng, per_transfer_context, buffer, nbytes,
266 transfer_id, flags, user_flag);
267 cdf_spin_unlock_bh(&CE_state->scn->target_lock);
268
269 return status;
270}
271
272unsigned int ce_sendlist_sizeof(void)
273{
274 return sizeof(struct ce_sendlist);
275}
276
277void ce_sendlist_init(struct ce_sendlist *sendlist)
278{
279 struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
280 sl->num_items = 0;
281}
282
283int
284ce_sendlist_buf_add(struct ce_sendlist *sendlist,
285 cdf_dma_addr_t buffer,
286 uint32_t nbytes,
287 uint32_t flags,
288 uint32_t user_flags)
289{
290 struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
291 unsigned int num_items = sl->num_items;
292 struct ce_sendlist_item *item;
293
294 if (num_items >= CE_SENDLIST_ITEMS_MAX) {
295 CDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
296 return CDF_STATUS_E_RESOURCES;
297 }
298
299 item = &sl->item[num_items];
300 item->send_type = CE_SIMPLE_BUFFER_TYPE;
301 item->data = buffer;
302 item->u.nbytes = nbytes;
303 item->flags = flags;
304 item->user_flags = user_flags;
305 sl->num_items = num_items + 1;
306 return CDF_STATUS_SUCCESS;
307}
308
/**
 * ce_sendlist_send() - post a multi-fragment sendlist as one gather burst.
 * @copyeng: CE handle
 * @per_transfer_context: caller cookie attached to the FINAL fragment only
 * @sendlist: fragments built via ce_sendlist_buf_add()
 * @transfer_id: ID stored in each fragment's descriptor
 *
 * All fragments but the last are posted with CE_SEND_FLAG_GATHER so the
 * hardware delivers them into a single destination buffer; the last one
 * carries the real per-transfer context and triggers the write-index push.
 *
 * Return: CDF_STATUS_SUCCESS on success; -ENOMEM (the initial value of
 * status) when the ring lacks room for the whole list.
 * NOTE(review): the success paths return CDF_STATUS_* while the no-room
 * path returns a raw -ENOMEM — callers appear to only test for
 * CDF_STATUS_SUCCESS; confirm before changing.
 */
int
ce_sendlist_send(struct CE_handle *copyeng,
		 void *per_transfer_context,
		 struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	int status = -ENOMEM;
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int num_items = sl->num_items;
	unsigned int sw_index;
	unsigned int write_index;

	/* Guards the i < num_items - 1 loop below: num_items is unsigned,
	 * so num_items == 0 would wrap. Assert may be compiled out —
	 * callers must not pass an empty list. */
	CDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

	cdf_spin_lock_bh(&CE_state->scn->target_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	/* Post only if the entire list fits; no partial sends. */
	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
	    num_items) {
		struct ce_sendlist_item *item;
		int i;

		/* handle all but the last item uniformly */
		for (i = 0; i < num_items - 1; i++) {
			item = &sl->item[i];
			/* TBDXXX: Support extensible sendlist_types? */
			CDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
			status = ce_send_nolock(copyeng, CE_SENDLIST_ITEM_CTXT,
						(cdf_dma_addr_t) item->data,
						item->u.nbytes, transfer_id,
						item->flags | CE_SEND_FLAG_GATHER,
						item->user_flags);
			CDF_ASSERT(status == CDF_STATUS_SUCCESS);
		}
		/* provide valid context pointer for final item */
		item = &sl->item[i];
		/* TBDXXX: Support extensible sendlist_types? */
		CDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
		status = ce_send_nolock(copyeng, per_transfer_context,
					(cdf_dma_addr_t) item->data,
					item->u.nbytes,
					transfer_id, item->flags,
					item->user_flags);
		CDF_ASSERT(status == CDF_STATUS_SUCCESS);
		NBUF_UPDATE_TX_PKT_COUNT((cdf_nbuf_t)per_transfer_context,
					 NBUF_TX_PKT_CE);
		DPTRACE(cdf_dp_trace((cdf_nbuf_t)per_transfer_context,
			CDF_DP_TRACE_CE_PACKET_PTR_RECORD,
			(uint8_t *)(((cdf_nbuf_t)per_transfer_context)->data),
			sizeof(((cdf_nbuf_t)per_transfer_context)->data)));
	} else {
		/*
		 * Probably not worth the additional complexity to support
		 * partial sends with continuation or notification. We expect
		 * to use large rings and small sendlists. If we can't handle
		 * the entire request at once, punt it back to the caller.
		 */
	}
	cdf_spin_unlock_bh(&CE_state->scn->target_lock);

	return status;
}
374
375#ifdef WLAN_FEATURE_FASTPATH
#ifdef QCA_WIFI_3_0
/**
 * ce_buffer_addr_hi_set() - store the upper DMA address bits and user
 * flags into a (cached) shadow source descriptor.
 * @shadow_src_desc: shadow copy of the descriptor being composed
 * @dma_addr: full 64-bit buffer address; bits [36:32] go to buffer_addr_hi
 * @user_flags: flags OR-ed with the high address bits and written, via
 * memcpy, into the descriptor's second 32-bit word (memcpy avoids an
 * aliasing-unsafe pointer store onto the bitfield word)
 */
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
		      uint64_t dma_addr,
		      uint32_t user_flags)
{
	shadow_src_desc->buffer_addr_hi =
		(uint32_t)((dma_addr >> 32) & 0x1F);
	user_flags |= shadow_src_desc->buffer_addr_hi;
	memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
	       sizeof(uint32_t));
}
#else
/* Pre-3.0 hardware uses 32-bit descriptors: nothing to store. */
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
		      uint64_t dma_addr,
		      uint32_t user_flags)
{
}
#endif
396
/**
 * ce_send_fast() CE layer Tx buffer posting function
 * @copyeng: copy engine handle
 * @msdus: array of msdus to be sent
 * @num_msdus: number of msdus in the array
 * @transfer_id: transfer_id
 *
 * Assumption : Called with an array of MSDU's
 * Function:
 * For each msdu in the array
 * 1. Check no. of available entries
 * 2. Create src ring entries (allocated in consistent memory)
 * 3. Write index to h/w
 *
 * Each MSDU consumes TWO source descriptors: one gather fragment for the
 * HTC/HTT header (frag 0) and one final fragment for the payload (frag 1).
 *
 * NOTE(review): despite step 1 above, this function never checks
 * CE_RING_DELTA before writing descriptors — it relies on the caller /
 * ring sizing to guarantee 2 * num_msdus free entries, and asserts
 * i == num_msdus at the end. Confirm that invariant before reuse.
 *
 * Return: No. of packets that could be sent
 */

int ce_send_fast(struct CE_handle *copyeng, cdf_nbuf_t *msdus,
		 unsigned int num_msdus, unsigned int transfer_id)
{
	struct CE_state *ce_state = (struct CE_state *)copyeng;
	struct ol_softc *scn = ce_state->scn;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	u_int32_t ctrl_addr = ce_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	unsigned int frag_len;
	cdf_nbuf_t msdu;
	int i;		/* NOTE(review): int compared against unsigned num_msdus */
	uint64_t dma_addr;
	uint32_t user_flags = 0;

	/*
	 * This lock could be more fine-grained, one per CE,
	 * TODO : Add this lock now.
	 * That is the next step of optimization.
	 */
	cdf_spin_lock_bh(&scn->target_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	/* 2 msdus per packet */
	for (i = 0; i < num_msdus; i++) {
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		msdu = msdus[i];

		/*
		 * First fill out the ring descriptor for the HTC HTT frame
		 * header. These are uncached writes. Should we use a local
		 * structure instead?
		 */
		/* HTT/HTC header can be passed as a argument */
		dma_addr = cdf_nbuf_get_frag_paddr_lo(msdu, 0);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		user_flags = cdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);

		shadow_src_desc->meta_data = transfer_id;
		shadow_src_desc->nbytes = cdf_nbuf_get_frag_len(msdu, 0);

		/*
		 * HTC HTT header is a word stream, so byte swap if CE byte
		 * swap enabled
		 */
		shadow_src_desc->byte_swap = ((ce_state->attr_flags &
					       CE_ATTR_BYTE_SWAP_DATA) != 0);
		/* For the first one, it still does not need to write */
		shadow_src_desc->gather = 1;
		*src_desc = *shadow_src_desc;

		/* By default we could initialize the transfer context to this
		 * value
		 */
		src_ring->per_transfer_context[write_index] =
			CE_SENDLIST_ITEM_CTXT;

		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
		/*
		 * Now fill out the ring descriptor for the actual data
		 * packet
		 */
		dma_addr = cdf_nbuf_get_frag_paddr_lo(msdu, 1);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		/*
		 * Clear packet offset for all but the first CE desc.
		 */
		user_flags &= ~CDF_CE_TX_PKT_OFFSET_BIT_M;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
		shadow_src_desc->meta_data = transfer_id;

		/* get actual packet length */
		frag_len = cdf_nbuf_get_frag_len(msdu, 1);
		/* Download at most download_len bytes of the payload. */
		shadow_src_desc->nbytes = frag_len > ce_state->download_len ?
			ce_state->download_len : frag_len;

		/* Data packet is a byte stream, so disable byte swap */
		shadow_src_desc->byte_swap = 0;
		/* For the last one, gather is not set */
		shadow_src_desc->gather = 0;
		*src_desc = *shadow_src_desc;
		/* The real nbuf is the context for the final fragment. */
		src_ring->per_transfer_context[write_index] = msdu;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	}

	/* Write the final index to h/w one-shot */
	if (i) {
		src_ring->write_index = write_index;
		/* Don't call WAR_XXX from here
		 * Just call XXX instead, that has the reqd. intel
		 */
		war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
	}

	cdf_spin_unlock_bh(&scn->target_lock);

	/*
	 * If all packets in the array are transmitted,
	 * i = num_msdus
	 * Temporarily add an ASSERT
	 */
	ASSERT(i == num_msdus);
	return i;
}
534#endif /* WLAN_FEATURE_FASTPATH */
535
/**
 * ce_recv_buf_enqueue() - supply one anonymous receive buffer to a CE
 * destination ring.
 * @copyeng: CE handle
 * @per_recv_context: caller cookie echoed back when the buffer completes
 * @buffer: DMA address of the (empty) receive buffer
 *
 * nbytes is zeroed in the posted descriptor; the hardware fills it in on
 * completion, which is also how ce_completed_recv_next_nolock detects a
 * finished descriptor.
 *
 * Return: CDF_STATUS_SUCCESS, CDF_STATUS_E_FAILURE when the ring is full,
 * or -1 if target access could not be begun/ended.
 */
int
ce_recv_buf_enqueue(struct CE_handle *copyeng,
		    void *per_recv_context, cdf_dma_addr_t buffer)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	int val = 0;
	uint64_t dma_addr = buffer;
	struct ol_softc *scn = CE_state->scn;

	cdf_spin_lock_bh(&scn->target_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	/* Wake/keep the target accessible; bail out (unlocking) on error. */
	A_TARGET_ACCESS_BEGIN_RET_EXT(scn, val);
	if (val == -1) {
		cdf_spin_unlock_bh(&scn->target_lock);
		return val;
	}

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
		struct CE_dest_desc *dest_ring_base =
			(struct CE_dest_desc *)dest_ring->
			base_addr_owner_space;
		struct CE_dest_desc *dest_desc =
			CE_DEST_RING_TO_DESC(dest_ring_base, write_index);

		/* Update low 32 bit destination descriptor */
		dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
#ifdef QCA_WIFI_3_0
		dest_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0x1F);
#endif
		/* Zero length marks the descriptor "not yet completed". */
		dest_desc->nbytes = 0;

		dest_ring->per_transfer_context[write_index] =
			per_recv_context;

		/* Update Destination Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
		CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
		dest_ring->write_index = write_index;
		status = CDF_STATUS_SUCCESS;
	} else {
		status = CDF_STATUS_E_FAILURE;
	}
	A_TARGET_ACCESS_END_RET_EXT(scn, val);
	if (val == -1) {
		cdf_spin_unlock_bh(&scn->target_lock);
		return val;
	}

	cdf_spin_unlock_bh(&scn->target_lock);

	return status;
}
597
598void
599ce_send_watermarks_set(struct CE_handle *copyeng,
600 unsigned int low_alert_nentries,
601 unsigned int high_alert_nentries)
602{
603 struct CE_state *CE_state = (struct CE_state *)copyeng;
604 uint32_t ctrl_addr = CE_state->ctrl_addr;
605 struct ol_softc *scn = CE_state->scn;
606
607 cdf_spin_lock(&scn->target_lock);
608 CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
609 CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
610 cdf_spin_unlock(&scn->target_lock);
611}
612
613void
614ce_recv_watermarks_set(struct CE_handle *copyeng,
615 unsigned int low_alert_nentries,
616 unsigned int high_alert_nentries)
617{
618 struct CE_state *CE_state = (struct CE_state *)copyeng;
619 uint32_t ctrl_addr = CE_state->ctrl_addr;
620 struct ol_softc *scn = CE_state->scn;
621
622 cdf_spin_lock(&scn->target_lock);
623 CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
624 low_alert_nentries);
625 CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
626 high_alert_nentries);
627 cdf_spin_unlock(&scn->target_lock);
628}
629
630unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
631{
632 struct CE_state *CE_state = (struct CE_state *)copyeng;
633 struct CE_ring_state *src_ring = CE_state->src_ring;
634 unsigned int nentries_mask = src_ring->nentries_mask;
635 unsigned int sw_index;
636 unsigned int write_index;
637
638 cdf_spin_lock(&CE_state->scn->target_lock);
639 sw_index = src_ring->sw_index;
640 write_index = src_ring->write_index;
641 cdf_spin_unlock(&CE_state->scn->target_lock);
642
643 return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
644}
645
646unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
647{
648 struct CE_state *CE_state = (struct CE_state *)copyeng;
649 struct CE_ring_state *dest_ring = CE_state->dest_ring;
650 unsigned int nentries_mask = dest_ring->nentries_mask;
651 unsigned int sw_index;
652 unsigned int write_index;
653
654 cdf_spin_lock(&CE_state->scn->target_lock);
655 sw_index = dest_ring->sw_index;
656 write_index = dest_ring->write_index;
657 cdf_spin_unlock(&CE_state->scn->target_lock);
658
659 return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
660}
661
662/*
663 * Guts of ce_send_entries_done.
664 * The caller takes responsibility for any necessary locking.
665 */
666unsigned int
667ce_send_entries_done_nolock(struct ol_softc *scn,
668 struct CE_state *CE_state)
669{
670 struct CE_ring_state *src_ring = CE_state->src_ring;
671 uint32_t ctrl_addr = CE_state->ctrl_addr;
672 unsigned int nentries_mask = src_ring->nentries_mask;
673 unsigned int sw_index;
674 unsigned int read_index;
675
676 sw_index = src_ring->sw_index;
677 read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
678
679 return CE_RING_DELTA(nentries_mask, sw_index, read_index);
680}
681
682unsigned int ce_send_entries_done(struct CE_handle *copyeng)
683{
684 struct CE_state *CE_state = (struct CE_state *)copyeng;
685 unsigned int nentries;
686
687 cdf_spin_lock(&CE_state->scn->target_lock);
688 nentries = ce_send_entries_done_nolock(CE_state->scn, CE_state);
689 cdf_spin_unlock(&CE_state->scn->target_lock);
690
691 return nentries;
692}
693
694/*
695 * Guts of ce_recv_entries_done.
696 * The caller takes responsibility for any necessary locking.
697 */
698unsigned int
699ce_recv_entries_done_nolock(struct ol_softc *scn,
700 struct CE_state *CE_state)
701{
702 struct CE_ring_state *dest_ring = CE_state->dest_ring;
703 uint32_t ctrl_addr = CE_state->ctrl_addr;
704 unsigned int nentries_mask = dest_ring->nentries_mask;
705 unsigned int sw_index;
706 unsigned int read_index;
707
708 sw_index = dest_ring->sw_index;
709 read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
710
711 return CE_RING_DELTA(nentries_mask, sw_index, read_index);
712}
713
714unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
715{
716 struct CE_state *CE_state = (struct CE_state *)copyeng;
717 unsigned int nentries;
718
719 cdf_spin_lock(&CE_state->scn->target_lock);
720 nentries = ce_recv_entries_done_nolock(CE_state->scn, CE_state);
721 cdf_spin_unlock(&CE_state->scn->target_lock);
722
723 return nentries;
724}
725
/* Debug support: each global records the most recent per-transfer context
 * reaped by the corresponding *_next operation below, so it can be
 * inspected from a debugger after the fact. */
void *ce_debug_cmplrn_context;  /* completed recv next context */
void *ce_debug_cnclsn_context;  /* cancel send next context */
void *ce_debug_rvkrn_context;   /* revoke receive next context */
void *ce_debug_cmplsn_context;  /* completed send next context */
731
/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 *
 * Reaps one completed destination descriptor: returns its buffer address,
 * byte count, transfer id and swap flag through the out-parameters, echoes
 * back the per-CE and per-transfer contexts, and advances sw_index.
 * Returns CDF_STATUS_SUCCESS, or CDF_STATUS_E_FAILURE when the descriptor
 * at sw_index has not completed yet.
 */
int
ce_completed_recv_next_nolock(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      cdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *flagsp)
{
	int status;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;

	struct CE_dest_desc *dest_ring_base =
		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
	struct CE_dest_desc *dest_desc =
		CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
	int nbytes;
	struct CE_dest_desc dest_desc_info;
	/*
	 * By copying the dest_desc_info element to local memory, we could
	 * avoid extra memory read from non-cachable memory.
	 */
	dest_desc_info = *dest_desc;
	nbytes = dest_desc_info.nbytes;
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		status = CDF_STATUS_E_FAILURE;
		goto done;
	}

	/* Re-arm the slot's completion marker (see ce_recv_buf_enqueue). */
	dest_desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
	*nbytesp = nbytes;
	*transfer_idp = dest_desc_info.meta_data;
	*flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

	if (per_CE_contextp) {
		*per_CE_contextp = CE_state->recv_context;
	}

	/* Record for debugging, then hand the context back to the caller. */
	ce_debug_cmplrn_context = dest_ring->per_transfer_context[sw_index];
	if (per_transfer_contextp) {
		*per_transfer_contextp = ce_debug_cmplrn_context;
	}
	dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	status = CDF_STATUS_SUCCESS;

done:
	return status;
}
799
800int
801ce_completed_recv_next(struct CE_handle *copyeng,
802 void **per_CE_contextp,
803 void **per_transfer_contextp,
804 cdf_dma_addr_t *bufferp,
805 unsigned int *nbytesp,
806 unsigned int *transfer_idp, unsigned int *flagsp)
807{
808 struct CE_state *CE_state = (struct CE_state *)copyeng;
809 int status;
810
811 cdf_spin_lock_bh(&CE_state->scn->target_lock);
812 status =
813 ce_completed_recv_next_nolock(CE_state, per_CE_contextp,
814 per_transfer_contextp, bufferp,
815 nbytesp, transfer_idp, flagsp);
816 cdf_spin_unlock_bh(&CE_state->scn->target_lock);
817
818 return status;
819}
820
/* NB: Modeled after ce_completed_recv_next_nolock */
/*
 * Reclaim one still-outstanding receive buffer (e.g. during shutdown),
 * regardless of whether the hardware has filled it. Returns the buffer's
 * DMA address and contexts, advances sw_index, and reports
 * CDF_STATUS_E_FAILURE once the ring is drained (write_index == sw_index)
 * or when this CE has no destination ring at all.
 */
CDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp, cdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state;
	struct CE_ring_state *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	CDF_STATUS status;
	struct ol_softc *scn;

	CE_state = (struct CE_state *)copyeng;
	dest_ring = CE_state->dest_ring;
	if (!dest_ring) {
		return CDF_STATUS_E_FAILURE;
	}

	scn = CE_state->scn;
	cdf_spin_lock(&scn->target_lock);
	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct CE_dest_desc *dest_ring_base =
			(struct CE_dest_desc *)dest_ring->
			base_addr_owner_space;
		struct CE_dest_desc *dest_desc =
			CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);

		if (per_CE_contextp) {
			*per_CE_contextp = CE_state->recv_context;
		}

		/* Record for debugging, then hand the context back. */
		ce_debug_rvkrn_context =
			dest_ring->per_transfer_context[sw_index];
		if (per_transfer_contextp) {
			*per_transfer_contextp = ce_debug_rvkrn_context;
		}
		dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		status = CDF_STATUS_SUCCESS;
	} else {
		status = CDF_STATUS_E_FAILURE;
	}
	cdf_spin_unlock(&scn->target_lock);

	return status;
}
878
/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 *
 * Reaps one completed source descriptor. The cached hw_index is refreshed
 * from the hardware read index only when software has caught up with the
 * cached value, to minimize register reads. On success, returns the
 * buffer address, nbytes, transfer id and toeplitz hash (3.0 HW only)
 * through the out-parameters and advances sw_index.
 * Returns CDF_STATUS_SUCCESS, or CDF_STATUS_E_FAILURE when nothing has
 * completed (or the read index is the 0xffffffff "target gone" value).
 */
int
ce_completed_send_next_nolock(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      cdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *sw_idx,
			      unsigned int *hw_idx,
			      uint32_t *toeplitz_hash_result)
{
	int status = CDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int read_index;
	struct ol_softc *scn = CE_state->scn;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */
		A_TARGET_ACCESS_BEGIN_RET(scn);
		src_ring->hw_index =
			CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
		A_TARGET_ACCESS_END_RET(scn);
	}
	read_index = src_ring->hw_index;

	if (sw_idx)
		*sw_idx = sw_index;

	if (hw_idx)
		*hw_idx = read_index;

	if ((read_index != sw_index) && (read_index != 0xffffffff)) {
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, sw_index);
#ifdef QCA_WIFI_3_0
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
#endif
		/* Return data from completed source descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
		*nbytesp = shadow_src_desc->nbytes;
		*transfer_idp = shadow_src_desc->meta_data;
#ifdef QCA_WIFI_3_0
		/* Hash lives in the uncached ring (HW wrote it back). */
		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
#else
		*toeplitz_hash_result = 0;
#endif
		if (per_CE_contextp) {
			*per_CE_contextp = CE_state->send_context;
		}

		/* Record for debugging, then hand the context back. */
		ce_debug_cmplsn_context =
			src_ring->per_transfer_context[sw_index];
		if (per_transfer_contextp) {
			*per_transfer_contextp = ce_debug_cmplsn_context;
		}
		src_ring->per_transfer_context[sw_index] = 0;  /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = CDF_STATUS_SUCCESS;
	}

	return status;
}
962
/* NB: Modeled after ce_completed_send_next */
/*
 * Reclaim one still-pending send descriptor (e.g. during shutdown),
 * whether or not the hardware has processed it. Returns the descriptor's
 * buffer address, length, transfer id, hash (3.0 HW) and contexts, and
 * advances sw_index. Reports CDF_STATUS_E_FAILURE once the ring is
 * drained, or when this CE has no source ring.
 * NOTE(review): uses cdf_spin_lock (non-_bh) while ce_send uses the _bh
 * variant on the same lock — presumably only called from contexts where
 * that is safe; confirm before calling from bottom-half context.
 */
CDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    cdf_dma_addr_t *bufferp,
		    unsigned int *nbytesp,
		    unsigned int *transfer_idp,
		    uint32_t *toeplitz_hash_result)
{
	struct CE_state *CE_state;
	struct CE_ring_state *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	CDF_STATUS status;
	struct ol_softc *scn;

	CE_state = (struct CE_state *)copyeng;
	src_ring = CE_state->src_ring;
	if (!src_ring) {
		return CDF_STATUS_E_FAILURE;
	}

	scn = CE_state->scn;
	cdf_spin_lock(&CE_state->scn->target_lock);
	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);

		/* Return data from completed source descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
#ifdef QCA_WIFI_3_0
		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
#else
		*toeplitz_hash_result = 0;
#endif

		if (per_CE_contextp) {
			*per_CE_contextp = CE_state->send_context;
		}

		/* Record for debugging, then hand the context back. */
		ce_debug_cnclsn_context =
			src_ring->per_transfer_context[sw_index];
		if (per_transfer_contextp) {
			*per_transfer_contextp = ce_debug_cnclsn_context;
		}
		src_ring->per_transfer_context[sw_index] = 0;  /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = CDF_STATUS_SUCCESS;
	} else {
		status = CDF_STATUS_E_FAILURE;
	}
	cdf_spin_unlock(&CE_state->scn->target_lock);

	return status;
}
1031
1032/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
1033#define CE_WM_SHFT 1
1034
1035int
1036ce_completed_send_next(struct CE_handle *copyeng,
1037 void **per_CE_contextp,
1038 void **per_transfer_contextp,
1039 cdf_dma_addr_t *bufferp,
1040 unsigned int *nbytesp,
1041 unsigned int *transfer_idp,
1042 unsigned int *sw_idx,
1043 unsigned int *hw_idx,
1044 unsigned int *toeplitz_hash_result)
1045{
1046 struct CE_state *CE_state = (struct CE_state *)copyeng;
1047 int status;
1048
1049 cdf_spin_lock_bh(&CE_state->scn->target_lock);
1050 status =
1051 ce_completed_send_next_nolock(CE_state, per_CE_contextp,
1052 per_transfer_contextp, bufferp,
1053 nbytesp, transfer_idp, sw_idx,
1054 hw_idx, toeplitz_hash_result);
1055 cdf_spin_unlock_bh(&CE_state->scn->target_lock);
1056
1057 return status;
1058}
1059
#ifdef ATH_11AC_TXCOMPACT
/* CE engine descriptor reap
 * Similar to ce_per_engine_service; the only difference is that
 * ce_per_engine_service does receive and reaping of completed descriptors,
 * while this function only handles reaping of Tx-complete descriptors.
 * It is called from the threshold reap poll routine
 * hif_send_complete_check, so it must not contain receive functionality.
 */

void ce_per_engine_servicereap(struct ol_softc *scn, unsigned int CE_id)
{
	void *CE_context;
	void *transfer_context;
	cdf_dma_addr_t buf;
	unsigned int nbytes;
	unsigned int id;
	unsigned int sw_idx, hw_idx;
	uint32_t toeplitz_hash_result;
	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];

	A_TARGET_ACCESS_BEGIN(scn);

	/* Since this function is called from both user context and
	 * tasklet context the spinlock has to lock the bottom halves.
	 * This fix assumes that the ATH_11AC_TXCOMPACT flag is always
	 * enabled in TX polling mode. If this is not the case, more
	 * bottom-half spin lock changes are needed. Due to data path
	 * performance concerns, after internal discussion we've decided
	 * to make the minimum change, i.e., only address the issue that
	 * occurred in this function. The possible negative effect of this
	 * minimum change is that, in the future, if some other function is
	 * also opened to user context use, those cases need to be
	 * addressed by changing spin_lock to spin_lock_bh as well.
	 */

	cdf_spin_lock_bh(&scn->target_lock);

	if (CE_state->send_cb) {
		{
			/* Pop completed send buffers and call the
			 * registered send callback for each
			 */
			while (ce_completed_send_next_nolock
				       (CE_state, &CE_context,
				       &transfer_context, &buf,
				       &nbytes, &id, &sw_idx, &hw_idx,
				       &toeplitz_hash_result) ==
			       CDF_STATUS_SUCCESS) {
				if (CE_id != CE_HTT_H2T_MSG) {
					/* The callback runs without
					 * target_lock held; drop it across
					 * the call and retake afterwards.
					 */
					cdf_spin_unlock_bh(&scn->target_lock);
					CE_state->
					send_cb((struct CE_handle *)
						CE_state, CE_context,
						transfer_context, buf,
						nbytes, id, sw_idx, hw_idx,
						toeplitz_hash_result);
					cdf_spin_lock_bh(&scn->target_lock);
				} else {
					/* HTT H2T message CE: just return
					 * a send credit to the pipe.
					 */
					struct HIF_CE_pipe_info *pipe_info =
						(struct HIF_CE_pipe_info *)
						CE_context;

					cdf_spin_lock_bh(&pipe_info->
						 completion_freeq_lock);
					pipe_info->num_sends_allowed++;
					cdf_spin_unlock_bh(&pipe_info->
						   completion_freeq_lock);
				}
			}
		}
	}

	cdf_spin_unlock_bh(&scn->target_lock);
	A_TARGET_ACCESS_END(scn);
}

#endif /*ATH_11AC_TXCOMPACT */
1138
/*
 * Number of times to check for any pending tx/rx completion on
 * a copy engine; this count should be big enough. Once we hit
 * this threshold we'll not check for any Tx/Rx completion in the same
 * interrupt handling. Note that this threshold is only used for
 * Rx interrupt processing; it can be used for Tx as well if we
 * suspect any infinite loop in checking for pending Tx completion.
 */
1147#define CE_TXRX_COMP_CHECK_THRESHOLD 20
1148
1149/*
1150 * Guts of interrupt handler for per-engine interrupts on a particular CE.
1151 *
1152 * Invokes registered callbacks for recv_complete,
1153 * send_complete, and watermarks.
1154 *
1155 * Returns: number of messages processed
1156 */
1157
int ce_per_engine_service(struct ol_softc *scn, unsigned int CE_id)
{
	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	void *CE_context;
	void *transfer_context;
	cdf_dma_addr_t buf;
	unsigned int nbytes;
	unsigned int id;
	unsigned int flags;
	uint32_t CE_int_status;
	unsigned int more_comp_cnt = 0;
	unsigned int more_snd_comp_cnt = 0;
	unsigned int sw_idx, hw_idx;
	uint32_t toeplitz_hash_result;

	/* Target may be asleep/unreachable; bail with "no work done" */
	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		HIF_ERROR("[premature rc=0]\n");
		return 0; /* no work done */
	}

	cdf_spin_lock(&scn->target_lock);

	/* Clear force_break flag and re-initialize receive_count to 0 */

	/* NAPI: scn variables - thread/multi-processing safety? */
	scn->receive_count = 0;
	scn->force_break = 0;
more_completions:
	if (CE_state->recv_cb) {

		/* Pop completed recv buffers and call
		 * the registered recv callback for each
		 */
		while (ce_completed_recv_next_nolock
				(CE_state, &CE_context, &transfer_context,
				&buf, &nbytes, &id, &flags) ==
				CDF_STATUS_SUCCESS) {
			/* Callback runs without target_lock held */
			cdf_spin_unlock(&scn->target_lock);
			CE_state->recv_cb((struct CE_handle *)CE_state,
					  CE_context, transfer_context, buf,
					  nbytes, id, flags);

			/*
			 * EV #112693 -
			 * [Peregrine][ES1][WB342][Win8x86][Performance]
			 * BSoD_0x133 occurred in VHT80 UDP_DL
			 * Break out DPC by force if number of loops in
			 * hif_pci_ce_recv_data reaches MAX_NUM_OF_RECEIVES
			 * to avoid spending too long time in
			 * DPC for each interrupt handling. Schedule another
			 * DPC to avoid data loss if we had taken
			 * force-break action before. Applies to Windows OS
			 * only currently; Linux/MAC OS can expand to their
			 * platform if necessary
			 */

			/* Break the receive processing by
			 * force if force_break is set
			 */
			if (cdf_unlikely(scn->force_break)) {
				/* Leave rx_pending set so the deferred
				 * pass knows there is still work queued.
				 */
				cdf_atomic_set(&CE_state->rx_pending, 1);
				CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
					HOST_IS_COPY_COMPLETE_MASK);
				if (Q_TARGET_ACCESS_END(scn) < 0)
					HIF_ERROR("<--[premature rc=%d]\n",
						  scn->receive_count);
				return scn->receive_count;
			}
			cdf_spin_lock(&scn->target_lock);
		}
	}

	/*
	 * Attention: We may experience potential infinite loop for the below
	 * while loop during a sending stress test.
	 * Resolve the same way as the receive case (refer to EV #112693)
	 */

	if (CE_state->send_cb) {
		/* Pop completed send buffers and call
		 * the registered send callback for each
		 */

#ifdef ATH_11AC_TXCOMPACT
		while (ce_completed_send_next_nolock
			 (CE_state, &CE_context,
			 &transfer_context, &buf, &nbytes,
			 &id, &sw_idx, &hw_idx,
			 &toeplitz_hash_result) == CDF_STATUS_SUCCESS) {

			if (CE_id != CE_HTT_H2T_MSG ||
			    WLAN_IS_EPPING_ENABLED(cds_get_conparam())) {
				/* Callback runs without target_lock held */
				cdf_spin_unlock(&scn->target_lock);
				CE_state->send_cb((struct CE_handle *)CE_state,
						  CE_context, transfer_context,
						  buf, nbytes, id, sw_idx,
						  hw_idx, toeplitz_hash_result);
				cdf_spin_lock(&scn->target_lock);
			} else {
				/* HTT H2T CE: just return a send credit */
				struct HIF_CE_pipe_info *pipe_info =
					(struct HIF_CE_pipe_info *)CE_context;

				cdf_spin_lock(&pipe_info->
					      completion_freeq_lock);
				pipe_info->num_sends_allowed++;
				cdf_spin_unlock(&pipe_info->
						completion_freeq_lock);
			}
		}
#else /*ATH_11AC_TXCOMPACT */
		while (ce_completed_send_next_nolock
			 (CE_state, &CE_context,
			 &transfer_context, &buf, &nbytes,
			 &id, &sw_idx, &hw_idx,
			 &toeplitz_hash_result) == CDF_STATUS_SUCCESS) {
			cdf_spin_unlock(&scn->target_lock);
			CE_state->send_cb((struct CE_handle *)CE_state,
					  CE_context, transfer_context, buf,
					  nbytes, id, sw_idx, hw_idx,
					  toeplitz_hash_result);
			cdf_spin_lock(&scn->target_lock);
		}
#endif /*ATH_11AC_TXCOMPACT */
	}

more_watermarks:
	if (CE_state->misc_cbs) {
		CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
		if (CE_int_status & CE_WATERMARK_MASK) {
			if (CE_state->watermark_cb) {

				cdf_spin_unlock(&scn->target_lock);
				/* Convert HW IS bits to software flags */
				flags =
					(CE_int_status & CE_WATERMARK_MASK) >>
					CE_WM_SHFT;

				CE_state->
				watermark_cb((struct CE_handle *)CE_state,
					     CE_state->wm_context, flags);
				cdf_spin_lock(&scn->target_lock);
			}
		}
	}

	/*
	 * Clear the misc interrupts (watermark) that were handled above,
	 * and that will be checked again below.
	 * Clear and check for copy-complete interrupts again, just in case
	 * more copy completions happened while the misc interrupts were being
	 * handled.
	 */
	CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
				   CE_WATERMARK_MASK |
				   HOST_IS_COPY_COMPLETE_MASK);

	/*
	 * Now that per-engine interrupts are cleared, verify that
	 * no recv interrupts arrive while processing send interrupts,
	 * and no recv or send interrupts happened while processing
	 * misc interrupts. Go back and check again. Keep checking until
	 * we find no more events to process.
	 */
	if (CE_state->recv_cb && ce_recv_entries_done_nolock(scn, CE_state)) {
		if (WLAN_IS_EPPING_ENABLED(cds_get_conparam()) ||
		    more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
			goto more_completions;
		} else {
			HIF_ERROR(
				"%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
				__func__, CE_state->dest_ring->nentries_mask,
				CE_state->dest_ring->sw_index,
				CE_DEST_RING_READ_IDX_GET(scn,
							  CE_state->ctrl_addr));
		}
	}

	if (CE_state->send_cb && ce_send_entries_done_nolock(scn, CE_state)) {
		if (WLAN_IS_EPPING_ENABLED(cds_get_conparam()) ||
		    more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
			goto more_completions;
		} else {
			HIF_ERROR(
				"%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
				__func__, CE_state->src_ring->nentries_mask,
				CE_state->src_ring->sw_index,
				CE_SRC_RING_READ_IDX_GET(scn,
							 CE_state->ctrl_addr));
		}
	}

	if (CE_state->misc_cbs) {
		CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
		if (CE_int_status & CE_WATERMARK_MASK) {
			if (CE_state->watermark_cb) {
				goto more_watermarks;
			}
		}
	}

	cdf_spin_unlock(&scn->target_lock);
	cdf_atomic_set(&CE_state->rx_pending, 0);

	if (Q_TARGET_ACCESS_END(scn) < 0)
		HIF_ERROR("<--[premature rc=%d]\n", scn->receive_count);
	return scn->receive_count;
}
1366
/*
 * Handler for per-engine interrupts on ALL active CEs.
 * This is used in cases where the system is sharing a
 * single interrupt for all CEs
 */
1372
1373void ce_per_engine_service_any(int irq, struct ol_softc *scn)
1374{
1375 int CE_id;
1376 uint32_t intr_summary;
1377
1378 A_TARGET_ACCESS_BEGIN(scn);
1379 if (!cdf_atomic_read(&scn->tasklet_from_intr)) {
1380 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1381 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1382 if (cdf_atomic_read(&CE_state->rx_pending)) {
1383 cdf_atomic_set(&CE_state->rx_pending, 0);
1384 ce_per_engine_service(scn, CE_id);
1385 }
1386 }
1387
1388 A_TARGET_ACCESS_END(scn);
1389 return;
1390 }
1391
1392 intr_summary = CE_INTERRUPT_SUMMARY(scn);
1393
1394 for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
1395 if (intr_summary & (1 << CE_id)) {
1396 intr_summary &= ~(1 << CE_id);
1397 } else {
1398 continue; /* no intr pending on this CE */
1399 }
1400
1401 ce_per_engine_service(scn, CE_id);
1402 }
1403
1404 A_TARGET_ACCESS_END(scn);
1405}
1406
1407/*
1408 * Adjust interrupts for the copy complete handler.
1409 * If it's needed for either send or recv, then unmask
1410 * this interrupt; otherwise, mask it.
1411 *
1412 * Called with target_lock held.
1413 */
1414static void
1415ce_per_engine_handler_adjust(struct CE_state *CE_state,
1416 int disable_copy_compl_intr)
1417{
1418 uint32_t ctrl_addr = CE_state->ctrl_addr;
1419 struct ol_softc *scn = CE_state->scn;
1420
1421 CE_state->disable_copy_compl_intr = disable_copy_compl_intr;
1422 A_TARGET_ACCESS_BEGIN(scn);
1423 if ((!disable_copy_compl_intr) &&
1424 (CE_state->send_cb || CE_state->recv_cb)) {
1425 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1426 } else {
1427 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1428 }
1429
1430 if (CE_state->watermark_cb) {
1431 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1432 } else {
1433 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1434 }
1435 A_TARGET_ACCESS_END(scn);
1436
1437}
1438
1439/*Iterate the CE_state list and disable the compl interrupt
1440 * if it has been registered already.
1441 */
1442void ce_disable_any_copy_compl_intr_nolock(struct ol_softc *scn)
1443{
1444 int CE_id;
1445
1446 A_TARGET_ACCESS_BEGIN(scn);
1447 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1448 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1449 uint32_t ctrl_addr = CE_state->ctrl_addr;
1450
1451 /* if the interrupt is currently enabled, disable it */
1452 if (!CE_state->disable_copy_compl_intr
1453 && (CE_state->send_cb || CE_state->recv_cb)) {
1454 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1455 }
1456
1457 if (CE_state->watermark_cb) {
1458 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1459 }
1460 }
1461 A_TARGET_ACCESS_END(scn);
1462}
1463
1464void ce_enable_any_copy_compl_intr_nolock(struct ol_softc *scn)
1465{
1466 int CE_id;
1467
1468 A_TARGET_ACCESS_BEGIN(scn);
1469 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1470 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1471 uint32_t ctrl_addr = CE_state->ctrl_addr;
1472
1473 /*
1474 * If the CE is supposed to have copy complete interrupts
1475 * enabled (i.e. there a callback registered, and the
1476 * "disable" flag is not set), then re-enable the interrupt.
1477 */
1478 if (!CE_state->disable_copy_compl_intr
1479 && (CE_state->send_cb || CE_state->recv_cb)) {
1480 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1481 }
1482
1483 if (CE_state->watermark_cb) {
1484 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1485 }
1486 }
1487 A_TARGET_ACCESS_END(scn);
1488}
1489
/* Locked wrapper: take target_lock, then disable copy-complete
 * interrupts on every registered copy engine.
 */
void ce_disable_any_copy_compl_intr(struct ol_softc *scn)
{
	cdf_spin_lock(&scn->target_lock);
	ce_disable_any_copy_compl_intr_nolock(scn);
	cdf_spin_unlock(&scn->target_lock);
}
1496
/* Re-enable the copy compl interrupt if it has not been disabled before.
 * Locked wrapper around ce_enable_any_copy_compl_intr_nolock().
 */
void ce_enable_any_copy_compl_intr(struct ol_softc *scn)
{
	cdf_spin_lock(&scn->target_lock);
	ce_enable_any_copy_compl_intr_nolock(scn);
	cdf_spin_unlock(&scn->target_lock);
}
1504
1505void
1506ce_send_cb_register(struct CE_handle *copyeng,
1507 ce_send_cb fn_ptr,
1508 void *ce_send_context, int disable_interrupts)
1509{
1510 struct CE_state *CE_state = (struct CE_state *)copyeng;
1511
Sanjay Devnani9ce15772015-11-12 14:08:57 -08001512 if (CE_state == NULL) {
1513 pr_err("%s: Error CE state = NULL\n", __func__);
1514 return;
1515 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001516 cdf_spin_lock(&CE_state->scn->target_lock);
1517 CE_state->send_cb = fn_ptr;
1518 CE_state->send_context = ce_send_context;
1519 ce_per_engine_handler_adjust(CE_state, disable_interrupts);
1520 cdf_spin_unlock(&CE_state->scn->target_lock);
1521}
1522
1523void
1524ce_recv_cb_register(struct CE_handle *copyeng,
1525 CE_recv_cb fn_ptr,
1526 void *CE_recv_context, int disable_interrupts)
1527{
1528 struct CE_state *CE_state = (struct CE_state *)copyeng;
1529
Sanjay Devnani9ce15772015-11-12 14:08:57 -08001530 if (CE_state == NULL) {
1531 pr_err("%s: ERROR CE state = NULL\n", __func__);
1532 return;
1533 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001534 cdf_spin_lock(&CE_state->scn->target_lock);
1535 CE_state->recv_cb = fn_ptr;
1536 CE_state->recv_context = CE_recv_context;
1537 ce_per_engine_handler_adjust(CE_state, disable_interrupts);
1538 cdf_spin_unlock(&CE_state->scn->target_lock);
1539}
1540
1541void
1542ce_watermark_cb_register(struct CE_handle *copyeng,
1543 CE_watermark_cb fn_ptr, void *CE_wm_context)
1544{
1545 struct CE_state *CE_state = (struct CE_state *)copyeng;
1546
1547 cdf_spin_lock(&CE_state->scn->target_lock);
1548 CE_state->watermark_cb = fn_ptr;
1549 CE_state->wm_context = CE_wm_context;
1550 ce_per_engine_handler_adjust(CE_state, 0);
1551 if (fn_ptr) {
1552 CE_state->misc_cbs = 1;
1553 }
1554 cdf_spin_unlock(&CE_state->scn->target_lock);
1555}
1556
#ifdef WLAN_FEATURE_FASTPATH
/**
 * ce_pkt_dl_len_set() set the HTT packet download length
 * @hif_sc: HIF context
 * @pkt_download_len: download length
 *
 * Stores the length in the HTT H2T message CE's state under target_lock
 * (bottom halves disabled) and logs the new value.
 *
 * Return: None
 */
void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
{
	struct ol_softc *sc = (struct ol_softc *)(hif_sc);
	struct CE_state *ce_state = sc->ce_id_to_state[CE_HTT_H2T_MSG];

	/* The HTT H2T CE must exist before tuning its download length */
	cdf_assert_always(ce_state);

	/* target_lock serializes the update against concurrent readers */
	cdf_spin_lock_bh(&sc->target_lock);
	ce_state->download_len = pkt_download_len;
	cdf_spin_unlock_bh(&sc->target_lock);

	cdf_print("%s CE %d Pkt download length %d\n", __func__,
		  ce_state->id, ce_state->download_len);
}
#else
/* Fastpath disabled: setting the download length is a no-op */
void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
{
}
#endif /* WLAN_FEATURE_FASTPATH */
1584
1585bool ce_get_rx_pending(struct ol_softc *scn)
1586{
1587 int CE_id;
1588
1589 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1590 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1591 if (cdf_atomic_read(&CE_state->rx_pending))
1592 return true;
1593 }
1594
1595 return false;
1596}
1597
1598/**
1599 * ce_check_rx_pending() - ce_check_rx_pending
1600 * @scn: ol_softc
1601 * @ce_id: ce_id
1602 *
1603 * Return: bool
1604 */
1605bool ce_check_rx_pending(struct ol_softc *scn, int ce_id)
1606{
1607 struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
1608 if (cdf_atomic_read(&CE_state->rx_pending))
1609 return true;
1610 else
1611 return false;
1612}
/**
 * ce_enable_msi() - program a copy engine's MSI registers
 * @scn: HIF context
 * @CE_id: which copy engine to configure
 * @msi_addr_lo: low 32 bits of the MSI address
 * @msi_addr_hi: high 32 bits of the MSI address
 * @msi_data: MSI data value
 *
 * Writes the MSI address/data registers and sets the MSI enable bit in
 * control register 1, all under target_lock. Compiled to a no-op unless
 * WLAN_ENABLE_QCA6180 is defined.
 */
void ce_enable_msi(struct ol_softc *scn, unsigned int CE_id,
		   uint32_t msi_addr_lo, uint32_t msi_addr_hi,
		   uint32_t msi_data)
{
#ifdef WLAN_ENABLE_QCA6180
	struct CE_state *CE_state;
	u_int32_t ctrl_addr;
	uint32_t tmp;

	/* Use the cdf_ lock wrappers like the rest of this file. The old
	 * body referenced 'targid = TARGID(sc)' where 'sc' was never
	 * defined in this scope and 'targid' was never used, so it could
	 * not compile with this ifdef enabled; both are removed.
	 */
	cdf_spin_lock(&scn->target_lock);
	CE_state = scn->ce_id_to_state[CE_id];
	if (!CE_state) {
		HIF_ERROR("%s: error - CE_state = NULL", __func__);
		cdf_spin_unlock(&scn->target_lock);
		return;
	}
	ctrl_addr = CE_state->ctrl_addr;
	CE_MSI_ADDR_LOW_SET(scn, ctrl_addr, msi_addr_lo);
	CE_MSI_ADDR_HIGH_SET(scn, ctrl_addr, msi_addr_hi);
	CE_MSI_DATA_SET(scn, ctrl_addr, msi_data);
	tmp = CE_CTRL_REGISTER1_GET(scn, ctrl_addr);
	tmp |= (1 << CE_MSI_ENABLE_BIT);
	CE_CTRL_REGISTER1_SET(scn, ctrl_addr, tmp);
	cdf_spin_unlock(&scn->target_lock);
#endif
}
1641
1642#ifdef IPA_OFFLOAD
/*
 * The copy engine should release these resources to the micro controller.
 * The micro controller needs:
 *  - the copy engine source descriptor base address
 *  - the copy engine source descriptor size
 *  - the PCI BAR address, to access copy engine registers
 */
1650void ce_ipa_get_resource(struct CE_handle *ce,
1651 uint32_t *ce_sr_base_paddr,
1652 uint32_t *ce_sr_ring_size,
1653 cdf_dma_addr_t *ce_reg_paddr)
1654{
1655 struct CE_state *CE_state = (struct CE_state *)ce;
1656 uint32_t ring_loop;
1657 struct CE_src_desc *ce_desc;
1658 cdf_dma_addr_t phy_mem_base;
1659 struct ol_softc *scn = CE_state->scn;
1660
1661 if (CE_RUNNING != CE_state->state) {
1662 *ce_sr_base_paddr = 0;
1663 *ce_sr_ring_size = 0;
1664 return;
1665 }
1666
1667 /* Update default value for descriptor */
1668 for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
1669 ring_loop++) {
1670 ce_desc = (struct CE_src_desc *)
1671 ((char *)CE_state->src_ring->base_addr_owner_space +
1672 ring_loop * (sizeof(struct CE_src_desc)));
1673 CE_IPA_RING_INIT(ce_desc);
1674 }
1675
1676 /* Get BAR address */
1677 hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
1678
1679 *ce_sr_base_paddr = (uint32_t) CE_state->src_ring->base_addr_CE_space;
1680 *ce_sr_ring_size = (uint32_t) CE_state->src_ring->nentries;
1681 *ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
1682 SR_WR_INDEX_ADDRESS;
1683 return;
1684}
1685#endif /* IPA_OFFLOAD */
1686