1/*
2 * Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
3 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28#include <osdep.h>
29#include "a_types.h"
30#include <athdefs.h>
31#include "osapi_linux.h"
32#include "hif.h"
33#include "hif_io32.h"
34#include "ce_api.h"
35#include "ce_main.h"
36#include "ce_internal.h"
37#include "ce_reg.h"
38#include "cdf_lock.h"
39#include "regtable.h"
40#include "epping_main.h"
41#include "hif_main.h"
42#include "hif_debug.h"
43#include "cds_concurrency.h"
44
45#ifdef IPA_OFFLOAD
46#ifdef QCA_WIFI_3_0
47#define CE_IPA_RING_INIT(ce_desc) \
48 do { \
49 ce_desc->gather = 0; \
50 ce_desc->enable_11h = 0; \
51 ce_desc->meta_data_low = 0; \
52 ce_desc->packet_result_offset = 64; \
53 ce_desc->toeplitz_hash_enable = 0; \
54 ce_desc->addr_y_search_disable = 0; \
55 ce_desc->addr_x_search_disable = 0; \
56 ce_desc->misc_int_disable = 0; \
57 ce_desc->target_int_disable = 0; \
58 ce_desc->host_int_disable = 0; \
59 ce_desc->dest_byte_swap = 0; \
60 ce_desc->byte_swap = 0; \
61 ce_desc->type = 2; \
62 ce_desc->tx_classify = 1; \
63 ce_desc->buffer_addr_hi = 0; \
64 ce_desc->meta_data = 0; \
65 ce_desc->nbytes = 128; \
66 } while (0)
67#else
68#define CE_IPA_RING_INIT(ce_desc) \
69 do { \
70 ce_desc->byte_swap = 0; \
71 ce_desc->nbytes = 60; \
72 ce_desc->gather = 0; \
73 } while (0)
74#endif /* QCA_WIFI_3_0 */
75#endif /* IPA_OFFLOAD */
76
77static int war1_allow_sleep;
78/* io32 write workaround */
79static int hif_ce_war1;
80
81/*
82 * Support for Copy Engine hardware, which is mainly used for
83 * communication between Host and Target over a PCIe interconnect.
84 */
85
86/*
87 * A single CopyEngine (CE) comprises two "rings":
88 * a source ring
89 * a destination ring
90 *
91 * Each ring consists of a number of descriptors which specify
92 * an address, length, and meta-data.
93 *
94 * Typically, one side of the PCIe interconnect (Host or Target)
95 * controls one ring and the other side controls the other ring.
96 * The source side chooses when to initiate a transfer and it
97 * chooses what to send (buffer address, length). The destination
98 * side keeps a supply of "anonymous receive buffers" available and
99 * it handles incoming data as it arrives (when the destination
100 * receives an interrupt).
101 *
102 * The sender may send a simple buffer (address/length) or it may
103 * send a small list of buffers. When a small list is sent, hardware
104 * "gathers" these and they end up in a single destination buffer
105 * with a single interrupt.
106 *
107 * There are several "contexts" managed by this layer -- more, it
108 * may seem, than should be needed. These are provided mainly for
109 * maximum flexibility and especially to facilitate a simpler HIF
110 * implementation. There are per-CopyEngine recv, send, and watermark
111 * contexts. These are supplied by the caller when a recv, send,
112 * or watermark handler is established and they are echoed back to
113 * the caller when the respective callbacks are invoked. There is
114 * also a per-transfer context supplied by the caller when a buffer
115 * (or sendlist) is sent and when a buffer is enqueued for recv.
116 * These per-transfer contexts are echoed back to the caller when
117 * the buffer is sent/received.
118 * Target TX hash result: toeplitz_hash_result
119 */
120
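/*
 * Illustrative usage (editor's sketch, not part of the driver): a caller such
 * as the HIF layer typically registers a send completion callback and then
 * posts buffers, passing a per-transfer context that is echoed back when the
 * transfer completes.  'ce_hdl', 'my_send_done', 'my_ctx', 'my_xfer_ctx' and
 * 'dma_addr' below are hypothetical placeholders:
 *
 *      ce_send_cb_register(ce_hdl, my_send_done, my_ctx, 0);
 *      status = ce_send(ce_hdl, my_xfer_ctx, dma_addr, nbytes,
 *                       transfer_id, 0, 0);
 *
 * When the Target consumes the descriptor, my_send_done() is invoked with
 * both my_ctx (the per-CE send context) and my_xfer_ctx (the per-transfer
 * context), so the caller can reclaim the buffer.
 */
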
121/*
122 * Guts of ce_send, used by both ce_send and ce_sendlist_send.
123 * The caller takes responsibility for any needed locking.
124 */
125int
126ce_completed_send_next_nolock(struct CE_state *CE_state,
127 void **per_CE_contextp,
128 void **per_transfer_contextp,
129 cdf_dma_addr_t *bufferp,
130 unsigned int *nbytesp,
131 unsigned int *transfer_idp,
132 unsigned int *sw_idx, unsigned int *hw_idx,
133 uint32_t *toeplitz_hash_result);
134
135void war_ce_src_ring_write_idx_set(struct ol_softc *scn,
136 u32 ctrl_addr, unsigned int write_index)
137{
138 if (hif_ce_war1) {
139 void __iomem *indicator_addr;
140
141 indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;
142
143 if (!war1_allow_sleep
144 && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
145 hif_write32_mb(indicator_addr,
146 (CDC_WAR_MAGIC_STR | write_index));
147 } else {
148 unsigned long irq_flags;
149 local_irq_save(irq_flags);
150 hif_write32_mb(indicator_addr, 1);
151
152 /*
153 * PCIE write waits for ACK in IPQ8K, there is no
154 * need to read back value.
155 */
156 (void)hif_read32_mb(indicator_addr);
157 (void)hif_read32_mb(indicator_addr); /* conservative */
158
159 CE_SRC_RING_WRITE_IDX_SET(scn,
160 ctrl_addr, write_index);
161
162 hif_write32_mb(indicator_addr, 0);
163 local_irq_restore(irq_flags);
164 }
165 } else
166 CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
167}
168
169int
170ce_send_nolock(struct CE_handle *copyeng,
171 void *per_transfer_context,
172 cdf_dma_addr_t buffer,
173 uint32_t nbytes,
174 uint32_t transfer_id,
175 uint32_t flags,
176 uint32_t user_flags)
177{
178 int status;
179 struct CE_state *CE_state = (struct CE_state *)copyeng;
180 struct CE_ring_state *src_ring = CE_state->src_ring;
181 uint32_t ctrl_addr = CE_state->ctrl_addr;
182 unsigned int nentries_mask = src_ring->nentries_mask;
183 unsigned int sw_index = src_ring->sw_index;
184 unsigned int write_index = src_ring->write_index;
185 uint64_t dma_addr = buffer;
186 struct ol_softc *scn = CE_state->scn;
187
188 A_TARGET_ACCESS_BEGIN_RET(scn);
189 if (unlikely(CE_RING_DELTA(nentries_mask,
190 write_index, sw_index - 1) <= 0)) {
191 OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
192 status = CDF_STATUS_E_FAILURE;
193 A_TARGET_ACCESS_END_RET(scn);
194 return status;
195 }
196 {
197 struct CE_src_desc *src_ring_base =
198 (struct CE_src_desc *)src_ring->base_addr_owner_space;
199 struct CE_src_desc *shadow_base =
200 (struct CE_src_desc *)src_ring->shadow_base;
201 struct CE_src_desc *src_desc =
202 CE_SRC_RING_TO_DESC(src_ring_base, write_index);
203 struct CE_src_desc *shadow_src_desc =
204 CE_SRC_RING_TO_DESC(shadow_base, write_index);
205
206 /* Update low 32 bits source descriptor address */
207 shadow_src_desc->buffer_addr =
208 (uint32_t)(dma_addr & 0xFFFFFFFF);
209#ifdef QCA_WIFI_3_0
210 shadow_src_desc->buffer_addr_hi =
211 (uint32_t)((dma_addr >> 32) & 0x1F);
212 user_flags |= shadow_src_desc->buffer_addr_hi;
213 memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
214 sizeof(uint32_t));
215#endif
216 shadow_src_desc->meta_data = transfer_id;
217
218 /*
219 * Set the swap bit if:
220 * typical sends on this CE are swapped (host is big-endian)
221 * and this send doesn't disable the swapping
222 * (data is not bytestream)
223 */
224 shadow_src_desc->byte_swap =
225 (((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
226 != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
227 shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
228 shadow_src_desc->nbytes = nbytes;
229
230 *src_desc = *shadow_src_desc;
231
232 src_ring->per_transfer_context[write_index] =
233 per_transfer_context;
234
235 /* Update Source Ring Write Index */
236 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
237
238 /* WORKAROUND */
239 if (!shadow_src_desc->gather) {
240 war_ce_src_ring_write_idx_set(scn, ctrl_addr,
241 write_index);
242 }
243
244 src_ring->write_index = write_index;
245 status = CDF_STATUS_SUCCESS;
246 }
247 A_TARGET_ACCESS_END_RET(scn);
248
249 return status;
250}
251
252int
253ce_send(struct CE_handle *copyeng,
254 void *per_transfer_context,
255 cdf_dma_addr_t buffer,
256 uint32_t nbytes,
257 uint32_t transfer_id,
258 uint32_t flags,
259 uint32_t user_flag)
260{
261 struct CE_state *CE_state = (struct CE_state *)copyeng;
262 int status;
263
264 cdf_spin_lock_bh(&CE_state->scn->target_lock);
265 status = ce_send_nolock(copyeng, per_transfer_context, buffer, nbytes,
266 transfer_id, flags, user_flag);
267 cdf_spin_unlock_bh(&CE_state->scn->target_lock);
268
269 return status;
270}
271
272unsigned int ce_sendlist_sizeof(void)
273{
274 return sizeof(struct ce_sendlist);
275}
276
277void ce_sendlist_init(struct ce_sendlist *sendlist)
278{
279 struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
280 sl->num_items = 0;
281}
282
283int
284ce_sendlist_buf_add(struct ce_sendlist *sendlist,
285 cdf_dma_addr_t buffer,
286 uint32_t nbytes,
287 uint32_t flags,
288 uint32_t user_flags)
289{
290 struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
291 unsigned int num_items = sl->num_items;
292 struct ce_sendlist_item *item;
293
294 if (num_items >= CE_SENDLIST_ITEMS_MAX) {
295 CDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
296 return CDF_STATUS_E_RESOURCES;
297 }
298
299 item = &sl->item[num_items];
300 item->send_type = CE_SIMPLE_BUFFER_TYPE;
301 item->data = buffer;
302 item->u.nbytes = nbytes;
303 item->flags = flags;
304 item->user_flags = user_flags;
305 sl->num_items = num_items + 1;
306 return CDF_STATUS_SUCCESS;
307}
308
309int
310ce_sendlist_send(struct CE_handle *copyeng,
311 void *per_transfer_context,
312 struct ce_sendlist *sendlist, unsigned int transfer_id)
313{
314 int status = -ENOMEM;
315 struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
316 struct CE_state *CE_state = (struct CE_state *)copyeng;
317 struct CE_ring_state *src_ring = CE_state->src_ring;
318 unsigned int nentries_mask = src_ring->nentries_mask;
319 unsigned int num_items = sl->num_items;
320 unsigned int sw_index;
321 unsigned int write_index;
322
323 CDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));
324
325 cdf_spin_lock_bh(&CE_state->scn->target_lock);
326 sw_index = src_ring->sw_index;
327 write_index = src_ring->write_index;
328
329 if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
330 num_items) {
331 struct ce_sendlist_item *item;
332 int i;
333
334 /* handle all but the last item uniformly */
335 for (i = 0; i < num_items - 1; i++) {
336 item = &sl->item[i];
337 /* TBDXXX: Support extensible sendlist_types? */
338 CDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
339 status = ce_send_nolock(copyeng, CE_SENDLIST_ITEM_CTXT,
340 (cdf_dma_addr_t) item->data,
341 item->u.nbytes, transfer_id,
342 item->flags | CE_SEND_FLAG_GATHER,
343 item->user_flags);
344 CDF_ASSERT(status == CDF_STATUS_SUCCESS);
345 }
346 /* provide valid context pointer for final item */
347 item = &sl->item[i];
348 /* TBDXXX: Support extensible sendlist_types? */
349 CDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
350 status = ce_send_nolock(copyeng, per_transfer_context,
351 (cdf_dma_addr_t) item->data,
352 item->u.nbytes,
353 transfer_id, item->flags,
354 item->user_flags);
355 CDF_ASSERT(status == CDF_STATUS_SUCCESS);
356 NBUF_UPDATE_TX_PKT_COUNT((cdf_nbuf_t)per_transfer_context,
357 NBUF_TX_PKT_CE);
358 DPTRACE(cdf_dp_trace((cdf_nbuf_t)per_transfer_context,
359 CDF_DP_TRACE_CE_PACKET_PTR_RECORD,
360 (uint8_t *)(((cdf_nbuf_t)per_transfer_context)->data),
361 sizeof(((cdf_nbuf_t)per_transfer_context)->data)));
362 } else {
363 /*
364 * Probably not worth the additional complexity to support
365 * partial sends with continuation or notification. We expect
366 * to use large rings and small sendlists. If we can't handle
367 * the entire request at once, punt it back to the caller.
368 */
369 }
370 cdf_spin_unlock_bh(&CE_state->scn->target_lock);
371
372 return status;
373}
374
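/*
 * Illustrative usage (editor's sketch, not part of the driver): building and
 * posting a small gather list.  'ce_hdl', 'hdr_paddr', 'payload_paddr' and
 * the lengths are hypothetical placeholders.  Each item consumes one source
 * ring entry; only the final item carries the caller's per-transfer context,
 * the intermediate ones are posted with CE_SENDLIST_ITEM_CTXT, as
 * ce_sendlist_send() does above.
 *
 *      struct ce_sendlist sendlist;
 *
 *      ce_sendlist_init(&sendlist);
 *      ce_sendlist_buf_add(&sendlist, hdr_paddr, hdr_len, 0, 0);
 *      ce_sendlist_buf_add(&sendlist, payload_paddr, payload_len, 0, 0);
 *      ce_sendlist_send(ce_hdl, my_xfer_ctx, &sendlist, transfer_id);
 */
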
375#ifdef WLAN_FEATURE_FASTPATH
376#ifdef QCA_WIFI_3_0
377static inline void
378ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
379 uint64_t dma_addr,
380 uint32_t user_flags)
381{
382 shadow_src_desc->buffer_addr_hi =
383 (uint32_t)((dma_addr >> 32) & 0x1F);
384 user_flags |= shadow_src_desc->buffer_addr_hi;
385 memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
386 sizeof(uint32_t));
387}
388#else
389static inline void
390ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
391 uint64_t dma_addr,
392 uint32_t user_flags)
393{
394}
395#endif
396
397/**
398 * ce_send_fast() - CE layer Tx buffer posting function
399 * @copyeng: copy engine handle
400 * @msdus: array of msdus to be sent
401 * @num_msdus: number of msdus in the array
402 * @transfer_id: transfer ID
403 *
404 * Assumption: called with an array of MSDUs.
405 * Function:
406 * For each msdu in the array
407 * 1. Check the number of available entries
408 * 2. Create source ring entries (allocated in consistent memory)
409 * 3. Write the index to h/w
410 *
411 * Return: No. of packets that could be sent
412 */
413
414int ce_send_fast(struct CE_handle *copyeng, cdf_nbuf_t *msdus,
415 unsigned int num_msdus, unsigned int transfer_id)
416{
417 struct CE_state *ce_state = (struct CE_state *)copyeng;
418 struct ol_softc *scn = ce_state->scn;
419 struct CE_ring_state *src_ring = ce_state->src_ring;
420 u_int32_t ctrl_addr = ce_state->ctrl_addr;
421 unsigned int nentries_mask = src_ring->nentries_mask;
422 unsigned int write_index;
423 unsigned int sw_index;
424 unsigned int frag_len;
425 cdf_nbuf_t msdu;
426 int i;
427 uint64_t dma_addr;
428 uint32_t user_flags = 0;
429
430 /*
431 * This lock could be more fine-grained, one per CE;
432 * TODO: add the per-CE lock.
433 * That is the next step of optimization.
434 */
435 cdf_spin_lock_bh(&scn->target_lock);
436 sw_index = src_ring->sw_index;
437 write_index = src_ring->write_index;
438
439 /* Each msdu uses two source ring descriptors: HTC/HTT header + data */
440 for (i = 0; i < num_msdus; i++) {
441 struct CE_src_desc *src_ring_base =
442 (struct CE_src_desc *)src_ring->base_addr_owner_space;
443 struct CE_src_desc *shadow_base =
444 (struct CE_src_desc *)src_ring->shadow_base;
445 struct CE_src_desc *src_desc =
446 CE_SRC_RING_TO_DESC(src_ring_base, write_index);
447 struct CE_src_desc *shadow_src_desc =
448 CE_SRC_RING_TO_DESC(shadow_base, write_index);
449
450 msdu = msdus[i];
451
452 /*
453 * First fill out the ring descriptor for the HTC HTT frame
454 * header. These are uncached writes. Should we use a local
455 * structure instead?
456 */
457 /* HTT/HTC header can be passed as an argument */
458 dma_addr = cdf_nbuf_get_frag_paddr_lo(msdu, 0);
459 shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
460 0xFFFFFFFF);
461 user_flags = cdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
462 ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
463
464 shadow_src_desc->meta_data = transfer_id;
465 shadow_src_desc->nbytes = cdf_nbuf_get_frag_len(msdu, 0);
466
467 /*
468 * HTC HTT header is a word stream, so byte swap if CE byte
469 * swap enabled
470 */
471 shadow_src_desc->byte_swap = ((ce_state->attr_flags &
472 CE_ATTR_BYTE_SWAP_DATA) != 0);
473 /* For the first (header) descriptor, no need to write the index to h/w yet */
474 shadow_src_desc->gather = 1;
475 *src_desc = *shadow_src_desc;
476
477 /* By default we could initialize the transfer context to this
478 * value
479 */
480 src_ring->per_transfer_context[write_index] =
481 CE_SENDLIST_ITEM_CTXT;
482
483 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
484
485 src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
486 shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
487 /*
488 * Now fill out the ring descriptor for the actual data
489 * packet
490 */
491 dma_addr = cdf_nbuf_get_frag_paddr_lo(msdu, 1);
492 shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
493 0xFFFFFFFF);
494 /*
495 * Clear packet offset for all but the first CE desc.
496 */
497 user_flags &= ~CDF_CE_TX_PKT_OFFSET_BIT_M;
498 ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
499 shadow_src_desc->meta_data = transfer_id;
500
501 /* get actual packet length */
502 frag_len = cdf_nbuf_get_frag_len(msdu, 1);
503
504 /* only read download_len once */
505 shadow_src_desc->nbytes = ce_state->download_len;
506 if (shadow_src_desc->nbytes > frag_len)
507 shadow_src_desc->nbytes = frag_len;
508
509 /* Data packet is a byte stream, so disable byte swap */
510 shadow_src_desc->byte_swap = 0;
511 /* For the last one, gather is not set */
512 shadow_src_desc->gather = 0;
513 *src_desc = *shadow_src_desc;
514 src_ring->per_transfer_context[write_index] = msdu;
515 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
516 }
517
518 /* Write the final index to h/w one-shot */
519 if (i) {
520 src_ring->write_index = write_index;
521 /* Don't call WAR_XXX from here
522 * Just call XXX instead, which has the required intelligence
523 */
524 war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
525 }
526
527 cdf_spin_unlock_bh(&scn->target_lock);
528
529 /*
530 * If all packets in the array are transmitted,
531 * i = num_msdus
532 * Temporarily add an ASSERT
533 */
534 ASSERT(i == num_msdus);
535 return i;
536}
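/*
 * Illustrative usage (editor's sketch, not part of the driver): ce_send_fast()
 * expects each nbuf to carry two DMA-mapped fragments, the HTC/HTT header at
 * fragment 0 and the data payload at fragment 1.  'ce_hdl', 'nbuf_array' and
 * 'n' below are hypothetical placeholders:
 *
 *      int sent;
 *
 *      sent = ce_send_fast(ce_hdl, nbuf_array, n, transfer_id);
 *
 * The return value is the number of packets posted; with the current
 * implementation it is expected to equal 'n' (see the ASSERT above).  The
 * header descriptor's per-transfer context is CE_SENDLIST_ITEM_CTXT while
 * the data descriptor carries the msdu itself, which is how send completion
 * reaping tells the two apart.
 */
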
537#endif /* WLAN_FEATURE_FASTPATH */
538
539int
540ce_recv_buf_enqueue(struct CE_handle *copyeng,
541 void *per_recv_context, cdf_dma_addr_t buffer)
542{
543 int status;
544 struct CE_state *CE_state = (struct CE_state *)copyeng;
545 struct CE_ring_state *dest_ring = CE_state->dest_ring;
546 uint32_t ctrl_addr = CE_state->ctrl_addr;
547 unsigned int nentries_mask = dest_ring->nentries_mask;
548 unsigned int write_index;
549 unsigned int sw_index;
550 int val = 0;
551 uint64_t dma_addr = buffer;
552 struct ol_softc *scn = CE_state->scn;
553
554 cdf_spin_lock_bh(&scn->target_lock);
555 write_index = dest_ring->write_index;
556 sw_index = dest_ring->sw_index;
557
558 A_TARGET_ACCESS_BEGIN_RET_EXT(scn, val);
559 if (val == -1) {
560 cdf_spin_unlock_bh(&scn->target_lock);
561 return val;
562 }
563
564 if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
565 struct CE_dest_desc *dest_ring_base =
566 (struct CE_dest_desc *)dest_ring->
567 base_addr_owner_space;
568 struct CE_dest_desc *dest_desc =
569 CE_DEST_RING_TO_DESC(dest_ring_base, write_index);
570
571 /* Update low 32 bit destination descriptor */
572 dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
573#ifdef QCA_WIFI_3_0
574 dest_desc->buffer_addr_hi =
575 (uint32_t)((dma_addr >> 32) & 0x1F);
576#endif
577 dest_desc->nbytes = 0;
578
579 dest_ring->per_transfer_context[write_index] =
580 per_recv_context;
581
582 /* Update Destination Ring Write Index */
583 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
584 CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
585 dest_ring->write_index = write_index;
586 status = CDF_STATUS_SUCCESS;
587 } else {
588 status = CDF_STATUS_E_FAILURE;
589 }
590 A_TARGET_ACCESS_END_RET_EXT(scn, val);
591 if (val == -1) {
592 cdf_spin_unlock_bh(&scn->target_lock);
593 return val;
594 }
595
596 cdf_spin_unlock_bh(&scn->target_lock);
597
598 return status;
599}
600
601void
602ce_send_watermarks_set(struct CE_handle *copyeng,
603 unsigned int low_alert_nentries,
604 unsigned int high_alert_nentries)
605{
606 struct CE_state *CE_state = (struct CE_state *)copyeng;
607 uint32_t ctrl_addr = CE_state->ctrl_addr;
608 struct ol_softc *scn = CE_state->scn;
609
610 cdf_spin_lock(&scn->target_lock);
611 CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
612 CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
613 cdf_spin_unlock(&scn->target_lock);
614}
615
616void
617ce_recv_watermarks_set(struct CE_handle *copyeng,
618 unsigned int low_alert_nentries,
619 unsigned int high_alert_nentries)
620{
621 struct CE_state *CE_state = (struct CE_state *)copyeng;
622 uint32_t ctrl_addr = CE_state->ctrl_addr;
623 struct ol_softc *scn = CE_state->scn;
624
625 cdf_spin_lock(&scn->target_lock);
626 CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
627 low_alert_nentries);
628 CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
629 high_alert_nentries);
630 cdf_spin_unlock(&scn->target_lock);
631}
632
633unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
634{
635 struct CE_state *CE_state = (struct CE_state *)copyeng;
636 struct CE_ring_state *src_ring = CE_state->src_ring;
637 unsigned int nentries_mask = src_ring->nentries_mask;
638 unsigned int sw_index;
639 unsigned int write_index;
640
641 cdf_spin_lock(&CE_state->scn->target_lock);
642 sw_index = src_ring->sw_index;
643 write_index = src_ring->write_index;
644 cdf_spin_unlock(&CE_state->scn->target_lock);
645
646 return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
647}
648
649unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
650{
651 struct CE_state *CE_state = (struct CE_state *)copyeng;
652 struct CE_ring_state *dest_ring = CE_state->dest_ring;
653 unsigned int nentries_mask = dest_ring->nentries_mask;
654 unsigned int sw_index;
655 unsigned int write_index;
656
657 cdf_spin_lock(&CE_state->scn->target_lock);
658 sw_index = dest_ring->sw_index;
659 write_index = dest_ring->write_index;
660 cdf_spin_unlock(&CE_state->scn->target_lock);
661
662 return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
663}
664
665/*
666 * Guts of ce_send_entries_done.
667 * The caller takes responsibility for any necessary locking.
668 */
669unsigned int
670ce_send_entries_done_nolock(struct ol_softc *scn,
671 struct CE_state *CE_state)
672{
673 struct CE_ring_state *src_ring = CE_state->src_ring;
674 uint32_t ctrl_addr = CE_state->ctrl_addr;
675 unsigned int nentries_mask = src_ring->nentries_mask;
676 unsigned int sw_index;
677 unsigned int read_index;
678
679 sw_index = src_ring->sw_index;
680 read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
681
682 return CE_RING_DELTA(nentries_mask, sw_index, read_index);
683}
684
685unsigned int ce_send_entries_done(struct CE_handle *copyeng)
686{
687 struct CE_state *CE_state = (struct CE_state *)copyeng;
688 unsigned int nentries;
689
690 cdf_spin_lock(&CE_state->scn->target_lock);
691 nentries = ce_send_entries_done_nolock(CE_state->scn, CE_state);
692 cdf_spin_unlock(&CE_state->scn->target_lock);
693
694 return nentries;
695}
696
697/*
698 * Guts of ce_recv_entries_done.
699 * The caller takes responsibility for any necessary locking.
700 */
701unsigned int
702ce_recv_entries_done_nolock(struct ol_softc *scn,
703 struct CE_state *CE_state)
704{
705 struct CE_ring_state *dest_ring = CE_state->dest_ring;
706 uint32_t ctrl_addr = CE_state->ctrl_addr;
707 unsigned int nentries_mask = dest_ring->nentries_mask;
708 unsigned int sw_index;
709 unsigned int read_index;
710
711 sw_index = dest_ring->sw_index;
712 read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
713
714 return CE_RING_DELTA(nentries_mask, sw_index, read_index);
715}
716
717unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
718{
719 struct CE_state *CE_state = (struct CE_state *)copyeng;
720 unsigned int nentries;
721
722 cdf_spin_lock(&CE_state->scn->target_lock);
723 nentries = ce_recv_entries_done_nolock(CE_state->scn, CE_state);
724 cdf_spin_unlock(&CE_state->scn->target_lock);
725
726 return nentries;
727}
728
729/* Debug support */
730void *ce_debug_cmplrn_context; /* completed recv next context */
731void *ce_debug_cnclsn_context; /* cancel send next context */
732void *ce_debug_rvkrn_context; /* revoke receive next context */
733void *ce_debug_cmplsn_context; /* completed send next context */
734
735/*
736 * Guts of ce_completed_recv_next.
737 * The caller takes responsibility for any necessary locking.
738 */
739int
740ce_completed_recv_next_nolock(struct CE_state *CE_state,
741 void **per_CE_contextp,
742 void **per_transfer_contextp,
743 cdf_dma_addr_t *bufferp,
744 unsigned int *nbytesp,
745 unsigned int *transfer_idp,
746 unsigned int *flagsp)
747{
748 int status;
749 struct CE_ring_state *dest_ring = CE_state->dest_ring;
750 unsigned int nentries_mask = dest_ring->nentries_mask;
751 unsigned int sw_index = dest_ring->sw_index;
752
753 struct CE_dest_desc *dest_ring_base =
754 (struct CE_dest_desc *)dest_ring->base_addr_owner_space;
755 struct CE_dest_desc *dest_desc =
756 CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
757 int nbytes;
758 struct CE_dest_desc dest_desc_info;
759 /*
760 * By copying the dest_desc_info element to local memory, we
761 * avoid an extra memory read from non-cacheable memory.
762 */
763 dest_desc_info = *dest_desc;
764 nbytes = dest_desc_info.nbytes;
765 if (nbytes == 0) {
766 /*
767 * This closes a relatively unusual race where the Host
768 * sees the updated DRRI before the update to the
769 * corresponding descriptor has completed. We treat this
770 * as a descriptor that is not yet done.
771 */
772 status = CDF_STATUS_E_FAILURE;
773 goto done;
774 }
775
776 dest_desc->nbytes = 0;
777
778 /* Return data from completed destination descriptor */
779 *bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
780 *nbytesp = nbytes;
781 *transfer_idp = dest_desc_info.meta_data;
782 *flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;
783
784 if (per_CE_contextp) {
785 *per_CE_contextp = CE_state->recv_context;
786 }
787
788 ce_debug_cmplrn_context = dest_ring->per_transfer_context[sw_index];
789 if (per_transfer_contextp) {
790 *per_transfer_contextp = ce_debug_cmplrn_context;
791 }
792 dest_ring->per_transfer_context[sw_index] = 0; /* sanity */
793
794 /* Update sw_index */
795 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
796 dest_ring->sw_index = sw_index;
797 status = CDF_STATUS_SUCCESS;
798
799done:
800 return status;
801}
802
803int
804ce_completed_recv_next(struct CE_handle *copyeng,
805 void **per_CE_contextp,
806 void **per_transfer_contextp,
807 cdf_dma_addr_t *bufferp,
808 unsigned int *nbytesp,
809 unsigned int *transfer_idp, unsigned int *flagsp)
810{
811 struct CE_state *CE_state = (struct CE_state *)copyeng;
812 int status;
813
814 cdf_spin_lock_bh(&CE_state->scn->target_lock);
815 status =
816 ce_completed_recv_next_nolock(CE_state, per_CE_contextp,
817 per_transfer_contextp, bufferp,
818 nbytesp, transfer_idp, flagsp);
819 cdf_spin_unlock_bh(&CE_state->scn->target_lock);
820
821 return status;
822}
823
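/*
 * Illustrative usage (editor's sketch, not part of the driver): the receive
 * side first stocks the destination ring with anonymous buffers and later
 * reaps whatever the Target has filled in.  'ce_hdl', 'my_rx_ctx',
 * 'buf_paddr' and process_rx() are hypothetical placeholders:
 *
 *      ce_recv_buf_enqueue(ce_hdl, my_rx_ctx, buf_paddr);
 *
 *      while (ce_completed_recv_next(ce_hdl, &ce_ctx, &xfer_ctx, &paddr,
 *                                    &nbytes, &id, &flags) ==
 *             CDF_STATUS_SUCCESS) {
 *              process_rx(xfer_ctx, nbytes, flags);
 *              ce_recv_buf_enqueue(ce_hdl, xfer_ctx, paddr);
 *      }
 *
 * Re-posting the same buffer is a simplification; a real caller would map a
 * fresh buffer before handing the completed one up the stack.
 */
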
824/* NB: Modeled after ce_completed_recv_next_nolock */
825CDF_STATUS
826ce_revoke_recv_next(struct CE_handle *copyeng,
827 void **per_CE_contextp,
828 void **per_transfer_contextp, cdf_dma_addr_t *bufferp)
829{
830 struct CE_state *CE_state;
831 struct CE_ring_state *dest_ring;
832 unsigned int nentries_mask;
833 unsigned int sw_index;
834 unsigned int write_index;
835 CDF_STATUS status;
836 struct ol_softc *scn;
837
838 CE_state = (struct CE_state *)copyeng;
839 dest_ring = CE_state->dest_ring;
840 if (!dest_ring) {
841 return CDF_STATUS_E_FAILURE;
842 }
843
844 scn = CE_state->scn;
845 cdf_spin_lock(&scn->target_lock);
846 nentries_mask = dest_ring->nentries_mask;
847 sw_index = dest_ring->sw_index;
848 write_index = dest_ring->write_index;
849 if (write_index != sw_index) {
850 struct CE_dest_desc *dest_ring_base =
851 (struct CE_dest_desc *)dest_ring->
852 base_addr_owner_space;
853 struct CE_dest_desc *dest_desc =
854 CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
855
856 /* Return data from completed destination descriptor */
857 *bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);
858
859 if (per_CE_contextp) {
860 *per_CE_contextp = CE_state->recv_context;
861 }
862
863 ce_debug_rvkrn_context =
864 dest_ring->per_transfer_context[sw_index];
865 if (per_transfer_contextp) {
866 *per_transfer_contextp = ce_debug_rvkrn_context;
867 }
868 dest_ring->per_transfer_context[sw_index] = 0; /* sanity */
869
870 /* Update sw_index */
871 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
872 dest_ring->sw_index = sw_index;
873 status = CDF_STATUS_SUCCESS;
874 } else {
875 status = CDF_STATUS_E_FAILURE;
876 }
877 cdf_spin_unlock(&scn->target_lock);
878
879 return status;
880}
881
882/*
883 * Guts of ce_completed_send_next.
884 * The caller takes responsibility for any necessary locking.
885 */
886int
887ce_completed_send_next_nolock(struct CE_state *CE_state,
888 void **per_CE_contextp,
889 void **per_transfer_contextp,
890 cdf_dma_addr_t *bufferp,
891 unsigned int *nbytesp,
892 unsigned int *transfer_idp,
893 unsigned int *sw_idx,
894 unsigned int *hw_idx,
895 uint32_t *toeplitz_hash_result)
896{
897 int status = CDF_STATUS_E_FAILURE;
898 struct CE_ring_state *src_ring = CE_state->src_ring;
899 uint32_t ctrl_addr = CE_state->ctrl_addr;
900 unsigned int nentries_mask = src_ring->nentries_mask;
901 unsigned int sw_index = src_ring->sw_index;
902 unsigned int read_index;
903 struct ol_softc *scn = CE_state->scn;
904
905 if (src_ring->hw_index == sw_index) {
906 /*
907 * The SW completion index has caught up with the cached
908 * version of the HW completion index.
909 * Update the cached HW completion index to see whether
910 * the SW has really caught up to the HW, or if the cached
911 * value of the HW index has become stale.
912 */
913 A_TARGET_ACCESS_BEGIN_RET(scn);
914 src_ring->hw_index =
915 CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
916 A_TARGET_ACCESS_END_RET(scn);
917 }
918 read_index = src_ring->hw_index;
919
920 if (sw_idx)
921 *sw_idx = sw_index;
922
923 if (hw_idx)
924 *hw_idx = read_index;
925
926 if ((read_index != sw_index) && (read_index != 0xffffffff)) {
927 struct CE_src_desc *shadow_base =
928 (struct CE_src_desc *)src_ring->shadow_base;
929 struct CE_src_desc *shadow_src_desc =
930 CE_SRC_RING_TO_DESC(shadow_base, sw_index);
931#ifdef QCA_WIFI_3_0
932 struct CE_src_desc *src_ring_base =
933 (struct CE_src_desc *)src_ring->base_addr_owner_space;
934 struct CE_src_desc *src_desc =
935 CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
936#endif
937 /* Return data from completed source descriptor */
938 *bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
939 *nbytesp = shadow_src_desc->nbytes;
940 *transfer_idp = shadow_src_desc->meta_data;
941#ifdef QCA_WIFI_3_0
942 *toeplitz_hash_result = src_desc->toeplitz_hash_result;
943#else
944 *toeplitz_hash_result = 0;
945#endif
946 if (per_CE_contextp) {
947 *per_CE_contextp = CE_state->send_context;
948 }
949
950 ce_debug_cmplsn_context =
951 src_ring->per_transfer_context[sw_index];
952 if (per_transfer_contextp) {
953 *per_transfer_contextp = ce_debug_cmplsn_context;
954 }
955 src_ring->per_transfer_context[sw_index] = 0; /* sanity */
956
957 /* Update sw_index */
958 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
959 src_ring->sw_index = sw_index;
960 status = CDF_STATUS_SUCCESS;
961 }
962
963 return status;
964}
965
966/* NB: Modeled after ce_completed_send_next */
967CDF_STATUS
968ce_cancel_send_next(struct CE_handle *copyeng,
969 void **per_CE_contextp,
970 void **per_transfer_contextp,
971 cdf_dma_addr_t *bufferp,
972 unsigned int *nbytesp,
973 unsigned int *transfer_idp,
974 uint32_t *toeplitz_hash_result)
975{
976 struct CE_state *CE_state;
977 struct CE_ring_state *src_ring;
978 unsigned int nentries_mask;
979 unsigned int sw_index;
980 unsigned int write_index;
981 CDF_STATUS status;
982 struct ol_softc *scn;
983
984 CE_state = (struct CE_state *)copyeng;
985 src_ring = CE_state->src_ring;
986 if (!src_ring) {
987 return CDF_STATUS_E_FAILURE;
988 }
989
990 scn = CE_state->scn;
991 cdf_spin_lock(&CE_state->scn->target_lock);
992 nentries_mask = src_ring->nentries_mask;
993 sw_index = src_ring->sw_index;
994 write_index = src_ring->write_index;
995
996 if (write_index != sw_index) {
997 struct CE_src_desc *src_ring_base =
998 (struct CE_src_desc *)src_ring->base_addr_owner_space;
999 struct CE_src_desc *src_desc =
1000 CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
1001
1002 /* Return data from completed source descriptor */
1003 *bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
1004 *nbytesp = src_desc->nbytes;
1005 *transfer_idp = src_desc->meta_data;
1006#ifdef QCA_WIFI_3_0
1007 *toeplitz_hash_result = src_desc->toeplitz_hash_result;
1008#else
1009 *toeplitz_hash_result = 0;
1010#endif
1011
1012 if (per_CE_contextp) {
1013 *per_CE_contextp = CE_state->send_context;
1014 }
1015
1016 ce_debug_cnclsn_context =
1017 src_ring->per_transfer_context[sw_index];
1018 if (per_transfer_contextp) {
1019 *per_transfer_contextp = ce_debug_cnclsn_context;
1020 }
1021 src_ring->per_transfer_context[sw_index] = 0; /* sanity */
1022
1023 /* Update sw_index */
1024 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1025 src_ring->sw_index = sw_index;
1026 status = CDF_STATUS_SUCCESS;
1027 } else {
1028 status = CDF_STATUS_E_FAILURE;
1029 }
1030 cdf_spin_unlock(&CE_state->scn->target_lock);
1031
1032 return status;
1033}
1034
1035/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
1036#define CE_WM_SHFT 1
1037
1038int
1039ce_completed_send_next(struct CE_handle *copyeng,
1040 void **per_CE_contextp,
1041 void **per_transfer_contextp,
1042 cdf_dma_addr_t *bufferp,
1043 unsigned int *nbytesp,
1044 unsigned int *transfer_idp,
1045 unsigned int *sw_idx,
1046 unsigned int *hw_idx,
1047 unsigned int *toeplitz_hash_result)
1048{
1049 struct CE_state *CE_state = (struct CE_state *)copyeng;
1050 int status;
1051
1052 cdf_spin_lock_bh(&CE_state->scn->target_lock);
1053 status =
1054 ce_completed_send_next_nolock(CE_state, per_CE_contextp,
1055 per_transfer_contextp, bufferp,
1056 nbytesp, transfer_idp, sw_idx,
1057 hw_idx, toeplitz_hash_result);
1058 cdf_spin_unlock_bh(&CE_state->scn->target_lock);
1059
1060 return status;
1061}
1062
1063#ifdef ATH_11AC_TXCOMPACT
1064/* CE engine descriptor reap
1065 * Similar to ce_per_engine_service(); the only difference is that
1066 * ce_per_engine_service() does receive and reaping of completed
1067 * descriptors, while this function only handles reaping of Tx
1068 * completion descriptors.  It is called from the threshold reap poll
1069 * routine hif_send_complete_check(), so it must not contain any
1070 * receive functionality.
1071 */
1072
1073void ce_per_engine_servicereap(struct ol_softc *scn, unsigned int CE_id)
1074{
1075 void *CE_context;
1076 void *transfer_context;
1077 cdf_dma_addr_t buf;
1078 unsigned int nbytes;
1079 unsigned int id;
1080 unsigned int sw_idx, hw_idx;
1081 uint32_t toeplitz_hash_result;
1082 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1083
1084 A_TARGET_ACCESS_BEGIN(scn);
1085
1086 /* Since this function is called from both user context and
1087 * tasklet context, the spinlock has to disable bottom halves.
1088 * This fix assumes that the ATH_11AC_TXCOMPACT flag is always
1089 * enabled in TX polling mode. If this is not the case, more
1090 * bottom-half spinlock changes are needed. Due to data path
1091 * performance concerns, after internal discussion we decided
1092 * to make the minimum change, i.e., only address the issue that
1093 * occurred in this function. The possible negative effect of this
1094 * minimum change is that, in the future, if some other function is
1095 * also opened up to user context, those cases will need to be
1096 * addressed by changing spin_lock to spin_lock_bh as well.
1097 */
1098
1099 cdf_spin_lock_bh(&scn->target_lock);
1100
1101 if (CE_state->send_cb) {
1102 {
1103 /* Pop completed send buffers and call the
1104 * registered send callback for each
1105 */
1106 while (ce_completed_send_next_nolock
1107 (CE_state, &CE_context,
1108 &transfer_context, &buf,
1109 &nbytes, &id, &sw_idx, &hw_idx,
1110 &toeplitz_hash_result) ==
1111 CDF_STATUS_SUCCESS) {
1112 if (CE_id != CE_HTT_H2T_MSG) {
1113 cdf_spin_unlock_bh(&scn->target_lock);
1114 CE_state->
1115 send_cb((struct CE_handle *)
1116 CE_state, CE_context,
1117 transfer_context, buf,
1118 nbytes, id, sw_idx, hw_idx,
1119 toeplitz_hash_result);
1120 cdf_spin_lock_bh(&scn->target_lock);
1121 } else {
1122 struct HIF_CE_pipe_info *pipe_info =
1123 (struct HIF_CE_pipe_info *)
1124 CE_context;
1125
1126 cdf_spin_lock_bh(&pipe_info->
1127 completion_freeq_lock);
1128 pipe_info->num_sends_allowed++;
1129 cdf_spin_unlock_bh(&pipe_info->
1130 completion_freeq_lock);
1131 }
1132 }
1133 }
1134 }
1135
1136 cdf_spin_unlock_bh(&scn->target_lock);
1137 A_TARGET_ACCESS_END(scn);
1138}
1139
1140#endif /*ATH_11AC_TXCOMPACT */
1141
1142/*
1143 * Number of times to check for any pending tx/rx completion on
1144 * a copy engine; this count should be big enough. Once we hit
1145 * this threshold we will not check for any Tx/Rx completion in the
1146 * same interrupt handling. Note that this threshold is only used
1147 * for Rx interrupt processing; it can be used for Tx as well if we
1148 * suspect an infinite loop in checking for pending Tx completions.
1149 */
1150#define CE_TXRX_COMP_CHECK_THRESHOLD 20
1151
1152/*
1153 * Guts of interrupt handler for per-engine interrupts on a particular CE.
1154 *
1155 * Invokes registered callbacks for recv_complete,
1156 * send_complete, and watermarks.
1157 *
1158 * Returns: number of messages processed
1159 */
1160
1161int ce_per_engine_service(struct ol_softc *scn, unsigned int CE_id)
1162{
1163 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1164 uint32_t ctrl_addr = CE_state->ctrl_addr;
1165 void *CE_context;
1166 void *transfer_context;
1167 cdf_dma_addr_t buf;
1168 unsigned int nbytes;
1169 unsigned int id;
1170 unsigned int flags;
1171 uint32_t CE_int_status;
1172 unsigned int more_comp_cnt = 0;
1173 unsigned int more_snd_comp_cnt = 0;
1174 unsigned int sw_idx, hw_idx;
1175 uint32_t toeplitz_hash_result;
1176
1177 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1178 HIF_ERROR("[premature rc=0]\n");
1179 return 0; /* no work done */
1180 }
1181
1182 cdf_spin_lock(&scn->target_lock);
1183
1184 /* Clear force_break flag and re-initialize receive_count to 0 */
1185
1186 /* NAPI: scn variables- thread/multi-processing safety? */
1187 CE_state->receive_count = 0;
1188 CE_state->force_break = 0;
1189more_completions:
1190 if (CE_state->recv_cb) {
1191
1192 /* Pop completed recv buffers and call
1193 * the registered recv callback for each
1194 */
1195 while (ce_completed_recv_next_nolock
1196 (CE_state, &CE_context, &transfer_context,
1197 &buf, &nbytes, &id, &flags) ==
1198 CDF_STATUS_SUCCESS) {
1199 cdf_spin_unlock(&scn->target_lock);
1200 CE_state->recv_cb((struct CE_handle *)CE_state,
1201 CE_context, transfer_context, buf,
1202 nbytes, id, flags);
1203
1204 /*
1205 * EV #112693 -
1206 * [Peregrine][ES1][WB342][Win8x86][Performance]
1207 * BSoD_0x133 occurred in VHT80 UDP_DL
1208 * Break out of the DPC by force if the number of loops in
1209 * hif_pci_ce_recv_data reaches MAX_NUM_OF_RECEIVES,
1210 * to avoid spending too much time in the
1211 * DPC for each interrupt. Schedule another
1212 * DPC to avoid data loss if we had taken a
1213 * force-break action before. This currently applies to
1214 * Windows only; Linux/Mac OS can extend it to their
1215 * platforms if necessary.
1216 */
1217
1218 /* Break the receive processing by
1219 * force if force_break is set
1220 */
1221 if (cdf_unlikely(CE_state->force_break)) {
1222 cdf_atomic_set(&CE_state->rx_pending, 1);
1223 CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1224 HOST_IS_COPY_COMPLETE_MASK);
1225 if (Q_TARGET_ACCESS_END(scn) < 0)
1226 HIF_ERROR("<--[premature rc=%d]\n",
1227 CE_state->receive_count);
1228 return CE_state->receive_count;
1229 }
1230 cdf_spin_lock(&scn->target_lock);
1231 }
1232 }
1233
1234 /*
1235 * Attention: the while loop below may run into a potential
1236 * infinite loop during a send stress test.
1237 * Resolve it the same way as the receive case (refer to EV #112693).
1238 */
1239
1240 if (CE_state->send_cb) {
1241 /* Pop completed send buffers and call
1242 * the registered send callback for each
1243 */
1244
1245#ifdef ATH_11AC_TXCOMPACT
1246 while (ce_completed_send_next_nolock
1247 (CE_state, &CE_context,
1248 &transfer_context, &buf, &nbytes,
1249 &id, &sw_idx, &hw_idx,
1250 &toeplitz_hash_result) == CDF_STATUS_SUCCESS) {
1251
1252 if (CE_id != CE_HTT_H2T_MSG ||
1253 WLAN_IS_EPPING_ENABLED(cds_get_conparam())) {
1254 cdf_spin_unlock(&scn->target_lock);
1255 CE_state->send_cb((struct CE_handle *)CE_state,
1256 CE_context, transfer_context,
1257 buf, nbytes, id, sw_idx,
1258 hw_idx, toeplitz_hash_result);
1259 cdf_spin_lock(&scn->target_lock);
1260 } else {
1261 struct HIF_CE_pipe_info *pipe_info =
1262 (struct HIF_CE_pipe_info *)CE_context;
1263
1264 cdf_spin_lock(&pipe_info->
1265 completion_freeq_lock);
1266 pipe_info->num_sends_allowed++;
1267 cdf_spin_unlock(&pipe_info->
1268 completion_freeq_lock);
1269 }
1270 }
1271#else /*ATH_11AC_TXCOMPACT */
1272 while (ce_completed_send_next_nolock
1273 (CE_state, &CE_context,
1274 &transfer_context, &buf, &nbytes,
1275 &id, &sw_idx, &hw_idx,
1276 &toeplitz_hash_result) == CDF_STATUS_SUCCESS) {
1277 cdf_spin_unlock(&scn->target_lock);
1278 CE_state->send_cb((struct CE_handle *)CE_state,
1279 CE_context, transfer_context, buf,
1280 nbytes, id, sw_idx, hw_idx,
1281 toeplitz_hash_result);
1282 cdf_spin_lock(&scn->target_lock);
1283 }
1284#endif /*ATH_11AC_TXCOMPACT */
1285 }
1286
1287more_watermarks:
1288 if (CE_state->misc_cbs) {
1289 CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
1290 if (CE_int_status & CE_WATERMARK_MASK) {
1291 if (CE_state->watermark_cb) {
1292
1293 cdf_spin_unlock(&scn->target_lock);
1294 /* Convert HW IS bits to software flags */
1295 flags =
1296 (CE_int_status & CE_WATERMARK_MASK) >>
1297 CE_WM_SHFT;
1298
1299 CE_state->
1300 watermark_cb((struct CE_handle *)CE_state,
1301 CE_state->wm_context, flags);
1302 cdf_spin_lock(&scn->target_lock);
1303 }
1304 }
1305 }
1306
1307 /*
1308 * Clear the misc interrupts (watermark) that were handled above,
1309 * and that will be checked again below.
1310 * Clear and check for copy-complete interrupts again, just in case
1311 * more copy completions happened while the misc interrupts were being
1312 * handled.
1313 */
1314 CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1315 CE_WATERMARK_MASK |
1316 HOST_IS_COPY_COMPLETE_MASK);
1317
1318 /*
1319 * Now that per-engine interrupts are cleared, verify that
1320 * no recv interrupts arrive while processing send interrupts,
1321 * and no recv or send interrupts happened while processing
1322 * misc interrupts. Go back and check again. Keep checking until
1323 * we find no more events to process.
1324 */
1325 if (CE_state->recv_cb && ce_recv_entries_done_nolock(scn, CE_state)) {
1326 if (WLAN_IS_EPPING_ENABLED(cds_get_conparam()) ||
1327 more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1328 goto more_completions;
1329 } else {
1330 HIF_ERROR(
1331 "%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1332 __func__, CE_state->dest_ring->nentries_mask,
1333 CE_state->dest_ring->sw_index,
1334 CE_DEST_RING_READ_IDX_GET(scn,
1335 CE_state->ctrl_addr));
1336 }
1337 }
1338
1339 if (CE_state->send_cb && ce_send_entries_done_nolock(scn, CE_state)) {
1340 if (WLAN_IS_EPPING_ENABLED(cds_get_conparam()) ||
1341 more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1342 goto more_completions;
1343 } else {
1344 HIF_ERROR(
1345 "%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1346 __func__, CE_state->src_ring->nentries_mask,
1347 CE_state->src_ring->sw_index,
1348 CE_SRC_RING_READ_IDX_GET(scn,
1349 CE_state->ctrl_addr));
1350 }
1351 }
1352
1353 if (CE_state->misc_cbs) {
1354 CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
1355 if (CE_int_status & CE_WATERMARK_MASK) {
1356 if (CE_state->watermark_cb) {
1357 goto more_watermarks;
1358 }
1359 }
1360 }
1361
1362 cdf_spin_unlock(&scn->target_lock);
1363 cdf_atomic_set(&CE_state->rx_pending, 0);
1364
1365 if (Q_TARGET_ACCESS_END(scn) < 0)
1366 HIF_ERROR("<--[premature rc=%d]\n", CE_state->receive_count);
1367 return CE_state->receive_count;
1368}
1369
1370/*
1371 * Handler for per-engine interrupts on ALL active CEs.
1372 * This is used in cases where the system is sharing a
1373 * single interrupt for all CEs
1374 */
1375
1376void ce_per_engine_service_any(int irq, struct ol_softc *scn)
1377{
1378 int CE_id;
1379 uint32_t intr_summary;
1380
1381 A_TARGET_ACCESS_BEGIN(scn);
1382 if (!cdf_atomic_read(&scn->tasklet_from_intr)) {
1383 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1384 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1385 if (cdf_atomic_read(&CE_state->rx_pending)) {
1386 cdf_atomic_set(&CE_state->rx_pending, 0);
1387 ce_per_engine_service(scn, CE_id);
1388 }
1389 }
1390
1391 A_TARGET_ACCESS_END(scn);
1392 return;
1393 }
1394
1395 intr_summary = CE_INTERRUPT_SUMMARY(scn);
1396
1397 for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
1398 if (intr_summary & (1 << CE_id)) {
1399 intr_summary &= ~(1 << CE_id);
1400 } else {
1401 continue; /* no intr pending on this CE */
1402 }
1403
1404 ce_per_engine_service(scn, CE_id);
1405 }
1406
1407 A_TARGET_ACCESS_END(scn);
1408}
1409
1410/*
1411 * Adjust interrupts for the copy complete handler.
1412 * If it's needed for either send or recv, then unmask
1413 * this interrupt; otherwise, mask it.
1414 *
1415 * Called with target_lock held.
1416 */
1417static void
1418ce_per_engine_handler_adjust(struct CE_state *CE_state,
1419 int disable_copy_compl_intr)
1420{
1421 uint32_t ctrl_addr = CE_state->ctrl_addr;
1422 struct ol_softc *scn = CE_state->scn;
1423
1424 CE_state->disable_copy_compl_intr = disable_copy_compl_intr;
1425 A_TARGET_ACCESS_BEGIN(scn);
1426 if ((!disable_copy_compl_intr) &&
1427 (CE_state->send_cb || CE_state->recv_cb)) {
1428 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1429 } else {
1430 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1431 }
1432
1433 if (CE_state->watermark_cb) {
1434 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1435 } else {
1436 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1437 }
1438 A_TARGET_ACCESS_END(scn);
1439
1440}
1441
1442/* Iterate the CE_state list and disable the compl interrupt
1443 * if it has been registered already.
1444 */
1445void ce_disable_any_copy_compl_intr_nolock(struct ol_softc *scn)
1446{
1447 int CE_id;
1448
1449 A_TARGET_ACCESS_BEGIN(scn);
1450 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1451 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1452 uint32_t ctrl_addr = CE_state->ctrl_addr;
1453
1454 /* if the interrupt is currently enabled, disable it */
1455 if (!CE_state->disable_copy_compl_intr
1456 && (CE_state->send_cb || CE_state->recv_cb)) {
1457 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1458 }
1459
1460 if (CE_state->watermark_cb) {
1461 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1462 }
1463 }
1464 A_TARGET_ACCESS_END(scn);
1465}
1466
1467void ce_enable_any_copy_compl_intr_nolock(struct ol_softc *scn)
1468{
1469 int CE_id;
1470
1471 A_TARGET_ACCESS_BEGIN(scn);
1472 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1473 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1474 uint32_t ctrl_addr = CE_state->ctrl_addr;
1475
1476 /*
1477 * If the CE is supposed to have copy complete interrupts
1478 * enabled (i.e. there a callback registered, and the
1479 * "disable" flag is not set), then re-enable the interrupt.
1480 */
1481 if (!CE_state->disable_copy_compl_intr
1482 && (CE_state->send_cb || CE_state->recv_cb)) {
1483 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1484 }
1485
1486 if (CE_state->watermark_cb) {
1487 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1488 }
1489 }
1490 A_TARGET_ACCESS_END(scn);
1491}
1492
1493void ce_disable_any_copy_compl_intr(struct ol_softc *scn)
1494{
1495 cdf_spin_lock(&scn->target_lock);
1496 ce_disable_any_copy_compl_intr_nolock(scn);
1497 cdf_spin_unlock(&scn->target_lock);
1498}
1499
1500/* Re-enable the copy compl interrupt if it has not been disabled before. */
1501void ce_enable_any_copy_compl_intr(struct ol_softc *scn)
1502{
1503 cdf_spin_lock(&scn->target_lock);
1504 ce_enable_any_copy_compl_intr_nolock(scn);
1505 cdf_spin_unlock(&scn->target_lock);
1506}
1507
1508void
1509ce_send_cb_register(struct CE_handle *copyeng,
1510 ce_send_cb fn_ptr,
1511 void *ce_send_context, int disable_interrupts)
1512{
1513 struct CE_state *CE_state = (struct CE_state *)copyeng;
1514
1515 if (CE_state == NULL) {
1516 pr_err("%s: Error CE state = NULL\n", __func__);
1517 return;
1518 }
1519 cdf_spin_lock(&CE_state->scn->target_lock);
1520 CE_state->send_cb = fn_ptr;
1521 CE_state->send_context = ce_send_context;
1522 ce_per_engine_handler_adjust(CE_state, disable_interrupts);
1523 cdf_spin_unlock(&CE_state->scn->target_lock);
1524}
1525
1526void
1527ce_recv_cb_register(struct CE_handle *copyeng,
1528 CE_recv_cb fn_ptr,
1529 void *CE_recv_context, int disable_interrupts)
1530{
1531 struct CE_state *CE_state = (struct CE_state *)copyeng;
1532
1533 if (CE_state == NULL) {
1534 pr_err("%s: ERROR CE state = NULL\n", __func__);
1535 return;
1536 }
1537 cdf_spin_lock(&CE_state->scn->target_lock);
1538 CE_state->recv_cb = fn_ptr;
1539 CE_state->recv_context = CE_recv_context;
1540 ce_per_engine_handler_adjust(CE_state, disable_interrupts);
1541 cdf_spin_unlock(&CE_state->scn->target_lock);
1542}
1543
1544void
1545ce_watermark_cb_register(struct CE_handle *copyeng,
1546 CE_watermark_cb fn_ptr, void *CE_wm_context)
1547{
1548 struct CE_state *CE_state = (struct CE_state *)copyeng;
1549
1550 cdf_spin_lock(&CE_state->scn->target_lock);
1551 CE_state->watermark_cb = fn_ptr;
1552 CE_state->wm_context = CE_wm_context;
1553 ce_per_engine_handler_adjust(CE_state, 0);
1554 if (fn_ptr) {
1555 CE_state->misc_cbs = 1;
1556 }
1557 cdf_spin_unlock(&CE_state->scn->target_lock);
1558}
1559
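/*
 * Illustrative usage (editor's sketch, not part of the driver): pairing the
 * watermark callback with ring watermarks.  'ce_hdl', 'my_wm_cb' and
 * 'my_wm_ctx' are hypothetical placeholders; the flags delivered to the
 * callback are derived from the hardware watermark status bits, shifted by
 * CE_WM_SHFT as in ce_per_engine_service() above.
 *
 *      ce_watermark_cb_register(ce_hdl, my_wm_cb, my_wm_ctx);
 *      ce_send_watermarks_set(ce_hdl, low_nentries, high_nentries);
 *      ce_recv_watermarks_set(ce_hdl, low_nentries, high_nentries);
 */
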
1560#ifdef WLAN_FEATURE_FASTPATH
1561/**
1562 * ce_pkt_dl_len_set() - set the HTT packet download length
1563 * @hif_sc: HIF context
1564 * @pkt_download_len: download length
1565 *
1566 * Return: None
1567 */
1568void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
1569{
1570 struct ol_softc *sc = (struct ol_softc *)(hif_sc);
1571 struct CE_state *ce_state = sc->ce_id_to_state[CE_HTT_H2T_MSG];
1572
1573 cdf_assert_always(ce_state);
1574
1575 ce_state->download_len = pkt_download_len;
1576
1577 cdf_print("%s CE %d Pkt download length %d\n", __func__,
1578 ce_state->id, ce_state->download_len);
1579}
1580#else
1581void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
1582{
1583}
1584#endif /* WLAN_FEATURE_FASTPATH */
1585
1586bool ce_get_rx_pending(struct ol_softc *scn)
1587{
1588 int CE_id;
1589
1590 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1591 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1592 if (cdf_atomic_read(&CE_state->rx_pending))
1593 return true;
1594 }
1595
1596 return false;
1597}
1598
1599/**
1600 * ce_check_rx_pending() - check whether rx is pending on the given copy engine
1601 * @scn: ol_softc
1602 * @ce_id: ce_id
1603 *
1604 * Return: bool
1605 */
1606bool ce_check_rx_pending(struct ol_softc *scn, int ce_id)
1607{
1608 struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
1609 if (cdf_atomic_read(&CE_state->rx_pending))
1610 return true;
1611 else
1612 return false;
1613}
1614
1615/**
1616 * ce_enable_msi() - write the MSI configuration to the target
1617 * @scn: hif context
1618 * @CE_id: which copy engine will be configured for MSI interrupts
1619 * @msi_addr_lo: Hardware will write to this address to generate an interrupt
1620 * @msi_addr_hi: Hardware will write to this address to generate an interrupt
1621 * @msi_data: Hardware will write this data to generate an interrupt
1622 *
1623 * This should be done in the initialization sequence, so no locking is needed.
1624 */
1625void ce_enable_msi(struct ol_softc *scn, unsigned int CE_id,
1626 uint32_t msi_addr_lo, uint32_t msi_addr_hi,
1627 uint32_t msi_data)
1628{
1629#ifdef WLAN_ENABLE_QCA6180
1630 struct CE_state *CE_state;
1631 A_target_id_t targid;
1632 u_int32_t ctrl_addr;
1633 uint32_t tmp;
1634
1635 CE_state = scn->ce_id_to_state[CE_id];
1636 if (!CE_state) {
1637 HIF_ERROR("%s: error - CE_state = NULL", __func__);
1638 return;
1639 }
1640 targid = TARGID(sc);
1641 ctrl_addr = CE_state->ctrl_addr;
1642 CE_MSI_ADDR_LOW_SET(scn, ctrl_addr, msi_addr_lo);
1643 CE_MSI_ADDR_HIGH_SET(scn, ctrl_addr, msi_addr_hi);
1644 CE_MSI_DATA_SET(scn, ctrl_addr, msi_data);
1645 tmp = CE_CTRL_REGISTER1_GET(scn, ctrl_addr);
1646 tmp |= (1 << CE_MSI_ENABLE_BIT);
1647 CE_CTRL_REGISTER1_SET(scn, ctrl_addr, tmp);
1648#endif
1649}
1650
1651#ifdef IPA_OFFLOAD
1652/*
1653 * The copy engine should release resources to the microcontroller.
1654 * The microcontroller needs:
1655 * - the copy engine source descriptor base address
1656 * - the copy engine source descriptor size
1657 * - the PCI BAR address to access the copy engine registers
1658 */
1659void ce_ipa_get_resource(struct CE_handle *ce,
1660 uint32_t *ce_sr_base_paddr,
1661 uint32_t *ce_sr_ring_size,
1662 cdf_dma_addr_t *ce_reg_paddr)
1663{
1664 struct CE_state *CE_state = (struct CE_state *)ce;
1665 uint32_t ring_loop;
1666 struct CE_src_desc *ce_desc;
1667 cdf_dma_addr_t phy_mem_base;
1668 struct ol_softc *scn = CE_state->scn;
1669
1670 if (CE_RUNNING != CE_state->state) {
1671 *ce_sr_base_paddr = 0;
1672 *ce_sr_ring_size = 0;
1673 return;
1674 }
1675
1676 /* Update default value for descriptor */
1677 for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
1678 ring_loop++) {
1679 ce_desc = (struct CE_src_desc *)
1680 ((char *)CE_state->src_ring->base_addr_owner_space +
1681 ring_loop * (sizeof(struct CE_src_desc)));
1682 CE_IPA_RING_INIT(ce_desc);
1683 }
1684
1685 /* Get BAR address */
1686 hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
1687
1688 *ce_sr_base_paddr = (uint32_t) CE_state->src_ring->base_addr_CE_space;
1689 *ce_sr_ring_size = (uint32_t) CE_state->src_ring->nentries;
1690 *ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
1691 SR_WR_INDEX_ADDRESS;
1692 return;
1693}
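/*
 * Illustrative usage (editor's sketch, not part of the driver): an IPA setup
 * path might query the copy engine resources like this, where 'ce_hdl' is
 * the handle of the CE dedicated to IPA traffic:
 *
 *      uint32_t sr_base_paddr, sr_ring_size;
 *      cdf_dma_addr_t reg_paddr;
 *
 *      ce_ipa_get_resource(ce_hdl, &sr_base_paddr, &sr_ring_size, &reg_paddr);
 *
 * A zero sr_base_paddr and sr_ring_size indicate that the CE is not in the
 * CE_RUNNING state, as handled at the top of ce_ipa_get_resource() above.
 */
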
1694#endif /* IPA_OFFLOAD */
1695