Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001/*
2 * Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
3 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28#include <osdep.h>
29#include "a_types.h"
30#include <athdefs.h>
31#include "osapi_linux.h"
32#include "hif.h"
33#include "hif_io32.h"
34#include "ce_api.h"
35#include "ce_main.h"
36#include "ce_internal.h"
37#include "ce_reg.h"
38#include "cdf_lock.h"
39#include "regtable.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080040#include "epping_main.h"
41#include "hif_main.h"
42#include "hif_debug.h"
Chandrasekaran, Manishekar681d1372015-11-05 10:42:48 +053043#include "cds_concurrency.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080044
45#ifdef IPA_OFFLOAD
46#ifdef QCA_WIFI_3_0
47#define CE_IPA_RING_INIT(ce_desc) \
48 do { \
49 ce_desc->gather = 0; \
50 ce_desc->enable_11h = 0; \
51 ce_desc->meta_data_low = 0; \
52 ce_desc->packet_result_offset = 64; \
53 ce_desc->toeplitz_hash_enable = 0; \
54 ce_desc->addr_y_search_disable = 0; \
55 ce_desc->addr_x_search_disable = 0; \
56 ce_desc->misc_int_disable = 0; \
57 ce_desc->target_int_disable = 0; \
58 ce_desc->host_int_disable = 0; \
59 ce_desc->dest_byte_swap = 0; \
60 ce_desc->byte_swap = 0; \
61 ce_desc->type = 2; \
62 ce_desc->tx_classify = 1; \
63 ce_desc->buffer_addr_hi = 0; \
64 ce_desc->meta_data = 0; \
65 ce_desc->nbytes = 128; \
66 } while (0)
67#else
68#define CE_IPA_RING_INIT(ce_desc) \
69 do { \
70 ce_desc->byte_swap = 0; \
71 ce_desc->nbytes = 60; \
72 ce_desc->gather = 0; \
73 } while (0)
74#endif /* QCA_WIFI_3_0 */
75#endif /* IPA_OFFLOAD */
76
77static int war1_allow_sleep;
78/* io32 write workaround */
79static int hif_ce_war1;
80
Houston Hoffman68e837e2015-12-04 12:57:24 -080081#ifdef CONFIG_SLUB_DEBUG_ON
82
83/**
 84 * struct hif_ce_desc_event - structure for detailing a ce event
85 * @type: what the event was
86 * @time: when it happened
87 * @descriptor: descriptor enqueued or dequeued
88 * @memory: virtual address that was used
 89 * @index: location of the descriptor in the ce ring
90 */
91struct hif_ce_desc_event {
92 uint16_t index;
93 enum hif_ce_event_type type;
94 uint64_t time;
95 union ce_desc descriptor;
96 void *memory;
97};
98
99/* max history to record per copy engine */
100#define HIF_CE_HISTORY_MAX 512
101cdf_atomic_t hif_ce_desc_history_index[CE_COUNT_MAX];
102struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];
103
104/**
105 * get_next_record_index() - get the next record index
106 * @table_index: atomic index variable to increment
107 * @array_size: array size of the circular buffer
108 *
109 * Increment the atomic index and reserve the value.
110 * Takes care of buffer wrap.
111 * Guaranteed to be thread safe as long as fewer than array_size contexts
112 * try to access the array. If there are more than array_size contexts
113 * trying to access the array, full locking of the recording process would
114 * be needed to have sane logging.
115 */
116static int get_next_record_index(cdf_atomic_t *table_index, int array_size)
117{
118 int record_index = cdf_atomic_inc_return(table_index);
119 if (record_index == array_size)
120 cdf_atomic_sub(array_size, table_index);
121
122 while (record_index >= array_size)
123 record_index -= array_size;
124 return record_index;
125}
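/*
 * Worked example (illustrative note, not in the original source): with
 * array_size == HIF_CE_HISTORY_MAX (512), successive callers see the
 * atomic counter return ..., 510, 511, 512, 513, ...  The caller that
 * observes exactly 512 subtracts array_size from the shared counter so
 * it wraps back toward zero, and every caller reduces its own private
 * copy modulo 512, yielding record indices ..., 510, 511, 0, 1, ...
 */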
126
127/**
128 * hif_record_ce_desc_event() - record ce descriptor events
 129 * @ce_id: which ce is the event occurring on
130 * @type: what happened
131 * @descriptor: pointer to the descriptor posted/completed
132 * @memory: virtual address of buffer related to the descriptor
133 * @index: index that the descriptor was/will be at.
134 */
135void hif_record_ce_desc_event(int ce_id, enum hif_ce_event_type type,
136 union ce_desc *descriptor, void *memory, int index)
137{
138 int record_index = get_next_record_index(
139 &hif_ce_desc_history_index[ce_id], HIF_CE_HISTORY_MAX);
140
141 struct hif_ce_desc_event *event =
142 &hif_ce_desc_history[ce_id][record_index];
143 event->type = type;
144 event->time = cds_get_monotonic_boottime();
145 event->descriptor = *descriptor;
146 event->memory = memory;
147 event->index = index;
148}
149
150/**
151 * ce_init_ce_desc_event_log() - initialize the ce event log
152 * @ce_id: copy engine id for which we are initializing the log
153 * @size: size of array to dedicate
154 *
155 * Currently the passed size is ignored in favor of a precompiled value.
156 */
157void ce_init_ce_desc_event_log(int ce_id, int size)
158{
159 cdf_atomic_init(&hif_ce_desc_history_index[ce_id]);
160}
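
/*
 * Illustrative sketch (not part of the original driver): one way the
 * per-CE history ring above could be walked when debugging a stall.
 * The helper name is hypothetical, and HIF_ERROR is simply assumed to be
 * a printf-style log macro, as its other uses in this file suggest.
 */
void hif_ce_desc_history_dump_example(int ce_id)
{
	int i;

	for (i = 0; i < HIF_CE_HISTORY_MAX; i++) {
		struct hif_ce_desc_event *event =
			&hif_ce_desc_history[ce_id][i];

		/* slots that were never written have no timestamp */
		if (event->time == 0)
			continue;

		HIF_ERROR("ce %d slot %d: type %d time %llu index %d vaddr %p",
			  ce_id, i, event->type,
			  (unsigned long long)event->time,
			  event->index, event->memory);
	}
}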
161#else
162void hif_record_ce_desc_event(
163 int ce_id, enum hif_ce_event_type type,
164 union ce_desc *descriptor, void *memory,
165 int index)
166{
167}
168
169static inline void ce_init_ce_desc_event_log(int ce_id, int size)
170{
171}
172#endif
173
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800174/*
175 * Support for Copy Engine hardware, which is mainly used for
176 * communication between Host and Target over a PCIe interconnect.
177 */
178
179/*
180 * A single CopyEngine (CE) comprises two "rings":
181 * a source ring
182 * a destination ring
183 *
184 * Each ring consists of a number of descriptors which specify
185 * an address, length, and meta-data.
186 *
187 * Typically, one side of the PCIe interconnect (Host or Target)
188 * controls one ring and the other side controls the other ring.
189 * The source side chooses when to initiate a transfer and it
190 * chooses what to send (buffer address, length). The destination
191 * side keeps a supply of "anonymous receive buffers" available and
192 * it handles incoming data as it arrives (when the destination
 193 * receives an interrupt).
194 *
195 * The sender may send a simple buffer (address/length) or it may
196 * send a small list of buffers. When a small list is sent, hardware
197 * "gathers" these and they end up in a single destination buffer
198 * with a single interrupt.
199 *
200 * There are several "contexts" managed by this layer -- more, it
201 * may seem -- than should be needed. These are provided mainly for
202 * maximum flexibility and especially to facilitate a simpler HIF
203 * implementation. There are per-CopyEngine recv, send, and watermark
204 * contexts. These are supplied by the caller when a recv, send,
205 * or watermark handler is established and they are echoed back to
206 * the caller when the respective callbacks are invoked. There is
207 * also a per-transfer context supplied by the caller when a buffer
208 * (or sendlist) is sent and when a buffer is enqueued for recv.
209 * These per-transfer contexts are echoed back to the caller when
210 * the buffer is sent/received.
 211 * The Target TX hash result is returned via toeplitz_hash_result.
212 */
213
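/*
 * Illustrative sketch (not part of the original driver): how a HIF-level
 * caller typically exercises the contexts described above, using the
 * sendlist and receive-enqueue APIs implemented later in this file
 * (prototypes assumed to come from ce_api.h). Buffers are assumed to be
 * DMA-mapped already; the function and parameter names are hypothetical,
 * and placing struct ce_sendlist on the stack mirrors how the HIF layer
 * uses it.
 */
static int ce_usage_sketch(struct CE_handle *tx_copyeng,
			   struct CE_handle *rx_copyeng,
			   cdf_dma_addr_t hdr_paddr, uint32_t hdr_len,
			   cdf_dma_addr_t data_paddr, uint32_t data_len,
			   void *per_transfer_ctxt, cdf_dma_addr_t rx_paddr)
{
	struct ce_sendlist sendlist;
	int status;

	/* Source side: gather header + payload into one transfer */
	ce_sendlist_init(&sendlist);
	status = ce_sendlist_buf_add(&sendlist, hdr_paddr, hdr_len, 0, 0);
	if (status != CDF_STATUS_SUCCESS)
		return status;
	status = ce_sendlist_buf_add(&sendlist, data_paddr, data_len, 0, 0);
	if (status != CDF_STATUS_SUCCESS)
		return status;
	/* per_transfer_ctxt is echoed back by the send completion callback */
	status = ce_sendlist_send(tx_copyeng, per_transfer_ctxt, &sendlist, 0);
	if (status != CDF_STATUS_SUCCESS)
		return status;

	/* Destination side: keep anonymous receive buffers posted */
	return ce_recv_buf_enqueue(rx_copyeng, per_transfer_ctxt, rx_paddr);
}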
214/*
215 * Guts of ce_send, used by both ce_send and ce_sendlist_send.
216 * The caller takes responsibility for any needed locking.
217 */
218int
219ce_completed_send_next_nolock(struct CE_state *CE_state,
220 void **per_CE_contextp,
221 void **per_transfer_contextp,
222 cdf_dma_addr_t *bufferp,
223 unsigned int *nbytesp,
224 unsigned int *transfer_idp,
225 unsigned int *sw_idx, unsigned int *hw_idx,
226 uint32_t *toeplitz_hash_result);
227
228void war_ce_src_ring_write_idx_set(struct ol_softc *scn,
229 u32 ctrl_addr, unsigned int write_index)
230{
231 if (hif_ce_war1) {
232 void __iomem *indicator_addr;
233
234 indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;
235
236 if (!war1_allow_sleep
237 && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
238 hif_write32_mb(indicator_addr,
239 (CDC_WAR_MAGIC_STR | write_index));
240 } else {
241 unsigned long irq_flags;
242 local_irq_save(irq_flags);
243 hif_write32_mb(indicator_addr, 1);
244
245 /*
246 * PCIE write waits for ACK in IPQ8K, there is no
247 * need to read back value.
248 */
249 (void)hif_read32_mb(indicator_addr);
250 (void)hif_read32_mb(indicator_addr); /* conservative */
251
252 CE_SRC_RING_WRITE_IDX_SET(scn,
253 ctrl_addr, write_index);
254
255 hif_write32_mb(indicator_addr, 0);
256 local_irq_restore(irq_flags);
257 }
258 } else
259 CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
260}
261
262int
263ce_send_nolock(struct CE_handle *copyeng,
264 void *per_transfer_context,
265 cdf_dma_addr_t buffer,
266 uint32_t nbytes,
267 uint32_t transfer_id,
268 uint32_t flags,
269 uint32_t user_flags)
270{
271 int status;
272 struct CE_state *CE_state = (struct CE_state *)copyeng;
273 struct CE_ring_state *src_ring = CE_state->src_ring;
274 uint32_t ctrl_addr = CE_state->ctrl_addr;
275 unsigned int nentries_mask = src_ring->nentries_mask;
276 unsigned int sw_index = src_ring->sw_index;
277 unsigned int write_index = src_ring->write_index;
278 uint64_t dma_addr = buffer;
279 struct ol_softc *scn = CE_state->scn;
280
281 A_TARGET_ACCESS_BEGIN_RET(scn);
282 if (unlikely(CE_RING_DELTA(nentries_mask,
283 write_index, sw_index - 1) <= 0)) {
284 OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
285 status = CDF_STATUS_E_FAILURE;
286 A_TARGET_ACCESS_END_RET(scn);
287 return status;
288 }
289 {
Houston Hoffman68e837e2015-12-04 12:57:24 -0800290 enum hif_ce_event_type event_type = HIF_TX_GATHER_DESC_POST;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800291 struct CE_src_desc *src_ring_base =
292 (struct CE_src_desc *)src_ring->base_addr_owner_space;
293 struct CE_src_desc *shadow_base =
294 (struct CE_src_desc *)src_ring->shadow_base;
295 struct CE_src_desc *src_desc =
296 CE_SRC_RING_TO_DESC(src_ring_base, write_index);
297 struct CE_src_desc *shadow_src_desc =
298 CE_SRC_RING_TO_DESC(shadow_base, write_index);
299
300 /* Update low 32 bits source descriptor address */
301 shadow_src_desc->buffer_addr =
302 (uint32_t)(dma_addr & 0xFFFFFFFF);
303#ifdef QCA_WIFI_3_0
304 shadow_src_desc->buffer_addr_hi =
305 (uint32_t)((dma_addr >> 32) & 0x1F);
306 user_flags |= shadow_src_desc->buffer_addr_hi;
307 memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
308 sizeof(uint32_t));
309#endif
310 shadow_src_desc->meta_data = transfer_id;
311
312 /*
313 * Set the swap bit if:
314 * typical sends on this CE are swapped (host is big-endian)
315 * and this send doesn't disable the swapping
316 * (data is not bytestream)
317 */
318 shadow_src_desc->byte_swap =
319 (((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
320 != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
321 shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
322 shadow_src_desc->nbytes = nbytes;
323
324 *src_desc = *shadow_src_desc;
325
326 src_ring->per_transfer_context[write_index] =
327 per_transfer_context;
328
329 /* Update Source Ring Write Index */
330 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
331
332 /* WORKAROUND */
333 if (!shadow_src_desc->gather) {
Houston Hoffman68e837e2015-12-04 12:57:24 -0800334 event_type = HIF_TX_DESC_POST;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800335 war_ce_src_ring_write_idx_set(scn, ctrl_addr,
336 write_index);
337 }
338
Houston Hoffman68e837e2015-12-04 12:57:24 -0800339 /* src_ring->write_index hasn't been updated even though
 340 * the register has already been written to.
341 */
342 hif_record_ce_desc_event(CE_state->id, event_type,
343 (union ce_desc *) shadow_src_desc, per_transfer_context,
344 src_ring->write_index);
345
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800346 src_ring->write_index = write_index;
347 status = CDF_STATUS_SUCCESS;
348 }
349 A_TARGET_ACCESS_END_RET(scn);
350
351 return status;
352}
353
354int
355ce_send(struct CE_handle *copyeng,
356 void *per_transfer_context,
357 cdf_dma_addr_t buffer,
358 uint32_t nbytes,
359 uint32_t transfer_id,
360 uint32_t flags,
361 uint32_t user_flag)
362{
363 struct CE_state *CE_state = (struct CE_state *)copyeng;
364 int status;
365
Houston Hoffman44b7e4a2015-09-03 17:01:22 -0700366 cdf_spin_lock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800367 status = ce_send_nolock(copyeng, per_transfer_context, buffer, nbytes,
368 transfer_id, flags, user_flag);
Houston Hoffman44b7e4a2015-09-03 17:01:22 -0700369 cdf_spin_unlock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800370
371 return status;
372}
373
374unsigned int ce_sendlist_sizeof(void)
375{
376 return sizeof(struct ce_sendlist);
377}
378
379void ce_sendlist_init(struct ce_sendlist *sendlist)
380{
381 struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
382 sl->num_items = 0;
383}
384
385int
386ce_sendlist_buf_add(struct ce_sendlist *sendlist,
387 cdf_dma_addr_t buffer,
388 uint32_t nbytes,
389 uint32_t flags,
390 uint32_t user_flags)
391{
392 struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
393 unsigned int num_items = sl->num_items;
394 struct ce_sendlist_item *item;
395
396 if (num_items >= CE_SENDLIST_ITEMS_MAX) {
397 CDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
398 return CDF_STATUS_E_RESOURCES;
399 }
400
401 item = &sl->item[num_items];
402 item->send_type = CE_SIMPLE_BUFFER_TYPE;
403 item->data = buffer;
404 item->u.nbytes = nbytes;
405 item->flags = flags;
406 item->user_flags = user_flags;
407 sl->num_items = num_items + 1;
408 return CDF_STATUS_SUCCESS;
409}
410
411int
412ce_sendlist_send(struct CE_handle *copyeng,
413 void *per_transfer_context,
414 struct ce_sendlist *sendlist, unsigned int transfer_id)
415{
416 int status = -ENOMEM;
417 struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
418 struct CE_state *CE_state = (struct CE_state *)copyeng;
419 struct CE_ring_state *src_ring = CE_state->src_ring;
420 unsigned int nentries_mask = src_ring->nentries_mask;
421 unsigned int num_items = sl->num_items;
422 unsigned int sw_index;
423 unsigned int write_index;
424
425 CDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));
426
Houston Hoffman44b7e4a2015-09-03 17:01:22 -0700427 cdf_spin_lock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800428 sw_index = src_ring->sw_index;
429 write_index = src_ring->write_index;
430
431 if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
432 num_items) {
433 struct ce_sendlist_item *item;
434 int i;
435
436 /* handle all but the last item uniformly */
437 for (i = 0; i < num_items - 1; i++) {
438 item = &sl->item[i];
439 /* TBDXXX: Support extensible sendlist_types? */
440 CDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
441 status = ce_send_nolock(copyeng, CE_SENDLIST_ITEM_CTXT,
442 (cdf_dma_addr_t) item->data,
443 item->u.nbytes, transfer_id,
444 item->flags | CE_SEND_FLAG_GATHER,
445 item->user_flags);
446 CDF_ASSERT(status == CDF_STATUS_SUCCESS);
447 }
448 /* provide valid context pointer for final item */
449 item = &sl->item[i];
450 /* TBDXXX: Support extensible sendlist_types? */
451 CDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
452 status = ce_send_nolock(copyeng, per_transfer_context,
453 (cdf_dma_addr_t) item->data,
454 item->u.nbytes,
455 transfer_id, item->flags,
456 item->user_flags);
457 CDF_ASSERT(status == CDF_STATUS_SUCCESS);
458 NBUF_UPDATE_TX_PKT_COUNT((cdf_nbuf_t)per_transfer_context,
459 NBUF_TX_PKT_CE);
460 DPTRACE(cdf_dp_trace((cdf_nbuf_t)per_transfer_context,
461 CDF_DP_TRACE_CE_PACKET_PTR_RECORD,
462 (uint8_t *)(((cdf_nbuf_t)per_transfer_context)->data),
463 sizeof(((cdf_nbuf_t)per_transfer_context)->data)));
464 } else {
465 /*
466 * Probably not worth the additional complexity to support
467 * partial sends with continuation or notification. We expect
468 * to use large rings and small sendlists. If we can't handle
469 * the entire request at once, punt it back to the caller.
470 */
471 }
Houston Hoffman44b7e4a2015-09-03 17:01:22 -0700472 cdf_spin_unlock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800473
474 return status;
475}
476
477#ifdef WLAN_FEATURE_FASTPATH
478#ifdef QCA_WIFI_3_0
479static inline void
480ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
481 uint64_t dma_addr,
482 uint32_t user_flags)
483{
484 shadow_src_desc->buffer_addr_hi =
485 (uint32_t)((dma_addr >> 32) & 0x1F);
486 user_flags |= shadow_src_desc->buffer_addr_hi;
487 memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
488 sizeof(uint32_t));
489}
490#else
491static inline void
492ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
493 uint64_t dma_addr,
494 uint32_t user_flags)
495{
496}
497#endif
498
499/**
 500 * ce_send_fast() - CE layer Tx buffer posting function
 501 * @copyeng: copy engine handle
 502 * @msdus: array of msdus to be sent
 503 * @num_msdus: number of msdus in the array
 504 * @transfer_id: transfer_id
 505 *
 506 * Assumption: called with an array of MSDUs
 507 * Function:
 508 * For each msdu in the array
 509 * 1. Check no. of available entries
 510 * 2. Create src ring entries (allocated in consistent memory)
 511 * 3. Write index to h/w
512 *
513 * Return: No. of packets that could be sent
514 */
515
516int ce_send_fast(struct CE_handle *copyeng, cdf_nbuf_t *msdus,
517 unsigned int num_msdus, unsigned int transfer_id)
518{
519 struct CE_state *ce_state = (struct CE_state *)copyeng;
520 struct ol_softc *scn = ce_state->scn;
521 struct CE_ring_state *src_ring = ce_state->src_ring;
522 u_int32_t ctrl_addr = ce_state->ctrl_addr;
523 unsigned int nentries_mask = src_ring->nentries_mask;
524 unsigned int write_index;
525 unsigned int sw_index;
526 unsigned int frag_len;
527 cdf_nbuf_t msdu;
528 int i;
529 uint64_t dma_addr;
530 uint32_t user_flags = 0;
531
Houston Hoffman44b7e4a2015-09-03 17:01:22 -0700532 cdf_spin_lock_bh(&ce_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800533 sw_index = src_ring->sw_index;
534 write_index = src_ring->write_index;
535
 536 /* each msdu consumes two CE descriptors: HTC/HTT header frag + data frag */
537 for (i = 0; i < num_msdus; i++) {
538 struct CE_src_desc *src_ring_base =
539 (struct CE_src_desc *)src_ring->base_addr_owner_space;
540 struct CE_src_desc *shadow_base =
541 (struct CE_src_desc *)src_ring->shadow_base;
542 struct CE_src_desc *src_desc =
543 CE_SRC_RING_TO_DESC(src_ring_base, write_index);
544 struct CE_src_desc *shadow_src_desc =
545 CE_SRC_RING_TO_DESC(shadow_base, write_index);
546
547 msdu = msdus[i];
548
549 /*
550 * First fill out the ring descriptor for the HTC HTT frame
551 * header. These are uncached writes. Should we use a local
552 * structure instead?
553 */
 554 /* HTT/HTC header can be passed as an argument */
555 dma_addr = cdf_nbuf_get_frag_paddr_lo(msdu, 0);
556 shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
557 0xFFFFFFFF);
558 user_flags = cdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
559 ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
560
561 shadow_src_desc->meta_data = transfer_id;
562 shadow_src_desc->nbytes = cdf_nbuf_get_frag_len(msdu, 0);
563
564 /*
565 * HTC HTT header is a word stream, so byte swap if CE byte
566 * swap enabled
567 */
568 shadow_src_desc->byte_swap = ((ce_state->attr_flags &
569 CE_ATTR_BYTE_SWAP_DATA) != 0);
 570 /* For the first (gather) descriptor, no need to write the index to h/w yet */
571 shadow_src_desc->gather = 1;
572 *src_desc = *shadow_src_desc;
573
574 /* By default we could initialize the transfer context to this
575 * value
576 */
577 src_ring->per_transfer_context[write_index] =
578 CE_SENDLIST_ITEM_CTXT;
579
580 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
581
582 src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
583 shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
584 /*
585 * Now fill out the ring descriptor for the actual data
586 * packet
587 */
588 dma_addr = cdf_nbuf_get_frag_paddr_lo(msdu, 1);
589 shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
590 0xFFFFFFFF);
591 /*
592 * Clear packet offset for all but the first CE desc.
593 */
594 user_flags &= ~CDF_CE_TX_PKT_OFFSET_BIT_M;
595 ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
596 shadow_src_desc->meta_data = transfer_id;
597
598 /* get actual packet length */
599 frag_len = cdf_nbuf_get_frag_len(msdu, 1);
Houston Hoffmana5e74c12015-09-02 18:06:28 -0700600
601 /* only read download_len once */
602 shadow_src_desc->nbytes = ce_state->download_len;
603 if (shadow_src_desc->nbytes > frag_len)
604 shadow_src_desc->nbytes = frag_len;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800605
606 /* Data packet is a byte stream, so disable byte swap */
607 shadow_src_desc->byte_swap = 0;
608 /* For the last one, gather is not set */
609 shadow_src_desc->gather = 0;
610 *src_desc = *shadow_src_desc;
611 src_ring->per_transfer_context[write_index] = msdu;
612 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
613 }
614
615 /* Write the final index to h/w one-shot */
616 if (i) {
617 src_ring->write_index = write_index;
618 /* Don't call WAR_XXX from here
 619 * Just call XXX instead, that has the required intelligence
620 */
621 war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
622 }
623
Houston Hoffman44b7e4a2015-09-03 17:01:22 -0700624 cdf_spin_unlock_bh(&ce_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800625
626 /*
627 * If all packets in the array are transmitted,
628 * i = num_msdus
629 * Temporarily add an ASSERT
630 */
631 ASSERT(i == num_msdus);
632 return i;
633}
634#endif /* WLAN_FEATURE_FASTPATH */
635
636int
637ce_recv_buf_enqueue(struct CE_handle *copyeng,
638 void *per_recv_context, cdf_dma_addr_t buffer)
639{
640 int status;
641 struct CE_state *CE_state = (struct CE_state *)copyeng;
642 struct CE_ring_state *dest_ring = CE_state->dest_ring;
643 uint32_t ctrl_addr = CE_state->ctrl_addr;
644 unsigned int nentries_mask = dest_ring->nentries_mask;
645 unsigned int write_index;
646 unsigned int sw_index;
647 int val = 0;
648 uint64_t dma_addr = buffer;
649 struct ol_softc *scn = CE_state->scn;
650
Houston Hoffman44b7e4a2015-09-03 17:01:22 -0700651 cdf_spin_lock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800652 write_index = dest_ring->write_index;
653 sw_index = dest_ring->sw_index;
654
655 A_TARGET_ACCESS_BEGIN_RET_EXT(scn, val);
656 if (val == -1) {
Houston Hoffman44b7e4a2015-09-03 17:01:22 -0700657 cdf_spin_unlock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800658 return val;
659 }
660
661 if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
662 struct CE_dest_desc *dest_ring_base =
663 (struct CE_dest_desc *)dest_ring->
664 base_addr_owner_space;
665 struct CE_dest_desc *dest_desc =
666 CE_DEST_RING_TO_DESC(dest_ring_base, write_index);
667
668 /* Update low 32 bit destination descriptor */
669 dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
670#ifdef QCA_WIFI_3_0
671 dest_desc->buffer_addr_hi =
672 (uint32_t)((dma_addr >> 32) & 0x1F);
673#endif
674 dest_desc->nbytes = 0;
675
676 dest_ring->per_transfer_context[write_index] =
677 per_recv_context;
678
Houston Hoffman68e837e2015-12-04 12:57:24 -0800679 hif_record_ce_desc_event(CE_state->id, HIF_RX_DESC_POST,
680 (union ce_desc *) dest_desc, per_recv_context,
681 write_index);
682
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800683 /* Update Destination Ring Write Index */
684 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
685 CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
686 dest_ring->write_index = write_index;
687 status = CDF_STATUS_SUCCESS;
688 } else {
689 status = CDF_STATUS_E_FAILURE;
690 }
691 A_TARGET_ACCESS_END_RET_EXT(scn, val);
692 if (val == -1) {
Houston Hoffman44b7e4a2015-09-03 17:01:22 -0700693 cdf_spin_unlock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800694 return val;
695 }
696
Houston Hoffman44b7e4a2015-09-03 17:01:22 -0700697 cdf_spin_unlock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800698
699 return status;
700}
701
702void
703ce_send_watermarks_set(struct CE_handle *copyeng,
704 unsigned int low_alert_nentries,
705 unsigned int high_alert_nentries)
706{
707 struct CE_state *CE_state = (struct CE_state *)copyeng;
708 uint32_t ctrl_addr = CE_state->ctrl_addr;
709 struct ol_softc *scn = CE_state->scn;
710
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800711 CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
712 CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800713}
714
715void
716ce_recv_watermarks_set(struct CE_handle *copyeng,
717 unsigned int low_alert_nentries,
718 unsigned int high_alert_nentries)
719{
720 struct CE_state *CE_state = (struct CE_state *)copyeng;
721 uint32_t ctrl_addr = CE_state->ctrl_addr;
722 struct ol_softc *scn = CE_state->scn;
723
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800724 CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
725 low_alert_nentries);
726 CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
727 high_alert_nentries);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800728}
729
730unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
731{
732 struct CE_state *CE_state = (struct CE_state *)copyeng;
733 struct CE_ring_state *src_ring = CE_state->src_ring;
734 unsigned int nentries_mask = src_ring->nentries_mask;
735 unsigned int sw_index;
736 unsigned int write_index;
737
Houston Hoffman44b7e4a2015-09-03 17:01:22 -0700738 cdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800739 sw_index = src_ring->sw_index;
740 write_index = src_ring->write_index;
Houston Hoffman44b7e4a2015-09-03 17:01:22 -0700741 cdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800742
743 return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
744}
745
746unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
747{
748 struct CE_state *CE_state = (struct CE_state *)copyeng;
749 struct CE_ring_state *dest_ring = CE_state->dest_ring;
750 unsigned int nentries_mask = dest_ring->nentries_mask;
751 unsigned int sw_index;
752 unsigned int write_index;
753
Houston Hoffman44b7e4a2015-09-03 17:01:22 -0700754 cdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800755 sw_index = dest_ring->sw_index;
756 write_index = dest_ring->write_index;
Houston Hoffman44b7e4a2015-09-03 17:01:22 -0700757 cdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800758
759 return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
760}
761
762/*
763 * Guts of ce_send_entries_done.
764 * The caller takes responsibility for any necessary locking.
765 */
766unsigned int
767ce_send_entries_done_nolock(struct ol_softc *scn,
768 struct CE_state *CE_state)
769{
770 struct CE_ring_state *src_ring = CE_state->src_ring;
771 uint32_t ctrl_addr = CE_state->ctrl_addr;
772 unsigned int nentries_mask = src_ring->nentries_mask;
773 unsigned int sw_index;
774 unsigned int read_index;
775
776 sw_index = src_ring->sw_index;
777 read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
778
779 return CE_RING_DELTA(nentries_mask, sw_index, read_index);
780}
781
782unsigned int ce_send_entries_done(struct CE_handle *copyeng)
783{
784 struct CE_state *CE_state = (struct CE_state *)copyeng;
785 unsigned int nentries;
786
Houston Hoffman44b7e4a2015-09-03 17:01:22 -0700787 cdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800788 nentries = ce_send_entries_done_nolock(CE_state->scn, CE_state);
Houston Hoffman44b7e4a2015-09-03 17:01:22 -0700789 cdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800790
791 return nentries;
792}
793
794/*
795 * Guts of ce_recv_entries_done.
796 * The caller takes responsibility for any necessary locking.
797 */
798unsigned int
799ce_recv_entries_done_nolock(struct ol_softc *scn,
800 struct CE_state *CE_state)
801{
802 struct CE_ring_state *dest_ring = CE_state->dest_ring;
803 uint32_t ctrl_addr = CE_state->ctrl_addr;
804 unsigned int nentries_mask = dest_ring->nentries_mask;
805 unsigned int sw_index;
806 unsigned int read_index;
807
808 sw_index = dest_ring->sw_index;
809 read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
810
811 return CE_RING_DELTA(nentries_mask, sw_index, read_index);
812}
813
814unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
815{
816 struct CE_state *CE_state = (struct CE_state *)copyeng;
817 unsigned int nentries;
818
Houston Hoffman44b7e4a2015-09-03 17:01:22 -0700819 cdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800820 nentries = ce_recv_entries_done_nolock(CE_state->scn, CE_state);
Houston Hoffman44b7e4a2015-09-03 17:01:22 -0700821 cdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800822
823 return nentries;
824}
825
826/* Debug support */
827void *ce_debug_cmplrn_context; /* completed recv next context */
828void *ce_debug_cnclsn_context; /* cancel send next context */
829void *ce_debug_rvkrn_context; /* revoke receive next context */
830void *ce_debug_cmplsn_context; /* completed send next context */
831
832/*
833 * Guts of ce_completed_recv_next.
834 * The caller takes responsibility for any necessary locking.
835 */
836int
837ce_completed_recv_next_nolock(struct CE_state *CE_state,
838 void **per_CE_contextp,
839 void **per_transfer_contextp,
840 cdf_dma_addr_t *bufferp,
841 unsigned int *nbytesp,
842 unsigned int *transfer_idp,
843 unsigned int *flagsp)
844{
845 int status;
846 struct CE_ring_state *dest_ring = CE_state->dest_ring;
847 unsigned int nentries_mask = dest_ring->nentries_mask;
848 unsigned int sw_index = dest_ring->sw_index;
849
850 struct CE_dest_desc *dest_ring_base =
851 (struct CE_dest_desc *)dest_ring->base_addr_owner_space;
852 struct CE_dest_desc *dest_desc =
853 CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
854 int nbytes;
855 struct CE_dest_desc dest_desc_info;
856 /*
 857 * By copying the dest_desc_info element to local memory, we
 858 * avoid an extra memory read from non-cacheable memory.
859 */
860 dest_desc_info = *dest_desc;
861 nbytes = dest_desc_info.nbytes;
862 if (nbytes == 0) {
863 /*
864 * This closes a relatively unusual race where the Host
865 * sees the updated DRRI before the update to the
866 * corresponding descriptor has completed. We treat this
867 * as a descriptor that is not yet done.
868 */
869 status = CDF_STATUS_E_FAILURE;
870 goto done;
871 }
872
Houston Hoffman68e837e2015-12-04 12:57:24 -0800873 hif_record_ce_desc_event(CE_state->id, HIF_RX_DESC_COMPLETION,
874 (union ce_desc *) dest_desc,
875 dest_ring->per_transfer_context[sw_index],
876 sw_index);
877
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800878 dest_desc->nbytes = 0;
879
880 /* Return data from completed destination descriptor */
881 *bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
882 *nbytesp = nbytes;
883 *transfer_idp = dest_desc_info.meta_data;
884 *flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;
885
886 if (per_CE_contextp) {
887 *per_CE_contextp = CE_state->recv_context;
888 }
889
890 ce_debug_cmplrn_context = dest_ring->per_transfer_context[sw_index];
891 if (per_transfer_contextp) {
892 *per_transfer_contextp = ce_debug_cmplrn_context;
893 }
894 dest_ring->per_transfer_context[sw_index] = 0; /* sanity */
895
896 /* Update sw_index */
897 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
898 dest_ring->sw_index = sw_index;
899 status = CDF_STATUS_SUCCESS;
900
901done:
902 return status;
903}
904
905int
906ce_completed_recv_next(struct CE_handle *copyeng,
907 void **per_CE_contextp,
908 void **per_transfer_contextp,
909 cdf_dma_addr_t *bufferp,
910 unsigned int *nbytesp,
911 unsigned int *transfer_idp, unsigned int *flagsp)
912{
913 struct CE_state *CE_state = (struct CE_state *)copyeng;
914 int status;
915
Houston Hoffman44b7e4a2015-09-03 17:01:22 -0700916 cdf_spin_lock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800917 status =
918 ce_completed_recv_next_nolock(CE_state, per_CE_contextp,
919 per_transfer_contextp, bufferp,
920 nbytesp, transfer_idp, flagsp);
Houston Hoffman44b7e4a2015-09-03 17:01:22 -0700921 cdf_spin_unlock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800922
923 return status;
924}
925
926/* NB: Modeled after ce_completed_recv_next_nolock */
927CDF_STATUS
928ce_revoke_recv_next(struct CE_handle *copyeng,
929 void **per_CE_contextp,
930 void **per_transfer_contextp, cdf_dma_addr_t *bufferp)
931{
932 struct CE_state *CE_state;
933 struct CE_ring_state *dest_ring;
934 unsigned int nentries_mask;
935 unsigned int sw_index;
936 unsigned int write_index;
937 CDF_STATUS status;
938 struct ol_softc *scn;
939
940 CE_state = (struct CE_state *)copyeng;
941 dest_ring = CE_state->dest_ring;
942 if (!dest_ring) {
943 return CDF_STATUS_E_FAILURE;
944 }
945
946 scn = CE_state->scn;
Houston Hoffman44b7e4a2015-09-03 17:01:22 -0700947 cdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800948 nentries_mask = dest_ring->nentries_mask;
949 sw_index = dest_ring->sw_index;
950 write_index = dest_ring->write_index;
951 if (write_index != sw_index) {
952 struct CE_dest_desc *dest_ring_base =
953 (struct CE_dest_desc *)dest_ring->
954 base_addr_owner_space;
955 struct CE_dest_desc *dest_desc =
956 CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
957
958 /* Return data from completed destination descriptor */
959 *bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);
960
961 if (per_CE_contextp) {
962 *per_CE_contextp = CE_state->recv_context;
963 }
964
965 ce_debug_rvkrn_context =
966 dest_ring->per_transfer_context[sw_index];
967 if (per_transfer_contextp) {
968 *per_transfer_contextp = ce_debug_rvkrn_context;
969 }
970 dest_ring->per_transfer_context[sw_index] = 0; /* sanity */
971
972 /* Update sw_index */
973 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
974 dest_ring->sw_index = sw_index;
975 status = CDF_STATUS_SUCCESS;
976 } else {
977 status = CDF_STATUS_E_FAILURE;
978 }
Houston Hoffman44b7e4a2015-09-03 17:01:22 -0700979 cdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800980
981 return status;
982}
983
984/*
985 * Guts of ce_completed_send_next.
986 * The caller takes responsibility for any necessary locking.
987 */
988int
989ce_completed_send_next_nolock(struct CE_state *CE_state,
990 void **per_CE_contextp,
991 void **per_transfer_contextp,
992 cdf_dma_addr_t *bufferp,
993 unsigned int *nbytesp,
994 unsigned int *transfer_idp,
995 unsigned int *sw_idx,
996 unsigned int *hw_idx,
997 uint32_t *toeplitz_hash_result)
998{
999 int status = CDF_STATUS_E_FAILURE;
1000 struct CE_ring_state *src_ring = CE_state->src_ring;
1001 uint32_t ctrl_addr = CE_state->ctrl_addr;
1002 unsigned int nentries_mask = src_ring->nentries_mask;
1003 unsigned int sw_index = src_ring->sw_index;
1004 unsigned int read_index;
1005 struct ol_softc *scn = CE_state->scn;
1006
1007 if (src_ring->hw_index == sw_index) {
1008 /*
1009 * The SW completion index has caught up with the cached
1010 * version of the HW completion index.
1011 * Update the cached HW completion index to see whether
1012 * the SW has really caught up to the HW, or if the cached
1013 * value of the HW index has become stale.
1014 */
1015 A_TARGET_ACCESS_BEGIN_RET(scn);
1016 src_ring->hw_index =
Houston Hoffman3d0cda82015-12-03 13:25:05 -08001017 CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001018 A_TARGET_ACCESS_END_RET(scn);
1019 }
1020 read_index = src_ring->hw_index;
1021
1022 if (sw_idx)
1023 *sw_idx = sw_index;
1024
1025 if (hw_idx)
1026 *hw_idx = read_index;
1027
1028 if ((read_index != sw_index) && (read_index != 0xffffffff)) {
1029 struct CE_src_desc *shadow_base =
1030 (struct CE_src_desc *)src_ring->shadow_base;
1031 struct CE_src_desc *shadow_src_desc =
1032 CE_SRC_RING_TO_DESC(shadow_base, sw_index);
1033#ifdef QCA_WIFI_3_0
1034 struct CE_src_desc *src_ring_base =
1035 (struct CE_src_desc *)src_ring->base_addr_owner_space;
1036 struct CE_src_desc *src_desc =
1037 CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
1038#endif
Houston Hoffman68e837e2015-12-04 12:57:24 -08001039 hif_record_ce_desc_event(CE_state->id, HIF_TX_DESC_COMPLETION,
1040 (union ce_desc *) shadow_src_desc,
1041 src_ring->per_transfer_context[sw_index],
1042 sw_index);
1043
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001044 /* Return data from completed source descriptor */
1045 *bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
1046 *nbytesp = shadow_src_desc->nbytes;
1047 *transfer_idp = shadow_src_desc->meta_data;
1048#ifdef QCA_WIFI_3_0
1049 *toeplitz_hash_result = src_desc->toeplitz_hash_result;
1050#else
1051 *toeplitz_hash_result = 0;
1052#endif
1053 if (per_CE_contextp) {
1054 *per_CE_contextp = CE_state->send_context;
1055 }
1056
1057 ce_debug_cmplsn_context =
1058 src_ring->per_transfer_context[sw_index];
1059 if (per_transfer_contextp) {
1060 *per_transfer_contextp = ce_debug_cmplsn_context;
1061 }
1062 src_ring->per_transfer_context[sw_index] = 0; /* sanity */
1063
1064 /* Update sw_index */
1065 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1066 src_ring->sw_index = sw_index;
1067 status = CDF_STATUS_SUCCESS;
1068 }
1069
1070 return status;
1071}
1072
1073/* NB: Modeled after ce_completed_send_next */
1074CDF_STATUS
1075ce_cancel_send_next(struct CE_handle *copyeng,
1076 void **per_CE_contextp,
1077 void **per_transfer_contextp,
1078 cdf_dma_addr_t *bufferp,
1079 unsigned int *nbytesp,
1080 unsigned int *transfer_idp,
1081 uint32_t *toeplitz_hash_result)
1082{
1083 struct CE_state *CE_state;
1084 struct CE_ring_state *src_ring;
1085 unsigned int nentries_mask;
1086 unsigned int sw_index;
1087 unsigned int write_index;
1088 CDF_STATUS status;
1089 struct ol_softc *scn;
1090
1091 CE_state = (struct CE_state *)copyeng;
1092 src_ring = CE_state->src_ring;
1093 if (!src_ring) {
1094 return CDF_STATUS_E_FAILURE;
1095 }
1096
1097 scn = CE_state->scn;
Houston Hoffman44b7e4a2015-09-03 17:01:22 -07001098 cdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001099 nentries_mask = src_ring->nentries_mask;
1100 sw_index = src_ring->sw_index;
1101 write_index = src_ring->write_index;
1102
1103 if (write_index != sw_index) {
1104 struct CE_src_desc *src_ring_base =
1105 (struct CE_src_desc *)src_ring->base_addr_owner_space;
1106 struct CE_src_desc *src_desc =
1107 CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
1108
1109 /* Return data from completed source descriptor */
1110 *bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
1111 *nbytesp = src_desc->nbytes;
1112 *transfer_idp = src_desc->meta_data;
1113#ifdef QCA_WIFI_3_0
1114 *toeplitz_hash_result = src_desc->toeplitz_hash_result;
1115#else
1116 *toeplitz_hash_result = 0;
1117#endif
1118
1119 if (per_CE_contextp) {
1120 *per_CE_contextp = CE_state->send_context;
1121 }
1122
1123 ce_debug_cnclsn_context =
1124 src_ring->per_transfer_context[sw_index];
1125 if (per_transfer_contextp) {
1126 *per_transfer_contextp = ce_debug_cnclsn_context;
1127 }
1128 src_ring->per_transfer_context[sw_index] = 0; /* sanity */
1129
1130 /* Update sw_index */
1131 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1132 src_ring->sw_index = sw_index;
1133 status = CDF_STATUS_SUCCESS;
1134 } else {
1135 status = CDF_STATUS_E_FAILURE;
1136 }
Houston Hoffman44b7e4a2015-09-03 17:01:22 -07001137 cdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001138
1139 return status;
1140}
1141
1142/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
1143#define CE_WM_SHFT 1
1144
1145int
1146ce_completed_send_next(struct CE_handle *copyeng,
1147 void **per_CE_contextp,
1148 void **per_transfer_contextp,
1149 cdf_dma_addr_t *bufferp,
1150 unsigned int *nbytesp,
1151 unsigned int *transfer_idp,
1152 unsigned int *sw_idx,
1153 unsigned int *hw_idx,
1154 unsigned int *toeplitz_hash_result)
1155{
1156 struct CE_state *CE_state = (struct CE_state *)copyeng;
1157 int status;
1158
Houston Hoffman44b7e4a2015-09-03 17:01:22 -07001159 cdf_spin_lock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001160 status =
1161 ce_completed_send_next_nolock(CE_state, per_CE_contextp,
1162 per_transfer_contextp, bufferp,
1163 nbytesp, transfer_idp, sw_idx,
1164 hw_idx, toeplitz_hash_result);
Houston Hoffman44b7e4a2015-09-03 17:01:22 -07001165 cdf_spin_unlock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001166
1167 return status;
1168}
1169
1170#ifdef ATH_11AC_TXCOMPACT
1171/* CE engine descriptor reap
 1172 * Similar to ce_per_engine_service; the only difference is that
 1173 * ce_per_engine_service does receive and reaping of completed
 1174 * descriptors, while this function only handles reaping of Tx
 1175 * complete descriptors. It is called from the threshold reap poll
 1176 * routine hif_send_complete_check, so it should not contain any
 1177 * receive functionality.
1178 */
1179
1180void ce_per_engine_servicereap(struct ol_softc *scn, unsigned int CE_id)
1181{
1182 void *CE_context;
1183 void *transfer_context;
1184 cdf_dma_addr_t buf;
1185 unsigned int nbytes;
1186 unsigned int id;
1187 unsigned int sw_idx, hw_idx;
1188 uint32_t toeplitz_hash_result;
1189 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1190
1191 A_TARGET_ACCESS_BEGIN(scn);
1192
1193 /* Since this function is called from both user context and
 1194 * tasklet context, the spinlock has to disable bottom halves.
 1195 * This fix assumes that the ATH_11AC_TXCOMPACT flag is always
 1196 * enabled in TX polling mode. If this is not the case, more
 1197 * bottom-half spinlock changes are needed. Due to data path
 1198 * performance concerns, after internal discussion we decided
 1199 * to make the minimum change, i.e., only address the issue that
 1200 * occurred in this function. The possible negative effect of this
 1201 * minimum change is that, in the future, if some other function is
 1202 * also opened up to user context, those cases will need to be
 1203 * addressed by changing spin_lock to spin_lock_bh as well.
1204 */
1205
Houston Hoffman44b7e4a2015-09-03 17:01:22 -07001206 cdf_spin_lock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001207
1208 if (CE_state->send_cb) {
1209 {
1210 /* Pop completed send buffers and call the
1211 * registered send callback for each
1212 */
1213 while (ce_completed_send_next_nolock
1214 (CE_state, &CE_context,
1215 &transfer_context, &buf,
1216 &nbytes, &id, &sw_idx, &hw_idx,
1217 &toeplitz_hash_result) ==
1218 CDF_STATUS_SUCCESS) {
1219 if (CE_id != CE_HTT_H2T_MSG) {
Houston Hoffman44b7e4a2015-09-03 17:01:22 -07001220 cdf_spin_unlock_bh(
1221 &CE_state->ce_index_lock);
1222 CE_state->send_cb(
1223 (struct CE_handle *)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001224 CE_state, CE_context,
1225 transfer_context, buf,
1226 nbytes, id, sw_idx, hw_idx,
1227 toeplitz_hash_result);
Houston Hoffman44b7e4a2015-09-03 17:01:22 -07001228 cdf_spin_lock_bh(
1229 &CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001230 } else {
1231 struct HIF_CE_pipe_info *pipe_info =
1232 (struct HIF_CE_pipe_info *)
1233 CE_context;
1234
1235 cdf_spin_lock_bh(&pipe_info->
1236 completion_freeq_lock);
1237 pipe_info->num_sends_allowed++;
1238 cdf_spin_unlock_bh(&pipe_info->
1239 completion_freeq_lock);
1240 }
1241 }
1242 }
1243 }
1244
Houston Hoffman44b7e4a2015-09-03 17:01:22 -07001245 cdf_spin_unlock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001246 A_TARGET_ACCESS_END(scn);
1247}
1248
1249#endif /*ATH_11AC_TXCOMPACT */
1250
1251/*
1252 * Number of times to check for any pending tx/rx completion on
 1253 * a copy engine; this count should be big enough. Once we hit
 1254 * this threshold we'll not check for any Tx/Rx completion in the
 1255 * same interrupt handling. Note that this threshold is only used
 1256 * for Rx interrupt processing; it can be used for Tx as well if we
 1257 * suspect any infinite loop in checking for pending Tx completions.
1258 */
1259#define CE_TXRX_COMP_CHECK_THRESHOLD 20
1260
1261/*
1262 * Guts of interrupt handler for per-engine interrupts on a particular CE.
1263 *
1264 * Invokes registered callbacks for recv_complete,
1265 * send_complete, and watermarks.
1266 *
1267 * Returns: number of messages processed
1268 */
1269
1270int ce_per_engine_service(struct ol_softc *scn, unsigned int CE_id)
1271{
1272 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1273 uint32_t ctrl_addr = CE_state->ctrl_addr;
1274 void *CE_context;
1275 void *transfer_context;
1276 cdf_dma_addr_t buf;
1277 unsigned int nbytes;
1278 unsigned int id;
1279 unsigned int flags;
1280 uint32_t CE_int_status;
1281 unsigned int more_comp_cnt = 0;
1282 unsigned int more_snd_comp_cnt = 0;
1283 unsigned int sw_idx, hw_idx;
1284 uint32_t toeplitz_hash_result;
1285
1286 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1287 HIF_ERROR("[premature rc=0]\n");
1288 return 0; /* no work done */
1289 }
1290
Houston Hoffman44b7e4a2015-09-03 17:01:22 -07001291 cdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001292
1293 /* Clear force_break flag and re-initialize receive_count to 0 */
1294
1295 /* NAPI: scn variables- thread/multi-processing safety? */
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001296 CE_state->receive_count = 0;
Houston Hoffman18c7fc52015-09-02 11:44:42 -07001297 CE_state->force_break = 0;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001298more_completions:
1299 if (CE_state->recv_cb) {
1300
1301 /* Pop completed recv buffers and call
1302 * the registered recv callback for each
1303 */
1304 while (ce_completed_recv_next_nolock
1305 (CE_state, &CE_context, &transfer_context,
1306 &buf, &nbytes, &id, &flags) ==
1307 CDF_STATUS_SUCCESS) {
Houston Hoffman44b7e4a2015-09-03 17:01:22 -07001308 cdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001309 CE_state->recv_cb((struct CE_handle *)CE_state,
1310 CE_context, transfer_context, buf,
1311 nbytes, id, flags);
1312
1313 /*
1314 * EV #112693 -
1315 * [Peregrine][ES1][WB342][Win8x86][Performance]
1316 * BSoD_0x133 occurred in VHT80 UDP_DL
 1317 * Break out of the DPC by force if the number of loops in
 1318 * hif_pci_ce_recv_data reaches MAX_NUM_OF_RECEIVES,
 1319 * to avoid spending too much time in the
 1320 * DPC for each interrupt handling. Schedule another
 1321 * DPC to avoid data loss if we had taken the
 1322 * force-break action. This currently applies to Windows OS
 1323 * only; Linux/Mac OS can extend it to their
 1324 * platforms if necessary
1325 */
1326
 1327 /* Break out of the receive processing by
 1328 * force if force_break is set
1329 */
Houston Hoffman18c7fc52015-09-02 11:44:42 -07001330 if (cdf_unlikely(CE_state->force_break)) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001331 cdf_atomic_set(&CE_state->rx_pending, 1);
1332 CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1333 HOST_IS_COPY_COMPLETE_MASK);
1334 if (Q_TARGET_ACCESS_END(scn) < 0)
1335 HIF_ERROR("<--[premature rc=%d]\n",
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001336 CE_state->receive_count);
1337 return CE_state->receive_count;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001338 }
Houston Hoffman44b7e4a2015-09-03 17:01:22 -07001339 cdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001340 }
1341 }
1342
1343 /*
 1344 * Attention: the while loop below may potentially spin forever
 1345 * during a send stress test.
 1346 * Resolve it the same way as the receive case (refer to EV #112693).
1347 */
1348
1349 if (CE_state->send_cb) {
1350 /* Pop completed send buffers and call
1351 * the registered send callback for each
1352 */
1353
1354#ifdef ATH_11AC_TXCOMPACT
1355 while (ce_completed_send_next_nolock
1356 (CE_state, &CE_context,
1357 &transfer_context, &buf, &nbytes,
1358 &id, &sw_idx, &hw_idx,
1359 &toeplitz_hash_result) == CDF_STATUS_SUCCESS) {
1360
1361 if (CE_id != CE_HTT_H2T_MSG ||
1362 WLAN_IS_EPPING_ENABLED(cds_get_conparam())) {
Houston Hoffman44b7e4a2015-09-03 17:01:22 -07001363 cdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001364 CE_state->send_cb((struct CE_handle *)CE_state,
1365 CE_context, transfer_context,
1366 buf, nbytes, id, sw_idx,
1367 hw_idx, toeplitz_hash_result);
Houston Hoffman44b7e4a2015-09-03 17:01:22 -07001368 cdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001369 } else {
1370 struct HIF_CE_pipe_info *pipe_info =
1371 (struct HIF_CE_pipe_info *)CE_context;
1372
1373 cdf_spin_lock(&pipe_info->
1374 completion_freeq_lock);
1375 pipe_info->num_sends_allowed++;
1376 cdf_spin_unlock(&pipe_info->
1377 completion_freeq_lock);
1378 }
1379 }
1380#else /*ATH_11AC_TXCOMPACT */
1381 while (ce_completed_send_next_nolock
1382 (CE_state, &CE_context,
1383 &transfer_context, &buf, &nbytes,
1384 &id, &sw_idx, &hw_idx,
1385 &toeplitz_hash_result) == CDF_STATUS_SUCCESS) {
Houston Hoffman44b7e4a2015-09-03 17:01:22 -07001386 cdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001387 CE_state->send_cb((struct CE_handle *)CE_state,
1388 CE_context, transfer_context, buf,
1389 nbytes, id, sw_idx, hw_idx,
1390 toeplitz_hash_result);
Houston Hoffman44b7e4a2015-09-03 17:01:22 -07001391 cdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001392 }
1393#endif /*ATH_11AC_TXCOMPACT */
1394 }
1395
1396more_watermarks:
1397 if (CE_state->misc_cbs) {
1398 CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
1399 if (CE_int_status & CE_WATERMARK_MASK) {
1400 if (CE_state->watermark_cb) {
Houston Hoffman44b7e4a2015-09-03 17:01:22 -07001401 cdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001402 /* Convert HW IS bits to software flags */
1403 flags =
1404 (CE_int_status & CE_WATERMARK_MASK) >>
1405 CE_WM_SHFT;
1406
1407 CE_state->
1408 watermark_cb((struct CE_handle *)CE_state,
1409 CE_state->wm_context, flags);
Houston Hoffman44b7e4a2015-09-03 17:01:22 -07001410 cdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001411 }
1412 }
1413 }
1414
1415 /*
1416 * Clear the misc interrupts (watermark) that were handled above,
1417 * and that will be checked again below.
1418 * Clear and check for copy-complete interrupts again, just in case
1419 * more copy completions happened while the misc interrupts were being
1420 * handled.
1421 */
1422 CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1423 CE_WATERMARK_MASK |
1424 HOST_IS_COPY_COMPLETE_MASK);
1425
1426 /*
1427 * Now that per-engine interrupts are cleared, verify that
1428 * no recv interrupts arrive while processing send interrupts,
1429 * and no recv or send interrupts happened while processing
 1430 * misc interrupts. Go back and check again. Keep checking until
1431 * we find no more events to process.
1432 */
1433 if (CE_state->recv_cb && ce_recv_entries_done_nolock(scn, CE_state)) {
1434 if (WLAN_IS_EPPING_ENABLED(cds_get_conparam()) ||
1435 more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1436 goto more_completions;
1437 } else {
1438 HIF_ERROR(
1439 "%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1440 __func__, CE_state->dest_ring->nentries_mask,
1441 CE_state->dest_ring->sw_index,
1442 CE_DEST_RING_READ_IDX_GET(scn,
1443 CE_state->ctrl_addr));
1444 }
1445 }
1446
1447 if (CE_state->send_cb && ce_send_entries_done_nolock(scn, CE_state)) {
1448 if (WLAN_IS_EPPING_ENABLED(cds_get_conparam()) ||
1449 more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1450 goto more_completions;
1451 } else {
1452 HIF_ERROR(
1453 "%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1454 __func__, CE_state->src_ring->nentries_mask,
1455 CE_state->src_ring->sw_index,
1456 CE_SRC_RING_READ_IDX_GET(scn,
1457 CE_state->ctrl_addr));
1458 }
1459 }
1460
1461 if (CE_state->misc_cbs) {
1462 CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
1463 if (CE_int_status & CE_WATERMARK_MASK) {
1464 if (CE_state->watermark_cb) {
1465 goto more_watermarks;
1466 }
1467 }
1468 }
1469
Houston Hoffman44b7e4a2015-09-03 17:01:22 -07001470 cdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001471 cdf_atomic_set(&CE_state->rx_pending, 0);
1472
1473 if (Q_TARGET_ACCESS_END(scn) < 0)
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001474 HIF_ERROR("<--[premature rc=%d]\n", CE_state->receive_count);
1475 return CE_state->receive_count;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001476}
1477
1478/*
1479 * Handler for per-engine interrupts on ALL active CEs.
1480 * This is used in cases where the system is sharing a
 1481 * single interrupt for all CEs
1482 */
1483
1484void ce_per_engine_service_any(int irq, struct ol_softc *scn)
1485{
1486 int CE_id;
1487 uint32_t intr_summary;
1488
1489 A_TARGET_ACCESS_BEGIN(scn);
1490 if (!cdf_atomic_read(&scn->tasklet_from_intr)) {
1491 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1492 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1493 if (cdf_atomic_read(&CE_state->rx_pending)) {
1494 cdf_atomic_set(&CE_state->rx_pending, 0);
1495 ce_per_engine_service(scn, CE_id);
1496 }
1497 }
1498
1499 A_TARGET_ACCESS_END(scn);
1500 return;
1501 }
1502
1503 intr_summary = CE_INTERRUPT_SUMMARY(scn);
1504
1505 for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
1506 if (intr_summary & (1 << CE_id)) {
1507 intr_summary &= ~(1 << CE_id);
1508 } else {
1509 continue; /* no intr pending on this CE */
1510 }
1511
1512 ce_per_engine_service(scn, CE_id);
1513 }
1514
1515 A_TARGET_ACCESS_END(scn);
1516}
1517
1518/*
1519 * Adjust interrupts for the copy complete handler.
1520 * If it's needed for either send or recv, then unmask
1521 * this interrupt; otherwise, mask it.
1522 *
1523 * Called with target_lock held.
1524 */
1525static void
1526ce_per_engine_handler_adjust(struct CE_state *CE_state,
1527 int disable_copy_compl_intr)
1528{
1529 uint32_t ctrl_addr = CE_state->ctrl_addr;
1530 struct ol_softc *scn = CE_state->scn;
1531
1532 CE_state->disable_copy_compl_intr = disable_copy_compl_intr;
1533 A_TARGET_ACCESS_BEGIN(scn);
1534 if ((!disable_copy_compl_intr) &&
1535 (CE_state->send_cb || CE_state->recv_cb)) {
1536 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1537 } else {
1538 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1539 }
1540
1541 if (CE_state->watermark_cb) {
1542 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1543 } else {
1544 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1545 }
1546 A_TARGET_ACCESS_END(scn);
1547
1548}
1549
 1550/* Iterate the CE_state list and disable the compl interrupt
1551 * if it has been registered already.
1552 */
1553void ce_disable_any_copy_compl_intr_nolock(struct ol_softc *scn)
1554{
1555 int CE_id;
1556
1557 A_TARGET_ACCESS_BEGIN(scn);
1558 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1559 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1560 uint32_t ctrl_addr = CE_state->ctrl_addr;
1561
1562 /* if the interrupt is currently enabled, disable it */
1563 if (!CE_state->disable_copy_compl_intr
1564 && (CE_state->send_cb || CE_state->recv_cb)) {
1565 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1566 }
1567
1568 if (CE_state->watermark_cb) {
1569 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1570 }
1571 }
1572 A_TARGET_ACCESS_END(scn);
1573}
1574
1575void ce_enable_any_copy_compl_intr_nolock(struct ol_softc *scn)
1576{
1577 int CE_id;
1578
1579 A_TARGET_ACCESS_BEGIN(scn);
1580 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1581 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1582 uint32_t ctrl_addr = CE_state->ctrl_addr;
1583
1584 /*
1585 * If the CE is supposed to have copy complete interrupts
 1586 * enabled (i.e. there is a callback registered, and the
1587 * "disable" flag is not set), then re-enable the interrupt.
1588 */
1589 if (!CE_state->disable_copy_compl_intr
1590 && (CE_state->send_cb || CE_state->recv_cb)) {
1591 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1592 }
1593
1594 if (CE_state->watermark_cb) {
1595 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1596 }
1597 }
1598 A_TARGET_ACCESS_END(scn);
1599}
1600
/**
 * ce_send_cb_register(): register completion handler
 * @copyeng: CE_state representing the ce we are adding the behavior to
 * @fn_ptr: callback that the ce should use when processing tx completions
 * @ce_send_context: context pointer handed back to @fn_ptr
 * @disable_interrupts: whether completion interrupts should be disabled
 *
 * Caller should guarantee that no transactions are in progress before
 * switching the callback function.
 *
 * Registers the send context before the fn pointer so that if the cb is valid
 * the context should be valid.
 *
 * Beware that currently this function will enable completion interrupts
 * (unless @disable_interrupts is set).
 */
void
ce_send_cb_register(struct CE_handle *copyeng,
		    ce_send_cb fn_ptr,
		    void *ce_send_context, int disable_interrupts)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;

	if (CE_state == NULL) {
		pr_err("%s: Error CE state = NULL\n", __func__);
		return;
	}
	CE_state->send_context = ce_send_context;
	CE_state->send_cb = fn_ptr;
	ce_per_engine_handler_adjust(CE_state, disable_interrupts);
}

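/*
 * Usage sketch (illustrative only): a client that owns the HTT H2T copy
 * engine could hook its transmit completion handler like this.  The handler
 * my_htt_tx_complete and the context my_htt_ctx are hypothetical; the
 * handler must match the ce_send_cb prototype declared in ce_api.h.
 *
 *	struct CE_handle *ce_tx_hdl =
 *		(struct CE_handle *)scn->ce_id_to_state[CE_HTT_H2T_MSG];
 *
 *	ce_send_cb_register(ce_tx_hdl, my_htt_tx_complete, my_htt_ctx, 0);
 */
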
/**
 * ce_recv_cb_register(): register completion handler
 * @copyeng: CE_state representing the ce we are adding the behavior to
 * @fn_ptr: callback that the ce should use when processing rx completions
 * @CE_recv_context: context pointer handed back to @fn_ptr
 * @disable_interrupts: whether completion interrupts should be disabled
 *
 * Registers the recv context before the fn pointer so that if the cb is valid
 * the context should be valid.
 *
 * Caller should guarantee that no transactions are in progress before
 * switching the callback function.
 */
void
ce_recv_cb_register(struct CE_handle *copyeng,
		    CE_recv_cb fn_ptr,
		    void *CE_recv_context, int disable_interrupts)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;

	if (CE_state == NULL) {
		pr_err("%s: ERROR CE state = NULL\n", __func__);
		return;
	}
	CE_state->recv_context = CE_recv_context;
	CE_state->recv_cb = fn_ptr;
	ce_per_engine_handler_adjust(CE_state, disable_interrupts);
}

/**
 * ce_watermark_cb_register(): register completion handler
 * @copyeng: CE_state representing the ce we are adding the behavior to
 * @fn_ptr: callback that the ce should use when processing watermark events
 * @CE_wm_context: context to associate with the watermark callback
 *
 * Caller should guarantee that no watermark events are being processed before
 * switching the callback function.
 */
void
ce_watermark_cb_register(struct CE_handle *copyeng,
			 CE_watermark_cb fn_ptr, void *CE_wm_context)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;

	CE_state->watermark_cb = fn_ptr;
	CE_state->wm_context = CE_wm_context;
	ce_per_engine_handler_adjust(CE_state, 0);
	if (fn_ptr)
		CE_state->misc_cbs = 1;
}

#ifdef WLAN_FEATURE_FASTPATH
/**
 * ce_pkt_dl_len_set(): set the HTT packet download length
 * @hif_sc: HIF context
 * @pkt_download_len: download length
 *
 * Return: None
 */
void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
{
	struct ol_softc *sc = (struct ol_softc *)(hif_sc);
	struct CE_state *ce_state = sc->ce_id_to_state[CE_HTT_H2T_MSG];

	cdf_assert_always(ce_state);

	ce_state->download_len = pkt_download_len;

	cdf_print("%s CE %d Pkt download length %d\n", __func__,
		  ce_state->id, ce_state->download_len);
}
#else
void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
{
}
#endif /* WLAN_FEATURE_FASTPATH */

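/**
 * ce_get_rx_pending() - check whether any copy engine has rx work pending
 * @scn: ol_softc context
 *
 * Return: true if any copy engine still has receive processing pending.
 */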
bool ce_get_rx_pending(struct ol_softc *scn)
{
	int CE_id;

	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
		if (cdf_atomic_read(&CE_state->rx_pending))
			return true;
	}

	return false;
}

/**
 * ce_check_rx_pending() - check whether a copy engine has rx work pending
 * @scn: ol_softc context
 * @ce_id: copy engine id
 *
 * Return: true if the given copy engine still has receive processing
 *	pending, false otherwise.
 */
bool ce_check_rx_pending(struct ol_softc *scn, int ce_id)
{
	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
	if (cdf_atomic_read(&CE_state->rx_pending))
		return true;
	else
		return false;
}

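/*
 * Usage sketch (illustrative only): a polling path can use the rx_pending
 * state to decide whether another service pass is needed before interrupts
 * are re-enabled.  hif_poll_ce is hypothetical.
 *
 *	static void hif_poll_ce(struct ol_softc *scn, int ce_id)
 *	{
 *		do {
 *			ce_per_engine_service(scn, ce_id);
 *		} while (ce_check_rx_pending(scn, ce_id));
 *	}
 */
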
/**
 * ce_enable_msi(): write the msi configuration to the target
 * @scn: hif context
 * @CE_id: which copy engine will be configured for msi interrupts
 * @msi_addr_lo: Hardware will write to this address to generate an interrupt
 * @msi_addr_hi: Hardware will write to this address to generate an interrupt
 * @msi_data: Hardware will write this data to generate an interrupt
 *
 * Should be done in the initialization sequence so no locking would be needed.
 */
void ce_enable_msi(struct ol_softc *scn, unsigned int CE_id,
		   uint32_t msi_addr_lo, uint32_t msi_addr_hi,
		   uint32_t msi_data)
{
#ifdef WLAN_ENABLE_QCA6180
	struct CE_state *CE_state;
	A_target_id_t targid;
	u_int32_t ctrl_addr;
	uint32_t tmp;

	CE_state = scn->ce_id_to_state[CE_id];
	if (!CE_state) {
		HIF_ERROR("%s: error - CE_state = NULL", __func__);
		return;
	}
	targid = TARGID(scn);
	ctrl_addr = CE_state->ctrl_addr;
	CE_MSI_ADDR_LOW_SET(scn, ctrl_addr, msi_addr_lo);
	CE_MSI_ADDR_HIGH_SET(scn, ctrl_addr, msi_addr_hi);
	CE_MSI_DATA_SET(scn, ctrl_addr, msi_data);
	tmp = CE_CTRL_REGISTER1_GET(scn, ctrl_addr);
	tmp |= (1 << CE_MSI_ENABLE_BIT);
	CE_CTRL_REGISTER1_SET(scn, ctrl_addr, tmp);
#endif
}

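/*
 * Usage sketch (illustrative only): during interrupt setup the host could
 * program a copy engine with the MSI target address/data obtained from the
 * PCI MSI capability.  The msi_addr and msi_data values below are
 * hypothetical placeholders.
 *
 *	uint64_t msi_addr = 0xfee00000;		(hypothetical)
 *	uint32_t msi_data = 0x4021;		(hypothetical)
 *
 *	ce_enable_msi(scn, CE_id,
 *		      (uint32_t)(msi_addr & 0xffffffff),
 *		      (uint32_t)(msi_addr >> 32),
 *		      msi_data);
 */
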
#ifdef IPA_OFFLOAD
/**
 * ce_ipa_get_resource() - get uc resource on copyengine
 * @ce: copyengine context
 * @ce_sr_base_paddr: copyengine source ring base physical address
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * The copy engine should release these resources to the micro controller.
 * The micro controller needs
 *  - Copy engine source descriptor base address
 *  - Copy engine source descriptor size
 *  - PCI BAR address to access copy engine register
 *
 * Return: None
 */
void ce_ipa_get_resource(struct CE_handle *ce,
			 cdf_dma_addr_t *ce_sr_base_paddr,
			 uint32_t *ce_sr_ring_size,
			 cdf_dma_addr_t *ce_reg_paddr)
{
	struct CE_state *CE_state = (struct CE_state *)ce;
	uint32_t ring_loop;
	struct CE_src_desc *ce_desc;
	cdf_dma_addr_t phy_mem_base;
	struct ol_softc *scn = CE_state->scn;

	if (CE_RUNNING != CE_state->state) {
		*ce_sr_base_paddr = 0;
		*ce_sr_ring_size = 0;
		return;
	}

	/* Update default value for descriptor */
	for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
	     ring_loop++) {
		ce_desc = (struct CE_src_desc *)
			  ((char *)CE_state->src_ring->base_addr_owner_space +
			   ring_loop * (sizeof(struct CE_src_desc)));
		CE_IPA_RING_INIT(ce_desc);
	}

	/* Get BAR address */
	hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);

	*ce_sr_base_paddr = CE_state->src_ring->base_addr_CE_space;
	*ce_sr_ring_size = (uint32_t) (CE_state->src_ring->nentries *
				       sizeof(struct CE_src_desc));
	*ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
			SR_WR_INDEX_ADDRESS;
}
#endif /* IPA_OFFLOAD */

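/*
 * Usage sketch (illustrative only): an IPA setup path could pull the copy
 * engine resources like this before handing them to the micro controller.
 * The ce_hdl handle and the way the values are consumed are hypothetical.
 *
 *	cdf_dma_addr_t sr_base_paddr, reg_paddr;
 *	uint32_t sr_ring_size;
 *
 *	ce_ipa_get_resource(ce_hdl, &sr_base_paddr, &sr_ring_size,
 *			    &reg_paddr);
 */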