/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <osdep.h>
#include "a_types.h"
#include <athdefs.h>
#include "osapi_linux.h"
#include "hif.h"
#include "hif_io32.h"
#include "ce_api.h"
#include "ce_main.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "qdf_lock.h"
#include "regtable.h"
#include "epping_main.h"
#include "hif_main.h"
#include "hif_debug.h"

#ifdef IPA_OFFLOAD
#ifdef QCA_WIFI_3_0
#define CE_IPA_RING_INIT(ce_desc) \
	do { \
		ce_desc->gather = 0; \
		ce_desc->enable_11h = 0; \
		ce_desc->meta_data_low = 0; \
		ce_desc->packet_result_offset = 64; \
		ce_desc->toeplitz_hash_enable = 0; \
		ce_desc->addr_y_search_disable = 0; \
		ce_desc->addr_x_search_disable = 0; \
		ce_desc->misc_int_disable = 0; \
		ce_desc->target_int_disable = 0; \
		ce_desc->host_int_disable = 0; \
		ce_desc->dest_byte_swap = 0; \
		ce_desc->byte_swap = 0; \
		ce_desc->type = 2; \
		ce_desc->tx_classify = 1; \
		ce_desc->buffer_addr_hi = 0; \
		ce_desc->meta_data = 0; \
		ce_desc->nbytes = 128; \
	} while (0)
#else
#define CE_IPA_RING_INIT(ce_desc) \
	do { \
		ce_desc->byte_swap = 0; \
		ce_desc->nbytes = 60; \
		ce_desc->gather = 0; \
	} while (0)
#endif /* QCA_WIFI_3_0 */
#endif /* IPA_OFFLOAD */

static int war1_allow_sleep;
/* io32 write workaround */
static int hif_ce_war1;

#ifdef CONFIG_SLUB_DEBUG_ON

/**
 * struct hif_ce_desc_event - structure for detailing a ce event
 * @type: what the event was
 * @time: when it happened
 * @descriptor: descriptor enqueued or dequeued
 * @memory: virtual address that was used
 * @index: location of the descriptor in the ce ring
 */
struct hif_ce_desc_event {
	uint16_t index;
	enum hif_ce_event_type type;
	uint64_t time;
	union ce_desc descriptor;
	void *memory;
};

/* max history to record per copy engine */
#define HIF_CE_HISTORY_MAX 512
qdf_atomic_t hif_ce_desc_history_index[CE_COUNT_MAX];
struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];


/**
 * get_next_record_index() - get the next record index
 * @table_index: atomic index variable to increment
 * @array_size: array size of the circular buffer
 *
 * Increment the atomic index and reserve the value.
 * Takes care of buffer wrap.
 * Guaranteed to be thread safe as long as fewer than array_size contexts
 * try to access the array. If there are more than array_size contexts
 * trying to access the array, full locking of the recording process would
 * be needed to have sane logging.
 */
static int get_next_record_index(qdf_atomic_t *table_index, int array_size)
{
	int record_index = qdf_atomic_inc_return(table_index);
	if (record_index == array_size)
		qdf_atomic_sub(array_size, table_index);

	while (record_index >= array_size)
		record_index -= array_size;
	return record_index;
}
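
/*
 * Worked example (illustrative only; the numbers assume the default
 * HIF_CE_HISTORY_MAX of 512): starting from a freshly initialized index,
 * successive callers are handed slots 1, 2, ... 511.  The caller whose
 * increment returns 512 rewinds the shared counter by 512 and, after the
 * reduction loop, records into slot 0; callers that raced past it and
 * observed 513, 514, ... land in slots 1, 2, ... so the history simply
 * wraps around the circular buffer.
 */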

/**
 * hif_record_ce_desc_event() - record ce descriptor events
 * @scn: hif_softc
 * @ce_id: which ce is the event occurring on
 * @type: what happened
 * @descriptor: pointer to the descriptor posted/completed
 * @memory: virtual address of buffer related to the descriptor
 * @index: index that the descriptor was/will be at.
 */
void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
			      enum hif_ce_event_type type,
			      union ce_desc *descriptor,
			      void *memory, int index)
{
	struct hif_callbacks *cbk = hif_get_callbacks_handle(scn);
	int record_index = get_next_record_index(
			&hif_ce_desc_history_index[ce_id], HIF_CE_HISTORY_MAX);

	struct hif_ce_desc_event *event =
		&hif_ce_desc_history[ce_id][record_index];
	event->type = type;

	if (cbk && cbk->get_monotonic_boottime)
		event->time = cbk->get_monotonic_boottime();
	else
		event->time = ((uint64_t)qdf_system_ticks_to_msecs(
					qdf_system_ticks()) * 1000);

	if (descriptor != NULL)
		event->descriptor = *descriptor;
	else
		memset(&event->descriptor, 0, sizeof(union ce_desc));
	event->memory = memory;
	event->index = index;
}

/**
 * ce_init_ce_desc_event_log() - initialize the ce event log
 * @ce_id: copy engine id for which we are initializing the log
 * @size: size of array to dedicate
 *
 * Currently the passed size is ignored in favor of a precompiled value.
 */
void ce_init_ce_desc_event_log(int ce_id, int size)
{
	qdf_atomic_init(&hif_ce_desc_history_index[ce_id]);
}
#else
void hif_record_ce_desc_event(struct hif_softc *scn,
			      int ce_id, enum hif_ce_event_type type,
			      union ce_desc *descriptor, void *memory,
			      int index)
{
}

inline void ce_init_ce_desc_event_log(int ce_id, int size)
{
}
#endif

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem, than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 * On Target TX completion, a per-send Toeplitz hash result
 * (toeplitz_hash_result) is also reported back to the caller.
 */
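
/*
 * Illustrative usage sketch (comment only, not compiled): a typical
 * host-side send through the sendlist API defined in this file might
 * look like the following.  The copy-engine handle, DMA address, length,
 * transfer id and context names are placeholders supplied by the caller,
 * and error handling is elided.
 *
 *	struct ce_sendlist sendlist;
 *	int status;
 *
 *	ce_sendlist_init(&sendlist);
 *	ce_sendlist_buf_add(&sendlist, buffer_paddr, nbytes, 0, 0);
 *	status = ce_sendlist_send(copyeng, per_transfer_context,
 *				  &sendlist, transfer_id);
 *	if (status != QDF_STATUS_SUCCESS)
 *		;	/* ring full: caller re-queues and retries later */
 *
 * On completion, the registered send callback (or ce_completed_send_next())
 * hands the same per_transfer_context back to the caller.
 */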

/*
 * Guts of ce_send, used by both ce_send and ce_sendlist_send.
 * The caller takes responsibility for any needed locking.
 */
int
ce_completed_send_next_nolock(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      qdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *sw_idx, unsigned int *hw_idx,
			      uint32_t *toeplitz_hash_result);

void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
				   u32 ctrl_addr, unsigned int write_index)
{
	if (hif_ce_war1) {
		void __iomem *indicator_addr;

		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;

		if (!war1_allow_sleep
		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
			hif_write32_mb(indicator_addr,
				       (CDC_WAR_MAGIC_STR | write_index));
		} else {
			unsigned long irq_flags;
			local_irq_save(irq_flags);
			hif_write32_mb(indicator_addr, 1);

			/*
			 * PCIE write waits for ACK in IPQ8K, there is no
			 * need to read back value.
			 */
			(void)hif_read32_mb(indicator_addr);
			(void)hif_read32_mb(indicator_addr); /* conservative */

			CE_SRC_RING_WRITE_IDX_SET(scn,
						  ctrl_addr, write_index);

			hif_write32_mb(indicator_addr, 0);
			local_irq_restore(irq_flags);
		}
	} else
		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
}

int
ce_send_nolock(struct CE_handle *copyeng,
	       void *per_transfer_context,
	       qdf_dma_addr_t buffer,
	       uint32_t nbytes,
	       uint32_t transfer_id,
	       uint32_t flags,
	       uint32_t user_flags)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return ATH_ISR_NOSCHED;
	if (unlikely(CE_RING_DELTA(nentries_mask,
				write_index, sw_index - 1) <= 0)) {
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		status = QDF_STATUS_E_FAILURE;
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;
		return status;
	}
	{
		enum hif_ce_event_type event_type = HIF_TX_GATHER_DESC_POST;
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		/* Update low 32 bits source descriptor address */
		shadow_src_desc->buffer_addr =
			(uint32_t)(dma_addr & 0xFFFFFFFF);
#ifdef QCA_WIFI_3_0
		shadow_src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0x1F);
		user_flags |= shadow_src_desc->buffer_addr_hi;
		memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
		       sizeof(uint32_t));
#endif
		shadow_src_desc->meta_data = transfer_id;

		/*
		 * Set the swap bit if:
		 * typical sends on this CE are swapped (host is big-endian)
		 * and this send doesn't disable the swapping
		 * (data is not bytestream)
		 */
		shadow_src_desc->byte_swap =
			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
		shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
		shadow_src_desc->nbytes = nbytes;

		*src_desc = *shadow_src_desc;

		src_ring->per_transfer_context[write_index] =
			per_transfer_context;

		/* Update Source Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		/* WORKAROUND */
		if (!shadow_src_desc->gather) {
			event_type = HIF_TX_DESC_POST;
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);
		}

		/* src_ring->write_index hasn't been updated even though
		 * the register has already been written to.
		 */
		hif_record_ce_desc_event(scn, CE_state->id, event_type,
			(union ce_desc *)shadow_src_desc, per_transfer_context,
			src_ring->write_index);

		src_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	}
	if (Q_TARGET_ACCESS_END(scn) < 0)
		return ATH_ISR_SCHED;

	return status;
}

int
ce_send(struct CE_handle *copyeng,
	void *per_transfer_context,
	qdf_dma_addr_t buffer,
	uint32_t nbytes,
	uint32_t transfer_id,
	uint32_t flags,
	uint32_t user_flag)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	int status;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	status = ce_send_nolock(copyeng, per_transfer_context, buffer, nbytes,
				transfer_id, flags, user_flag);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

unsigned int ce_sendlist_sizeof(void)
{
	return sizeof(struct ce_sendlist);
}

void ce_sendlist_init(struct ce_sendlist *sendlist)
{
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	sl->num_items = 0;
}

int
ce_sendlist_buf_add(struct ce_sendlist *sendlist,
		    qdf_dma_addr_t buffer,
		    uint32_t nbytes,
		    uint32_t flags,
		    uint32_t user_flags)
{
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	unsigned int num_items = sl->num_items;
	struct ce_sendlist_item *item;

	if (num_items >= CE_SENDLIST_ITEMS_MAX) {
		QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
		return QDF_STATUS_E_RESOURCES;
	}

	item = &sl->item[num_items];
	item->send_type = CE_SIMPLE_BUFFER_TYPE;
	item->data = buffer;
	item->u.nbytes = nbytes;
	item->flags = flags;
	item->user_flags = user_flags;
	sl->num_items = num_items + 1;
	return QDF_STATUS_SUCCESS;
}

int
ce_sendlist_send(struct CE_handle *copyeng,
		 void *per_transfer_context,
		 struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	int status = -ENOMEM;
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int num_items = sl->num_items;
	unsigned int sw_index;
	unsigned int write_index;

	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
	    num_items) {
		struct ce_sendlist_item *item;
		int i;

		/* handle all but the last item uniformly */
		for (i = 0; i < num_items - 1; i++) {
			item = &sl->item[i];
			/* TBDXXX: Support extensible sendlist_types? */
			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
			status = ce_send_nolock(copyeng, CE_SENDLIST_ITEM_CTXT,
				(qdf_dma_addr_t) item->data,
				item->u.nbytes, transfer_id,
				item->flags | CE_SEND_FLAG_GATHER,
				item->user_flags);
			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		}
		/* provide valid context pointer for final item */
		item = &sl->item[i];
		/* TBDXXX: Support extensible sendlist_types? */
		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
		status = ce_send_nolock(copyeng, per_transfer_context,
					(qdf_dma_addr_t) item->data,
					item->u.nbytes,
					transfer_id, item->flags,
					item->user_flags);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
					     QDF_NBUF_TX_PKT_CE);
		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
			(uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
			sizeof(((qdf_nbuf_t)per_transfer_context)->data)));
	} else {
		/*
		 * Probably not worth the additional complexity to support
		 * partial sends with continuation or notification. We expect
		 * to use large rings and small sendlists. If we can't handle
		 * the entire request at once, punt it back to the caller.
		 */
	}
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

#ifdef WLAN_FEATURE_FASTPATH
#ifdef QCA_WIFI_3_0
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
		      uint64_t dma_addr,
		      uint32_t user_flags)
{
	shadow_src_desc->buffer_addr_hi =
		(uint32_t)((dma_addr >> 32) & 0x1F);
	user_flags |= shadow_src_desc->buffer_addr_hi;
	memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
	       sizeof(uint32_t));
}
#else
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
		      uint64_t dma_addr,
		      uint32_t user_flags)
{
}
#endif

/**
 * ce_send_fast() - CE layer Tx buffer posting function
 * @copyeng: copy engine handle
 * @msdus: array of msdus to be sent
 * @num_msdus: number of msdus in the array
 * @transfer_id: transfer_id
 *
 * Assumption: called with an array of MSDUs.
 * Function:
 * For each msdu in the array
 * 1. Check no. of available entries
 * 2. Create src ring entries (allocated in consistent memory)
 * 3. Write index to h/w
 *
 * Return: No. of packets that could be sent
 */

int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t *msdus,
		 unsigned int num_msdus, unsigned int transfer_id)
{
	struct CE_state *ce_state = (struct CE_state *)copyeng;
	struct hif_softc *scn = ce_state->scn;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct CE_ring_state *src_ring = ce_state->src_ring;
	u_int32_t ctrl_addr = ce_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	unsigned int frag_len;
	qdf_nbuf_t msdu;
	int i;
	uint64_t dma_addr;
	uint32_t user_flags = 0;

	qdf_spin_lock_bh(&ce_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	/* 2 msdus per packet */
	for (i = 0; i < num_msdus; i++) {
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		hif_pm_runtime_get_noresume(hif_hdl);
		msdu = msdus[i];

		/*
		 * First fill out the ring descriptor for the HTC HTT frame
		 * header. These are uncached writes. Should we use a local
		 * structure instead?
		 */
		/* HTT/HTC header can be passed as an argument */
		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);

		shadow_src_desc->meta_data = transfer_id;
		shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);

		/*
		 * HTC HTT header is a word stream, so byte swap if CE byte
		 * swap enabled
		 */
		shadow_src_desc->byte_swap = ((ce_state->attr_flags &
					       CE_ATTR_BYTE_SWAP_DATA) != 0);
		/* For the first one, it still does not need to write */
		shadow_src_desc->gather = 1;
		*src_desc = *shadow_src_desc;

		/* By default we could initialize the transfer context to this
		 * value
		 */
		src_ring->per_transfer_context[write_index] =
			CE_SENDLIST_ITEM_CTXT;

		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
		/*
		 * Now fill out the ring descriptor for the actual data
		 * packet
		 */
		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		/*
		 * Clear packet offset for all but the first CE desc.
		 */
		user_flags &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
		shadow_src_desc->meta_data = transfer_id;

		/* get actual packet length */
		frag_len = qdf_nbuf_get_frag_len(msdu, 1);

		/* only read download_len once */
		shadow_src_desc->nbytes = ce_state->download_len;
		if (shadow_src_desc->nbytes > frag_len)
			shadow_src_desc->nbytes = frag_len;

		/* Data packet is a byte stream, so disable byte swap */
		shadow_src_desc->byte_swap = 0;
		/* For the last one, gather is not set */
		shadow_src_desc->gather = 0;
		*src_desc = *shadow_src_desc;
		src_ring->per_transfer_context[write_index] = msdu;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	}

	/* Write the final index to h/w one-shot */
	if (i) {
		src_ring->write_index = write_index;

		if (hif_pm_runtime_get(hif_hdl) == 0) {
			/* Don't call WAR_XXX from here;
			 * just call XXX instead, which has the required logic.
			 */
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);
			hif_pm_runtime_put(hif_hdl);
		}
	}

	qdf_spin_unlock_bh(&ce_state->ce_index_lock);

	/*
	 * If all packets in the array are transmitted,
	 * i = num_msdus
	 * Temporarily add an ASSERT
	 */
	ASSERT(i == num_msdus);
	return i;
}
#endif /* WLAN_FEATURE_FASTPATH */

int
ce_recv_buf_enqueue(struct CE_handle *copyeng,
		    void *per_recv_context, qdf_dma_addr_t buffer)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	int val = 0;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	A_TARGET_ACCESS_BEGIN_RET_EXT(scn, val);
	if (val == -1) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return val;
	}

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
		struct CE_dest_desc *dest_ring_base =
			(struct CE_dest_desc *)dest_ring->
			    base_addr_owner_space;
		struct CE_dest_desc *dest_desc =
			CE_DEST_RING_TO_DESC(dest_ring_base, write_index);

		/* Update low 32 bit destination descriptor */
		dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
#ifdef QCA_WIFI_3_0
		dest_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0x1F);
#endif
		dest_desc->nbytes = 0;

		dest_ring->per_transfer_context[write_index] =
			per_recv_context;

		hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_POST,
				(union ce_desc *) dest_desc, per_recv_context,
				write_index);

		/* Update Destination Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
		CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
		dest_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	} else {
		status = QDF_STATUS_E_FAILURE;
	}
	A_TARGET_ACCESS_END_RET_EXT(scn, val);
	if (val == -1) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return val;
	}

	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}
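
/*
 * Illustrative replenish loop (comment only, not compiled): the HIF layer
 * typically keeps the destination ring full by posting one DMA-mapped
 * receive buffer per free entry.  The nbuf and paddr names below are
 * placeholders, the allocation/mapping step is elided, and error handling
 * is omitted.
 *
 *	while (ce_recv_entries_avail(copyeng)) {
 *		nbuf = ...allocate and DMA-map a receive buffer...;
 *		paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
 *		if (ce_recv_buf_enqueue(copyeng, nbuf, paddr) !=
 *		    QDF_STATUS_SUCCESS)
 *			break;
 *	}
 *
 * The nbuf posted as per_recv_context is handed back by the recv callback
 * (or ce_completed_recv_next()) when the Target fills it.
 */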

void
ce_send_watermarks_set(struct CE_handle *copyeng,
		       unsigned int low_alert_nentries,
		       unsigned int high_alert_nentries)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
}

void
ce_recv_watermarks_set(struct CE_handle *copyeng,
		       unsigned int low_alert_nentries,
		       unsigned int high_alert_nentries)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
				 low_alert_nentries);
	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
				  high_alert_nentries);
}

unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;

	qdf_spin_lock(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}

unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;

	qdf_spin_lock(&CE_state->ce_index_lock);
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}

/*
 * Guts of ce_send_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
unsigned int
ce_send_entries_done_nolock(struct hif_softc *scn,
			    struct CE_state *CE_state)
{
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int read_index;

	sw_index = src_ring->sw_index;
	read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);

	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
}

unsigned int ce_send_entries_done(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int nentries;

	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries = ce_send_entries_done_nolock(CE_state->scn, CE_state);
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return nentries;
}

/*
 * Guts of ce_recv_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
unsigned int
ce_recv_entries_done_nolock(struct hif_softc *scn,
			    struct CE_state *CE_state)
{
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int read_index;

	sw_index = dest_ring->sw_index;
	read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);

	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
}

unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int nentries;

	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries = ce_recv_entries_done_nolock(CE_state->scn, CE_state);
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return nentries;
}

/* Debug support */
void *ce_debug_cmplrn_context;  /* completed recv next context */
void *ce_debug_cnclsn_context;  /* cancel send next context */
void *ce_debug_rvkrn_context;   /* revoke receive next context */
void *ce_debug_cmplsn_context;  /* completed send next context */

/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
int
ce_completed_recv_next_nolock(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      qdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *flagsp)
{
	int status;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct CE_dest_desc *dest_ring_base =
		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
	struct CE_dest_desc *dest_desc =
		CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
	int nbytes;
	struct CE_dest_desc dest_desc_info;
	/*
	 * By copying the dest_desc_info element to local memory, we could
	 * avoid extra memory read from non-cacheable memory.
	 */
	dest_desc_info = *dest_desc;
	nbytes = dest_desc_info.nbytes;
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_COMPLETION,
			(union ce_desc *) dest_desc,
			dest_ring->per_transfer_context[sw_index],
			sw_index);

	dest_desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
	*nbytesp = nbytes;
	*transfer_idp = dest_desc_info.meta_data;
	*flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

	if (per_CE_contextp) {
		*per_CE_contextp = CE_state->recv_context;
	}

	ce_debug_cmplrn_context = dest_ring->per_transfer_context[sw_index];
	if (per_transfer_contextp) {
		*per_transfer_contextp = ce_debug_cmplrn_context;
	}
	dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	status = QDF_STATUS_SUCCESS;

done:
	return status;
}

int
ce_completed_recv_next(struct CE_handle *copyeng,
		       void **per_CE_contextp,
		       void **per_transfer_contextp,
		       qdf_dma_addr_t *bufferp,
		       unsigned int *nbytesp,
		       unsigned int *transfer_idp, unsigned int *flagsp)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	int status;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	status =
		ce_completed_recv_next_nolock(CE_state, per_CE_contextp,
					      per_transfer_contextp, bufferp,
					      nbytesp, transfer_idp, flagsp);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

/* NB: Modeled after ce_completed_recv_next_nolock */
QDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state;
	struct CE_ring_state *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	QDF_STATUS status;
	struct hif_softc *scn;

	CE_state = (struct CE_state *)copyeng;
	dest_ring = CE_state->dest_ring;
	if (!dest_ring) {
		return QDF_STATUS_E_FAILURE;
	}

	scn = CE_state->scn;
	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct CE_dest_desc *dest_ring_base =
			(struct CE_dest_desc *)dest_ring->
			    base_addr_owner_space;
		struct CE_dest_desc *dest_desc =
			CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);

		if (per_CE_contextp) {
			*per_CE_contextp = CE_state->recv_context;
		}

		ce_debug_rvkrn_context =
			dest_ring->per_transfer_context[sw_index];
		if (per_transfer_contextp) {
			*per_transfer_contextp = ce_debug_rvkrn_context;
		}
		dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	} else {
		status = QDF_STATUS_E_FAILURE;
	}
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return status;
}

/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
int
ce_completed_send_next_nolock(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      qdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *sw_idx,
			      unsigned int *hw_idx,
			      uint32_t *toeplitz_hash_result)
{
	int status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int read_index;
	struct hif_softc *scn = CE_state->scn;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return ATH_ISR_NOSCHED;
		src_ring->hw_index =
			CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;
	}
	read_index = src_ring->hw_index;

	if (sw_idx)
		*sw_idx = sw_index;

	if (hw_idx)
		*hw_idx = read_index;

	if ((read_index != sw_index) && (read_index != 0xffffffff)) {
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, sw_index);
#ifdef QCA_WIFI_3_0
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
#endif
		hif_record_ce_desc_event(scn, CE_state->id,
				HIF_TX_DESC_COMPLETION,
				(union ce_desc *) shadow_src_desc,
				src_ring->per_transfer_context[sw_index],
				sw_index);

		/* Return data from completed source descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
		*nbytesp = shadow_src_desc->nbytes;
		*transfer_idp = shadow_src_desc->meta_data;
#ifdef QCA_WIFI_3_0
		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
#else
		*toeplitz_hash_result = 0;
#endif
		if (per_CE_contextp) {
			*per_CE_contextp = CE_state->send_context;
		}

		ce_debug_cmplsn_context =
			src_ring->per_transfer_context[sw_index];
		if (per_transfer_contextp) {
			*per_transfer_contextp = ce_debug_cmplsn_context;
		}
		src_ring->per_transfer_context[sw_index] = 0;  /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}

	return status;
}

/* NB: Modeled after ce_completed_send_next */
QDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp,
		    unsigned int *nbytesp,
		    unsigned int *transfer_idp,
		    uint32_t *toeplitz_hash_result)
{
	struct CE_state *CE_state;
	struct CE_ring_state *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	QDF_STATUS status;
	struct hif_softc *scn;

	CE_state = (struct CE_state *)copyeng;
	src_ring = CE_state->src_ring;
	if (!src_ring) {
		return QDF_STATUS_E_FAILURE;
	}

	scn = CE_state->scn;
	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);

		/* Return data from completed source descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
#ifdef QCA_WIFI_3_0
		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
#else
		*toeplitz_hash_result = 0;
#endif

		if (per_CE_contextp) {
			*per_CE_contextp = CE_state->send_context;
		}

		ce_debug_cnclsn_context =
			src_ring->per_transfer_context[sw_index];
		if (per_transfer_contextp) {
			*per_transfer_contextp = ce_debug_cnclsn_context;
		}
		src_ring->per_transfer_context[sw_index] = 0;  /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	} else {
		status = QDF_STATUS_E_FAILURE;
	}
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return status;
}

/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
#define CE_WM_SHFT 1

int
ce_completed_send_next(struct CE_handle *copyeng,
		       void **per_CE_contextp,
		       void **per_transfer_contextp,
		       qdf_dma_addr_t *bufferp,
		       unsigned int *nbytesp,
		       unsigned int *transfer_idp,
		       unsigned int *sw_idx,
		       unsigned int *hw_idx,
		       unsigned int *toeplitz_hash_result)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	int status;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	status =
		ce_completed_send_next_nolock(CE_state, per_CE_contextp,
					      per_transfer_contextp, bufferp,
					      nbytesp, transfer_idp, sw_idx,
					      hw_idx, toeplitz_hash_result);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

#ifdef ATH_11AC_TXCOMPACT
/* CE engine descriptor reap
 * Similar to ce_per_engine_service(); the only difference is that
 * ce_per_engine_service() does both receive processing and reaping of
 * completed descriptors, while this function only handles reaping of
 * Tx completion descriptors.
 * It is called from the threshold reap poll routine
 * hif_send_complete_check(), so it must not contain any receive
 * functionality.
 */

void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
{
	void *CE_context;
	void *transfer_context;
	qdf_dma_addr_t buf;
	unsigned int nbytes;
	unsigned int id;
	unsigned int sw_idx, hw_idx;
	uint32_t toeplitz_hash_result;
	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
			NULL, NULL, 0);

	/* Since this function is called from both user context and
	 * tasklet context the spinlock has to lock the bottom halves.
	 * This fix assumes that ATH_11AC_TXCOMPACT flag is always
	 * enabled in TX polling mode. If this is not the case, more
	 * bottom-half spinlock changes are needed. Due to data path
	 * performance concerns, after internal discussion we've decided
	 * to make the minimum change, i.e., only address the issue that
	 * occurred in this function. The possible negative effect of this
	 * minimum change is that, in the future, if some other function
	 * is also opened up to user context, those cases will need to be
	 * addressed by changing spin_lock to spin_lock_bh as well.
	 */

	qdf_spin_lock_bh(&CE_state->ce_index_lock);

	if (CE_state->send_cb) {
		{
			/* Pop completed send buffers and call the
			 * registered send callback for each
			 */
			while (ce_completed_send_next_nolock
				 (CE_state, &CE_context,
				  &transfer_context, &buf,
				  &nbytes, &id, &sw_idx, &hw_idx,
				  &toeplitz_hash_result) ==
				  QDF_STATUS_SUCCESS) {
				if (ce_id != CE_HTT_H2T_MSG) {
					qdf_spin_unlock_bh(
						&CE_state->ce_index_lock);
					CE_state->send_cb(
						(struct CE_handle *)
						CE_state, CE_context,
						transfer_context, buf,
						nbytes, id, sw_idx, hw_idx,
						toeplitz_hash_result);
					qdf_spin_lock_bh(
						&CE_state->ce_index_lock);
				} else {
					struct HIF_CE_pipe_info *pipe_info =
						(struct HIF_CE_pipe_info *)
						CE_context;

					qdf_spin_lock_bh(&pipe_info->
						completion_freeq_lock);
					pipe_info->num_sends_allowed++;
					qdf_spin_unlock_bh(&pipe_info->
						completion_freeq_lock);
				}
			}
		}
	}

	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
			NULL, NULL, 0);
	Q_TARGET_ACCESS_END(scn);
}

#endif /*ATH_11AC_TXCOMPACT */

/*
 * Number of times to check for any pending tx/rx completion on
 * a copy engine; this count should be big enough. Once we hit
 * this threshold we'll not check for any Tx/Rx completion in the same
 * interrupt handling. Note that this threshold is only used for
 * Rx interrupt processing; it can be used for Tx as well if we
 * suspect any infinite loop in checking for pending Tx completion.
 */
#define CE_TXRX_COMP_CHECK_THRESHOLD 20
1293
1294/*
1295 * Guts of interrupt handler for per-engine interrupts on a particular CE.
1296 *
1297 * Invokes registered callbacks for recv_complete,
1298 * send_complete, and watermarks.
1299 *
1300 * Returns: number of messages processed
1301 */
1302
Komal Seelam644263d2016-02-22 20:45:49 +05301303int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001304{
1305 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1306 uint32_t ctrl_addr = CE_state->ctrl_addr;
1307 void *CE_context;
1308 void *transfer_context;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301309 qdf_dma_addr_t buf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001310 unsigned int nbytes;
1311 unsigned int id;
1312 unsigned int flags;
1313 uint32_t CE_int_status;
1314 unsigned int more_comp_cnt = 0;
1315 unsigned int more_snd_comp_cnt = 0;
1316 unsigned int sw_idx, hw_idx;
1317 uint32_t toeplitz_hash_result;
Komal Seelambd7c51d2016-02-24 10:27:30 +05301318 uint32_t mode = hif_get_conparam(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001319
1320 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1321 HIF_ERROR("[premature rc=0]\n");
1322 return 0; /* no work done */
1323 }
1324
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301325 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001326
1327 /* Clear force_break flag and re-initialize receive_count to 0 */
1328
1329 /* NAPI: scn variables- thread/multi-processing safety? */
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001330 CE_state->receive_count = 0;
Houston Hoffman18c7fc52015-09-02 11:44:42 -07001331 CE_state->force_break = 0;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001332more_completions:
1333 if (CE_state->recv_cb) {
1334
1335 /* Pop completed recv buffers and call
1336 * the registered recv callback for each
1337 */
1338 while (ce_completed_recv_next_nolock
1339 (CE_state, &CE_context, &transfer_context,
1340 &buf, &nbytes, &id, &flags) ==
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301341 QDF_STATUS_SUCCESS) {
1342 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001343 CE_state->recv_cb((struct CE_handle *)CE_state,
1344 CE_context, transfer_context, buf,
1345 nbytes, id, flags);
1346
1347 /*
1348 * EV #112693 -
1349 * [Peregrine][ES1][WB342][Win8x86][Performance]
1350 * BSoD_0x133 occurred in VHT80 UDP_DL.
1351 * Forcibly break out of the DPC when the number of loops
1352 * in hif_pci_ce_recv_data reaches MAX_NUM_OF_RECEIVES,
1353 * to avoid spending too long in the DPC handling a single
1354 * interrupt. If the force-break action was taken, schedule
1355 * another DPC to avoid data loss.
1356 * This currently applies to Windows OS only; Linux and
1357 * macOS can extend it to their platforms if
1358 * necessary.
1359 */
1360
1361 /* Break out of receive processing by
1362 * force if force_break is set
1363 */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301364 if (qdf_unlikely(CE_state->force_break)) {
1365 qdf_atomic_set(&CE_state->rx_pending, 1);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001366 CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1367 HOST_IS_COPY_COMPLETE_MASK);
1368 if (Q_TARGET_ACCESS_END(scn) < 0)
1369 HIF_ERROR("<--[premature rc=%d]\n",
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001370 CE_state->receive_count);
1371 return CE_state->receive_count;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001372 }
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301373 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001374 }
1375 }
1376
1377 /*
1378 * Attention: the while loop below can potentially spin forever
1379 * during a send stress test.
1380 * Resolve it the same way as the receive case (refer to EV #112693).
1381 */
1382
1383 if (CE_state->send_cb) {
1384 /* Pop completed send buffers and call
1385 * the registered send callback for each
1386 */
1387
1388#ifdef ATH_11AC_TXCOMPACT
1389 while (ce_completed_send_next_nolock
1390 (CE_state, &CE_context,
1391 &transfer_context, &buf, &nbytes,
1392 &id, &sw_idx, &hw_idx,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301393 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001394
1395 if (CE_id != CE_HTT_H2T_MSG ||
Komal Seelambd7c51d2016-02-24 10:27:30 +05301396 WLAN_IS_EPPING_ENABLED(mode)) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301397 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001398 CE_state->send_cb((struct CE_handle *)CE_state,
1399 CE_context, transfer_context,
1400 buf, nbytes, id, sw_idx,
1401 hw_idx, toeplitz_hash_result);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301402 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001403 } else {
1404 struct HIF_CE_pipe_info *pipe_info =
1405 (struct HIF_CE_pipe_info *)CE_context;
1406
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301407 qdf_spin_lock(&pipe_info->
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001408 completion_freeq_lock);
1409 pipe_info->num_sends_allowed++;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301410 qdf_spin_unlock(&pipe_info->
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001411 completion_freeq_lock);
1412 }
1413 }
1414#else /*ATH_11AC_TXCOMPACT */
1415 while (ce_completed_send_next_nolock
1416 (CE_state, &CE_context,
1417 &transfer_context, &buf, &nbytes,
1418 &id, &sw_idx, &hw_idx,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301419 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1420 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001421 CE_state->send_cb((struct CE_handle *)CE_state,
1422 CE_context, transfer_context, buf,
1423 nbytes, id, sw_idx, hw_idx,
1424 toeplitz_hash_result);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301425 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001426 }
1427#endif /*ATH_11AC_TXCOMPACT */
1428 }
1429
1430more_watermarks:
1431 if (CE_state->misc_cbs) {
1432 CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
1433 if (CE_int_status & CE_WATERMARK_MASK) {
1434 if (CE_state->watermark_cb) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301435 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001436 /* Convert HW IS bits to software flags */
1437 flags =
1438 (CE_int_status & CE_WATERMARK_MASK) >>
1439 CE_WM_SHFT;
1440
1441 CE_state->
1442 watermark_cb((struct CE_handle *)CE_state,
1443 CE_state->wm_context, flags);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301444 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001445 }
1446 }
1447 }
1448
1449 /*
1450 * Clear the misc interrupts (watermark) that were handled above,
1451 * and that will be checked again below.
1452 * Clear and check for copy-complete interrupts again, just in case
1453 * more copy completions happened while the misc interrupts were being
1454 * handled.
1455 */
1456 CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1457 CE_WATERMARK_MASK |
1458 HOST_IS_COPY_COMPLETE_MASK);
1459
1460 /*
1461 * Now that per-engine interrupts are cleared, verify that
1462 * no recv interrupts arrived while processing send interrupts,
1463 * and that no recv or send interrupts arrived while processing
1464 * misc interrupts. Go back and check again. Keep checking until
1465 * we find no more events to process.
1466 */
1467 if (CE_state->recv_cb && ce_recv_entries_done_nolock(scn, CE_state)) {
Komal Seelambd7c51d2016-02-24 10:27:30 +05301468 if (WLAN_IS_EPPING_ENABLED(mode) ||
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001469 more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1470 goto more_completions;
1471 } else {
1472 HIF_ERROR(
1473 "%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1474 __func__, CE_state->dest_ring->nentries_mask,
1475 CE_state->dest_ring->sw_index,
1476 CE_DEST_RING_READ_IDX_GET(scn,
1477 CE_state->ctrl_addr));
1478 }
1479 }
1480
1481 if (CE_state->send_cb && ce_send_entries_done_nolock(scn, CE_state)) {
Komal Seelambd7c51d2016-02-24 10:27:30 +05301482 if (WLAN_IS_EPPING_ENABLED(mode) ||
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001483 more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1484 goto more_completions;
1485 } else {
1486 HIF_ERROR(
1487 "%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1488 __func__, CE_state->src_ring->nentries_mask,
1489 CE_state->src_ring->sw_index,
1490 CE_SRC_RING_READ_IDX_GET(scn,
1491 CE_state->ctrl_addr));
1492 }
1493 }
1494
1495 if (CE_state->misc_cbs) {
1496 CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
1497 if (CE_int_status & CE_WATERMARK_MASK) {
1498 if (CE_state->watermark_cb) {
1499 goto more_watermarks;
1500 }
1501 }
1502 }
1503
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301504 qdf_spin_unlock(&CE_state->ce_index_lock);
1505 qdf_atomic_set(&CE_state->rx_pending, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001506
1507 if (Q_TARGET_ACCESS_END(scn) < 0)
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001508 HIF_ERROR("<--[premature rc=%d]\n", CE_state->receive_count);
1509 return CE_state->receive_count;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001510}
1511
1512/*
1513 * Handler for per-engine interrupts on ALL active CEs.
1514 * This is used in cases where the system is sharing a
1515 * single interrupt for all CEs.
1516 */
1517
Komal Seelam644263d2016-02-22 20:45:49 +05301518void ce_per_engine_service_any(int irq, struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001519{
1520 int CE_id;
1521 uint32_t intr_summary;
1522
Houston Hoffmanbac94542016-03-14 21:11:59 -07001523 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1524 return;
1525
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301526 if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001527 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1528 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301529 if (qdf_atomic_read(&CE_state->rx_pending)) {
1530 qdf_atomic_set(&CE_state->rx_pending, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001531 ce_per_engine_service(scn, CE_id);
1532 }
1533 }
1534
Houston Hoffmanbac94542016-03-14 21:11:59 -07001535 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001536 return;
1537 }
1538
1539 intr_summary = CE_INTERRUPT_SUMMARY(scn);
1540
1541 for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
1542 if (intr_summary & (1 << CE_id)) {
1543 intr_summary &= ~(1 << CE_id);
1544 } else {
1545 continue; /* no intr pending on this CE */
1546 }
1547
1548 ce_per_engine_service(scn, CE_id);
1549 }
1550
Houston Hoffmanbac94542016-03-14 21:11:59 -07001551 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001552}
1553
1554/*
1555 * Adjust interrupts for the copy complete handler.
1556 * If it's needed for either send or recv, then unmask
1557 * this interrupt; otherwise, mask it.
1558 *
1559 * Called with target_lock held.
1560 */
1561static void
1562ce_per_engine_handler_adjust(struct CE_state *CE_state,
1563 int disable_copy_compl_intr)
1564{
1565 uint32_t ctrl_addr = CE_state->ctrl_addr;
Komal Seelam644263d2016-02-22 20:45:49 +05301566 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001567
1568 CE_state->disable_copy_compl_intr = disable_copy_compl_intr;
Houston Hoffmanbac94542016-03-14 21:11:59 -07001569
1570 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1571 return;
1572
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001573 if ((!disable_copy_compl_intr) &&
1574 (CE_state->send_cb || CE_state->recv_cb)) {
1575 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1576 } else {
1577 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1578 }
1579
1580 if (CE_state->watermark_cb) {
1581 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1582 } else {
1583 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1584 }
Houston Hoffmanbac94542016-03-14 21:11:59 -07001585 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001586}
1587
1588/* Iterate the CE_state list and disable the compl interrupt
1589 * if it has been registered already.
1590 */
Komal Seelam644263d2016-02-22 20:45:49 +05301591void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001592{
1593 int CE_id;
1594
Houston Hoffmanbac94542016-03-14 21:11:59 -07001595 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1596 return;
1597
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001598 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1599 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1600 uint32_t ctrl_addr = CE_state->ctrl_addr;
1601
1602 /* if the interrupt is currently enabled, disable it */
1603 if (!CE_state->disable_copy_compl_intr
1604 && (CE_state->send_cb || CE_state->recv_cb)) {
1605 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1606 }
1607
1608 if (CE_state->watermark_cb) {
1609 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1610 }
1611 }
Houston Hoffmanbac94542016-03-14 21:11:59 -07001612 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001613}
1614
Komal Seelam644263d2016-02-22 20:45:49 +05301615void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001616{
1617 int CE_id;
1618
Houston Hoffmanbac94542016-03-14 21:11:59 -07001619 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1620 return;
1621
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001622 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1623 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1624 uint32_t ctrl_addr = CE_state->ctrl_addr;
1625
1626 /*
1627 * If the CE is supposed to have copy complete interrupts
1628 * enabled (i.e. there a callback registered, and the
1629 * "disable" flag is not set), then re-enable the interrupt.
1630 */
1631 if (!CE_state->disable_copy_compl_intr
1632 && (CE_state->send_cb || CE_state->recv_cb)) {
1633 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1634 }
1635
1636 if (CE_state->watermark_cb) {
1637 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1638 }
1639 }
Houston Hoffmanbac94542016-03-14 21:11:59 -07001640 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001641}
1642
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001643/**
1644 * ce_send_cb_register(): register completion handler
1645 * @copyeng: CE_state representing the ce we are adding the behavior to
1646 * @fn_ptr: callback that the ce should use when processing tx completions
1647 * @disable_interrupts: whether the copy-complete interrupt should be disabled
1648 *
1649 * Caller should guarantee that no transactions are in progress before
1650 * switching the callback function.
1651 *
1652 * Registers the send context before the fn pointer so that if the cb is valid
1653 * the context should be valid.
1654 *
1655 * Beware that currently this function will enable completion interrupts.
1656 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001657void
1658ce_send_cb_register(struct CE_handle *copyeng,
1659 ce_send_cb fn_ptr,
1660 void *ce_send_context, int disable_interrupts)
1661{
1662 struct CE_state *CE_state = (struct CE_state *)copyeng;
1663
Sanjay Devnani9ce15772015-11-12 14:08:57 -08001664 if (CE_state == NULL) {
1665 pr_err("%s: Error CE state = NULL\n", __func__);
1666 return;
1667 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001668 CE_state->send_context = ce_send_context;
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001669 CE_state->send_cb = fn_ptr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001670 ce_per_engine_handler_adjust(CE_state, disable_interrupts);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001671}
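/*
 * Illustrative usage sketch (not part of the original file; the pipe and
 * handler names are hypothetical): a HIF pipe-setup path could register
 * its send-completion handler roughly as below. The handler must match
 * the ce_send_cb typedef from ce_api.h, and the context registered here
 * is handed back to that handler on each completion.
 *
 *	ce_send_cb_register(pipe_info->ce_hdl, my_pipe_send_done,
 *			    pipe_info, 0);	// 0: keep interrupts enabled
 */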
1672
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001673/**
1674 * ce_recv_cb_register(): register completion handler
1675 * @copyeng: CE_state representing the ce we are adding the behavior to
1676 * @fn_ptr: callback that the ce should use when processing rx completions
1677 * @disable_interrupts: whether the copy-complete interrupt should be disabled
1678 *
1679 * Registers the send context before the fn pointer so that if the cb is valid
1680 * the context should be valid.
1681 *
1682 * Caller should guarantee that no transactions are in progress before
1683 * switching the callback function.
1684 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001685void
1686ce_recv_cb_register(struct CE_handle *copyeng,
1687 CE_recv_cb fn_ptr,
1688 void *CE_recv_context, int disable_interrupts)
1689{
1690 struct CE_state *CE_state = (struct CE_state *)copyeng;
1691
Sanjay Devnani9ce15772015-11-12 14:08:57 -08001692 if (CE_state == NULL) {
1693 pr_err("%s: ERROR CE state = NULL\n", __func__);
1694 return;
1695 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001696 CE_state->recv_context = CE_recv_context;
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001697 CE_state->recv_cb = fn_ptr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001698 ce_per_engine_handler_adjust(CE_state, disable_interrupts);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001699}
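/*
 * Illustrative usage sketch (hypothetical names): registering a receive
 * completion handler mirrors the send case; the handler must match the
 * CE_recv_cb typedef from ce_api.h.
 *
 *	ce_recv_cb_register(pipe_info->ce_hdl, my_pipe_recv_done,
 *			    pipe_info, 0);
 */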
1700
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001701/**
1702 * ce_watermark_cb_register(): register completion handler
1703 * @copyeng: CE_state representing the ce we are adding the behavior to
1704 * @fn_ptr: callback that the ce should use when processing watermark events
1705 *
1706 * Caller should guarantee that no watermark events are being processed before
1707 * switching the callback function.
1708 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001709void
1710ce_watermark_cb_register(struct CE_handle *copyeng,
1711 CE_watermark_cb fn_ptr, void *CE_wm_context)
1712{
1713 struct CE_state *CE_state = (struct CE_state *)copyeng;
1714
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001715 CE_state->watermark_cb = fn_ptr;
1716 CE_state->wm_context = CE_wm_context;
1717 ce_per_engine_handler_adjust(CE_state, 0);
1718 if (fn_ptr) {
1719 CE_state->misc_cbs = 1;
1720 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001721}
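/*
 * Illustrative usage sketch (hypothetical handler name): a client that
 * wants ring watermark notifications registers a handler matching the
 * CE_watermark_cb typedef. Passing a non-NULL handler also sets
 * misc_cbs, so watermark status is polled in ce_per_engine_service().
 *
 *	ce_watermark_cb_register(ce_hdl, my_watermark_cb, my_wm_context);
 */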
1722
1723#ifdef WLAN_FEATURE_FASTPATH
1724/**
1725 * ce_pkt_dl_len_set() - set the HTT packet download length
1726 * @hif_sc: HIF context
1727 * @pkt_download_len: download length
1728 *
1729 * Return: None
1730 */
1731void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
1732{
Komal Seelam644263d2016-02-22 20:45:49 +05301733 struct hif_softc *sc = (struct hif_softc *)(hif_sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001734 struct CE_state *ce_state = sc->ce_id_to_state[CE_HTT_H2T_MSG];
1735
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301736 qdf_assert_always(ce_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001737
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001738 ce_state->download_len = pkt_download_len;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001739
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301740 qdf_print("%s CE %d Pkt download length %d", __func__,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001741 ce_state->id, ce_state->download_len);
1742}
1743#else
1744void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
1745{
1746}
1747#endif /* WLAN_FEATURE_FASTPATH */
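/*
 * Illustrative usage sketch (the length value is a placeholder): the HTT
 * attach path could advertise how many bytes of each packet are downloaded
 * over the CE_HTT_H2T_MSG copy engine.
 *
 *	ce_pkt_dl_len_set(hif_sc, pkt_download_len);
 */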
1748
Komal Seelam644263d2016-02-22 20:45:49 +05301749bool ce_get_rx_pending(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001750{
1751 int CE_id;
1752
1753 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1754 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301755 if (qdf_atomic_read(&CE_state->rx_pending))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001756 return true;
1757 }
1758
1759 return false;
1760}
1761
1762/**
1763 * ce_check_rx_pending() - check whether a copy engine has a pending rx completion
Komal Seelam644263d2016-02-22 20:45:49 +05301764 * @scn: HIF context
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001765 * @ce_id: id of the copy engine to check
1766 *
1767 * Return: true if the given copy engine has rx pending, false otherwise
1768 */
Komal Seelam644263d2016-02-22 20:45:49 +05301769bool ce_check_rx_pending(struct hif_softc *scn, int ce_id)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001770{
1771 struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301772 if (qdf_atomic_read(&CE_state->rx_pending))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001773 return true;
1774 else
1775 return false;
1776}
Houston Hoffman8ed92e52015-09-02 14:49:48 -07001777
1778/**
1779 * ce_enable_msi(): write the msi configuration to the target
1780 * @scn: hif context
1781 * @CE_id: which copy engine will be configured for msi interrupts
1782 * @msi_addr_lo: Hardware will write to this address to generate an interrupt
1783 * @msi_addr_hi: Hardware will write to this address to generate an interrupt
1784 * @msi_data: Hardware will write this data to generate an interrupt
1785 *
1786 * should be done in the initialization sequence so no locking would be needed
1787 * This should be done during the initialization sequence, so no locking is needed.
Komal Seelam644263d2016-02-22 20:45:49 +05301788void ce_enable_msi(struct hif_softc *scn, unsigned int CE_id,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001789 uint32_t msi_addr_lo, uint32_t msi_addr_hi,
1790 uint32_t msi_data)
1791{
1792#ifdef WLAN_ENABLE_QCA6180
1793 struct CE_state *CE_state;
1794 A_target_id_t targid;
1795 u_int32_t ctrl_addr;
1796 uint32_t tmp;
1797
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001798 CE_state = scn->ce_id_to_state[CE_id];
1799 if (!CE_state) {
1800 HIF_ERROR("%s: error - CE_state = NULL", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001801 return;
1802 }
1803 targid = TARGID(scn);
1804 ctrl_addr = CE_state->ctrl_addr;
1805 CE_MSI_ADDR_LOW_SET(scn, ctrl_addr, msi_addr_lo);
1806 CE_MSI_ADDR_HIGH_SET(scn, ctrl_addr, msi_addr_hi);
1807 CE_MSI_DATA_SET(scn, ctrl_addr, msi_data);
1808 tmp = CE_CTRL_REGISTER1_GET(scn, ctrl_addr);
1809 tmp |= (1 << CE_MSI_ENABLE_BIT);
1810 CE_CTRL_REGISTER1_SET(scn, ctrl_addr, tmp);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001811#endif
1812}
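/*
 * Illustrative usage sketch (placeholder values, not real MSI programming
 * for any specific target): during CE initialization the MSI address/data
 * pair obtained from the platform could be programmed as below.
 *
 *	ce_enable_msi(scn, CE_id,
 *		      msi_addr & 0xffffffff,	// msi_addr_lo
 *		      msi_addr >> 32,		// msi_addr_hi
 *		      msi_data);
 */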
1813
1814#ifdef IPA_OFFLOAD
Leo Changd85f78d2015-11-13 10:55:34 -08001815/**
1816 * ce_ipa_get_resource() - get uc resource on copyengine
1817 * @ce: copyengine context
1818 * @ce_sr_base_paddr: copyengine source ring base physical address
1819 * @ce_sr_ring_size: copyengine source ring size
1820 * @ce_reg_paddr: copyengine register physical address
1821 *
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001822 * The copy engine should release these resources to the micro controller.
1823 * The micro controller needs:
Leo Changd85f78d2015-11-13 10:55:34 -08001824 * - Copy engine source descriptor base address
1825 * - Copy engine source descriptor size
1826 * - PCI BAR address to access the copy engine register
1827 *
1828 * Return: None
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001829 */
1830void ce_ipa_get_resource(struct CE_handle *ce,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301831 qdf_dma_addr_t *ce_sr_base_paddr,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001832 uint32_t *ce_sr_ring_size,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301833 qdf_dma_addr_t *ce_reg_paddr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001834{
1835 struct CE_state *CE_state = (struct CE_state *)ce;
1836 uint32_t ring_loop;
1837 struct CE_src_desc *ce_desc;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301838 qdf_dma_addr_t phy_mem_base;
Komal Seelam644263d2016-02-22 20:45:49 +05301839 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001840
1841 if (CE_RUNNING != CE_state->state) {
1842 *ce_sr_base_paddr = 0;
1843 *ce_sr_ring_size = 0;
1844 return;
1845 }
1846
1847 /* Update default value for descriptor */
1848 for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
1849 ring_loop++) {
1850 ce_desc = (struct CE_src_desc *)
1851 ((char *)CE_state->src_ring->base_addr_owner_space +
1852 ring_loop * (sizeof(struct CE_src_desc)));
1853 CE_IPA_RING_INIT(ce_desc);
1854 }
1855
1856 /* Get BAR address */
1857 hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
1858
Leo Changd85f78d2015-11-13 10:55:34 -08001859 *ce_sr_base_paddr = CE_state->src_ring->base_addr_CE_space;
1860 *ce_sr_ring_size = (uint32_t) (CE_state->src_ring->nentries *
1861 sizeof(struct CE_src_desc));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001862 *ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
1863 SR_WR_INDEX_ADDRESS;
1864 return;
1865}
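/*
 * Illustrative usage sketch (variable names are hypothetical): the IPA
 * micro-controller setup path could fetch the CE source-ring resources
 * as below, provided the CE is already in the CE_RUNNING state.
 *
 *	qdf_dma_addr_t sr_base, reg_base;
 *	uint32_t sr_size;
 *
 *	ce_ipa_get_resource(ce_hdl, &sr_base, &sr_size, &reg_base);
 */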
1866#endif /* IPA_OFFLOAD */
1867