/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <osdep.h>
#include "a_types.h"
#include <athdefs.h>
#include "osapi_linux.h"
#include "hif.h"
#include "hif_io32.h"
#include "ce_api.h"
#include "ce_main.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "qdf_lock.h"
#include "regtable.h"
#include "epping_main.h"
#include "hif_main.h"
#include "hif_debug.h"

#ifdef IPA_OFFLOAD
#ifdef QCA_WIFI_3_0
#define CE_IPA_RING_INIT(ce_desc) \
	do { \
		ce_desc->gather = 0; \
		ce_desc->enable_11h = 0; \
		ce_desc->meta_data_low = 0; \
		ce_desc->packet_result_offset = 64; \
		ce_desc->toeplitz_hash_enable = 0; \
		ce_desc->addr_y_search_disable = 0; \
		ce_desc->addr_x_search_disable = 0; \
		ce_desc->misc_int_disable = 0; \
		ce_desc->target_int_disable = 0; \
		ce_desc->host_int_disable = 0; \
		ce_desc->dest_byte_swap = 0; \
		ce_desc->byte_swap = 0; \
		ce_desc->type = 2; \
		ce_desc->tx_classify = 1; \
		ce_desc->buffer_addr_hi = 0; \
		ce_desc->meta_data = 0; \
		ce_desc->nbytes = 128; \
	} while (0)
#else
#define CE_IPA_RING_INIT(ce_desc) \
	do { \
		ce_desc->byte_swap = 0; \
		ce_desc->nbytes = 60; \
		ce_desc->gather = 0; \
	} while (0)
#endif /* QCA_WIFI_3_0 */
#endif /* IPA_OFFLOAD */

static int war1_allow_sleep;
/* io32 write workaround */
static int hif_ce_war1;

#ifdef CONFIG_SLUB_DEBUG_ON

/**
 * struct hif_ce_event - structure for detailing a ce event
 * @type: what the event was
 * @time: when it happened
 * @descriptor: descriptor enqueued or dequeued
 * @memory: virtual address that was used
 * @index: location of the descriptor in the ce ring
 */
struct hif_ce_desc_event {
	uint16_t index;
	enum hif_ce_event_type type;
	uint64_t time;
	union ce_desc descriptor;
	void *memory;
};

/* max history to record per copy engine */
#define HIF_CE_HISTORY_MAX 512
qdf_atomic_t hif_ce_desc_history_index[CE_COUNT_MAX];
struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];


/**
 * get_next_record_index() - get the next record index
 * @table_index: atomic index variable to increment
 * @array_size: array size of the circular buffer
 *
 * Increment the atomic index and reserve the value.
 * Takes care of buffer wrap.
 * Guaranteed to be thread safe as long as fewer than array_size contexts
 * try to access the array. If there are more than array_size contexts
 * trying to access the array, full locking of the recording process would
 * be needed to have sane logging.
 */
static int get_next_record_index(qdf_atomic_t *table_index, int array_size)
{
	int record_index = qdf_atomic_inc_return(table_index);
	if (record_index == array_size)
		qdf_atomic_sub(array_size, table_index);

	while (record_index >= array_size)
		record_index -= array_size;
	return record_index;
}

/**
 * hif_record_ce_desc_event() - record ce descriptor events
 * @scn: hif_softc
 * @ce_id: which ce is the event occurring on
 * @type: what happened
 * @descriptor: pointer to the descriptor posted/completed
 * @memory: virtual address of buffer related to the descriptor
 * @index: index that the descriptor was/will be at.
 */
void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
				enum hif_ce_event_type type,
				union ce_desc *descriptor,
				void *memory, int index)
{
	struct hif_callbacks *cbk = hif_get_callbacks_handle(scn);
	int record_index = get_next_record_index(
			&hif_ce_desc_history_index[ce_id], HIF_CE_HISTORY_MAX);

	struct hif_ce_desc_event *event =
		&hif_ce_desc_history[ce_id][record_index];
	event->type = type;

	if (cbk && cbk->get_monotonic_boottime)
		event->time = cbk->get_monotonic_boottime();
	else
		event->time = ((uint64_t)qdf_system_ticks_to_msecs(
					qdf_system_ticks()) * 1000);

	if (descriptor != NULL)
		event->descriptor = *descriptor;
	else
		memset(&event->descriptor, 0, sizeof(union ce_desc));
	event->memory = memory;
	event->index = index;
}

/**
 * ce_init_ce_desc_event_log() - initialize the ce event log
 * @ce_id: copy engine id for which we are initializing the log
 * @size: size of array to dedicate
 *
 * Currently the passed size is ignored in favor of a precompiled value.
 */
void ce_init_ce_desc_event_log(int ce_id, int size)
{
	qdf_atomic_init(&hif_ce_desc_history_index[ce_id]);
}
#else
void hif_record_ce_desc_event(struct hif_softc *scn,
		int ce_id, enum hif_ce_event_type type,
		union ce_desc *descriptor, void *memory,
		int index)
{
}

inline void ce_init_ce_desc_event_log(int ce_id, int size)
{
}
#endif

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 * a source ring
 * a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem -- than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 * Target TX hash result: toeplitz_hash_result
 */

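/*
 * Illustrative sketch (not built with the driver): one way a HIF-level
 * caller might pair the per-transfer contexts described above with the
 * send and receive paths. The handles, nbufs, transfer id and the
 * CE_SERVICE_EXAMPLE_SKETCH guard are hypothetical and exist only for
 * illustration; the ce_* calls themselves are the ones implemented in
 * this file (and declared in ce_api.h).
 */
#ifdef CE_SERVICE_EXAMPLE_SKETCH
static int ce_example_post_and_send(struct CE_handle *ce_send_handle,
				    struct CE_handle *ce_recv_handle,
				    qdf_nbuf_t rx_nbuf, qdf_nbuf_t tx_nbuf,
				    uint32_t transfer_id)
{
	int status;

	/* Supply an anonymous receive buffer; the nbuf pointer passed as
	 * per_recv_context is echoed back when the recv callback fires.
	 */
	status = ce_recv_buf_enqueue(ce_recv_handle, rx_nbuf,
				     qdf_nbuf_get_frag_paddr(rx_nbuf, 0));
	if (status != QDF_STATUS_SUCCESS)
		return status;

	/* Post a simple (non-gather) send; flags == 0 leaves byte swapping
	 * at the CE attribute default. tx_nbuf is echoed back as the
	 * per-transfer context on send completion.
	 */
	return ce_send(ce_send_handle, tx_nbuf,
		       qdf_nbuf_get_frag_paddr(tx_nbuf, 0),
		       qdf_nbuf_len(tx_nbuf), transfer_id, 0, 0);
}
#endif /* CE_SERVICE_EXAMPLE_SKETCH */
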
227/*
228 * Guts of ce_send, used by both ce_send and ce_sendlist_send.
229 * The caller takes responsibility for any needed locking.
230 */
231int
232ce_completed_send_next_nolock(struct CE_state *CE_state,
233 void **per_CE_contextp,
234 void **per_transfer_contextp,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530235 qdf_dma_addr_t *bufferp,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800236 unsigned int *nbytesp,
237 unsigned int *transfer_idp,
238 unsigned int *sw_idx, unsigned int *hw_idx,
239 uint32_t *toeplitz_hash_result);
240
Komal Seelam644263d2016-02-22 20:45:49 +0530241void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800242 u32 ctrl_addr, unsigned int write_index)
243{
244 if (hif_ce_war1) {
245 void __iomem *indicator_addr;
246
247 indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;
248
249 if (!war1_allow_sleep
250 && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
251 hif_write32_mb(indicator_addr,
252 (CDC_WAR_MAGIC_STR | write_index));
253 } else {
254 unsigned long irq_flags;
255 local_irq_save(irq_flags);
256 hif_write32_mb(indicator_addr, 1);
257
258 /*
259 * PCIE write waits for ACK in IPQ8K, there is no
260 * need to read back value.
261 */
262 (void)hif_read32_mb(indicator_addr);
263 (void)hif_read32_mb(indicator_addr); /* conservative */
264
265 CE_SRC_RING_WRITE_IDX_SET(scn,
266 ctrl_addr, write_index);
267
268 hif_write32_mb(indicator_addr, 0);
269 local_irq_restore(irq_flags);
270 }
271 } else
272 CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
273}
274
int
ce_send_nolock(struct CE_handle *copyeng,
	       void *per_transfer_context,
	       qdf_dma_addr_t buffer,
	       uint32_t nbytes,
	       uint32_t transfer_id,
	       uint32_t flags,
	       uint32_t user_flags)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return QDF_STATUS_E_FAILURE;
	if (unlikely(CE_RING_DELTA(nentries_mask,
				write_index, sw_index - 1) <= 0)) {
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		Q_TARGET_ACCESS_END(scn);
		return QDF_STATUS_E_FAILURE;
	}
	{
		enum hif_ce_event_type event_type = HIF_TX_GATHER_DESC_POST;
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		/* Update low 32 bits source descriptor address */
		shadow_src_desc->buffer_addr =
			(uint32_t)(dma_addr & 0xFFFFFFFF);
#ifdef QCA_WIFI_3_0
		shadow_src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0x1F);
		user_flags |= shadow_src_desc->buffer_addr_hi;
		memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
		       sizeof(uint32_t));
#endif
		shadow_src_desc->meta_data = transfer_id;

		/*
		 * Set the swap bit if:
		 * typical sends on this CE are swapped (host is big-endian)
		 * and this send doesn't disable the swapping
		 * (data is not bytestream)
		 */
		shadow_src_desc->byte_swap =
			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
			 != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
		shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
		shadow_src_desc->nbytes = nbytes;

		*src_desc = *shadow_src_desc;

		src_ring->per_transfer_context[write_index] =
			per_transfer_context;

		/* Update Source Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		/* WORKAROUND */
		if (!shadow_src_desc->gather) {
			event_type = HIF_TX_DESC_POST;
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);
		}

		/* src_ring->write_index hasn't been updated even though
		 * the register has already been written to.
		 */
		hif_record_ce_desc_event(scn, CE_state->id, event_type,
			(union ce_desc *) shadow_src_desc, per_transfer_context,
			src_ring->write_index);

		src_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	}
	Q_TARGET_ACCESS_END(scn);
	return status;
}

int
ce_send(struct CE_handle *copyeng,
	void *per_transfer_context,
	qdf_dma_addr_t buffer,
	uint32_t nbytes,
	uint32_t transfer_id,
	uint32_t flags,
	uint32_t user_flag)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	int status;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	status = ce_send_nolock(copyeng, per_transfer_context, buffer, nbytes,
				transfer_id, flags, user_flag);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

unsigned int ce_sendlist_sizeof(void)
{
	return sizeof(struct ce_sendlist);
}

void ce_sendlist_init(struct ce_sendlist *sendlist)
{
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	sl->num_items = 0;
}

int
ce_sendlist_buf_add(struct ce_sendlist *sendlist,
		    qdf_dma_addr_t buffer,
		    uint32_t nbytes,
		    uint32_t flags,
		    uint32_t user_flags)
{
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	unsigned int num_items = sl->num_items;
	struct ce_sendlist_item *item;

	if (num_items >= CE_SENDLIST_ITEMS_MAX) {
		QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
		return QDF_STATUS_E_RESOURCES;
	}

	item = &sl->item[num_items];
	item->send_type = CE_SIMPLE_BUFFER_TYPE;
	item->data = buffer;
	item->u.nbytes = nbytes;
	item->flags = flags;
	item->user_flags = user_flags;
	sl->num_items = num_items + 1;
	return QDF_STATUS_SUCCESS;
}

int
ce_sendlist_send(struct CE_handle *copyeng,
		 void *per_transfer_context,
		 struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	int status = -ENOMEM;
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int num_items = sl->num_items;
	unsigned int sw_index;
	unsigned int write_index;

	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
	    num_items) {
		struct ce_sendlist_item *item;
		int i;

		/* handle all but the last item uniformly */
		for (i = 0; i < num_items - 1; i++) {
			item = &sl->item[i];
			/* TBDXXX: Support extensible sendlist_types? */
			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
			status = ce_send_nolock(copyeng, CE_SENDLIST_ITEM_CTXT,
				(qdf_dma_addr_t) item->data,
				item->u.nbytes, transfer_id,
				item->flags | CE_SEND_FLAG_GATHER,
				item->user_flags);
			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		}
		/* provide valid context pointer for final item */
		item = &sl->item[i];
		/* TBDXXX: Support extensible sendlist_types? */
		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
		status = ce_send_nolock(copyeng, per_transfer_context,
					(qdf_dma_addr_t) item->data,
					item->u.nbytes,
					transfer_id, item->flags,
					item->user_flags);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
					QDF_NBUF_TX_PKT_CE);
		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
			(uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
			sizeof(((qdf_nbuf_t)per_transfer_context)->data)));
	} else {
		/*
		 * Probably not worth the additional complexity to support
		 * partial sends with continuation or notification. We expect
		 * to use large rings and small sendlists. If we can't handle
		 * the entire request at once, punt it back to the caller.
		 */
	}
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

#ifdef WLAN_FEATURE_FASTPATH
#ifdef QCA_WIFI_3_0
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
		      uint64_t dma_addr,
		      uint32_t user_flags)
{
	shadow_src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0x1F);
	user_flags |= shadow_src_desc->buffer_addr_hi;
	memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
	       sizeof(uint32_t));
}
#else
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
		      uint64_t dma_addr,
		      uint32_t user_flags)
{
}
#endif

/**
 * ce_send_fast() - CE layer Tx buffer posting function
 * @copyeng: copy engine handle
 * @msdus: array of msdus to be sent
 * @num_msdus: number of msdus in the array
 * @transfer_id: transfer_id
 *
 * Assumption: Called with an array of MSDUs
 * Function:
 * For each msdu in the array
 * 1. Check no. of available entries
 * 2. Create src ring entries (allocated in consistent memory)
 * 3. Write index to h/w
 *
 * Return: No. of packets that could be sent
 */

int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t *msdus,
		 unsigned int num_msdus, unsigned int transfer_id)
{
	struct CE_state *ce_state = (struct CE_state *)copyeng;
	struct hif_softc *scn = ce_state->scn;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct CE_ring_state *src_ring = ce_state->src_ring;
	u_int32_t ctrl_addr = ce_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	unsigned int frag_len;
	qdf_nbuf_t msdu;
	int i;
	uint64_t dma_addr;
	uint32_t user_flags = 0;

	qdf_spin_lock_bh(&ce_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	/* 2 msdus per packet */
	for (i = 0; i < num_msdus; i++) {
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		hif_pm_runtime_get_noresume(hif_hdl);
		msdu = msdus[i];

		/*
		 * First fill out the ring descriptor for the HTC HTT frame
		 * header. These are uncached writes. Should we use a local
		 * structure instead?
		 */
		/* HTT/HTC header can be passed as an argument */
		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);

		shadow_src_desc->meta_data = transfer_id;
		shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);

		/*
		 * HTC HTT header is a word stream, so byte swap if CE byte
		 * swap enabled
		 */
		shadow_src_desc->byte_swap = ((ce_state->attr_flags &
					CE_ATTR_BYTE_SWAP_DATA) != 0);
		/* For the first one, it still does not need to write */
		shadow_src_desc->gather = 1;
		*src_desc = *shadow_src_desc;

		/* By default we could initialize the transfer context to this
		 * value
		 */
		src_ring->per_transfer_context[write_index] =
			CE_SENDLIST_ITEM_CTXT;

		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
		/*
		 * Now fill out the ring descriptor for the actual data
		 * packet
		 */
		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		/*
		 * Clear packet offset for all but the first CE desc.
		 */
		user_flags &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
		shadow_src_desc->meta_data = transfer_id;

		/* get actual packet length */
		frag_len = qdf_nbuf_get_frag_len(msdu, 1);

		/* only read download_len once */
		shadow_src_desc->nbytes = ce_state->download_len;
		if (shadow_src_desc->nbytes > frag_len)
			shadow_src_desc->nbytes = frag_len;

		/* Data packet is a byte stream, so disable byte swap */
		shadow_src_desc->byte_swap = 0;
		/* For the last one, gather is not set */
		shadow_src_desc->gather = 0;
		*src_desc = *shadow_src_desc;
		src_ring->per_transfer_context[write_index] = msdu;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	}

	/* Write the final index to h/w one-shot */
	if (i) {
		src_ring->write_index = write_index;

		if (hif_pm_runtime_get(hif_hdl) == 0) {
			/* Don't call WAR_XXX from here
			 * Just call XXX instead, that has the reqd. intel
			 */
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);
			hif_pm_runtime_put(hif_hdl);
		}
	}

	qdf_spin_unlock_bh(&ce_state->ce_index_lock);

	/*
	 * If all packets in the array are transmitted,
	 * i = num_msdus
	 * Temporarily add an ASSERT
	 */
	ASSERT(i == num_msdus);
	return i;
}
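
/*
 * Illustrative sketch (not built with the driver): how the fastpath
 * entry point above might be driven by a caller that already has an
 * array of two-fragment msdus (fragment 0 holding the HTC/HTT header,
 * fragment 1 the data). The handle, array, count and transfer id are
 * hypothetical, as is the CE_SERVICE_EXAMPLE_SKETCH guard; only
 * ce_send_fast() itself is defined in this file.
 */
#ifdef CE_SERVICE_EXAMPLE_SKETCH
static void ce_example_fastpath_send(struct CE_handle *ce_tx_handle,
				     qdf_nbuf_t *msdus,
				     unsigned int num_msdus)
{
	/* ce_send_fast() consumes two source-ring entries per msdu and
	 * writes the hardware index once for the whole batch; it returns
	 * the number of msdus it posted.
	 */
	int posted = ce_send_fast(ce_tx_handle, msdus, num_msdus,
				  0 /* hypothetical transfer_id */);

	if (posted != (int)num_msdus)
		HIF_ERROR("%s: only %d of %u msdus posted",
			  __func__, posted, num_msdus);
}
#endif /* CE_SERVICE_EXAMPLE_SKETCH */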
#endif /* WLAN_FEATURE_FASTPATH */

/**
 * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: 0 if the buffer is enqueued
 */
int
ce_recv_buf_enqueue(struct CE_handle *copyeng,
		    void *per_recv_context, qdf_dma_addr_t buffer)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return -EIO;
	}

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
		struct CE_dest_desc *dest_ring_base =
			(struct CE_dest_desc *)dest_ring->
			    base_addr_owner_space;
		struct CE_dest_desc *dest_desc =
			CE_DEST_RING_TO_DESC(dest_ring_base, write_index);

		/* Update low 32 bit destination descriptor */
		dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
#ifdef QCA_WIFI_3_0
		dest_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0x1F);
#endif
		dest_desc->nbytes = 0;

		dest_ring->per_transfer_context[write_index] =
			per_recv_context;

		hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_POST,
				(union ce_desc *) dest_desc, per_recv_context,
				write_index);

		/* Update Destination Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
		CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
		dest_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	} else {
		status = QDF_STATUS_E_FAILURE;
	}
	Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
	return status;
}

void
ce_send_watermarks_set(struct CE_handle *copyeng,
		       unsigned int low_alert_nentries,
		       unsigned int high_alert_nentries)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
}

void
ce_recv_watermarks_set(struct CE_handle *copyeng,
		       unsigned int low_alert_nentries,
		       unsigned int high_alert_nentries)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
				 low_alert_nentries);
	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
				  high_alert_nentries);
}

unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;

	qdf_spin_lock(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}

unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;

	qdf_spin_lock(&CE_state->ce_index_lock);
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}

/*
 * Guts of ce_send_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
unsigned int
ce_send_entries_done_nolock(struct hif_softc *scn,
			    struct CE_state *CE_state)
{
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int read_index;

	sw_index = src_ring->sw_index;
	read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);

	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
}

unsigned int ce_send_entries_done(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int nentries;

	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries = ce_send_entries_done_nolock(CE_state->scn, CE_state);
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return nentries;
}

/*
 * Guts of ce_recv_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
unsigned int
ce_recv_entries_done_nolock(struct hif_softc *scn,
			    struct CE_state *CE_state)
{
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int read_index;

	sw_index = dest_ring->sw_index;
	read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);

	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
}

unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int nentries;

	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries = ce_recv_entries_done_nolock(CE_state->scn, CE_state);
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return nentries;
}

/* Debug support */
void *ce_debug_cmplrn_context;  /* completed recv next context */
void *ce_debug_cnclsn_context;  /* cancel send next context */
void *ce_debug_rvkrn_context;   /* revoke receive next context */
void *ce_debug_cmplsn_context;  /* completed send next context */

/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
int
ce_completed_recv_next_nolock(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      qdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *flagsp)
{
	int status;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct CE_dest_desc *dest_ring_base =
		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
	struct CE_dest_desc *dest_desc =
		CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
	int nbytes;
	struct CE_dest_desc dest_desc_info;
	/*
	 * By copying the dest_desc_info element to local memory, we could
	 * avoid extra memory read from non-cachable memory.
	 */
	dest_desc_info = *dest_desc;
	nbytes = dest_desc_info.nbytes;
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_COMPLETION,
			(union ce_desc *) dest_desc,
			dest_ring->per_transfer_context[sw_index],
			sw_index);

	dest_desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
	*nbytesp = nbytes;
	*transfer_idp = dest_desc_info.meta_data;
	*flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

	if (per_CE_contextp) {
		*per_CE_contextp = CE_state->recv_context;
	}

	ce_debug_cmplrn_context = dest_ring->per_transfer_context[sw_index];
	if (per_transfer_contextp) {
		*per_transfer_contextp = ce_debug_cmplrn_context;
	}
	dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	status = QDF_STATUS_SUCCESS;

done:
	return status;
}

int
ce_completed_recv_next(struct CE_handle *copyeng,
		       void **per_CE_contextp,
		       void **per_transfer_contextp,
		       qdf_dma_addr_t *bufferp,
		       unsigned int *nbytesp,
		       unsigned int *transfer_idp, unsigned int *flagsp)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	int status;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	status =
		ce_completed_recv_next_nolock(CE_state, per_CE_contextp,
					      per_transfer_contextp, bufferp,
					      nbytesp, transfer_idp, flagsp);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

/* NB: Modeled after ce_completed_recv_next_nolock */
QDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state;
	struct CE_ring_state *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	QDF_STATUS status;
	struct hif_softc *scn;

	CE_state = (struct CE_state *)copyeng;
	dest_ring = CE_state->dest_ring;
	if (!dest_ring) {
		return QDF_STATUS_E_FAILURE;
	}

	scn = CE_state->scn;
	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct CE_dest_desc *dest_ring_base =
			(struct CE_dest_desc *)dest_ring->
			    base_addr_owner_space;
		struct CE_dest_desc *dest_desc =
			CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);

		if (per_CE_contextp) {
			*per_CE_contextp = CE_state->recv_context;
		}

		ce_debug_rvkrn_context =
			dest_ring->per_transfer_context[sw_index];
		if (per_transfer_contextp) {
			*per_transfer_contextp = ce_debug_rvkrn_context;
		}
		dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	} else {
		status = QDF_STATUS_E_FAILURE;
	}
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return status;
}

/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
int
ce_completed_send_next_nolock(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      qdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *sw_idx,
			      unsigned int *hw_idx,
			      uint32_t *toeplitz_hash_result)
{
	int status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int read_index;
	struct hif_softc *scn = CE_state->scn;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return QDF_STATUS_E_FAILURE;
		src_ring->hw_index =
			CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return QDF_STATUS_E_FAILURE;
	}
	read_index = src_ring->hw_index;

	if (sw_idx)
		*sw_idx = sw_index;

	if (hw_idx)
		*hw_idx = read_index;

	if ((read_index != sw_index) && (read_index != 0xffffffff)) {
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, sw_index);
#ifdef QCA_WIFI_3_0
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
#endif
		hif_record_ce_desc_event(scn, CE_state->id,
				HIF_TX_DESC_COMPLETION,
				(union ce_desc *) shadow_src_desc,
				src_ring->per_transfer_context[sw_index],
				sw_index);

		/* Return data from completed source descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
		*nbytesp = shadow_src_desc->nbytes;
		*transfer_idp = shadow_src_desc->meta_data;
#ifdef QCA_WIFI_3_0
		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
#else
		*toeplitz_hash_result = 0;
#endif
		if (per_CE_contextp) {
			*per_CE_contextp = CE_state->send_context;
		}

		ce_debug_cmplsn_context =
			src_ring->per_transfer_context[sw_index];
		if (per_transfer_contextp) {
			*per_transfer_contextp = ce_debug_cmplsn_context;
		}
		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}

	return status;
}

/* NB: Modeled after ce_completed_send_next */
QDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp,
		    unsigned int *nbytesp,
		    unsigned int *transfer_idp,
		    uint32_t *toeplitz_hash_result)
{
	struct CE_state *CE_state;
	struct CE_ring_state *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	QDF_STATUS status;
	struct hif_softc *scn;

	CE_state = (struct CE_state *)copyeng;
	src_ring = CE_state->src_ring;
	if (!src_ring) {
		return QDF_STATUS_E_FAILURE;
	}

	scn = CE_state->scn;
	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);

		/* Return data from completed source descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
#ifdef QCA_WIFI_3_0
		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
#else
		*toeplitz_hash_result = 0;
#endif

		if (per_CE_contextp) {
			*per_CE_contextp = CE_state->send_context;
		}

		ce_debug_cnclsn_context =
			src_ring->per_transfer_context[sw_index];
		if (per_transfer_contextp) {
			*per_transfer_contextp = ce_debug_cnclsn_context;
		}
		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	} else {
		status = QDF_STATUS_E_FAILURE;
	}
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return status;
}

/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
#define CE_WM_SHFT 1

int
ce_completed_send_next(struct CE_handle *copyeng,
		       void **per_CE_contextp,
		       void **per_transfer_contextp,
		       qdf_dma_addr_t *bufferp,
		       unsigned int *nbytesp,
		       unsigned int *transfer_idp,
		       unsigned int *sw_idx,
		       unsigned int *hw_idx,
		       unsigned int *toeplitz_hash_result)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	int status;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	status =
		ce_completed_send_next_nolock(CE_state, per_CE_contextp,
					      per_transfer_contextp, bufferp,
					      nbytesp, transfer_idp, sw_idx,
					      hw_idx, toeplitz_hash_result);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

#ifdef ATH_11AC_TXCOMPACT
/* CE engine descriptor reap
 * Similar to ce_per_engine_service; the only difference is that
 * ce_per_engine_service does both receive and reaping of completed
 * descriptors, while this function only handles reaping of Tx complete
 * descriptors.
 * The function is called from the threshold reap poll routine
 * hif_send_complete_check, so it should not contain receive functionality.
 */

void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
{
	void *CE_context;
	void *transfer_context;
	qdf_dma_addr_t buf;
	unsigned int nbytes;
	unsigned int id;
	unsigned int sw_idx, hw_idx;
	uint32_t toeplitz_hash_result;
	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
			NULL, NULL, 0);

	/* Since this function is called from both user context and
	 * tasklet context the spinlock has to lock the bottom halves.
	 * This fix assumes that the ATH_11AC_TXCOMPACT flag is always
	 * enabled in TX polling mode. If this is not the case, more
	 * bottom-half spin lock changes are needed. Due to data path
	 * performance concerns, after internal discussion we've decided
	 * to make a minimal change, i.e., only address the issue that
	 * occurred in this function. The possible negative effect of this
	 * minimal change is that, if in the future some other function is
	 * also opened up for use from user context, those cases will need
	 * to be addressed by changing spin_lock to spin_lock_bh as well.
	 */

	qdf_spin_lock_bh(&CE_state->ce_index_lock);

	if (CE_state->send_cb) {
		{
			/* Pop completed send buffers and call the
			 * registered send callback for each
			 */
			while (ce_completed_send_next_nolock
				 (CE_state, &CE_context,
				  &transfer_context, &buf,
				  &nbytes, &id, &sw_idx, &hw_idx,
				  &toeplitz_hash_result) ==
				  QDF_STATUS_SUCCESS) {
				if (ce_id != CE_HTT_H2T_MSG) {
					qdf_spin_unlock_bh(
						&CE_state->ce_index_lock);
					CE_state->send_cb(
						(struct CE_handle *)
						CE_state, CE_context,
						transfer_context, buf,
						nbytes, id, sw_idx, hw_idx,
						toeplitz_hash_result);
					qdf_spin_lock_bh(
						&CE_state->ce_index_lock);
				} else {
					struct HIF_CE_pipe_info *pipe_info =
						(struct HIF_CE_pipe_info *)
						CE_context;

					qdf_spin_lock_bh(&pipe_info->
						 completion_freeq_lock);
					pipe_info->num_sends_allowed++;
					qdf_spin_unlock_bh(&pipe_info->
						   completion_freeq_lock);
				}
			}
		}
	}

	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
			NULL, NULL, 0);
	Q_TARGET_ACCESS_END(scn);
}

#endif /*ATH_11AC_TXCOMPACT */

/*
 * Number of times to check for any pending tx/rx completion on
 * a copy engine; this count should be big enough. Once we hit
 * this threshold we'll not check for any Tx/Rx completion in the same
 * interrupt handling. Note that this threshold is only used for
 * Rx interrupt processing; it can be used for Tx as well if we
 * suspect any infinite loop in checking for pending Tx completion.
 */
#define CE_TXRX_COMP_CHECK_THRESHOLD 20

/*
 * Guts of interrupt handler for per-engine interrupts on a particular CE.
 *
 * Invokes registered callbacks for recv_complete,
 * send_complete, and watermarks.
 *
 * Returns: number of messages processed
 */

Komal Seelam644263d2016-02-22 20:45:49 +05301299int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001300{
1301 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1302 uint32_t ctrl_addr = CE_state->ctrl_addr;
1303 void *CE_context;
1304 void *transfer_context;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301305 qdf_dma_addr_t buf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001306 unsigned int nbytes;
1307 unsigned int id;
1308 unsigned int flags;
1309 uint32_t CE_int_status;
1310 unsigned int more_comp_cnt = 0;
1311 unsigned int more_snd_comp_cnt = 0;
1312 unsigned int sw_idx, hw_idx;
1313 uint32_t toeplitz_hash_result;
Komal Seelambd7c51d2016-02-24 10:27:30 +05301314 uint32_t mode = hif_get_conparam(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001315
1316 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1317 HIF_ERROR("[premature rc=0]\n");
1318 return 0; /* no work done */
1319 }
1320
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301321 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001322
1323 /* Clear force_break flag and re-initialize receive_count to 0 */
1324
1325 /* NAPI: scn variables- thread/multi-processing safety? */
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001326 CE_state->receive_count = 0;
Houston Hoffman18c7fc52015-09-02 11:44:42 -07001327 CE_state->force_break = 0;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001328more_completions:
1329 if (CE_state->recv_cb) {
1330
1331 /* Pop completed recv buffers and call
1332 * the registered recv callback for each
1333 */
1334 while (ce_completed_recv_next_nolock
1335 (CE_state, &CE_context, &transfer_context,
1336 &buf, &nbytes, &id, &flags) ==
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301337 QDF_STATUS_SUCCESS) {
1338 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001339 CE_state->recv_cb((struct CE_handle *)CE_state,
1340 CE_context, transfer_context, buf,
1341 nbytes, id, flags);
1342
1343 /*
1344 * EV #112693 -
1345 * [Peregrine][ES1][WB342][Win8x86][Performance]
1346 * BSoD_0x133 occurred in VHT80 UDP_DL
1347			 * Break out of the DPC by force if the number of
1348			 * loops in hif_pci_ce_recv_data reaches
1349			 * MAX_NUM_OF_RECEIVES, to avoid spending too long in
1350			 * the DPC for a single interrupt. Schedule another
1351			 * DPC to avoid data loss if a force-break was taken.
1352			 * This currently applies to Windows only; Linux and
1353			 * macOS can adopt the same approach on their
1354			 * platforms if necessary.
1355 */
1356
1357			/* Break out of the receive loop by
1358			 * force if force_break is set
1359 */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301360 if (qdf_unlikely(CE_state->force_break)) {
1361 qdf_atomic_set(&CE_state->rx_pending, 1);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001362 CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1363 HOST_IS_COPY_COMPLETE_MASK);
1364 if (Q_TARGET_ACCESS_END(scn) < 0)
1365 HIF_ERROR("<--[premature rc=%d]\n",
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001366 CE_state->receive_count);
1367 return CE_state->receive_count;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001368 }
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301369 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001370 }
1371 }
1372
1373 /*
1374	 * Attention: the while loop below can potentially spin forever
1375	 * during a send stress test.
1376	 * Resolve it the same way as the receive case (refer to EV #112693).
1377 */
1378
1379 if (CE_state->send_cb) {
1380 /* Pop completed send buffers and call
1381 * the registered send callback for each
1382 */
1383
1384#ifdef ATH_11AC_TXCOMPACT
1385 while (ce_completed_send_next_nolock
1386 (CE_state, &CE_context,
1387 &transfer_context, &buf, &nbytes,
1388 &id, &sw_idx, &hw_idx,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301389 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001390
1391 if (CE_id != CE_HTT_H2T_MSG ||
Komal Seelambd7c51d2016-02-24 10:27:30 +05301392 WLAN_IS_EPPING_ENABLED(mode)) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301393 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001394 CE_state->send_cb((struct CE_handle *)CE_state,
1395 CE_context, transfer_context,
1396 buf, nbytes, id, sw_idx,
1397 hw_idx, toeplitz_hash_result);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301398 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001399 } else {
1400 struct HIF_CE_pipe_info *pipe_info =
1401 (struct HIF_CE_pipe_info *)CE_context;
1402
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301403 qdf_spin_lock(&pipe_info->
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001404 completion_freeq_lock);
1405 pipe_info->num_sends_allowed++;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301406 qdf_spin_unlock(&pipe_info->
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001407 completion_freeq_lock);
1408 }
1409 }
1410#else /*ATH_11AC_TXCOMPACT */
1411 while (ce_completed_send_next_nolock
1412 (CE_state, &CE_context,
1413 &transfer_context, &buf, &nbytes,
1414 &id, &sw_idx, &hw_idx,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301415 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1416 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001417 CE_state->send_cb((struct CE_handle *)CE_state,
1418 CE_context, transfer_context, buf,
1419 nbytes, id, sw_idx, hw_idx,
1420 toeplitz_hash_result);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301421 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001422 }
1423#endif /*ATH_11AC_TXCOMPACT */
1424 }
1425
1426more_watermarks:
1427 if (CE_state->misc_cbs) {
1428 CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
1429 if (CE_int_status & CE_WATERMARK_MASK) {
1430 if (CE_state->watermark_cb) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301431 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001432 /* Convert HW IS bits to software flags */
1433 flags =
1434 (CE_int_status & CE_WATERMARK_MASK) >>
1435 CE_WM_SHFT;
1436
1437 CE_state->
1438 watermark_cb((struct CE_handle *)CE_state,
1439 CE_state->wm_context, flags);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301440 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001441 }
1442 }
1443 }
1444
1445 /*
1446 * Clear the misc interrupts (watermark) that were handled above,
1447 * and that will be checked again below.
1448 * Clear and check for copy-complete interrupts again, just in case
1449 * more copy completions happened while the misc interrupts were being
1450 * handled.
1451 */
1452 CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1453 CE_WATERMARK_MASK |
1454 HOST_IS_COPY_COMPLETE_MASK);
1455
1456 /*
1457 * Now that per-engine interrupts are cleared, verify that
1458 * no recv interrupts arrive while processing send interrupts,
1459 * and no recv or send interrupts happened while processing
1460	 * misc interrupts. Go back and check again. Keep checking until
1461 * we find no more events to process.
1462 */
1463 if (CE_state->recv_cb && ce_recv_entries_done_nolock(scn, CE_state)) {
Komal Seelambd7c51d2016-02-24 10:27:30 +05301464 if (WLAN_IS_EPPING_ENABLED(mode) ||
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001465 more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1466 goto more_completions;
1467 } else {
1468 HIF_ERROR(
1469 "%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1470 __func__, CE_state->dest_ring->nentries_mask,
1471 CE_state->dest_ring->sw_index,
1472 CE_DEST_RING_READ_IDX_GET(scn,
1473 CE_state->ctrl_addr));
1474 }
1475 }
1476
1477 if (CE_state->send_cb && ce_send_entries_done_nolock(scn, CE_state)) {
Komal Seelambd7c51d2016-02-24 10:27:30 +05301478 if (WLAN_IS_EPPING_ENABLED(mode) ||
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001479 more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1480 goto more_completions;
1481 } else {
1482 HIF_ERROR(
1483 "%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1484 __func__, CE_state->src_ring->nentries_mask,
1485 CE_state->src_ring->sw_index,
1486 CE_SRC_RING_READ_IDX_GET(scn,
1487 CE_state->ctrl_addr));
1488 }
1489 }
1490
1491 if (CE_state->misc_cbs) {
1492 CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
1493 if (CE_int_status & CE_WATERMARK_MASK) {
1494 if (CE_state->watermark_cb) {
1495 goto more_watermarks;
1496 }
1497 }
1498 }
1499
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301500 qdf_spin_unlock(&CE_state->ce_index_lock);
1501 qdf_atomic_set(&CE_state->rx_pending, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001502
1503 if (Q_TARGET_ACCESS_END(scn) < 0)
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001504 HIF_ERROR("<--[premature rc=%d]\n", CE_state->receive_count);
1505 return CE_state->receive_count;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001506}
1507
1508/*
1509 * Handler for per-engine interrupts on ALL active CEs.
1510 * This is used in cases where the system is sharing a
1511 * single interrupt for all CEs.
1512 */
1513
Komal Seelam644263d2016-02-22 20:45:49 +05301514void ce_per_engine_service_any(int irq, struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001515{
1516 int CE_id;
1517 uint32_t intr_summary;
1518
Houston Hoffmanbac94542016-03-14 21:11:59 -07001519 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1520 return;
1521
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301522 if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001523 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1524 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301525 if (qdf_atomic_read(&CE_state->rx_pending)) {
1526 qdf_atomic_set(&CE_state->rx_pending, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001527 ce_per_engine_service(scn, CE_id);
1528 }
1529 }
1530
Houston Hoffmanbac94542016-03-14 21:11:59 -07001531 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001532 return;
1533 }
1534
1535 intr_summary = CE_INTERRUPT_SUMMARY(scn);
1536
1537 for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
1538 if (intr_summary & (1 << CE_id)) {
1539 intr_summary &= ~(1 << CE_id);
1540 } else {
1541 continue; /* no intr pending on this CE */
1542 }
1543
1544 ce_per_engine_service(scn, CE_id);
1545 }
1546
Houston Hoffmanbac94542016-03-14 21:11:59 -07001547 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001548}
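/*
 * Illustrative sketch: how a driver might drain all copy engines from a
 * shared-interrupt tasklet using ce_per_engine_service_any() and then poll a
 * single engine with ce_per_engine_service(). hif_example_tasklet and the
 * irq value 0 are hypothetical; only the two service calls come from this
 * file.
 *
 *	static void hif_example_tasklet(unsigned long data)
 *	{
 *		struct hif_softc *scn = (struct hif_softc *)data;
 *
 *		ce_per_engine_service_any(0, scn);
 *		ce_per_engine_service(scn, CE_HTT_H2T_MSG);
 *	}
 */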
1549
1550/*
1551 * Adjust interrupts for the copy complete handler.
1552 * If it's needed for either send or recv, then unmask
1553 * this interrupt; otherwise, mask it.
1554 *
1555 * Called with target_lock held.
1556 */
1557static void
1558ce_per_engine_handler_adjust(struct CE_state *CE_state,
1559 int disable_copy_compl_intr)
1560{
1561 uint32_t ctrl_addr = CE_state->ctrl_addr;
Komal Seelam644263d2016-02-22 20:45:49 +05301562 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001563
1564 CE_state->disable_copy_compl_intr = disable_copy_compl_intr;
Houston Hoffmanbac94542016-03-14 21:11:59 -07001565
1566 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1567 return;
1568
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001569 if ((!disable_copy_compl_intr) &&
1570 (CE_state->send_cb || CE_state->recv_cb)) {
1571 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1572 } else {
1573 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1574 }
1575
1576 if (CE_state->watermark_cb) {
1577 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1578 } else {
1579 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1580 }
Houston Hoffmanbac94542016-03-14 21:11:59 -07001581 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001582}
1583
1584/* Iterate the CE_state list and disable the copy-complete interrupt
1585 * if it has been registered already.
1586 */
Komal Seelam644263d2016-02-22 20:45:49 +05301587void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001588{
1589 int CE_id;
1590
Houston Hoffmanbac94542016-03-14 21:11:59 -07001591 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1592 return;
1593
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001594 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1595 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1596 uint32_t ctrl_addr = CE_state->ctrl_addr;
1597
1598 /* if the interrupt is currently enabled, disable it */
1599 if (!CE_state->disable_copy_compl_intr
1600 && (CE_state->send_cb || CE_state->recv_cb)) {
1601 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1602 }
1603
1604 if (CE_state->watermark_cb) {
1605 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1606 }
1607 }
Houston Hoffmanbac94542016-03-14 21:11:59 -07001608 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001609}
1610
Komal Seelam644263d2016-02-22 20:45:49 +05301611void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001612{
1613 int CE_id;
1614
Houston Hoffmanbac94542016-03-14 21:11:59 -07001615 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1616 return;
1617
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001618 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1619 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1620 uint32_t ctrl_addr = CE_state->ctrl_addr;
1621
1622 /*
1623 * If the CE is supposed to have copy complete interrupts
1624		 * enabled (i.e. there is a callback registered, and the
1625 * "disable" flag is not set), then re-enable the interrupt.
1626 */
1627 if (!CE_state->disable_copy_compl_intr
1628 && (CE_state->send_cb || CE_state->recv_cb)) {
1629 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1630 }
1631
1632 if (CE_state->watermark_cb) {
1633 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1634 }
1635 }
Houston Hoffmanbac94542016-03-14 21:11:59 -07001636 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001637}
1638
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001639/**
1640 * ce_send_cb_register(): register completion handler
1641 * @copyeng: CE_state representing the ce we are adding the behavior to
1642 * @fn_ptr: callback that the ce should use when processing tx completions
1643 * @disable_interrupts: whether the copy-complete interrupt should be disabled.
1644 *
1645 * Caller should guarantee that no transactions are in progress before
1646 * switching the callback function.
1647 *
1648 * Registers the send context before the fn pointer so that if the callback
1649 * is seen as valid, the context is valid as well.
1650 *
1651 * Beware that currently this function will enable completion interrupts.
1652 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001653void
1654ce_send_cb_register(struct CE_handle *copyeng,
1655 ce_send_cb fn_ptr,
1656 void *ce_send_context, int disable_interrupts)
1657{
1658 struct CE_state *CE_state = (struct CE_state *)copyeng;
1659
Sanjay Devnani9ce15772015-11-12 14:08:57 -08001660 if (CE_state == NULL) {
1661 pr_err("%s: Error CE state = NULL\n", __func__);
1662 return;
1663 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001664 CE_state->send_context = ce_send_context;
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001665 CE_state->send_cb = fn_ptr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001666 ce_per_engine_handler_adjust(CE_state, disable_interrupts);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001667}
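/*
 * Illustrative sketch: registering a send-completion handler. The callback
 * argument list mirrors how send_cb is invoked in ce_per_engine_service()
 * above; htt_example_tx_done, htt_example_recycle_buf, ce_tx_hdl and
 * htt_pdev are hypothetical placeholders.
 *
 *	static void htt_example_tx_done(struct CE_handle *copyeng, void *context,
 *					void *transfer_context,
 *					qdf_dma_addr_t buf, unsigned int nbytes,
 *					unsigned int id, unsigned int sw_idx,
 *					unsigned int hw_idx,
 *					uint32_t toeplitz_hash_result)
 *	{
 *		htt_example_recycle_buf(context, transfer_context);
 *	}
 *
 *	ce_send_cb_register(ce_tx_hdl, htt_example_tx_done, htt_pdev, 0);
 */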
1668
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001669/**
1670 * ce_recv_cb_register(): register completion handler
1671 * @copyeng: CE_state representing the ce we are adding the behavior to
1672 * @fn_ptr: callback that the ce should use when processing rx completions
1673 * @disable_interrupts: whether the copy-complete interrupt should be disabled.
1674 *
1675 * Registers the recv context before the fn pointer so that if the callback
1676 * is seen as valid, the context is valid as well.
1677 *
1678 * Caller should guarantee that no transactions are in progress before
1679 * switching the callback function.
1680 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001681void
1682ce_recv_cb_register(struct CE_handle *copyeng,
1683 CE_recv_cb fn_ptr,
1684 void *CE_recv_context, int disable_interrupts)
1685{
1686 struct CE_state *CE_state = (struct CE_state *)copyeng;
1687
Sanjay Devnani9ce15772015-11-12 14:08:57 -08001688 if (CE_state == NULL) {
1689 pr_err("%s: ERROR CE state = NULL\n", __func__);
1690 return;
1691 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001692 CE_state->recv_context = CE_recv_context;
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001693 CE_state->recv_cb = fn_ptr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001694 ce_per_engine_handler_adjust(CE_state, disable_interrupts);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001695}
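/*
 * Illustrative sketch: registering a receive-completion handler. The callback
 * argument list mirrors how recv_cb is invoked in ce_per_engine_service()
 * above; hif_example_rx_done, hif_example_deliver_rx, ce_rx_hdl and
 * rx_context are hypothetical placeholders.
 *
 *	static void hif_example_rx_done(struct CE_handle *copyeng, void *context,
 *					void *transfer_context,
 *					qdf_dma_addr_t buf, unsigned int nbytes,
 *					unsigned int id, unsigned int flags)
 *	{
 *		hif_example_deliver_rx(context, transfer_context, nbytes);
 *	}
 *
 *	ce_recv_cb_register(ce_rx_hdl, hif_example_rx_done, rx_context, 0);
 */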
1696
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001697/**
1698 * ce_watermark_cb_register(): register completion handler
1699 * @copyeng: CE_state representing the ce we are adding the behavior to
1700 * @fn_ptr: callback that the ce should use when processing watermark events
1701 *
1702 * Caller should guarantee that no watermark events are being processed before
1703 * switching the callback function.
1704 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001705void
1706ce_watermark_cb_register(struct CE_handle *copyeng,
1707 CE_watermark_cb fn_ptr, void *CE_wm_context)
1708{
1709 struct CE_state *CE_state = (struct CE_state *)copyeng;
1710
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001711 CE_state->watermark_cb = fn_ptr;
1712 CE_state->wm_context = CE_wm_context;
1713 ce_per_engine_handler_adjust(CE_state, 0);
1714 if (fn_ptr) {
1715 CE_state->misc_cbs = 1;
1716 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001717}
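/*
 * Illustrative sketch: a watermark handler receives the software flags that
 * ce_per_engine_service() derives from CE_WATERMARK_MASK. hif_example_wm_cb,
 * hif_example_flow_event, ce_hdl and wm_context are hypothetical placeholders.
 *
 *	static void hif_example_wm_cb(struct CE_handle *copyeng,
 *				      void *wm_context, unsigned int flags)
 *	{
 *		hif_example_flow_event(wm_context, flags);
 *	}
 *
 *	ce_watermark_cb_register(ce_hdl, hif_example_wm_cb, wm_context);
 */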
1718
1719#ifdef WLAN_FEATURE_FASTPATH
1720/**
1721 * ce_pkt_dl_len_set() - set the HTT packet download length
1722 * @hif_sc: HIF context
1723 * @pkt_download_len: download length
1724 *
1725 * Return: None
1726 */
1727void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
1728{
Komal Seelam644263d2016-02-22 20:45:49 +05301729 struct hif_softc *sc = (struct hif_softc *)(hif_sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001730 struct CE_state *ce_state = sc->ce_id_to_state[CE_HTT_H2T_MSG];
1731
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301732 qdf_assert_always(ce_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001733
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001734 ce_state->download_len = pkt_download_len;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001735
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301736 qdf_print("%s CE %d Pkt download length %d", __func__,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001737 ce_state->id, ce_state->download_len);
1738}
1739#else
1740void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
1741{
1742}
1743#endif /* WLAN_FEATURE_FASTPATH */
1744
Komal Seelam644263d2016-02-22 20:45:49 +05301745bool ce_get_rx_pending(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001746{
1747 int CE_id;
1748
1749 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1750 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301751 if (qdf_atomic_read(&CE_state->rx_pending))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001752 return true;
1753 }
1754
1755 return false;
1756}
1757
1758/**
1759 * ce_check_rx_pending() - check whether a given copy engine has rx pending
Komal Seelam644263d2016-02-22 20:45:49 +05301760 * @scn: hif_softc
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001761 * @ce_id: copy engine id
1762 *
1763 * Return: true if rx is pending on the copy engine, false otherwise
1764 */
Komal Seelam644263d2016-02-22 20:45:49 +05301765bool ce_check_rx_pending(struct hif_softc *scn, int ce_id)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001766{
1767 struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301768 if (qdf_atomic_read(&CE_state->rx_pending))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001769 return true;
1770 else
1771 return false;
1772}
Houston Hoffman8ed92e52015-09-02 14:49:48 -07001773
1774/**
1775 * ce_enable_msi(): write the msi configuration to the target
1776 * @scn: hif context
1777 * @CE_id: which copy engine will be configured for msi interrupts
1778 * @msi_addr_lo: Hardware will write to this address to generate an interrupt
1779 * @msi_addr_hi: Hardware will write to this address to generate an interrupt
1780 * @msi_data: Hardware will write this data to generate an interrupt
1781 *
1782 * This should be done during the initialization sequence, so no locking is needed.
1783 */
Komal Seelam644263d2016-02-22 20:45:49 +05301784void ce_enable_msi(struct hif_softc *scn, unsigned int CE_id,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001785 uint32_t msi_addr_lo, uint32_t msi_addr_hi,
1786 uint32_t msi_data)
1787{
1788#ifdef WLAN_ENABLE_QCA6180
1789 struct CE_state *CE_state;
1790 A_target_id_t targid;
1791 u_int32_t ctrl_addr;
1792 uint32_t tmp;
1793
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001794 CE_state = scn->ce_id_to_state[CE_id];
1795 if (!CE_state) {
1796 HIF_ERROR("%s: error - CE_state = NULL", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001797 return;
1798 }
1799 targid = TARGID(scn);
1800 ctrl_addr = CE_state->ctrl_addr;
1801 CE_MSI_ADDR_LOW_SET(scn, ctrl_addr, msi_addr_lo);
1802 CE_MSI_ADDR_HIGH_SET(scn, ctrl_addr, msi_addr_hi);
1803 CE_MSI_DATA_SET(scn, ctrl_addr, msi_data);
1804 tmp = CE_CTRL_REGISTER1_GET(scn, ctrl_addr);
1805 tmp |= (1 << CE_MSI_ENABLE_BIT);
1806 CE_CTRL_REGISTER1_SET(scn, ctrl_addr, tmp);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001807#endif
1808}
1809
1810#ifdef IPA_OFFLOAD
Leo Changd85f78d2015-11-13 10:55:34 -08001811/**
1812 * ce_ipa_get_resource() - get uc resource on copyengine
1813 * @ce: copyengine context
1814 * @ce_sr_base_paddr: copyengine source ring base physical address
1815 * @ce_sr_ring_size: copyengine source ring size
1816 * @ce_reg_paddr: copyengine register physical address
1817 *
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001818 * Copy engine should release resource to micro controller
1819 * Micro controller needs
Leo Changd85f78d2015-11-13 10:55:34 -08001820 * - Copy engine source descriptor base address
1821 * - Copy engine source descriptor size
1822 * - PCI BAR address to access copy engine register
1823 *
1824 * Return: None
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001825 */
1826void ce_ipa_get_resource(struct CE_handle *ce,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301827 qdf_dma_addr_t *ce_sr_base_paddr,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001828 uint32_t *ce_sr_ring_size,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301829 qdf_dma_addr_t *ce_reg_paddr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001830{
1831 struct CE_state *CE_state = (struct CE_state *)ce;
1832 uint32_t ring_loop;
1833 struct CE_src_desc *ce_desc;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301834 qdf_dma_addr_t phy_mem_base;
Komal Seelam644263d2016-02-22 20:45:49 +05301835 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001836
1837 if (CE_RUNNING != CE_state->state) {
1838 *ce_sr_base_paddr = 0;
1839 *ce_sr_ring_size = 0;
1840 return;
1841 }
1842
1843 /* Update default value for descriptor */
1844 for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
1845 ring_loop++) {
1846 ce_desc = (struct CE_src_desc *)
1847 ((char *)CE_state->src_ring->base_addr_owner_space +
1848 ring_loop * (sizeof(struct CE_src_desc)));
1849 CE_IPA_RING_INIT(ce_desc);
1850 }
1851
1852 /* Get BAR address */
1853 hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
1854
Leo Changd85f78d2015-11-13 10:55:34 -08001855 *ce_sr_base_paddr = CE_state->src_ring->base_addr_CE_space;
1856 *ce_sr_ring_size = (uint32_t) (CE_state->src_ring->nentries *
1857 sizeof(struct CE_src_desc));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001858 *ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
1859 SR_WR_INDEX_ADDRESS;
1860 return;
1861}
1862#endif /* IPA_OFFLOAD */
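/*
 * Illustrative sketch: an IPA uC setup path might query the copy engine
 * resources exported by ce_ipa_get_resource(). ce_hdl and
 * hif_example_ipa_uc_setup are hypothetical placeholders.
 *
 *	qdf_dma_addr_t sr_base_paddr, reg_paddr;
 *	uint32_t sr_ring_size;
 *
 *	ce_ipa_get_resource(ce_hdl, &sr_base_paddr, &sr_ring_size, &reg_paddr);
 *	hif_example_ipa_uc_setup(sr_base_paddr, sr_ring_size, reg_paddr);
 */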
1863