/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include "hif.h"
#include "hif_io32.h"
#include "ce_api.h"
#include "ce_main.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "qdf_lock.h"
#include "regtable.h"
#include "epping_main.h"
#include "hif_main.h"
#include "hif_debug.h"
#include "ol_txrx_types.h"
#include <cds_api.h>
#include <osdep.h>

43#ifdef IPA_OFFLOAD
44#ifdef QCA_WIFI_3_0
45#define CE_IPA_RING_INIT(ce_desc) \
46 do { \
47 ce_desc->gather = 0; \
48 ce_desc->enable_11h = 0; \
49 ce_desc->meta_data_low = 0; \
50 ce_desc->packet_result_offset = 64; \
51 ce_desc->toeplitz_hash_enable = 0; \
52 ce_desc->addr_y_search_disable = 0; \
53 ce_desc->addr_x_search_disable = 0; \
54 ce_desc->misc_int_disable = 0; \
55 ce_desc->target_int_disable = 0; \
56 ce_desc->host_int_disable = 0; \
57 ce_desc->dest_byte_swap = 0; \
58 ce_desc->byte_swap = 0; \
59 ce_desc->type = 2; \
60 ce_desc->tx_classify = 1; \
61 ce_desc->buffer_addr_hi = 0; \
62 ce_desc->meta_data = 0; \
63 ce_desc->nbytes = 128; \
64 } while (0)
65#else
66#define CE_IPA_RING_INIT(ce_desc) \
67 do { \
68 ce_desc->byte_swap = 0; \
69 ce_desc->nbytes = 60; \
70 ce_desc->gather = 0; \
71 } while (0)
72#endif /* QCA_WIFI_3_0 */
73#endif /* IPA_OFFLOAD */
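
/*
 * Illustrative sketch (assumption, not part of this file): an IPA setup
 * path could use CE_IPA_RING_INIT() to pre-fill every descriptor of the
 * IPA copy engine ring.  "src_ring" and "nentries" are hypothetical names
 * used only for this example.
 *
 *	struct CE_src_desc *base =
 *		(struct CE_src_desc *)src_ring->base_addr_owner_space;
 *	unsigned int i;
 *
 *	for (i = 0; i < nentries; i++) {
 *		struct CE_src_desc *ce_desc = CE_SRC_RING_TO_DESC(base, i);
 *
 *		CE_IPA_RING_INIT(ce_desc);
 *	}
 */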
74
75static int war1_allow_sleep;
76/* io32 write workaround */
77static int hif_ce_war1;
78
Houston Hoffman68e837e2015-12-04 12:57:24 -080079#ifdef CONFIG_SLUB_DEBUG_ON
80
81/**
82 * struct hif_ce_event - structure for detailing a ce event
83 * @type: what the event was
84 * @time: when it happened
85 * @descriptor: descriptor enqueued or dequeued
86 * @memory: virtual address that was used
87 * @index: location of the descriptor in the ce ring;
88 */
89struct hif_ce_desc_event {
90 uint16_t index;
91 enum hif_ce_event_type type;
92 uint64_t time;
93 union ce_desc descriptor;
94 void *memory;
95};
96
97/* max history to record per copy engine */
98#define HIF_CE_HISTORY_MAX 512
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +053099qdf_atomic_t hif_ce_desc_history_index[CE_COUNT_MAX];
Houston Hoffman68e837e2015-12-04 12:57:24 -0800100struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];
101
Houston Hoffman4275ba22015-12-06 21:02:11 -0800102
Houston Hoffman68e837e2015-12-04 12:57:24 -0800103/**
104 * get_next_record_index() - get the next record index
105 * @table_index: atomic index variable to increment
106 * @array_size: array size of the circular buffer
107 *
108 * Increment the atomic index and reserve the value.
109 * Takes care of buffer wrap.
110 * Guaranteed to be thread safe as long as fewer than array_size contexts
111 * try to access the array. If there are more than array_size contexts
112 * trying to access the array, full locking of the recording process would
113 * be needed to have sane logging.
114 */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530115static int get_next_record_index(qdf_atomic_t *table_index, int array_size)
Houston Hoffman68e837e2015-12-04 12:57:24 -0800116{
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530117 int record_index = qdf_atomic_inc_return(table_index);
Houston Hoffman68e837e2015-12-04 12:57:24 -0800118 if (record_index == array_size)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530119 qdf_atomic_sub(array_size, table_index);
Houston Hoffman68e837e2015-12-04 12:57:24 -0800120
121 while (record_index >= array_size)
122 record_index -= array_size;
123 return record_index;
124}
125
126/**
127 * hif_record_ce_desc_event() - record ce descriptor events
Komal Seelambd7c51d2016-02-24 10:27:30 +0530128 * @scn: hif_softc
 * @ce_id: which ce is the event occurring on
130 * @type: what happened
131 * @descriptor: pointer to the descriptor posted/completed
132 * @memory: virtual address of buffer related to the descriptor
133 * @index: index that the descriptor was/will be at.
134 */
Komal Seelambd7c51d2016-02-24 10:27:30 +0530135void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
136 enum hif_ce_event_type type,
137 union ce_desc *descriptor,
138 void *memory, int index)
Houston Hoffman68e837e2015-12-04 12:57:24 -0800139{
140 int record_index = get_next_record_index(
141 &hif_ce_desc_history_index[ce_id], HIF_CE_HISTORY_MAX);
142
143 struct hif_ce_desc_event *event =
144 &hif_ce_desc_history[ce_id][record_index];
145 event->type = type;
Komal Seelam75080122016-03-02 15:18:25 +0530146 event->time = qdf_get_monotonic_boottime();
Komal Seelambd7c51d2016-02-24 10:27:30 +0530147
Houston Hoffman4275ba22015-12-06 21:02:11 -0800148 if (descriptor != NULL)
149 event->descriptor = *descriptor;
150 else
151 memset(&event->descriptor, 0, sizeof(union ce_desc));
Houston Hoffman68e837e2015-12-04 12:57:24 -0800152 event->memory = memory;
153 event->index = index;
154}
155
156/**
157 * ce_init_ce_desc_event_log() - initialize the ce event log
158 * @ce_id: copy engine id for which we are initializing the log
159 * @size: size of array to dedicate
160 *
161 * Currently the passed size is ignored in favor of a precompiled value.
162 */
163void ce_init_ce_desc_event_log(int ce_id, int size)
164{
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530165 qdf_atomic_init(&hif_ce_desc_history_index[ce_id]);
Houston Hoffman68e837e2015-12-04 12:57:24 -0800166}
167#else
Komal Seelambd7c51d2016-02-24 10:27:30 +0530168void hif_record_ce_desc_event(struct hif_softc *scn,
Houston Hoffman68e837e2015-12-04 12:57:24 -0800169 int ce_id, enum hif_ce_event_type type,
170 union ce_desc *descriptor, void *memory,
171 int index)
172{
173}
174
Houston Hoffman5cc292b2015-12-22 11:33:14 -0800175inline void ce_init_ce_desc_event_log(int ce_id, int size)
Houston Hoffman68e837e2015-12-04 12:57:24 -0800176{
177}
178#endif
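
/*
 * Illustrative sketch (assumption, for documentation only): recording one
 * event by hand.  Indices returned by get_next_record_index() wrap modulo
 * HIF_CE_HISTORY_MAX, so the per-CE history behaves as a circular buffer;
 * with CONFIG_SLUB_DEBUG_ON disabled the recording call compiles to a stub.
 *
 *	int idx = get_next_record_index(&hif_ce_desc_history_index[ce_id],
 *					HIF_CE_HISTORY_MAX);
 *
 *	hif_ce_desc_history[ce_id][idx].type = HIF_RX_DESC_POST;
 *	hif_ce_desc_history[ce_id][idx].time = qdf_get_monotonic_boottime();
 */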
179
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800180/*
181 * Support for Copy Engine hardware, which is mainly used for
182 * communication between Host and Target over a PCIe interconnect.
183 */
184
185/*
186 * A single CopyEngine (CE) comprises two "rings":
187 * a source ring
188 * a destination ring
189 *
190 * Each ring consists of a number of descriptors which specify
191 * an address, length, and meta-data.
192 *
193 * Typically, one side of the PCIe interconnect (Host or Target)
194 * controls one ring and the other side controls the other ring.
195 * The source side chooses when to initiate a transfer and it
196 * chooses what to send (buffer address, length). The destination
197 * side keeps a supply of "anonymous receive buffers" available and
198 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
200 *
201 * The sender may send a simple buffer (address/length) or it may
202 * send a small list of buffers. When a small list is sent, hardware
203 * "gathers" these and they end up in a single destination buffer
204 * with a single interrupt.
205 *
206 * There are several "contexts" managed by this layer -- more, it
207 * may seem -- than should be needed. These are provided mainly for
208 * maximum flexibility and especially to facilitate a simpler HIF
209 * implementation. There are per-CopyEngine recv, send, and watermark
210 * contexts. These are supplied by the caller when a recv, send,
211 * or watermark handler is established and they are echoed back to
212 * the caller when the respective callbacks are invoked. There is
213 * also a per-transfer context supplied by the caller when a buffer
214 * (or sendlist) is sent and when a buffer is enqueued for recv.
215 * These per-transfer contexts are echoed back to the caller when
216 * the buffer is sent/received.
 * Target TX hash result: toeplitz_hash_result
218 */
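
/*
 * Illustrative sketch of the usage model described above (assumption, not
 * driver code).  The handles and variables are hypothetical; the calls are
 * the ones implemented in this file.
 *
 *	Transmit: post a buffer on the source ring of a host->target CE.
 *		ce_send(ce_tx_hdl, tx_ctx, paddr, nbytes, transfer_id, 0, 0);
 *
 *	Receive: keep anonymous receive buffers posted on the destination
 *	ring of a target->host CE.
 *		ce_recv_buf_enqueue(ce_rx_hdl, (void *)nbuf, nbuf_paddr);
 *
 *	Completion: reap finished source descriptors and recover the
 *	per-transfer contexts supplied above.
 *		while (ce_completed_send_next(ce_tx_hdl, &ce_ctx, &tx_ctx,
 *					      &paddr, &nbytes, &id, &sw_idx,
 *					      &hw_idx, &hash) ==
 *		       QDF_STATUS_SUCCESS)
 *			;
 */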
219
220/*
221 * Guts of ce_send, used by both ce_send and ce_sendlist_send.
222 * The caller takes responsibility for any needed locking.
223 */
224int
225ce_completed_send_next_nolock(struct CE_state *CE_state,
226 void **per_CE_contextp,
227 void **per_transfer_contextp,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530228 qdf_dma_addr_t *bufferp,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800229 unsigned int *nbytesp,
230 unsigned int *transfer_idp,
231 unsigned int *sw_idx, unsigned int *hw_idx,
232 uint32_t *toeplitz_hash_result);
233
Komal Seelam644263d2016-02-22 20:45:49 +0530234void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800235 u32 ctrl_addr, unsigned int write_index)
236{
237 if (hif_ce_war1) {
238 void __iomem *indicator_addr;
239
240 indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;
241
242 if (!war1_allow_sleep
243 && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
244 hif_write32_mb(indicator_addr,
245 (CDC_WAR_MAGIC_STR | write_index));
246 } else {
247 unsigned long irq_flags;
248 local_irq_save(irq_flags);
249 hif_write32_mb(indicator_addr, 1);
250
251 /*
			 * A PCIe write waits for an ACK on IPQ8K, so there
			 * is no need to read back the value.
254 */
255 (void)hif_read32_mb(indicator_addr);
256 (void)hif_read32_mb(indicator_addr); /* conservative */
257
258 CE_SRC_RING_WRITE_IDX_SET(scn,
259 ctrl_addr, write_index);
260
261 hif_write32_mb(indicator_addr, 0);
262 local_irq_restore(irq_flags);
263 }
264 } else
265 CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
266}
267
268int
269ce_send_nolock(struct CE_handle *copyeng,
270 void *per_transfer_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530271 qdf_dma_addr_t buffer,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800272 uint32_t nbytes,
273 uint32_t transfer_id,
274 uint32_t flags,
275 uint32_t user_flags)
276{
277 int status;
278 struct CE_state *CE_state = (struct CE_state *)copyeng;
279 struct CE_ring_state *src_ring = CE_state->src_ring;
280 uint32_t ctrl_addr = CE_state->ctrl_addr;
281 unsigned int nentries_mask = src_ring->nentries_mask;
282 unsigned int sw_index = src_ring->sw_index;
283 unsigned int write_index = src_ring->write_index;
284 uint64_t dma_addr = buffer;
Komal Seelam644263d2016-02-22 20:45:49 +0530285 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800286
Houston Hoffman2c32cf62016-03-14 21:12:00 -0700287 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
Houston Hoffman987ab442016-03-14 21:12:02 -0700288 return QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800289 if (unlikely(CE_RING_DELTA(nentries_mask,
290 write_index, sw_index - 1) <= 0)) {
291 OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
Houston Hoffman987ab442016-03-14 21:12:02 -0700292 Q_TARGET_ACCESS_END(scn);
293 return QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800294 }
295 {
Houston Hoffman68e837e2015-12-04 12:57:24 -0800296 enum hif_ce_event_type event_type = HIF_TX_GATHER_DESC_POST;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800297 struct CE_src_desc *src_ring_base =
298 (struct CE_src_desc *)src_ring->base_addr_owner_space;
299 struct CE_src_desc *shadow_base =
300 (struct CE_src_desc *)src_ring->shadow_base;
301 struct CE_src_desc *src_desc =
302 CE_SRC_RING_TO_DESC(src_ring_base, write_index);
303 struct CE_src_desc *shadow_src_desc =
304 CE_SRC_RING_TO_DESC(shadow_base, write_index);
305
306 /* Update low 32 bits source descriptor address */
307 shadow_src_desc->buffer_addr =
308 (uint32_t)(dma_addr & 0xFFFFFFFF);
309#ifdef QCA_WIFI_3_0
310 shadow_src_desc->buffer_addr_hi =
311 (uint32_t)((dma_addr >> 32) & 0x1F);
312 user_flags |= shadow_src_desc->buffer_addr_hi;
313 memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
314 sizeof(uint32_t));
315#endif
316 shadow_src_desc->meta_data = transfer_id;
317
318 /*
319 * Set the swap bit if:
320 * typical sends on this CE are swapped (host is big-endian)
321 * and this send doesn't disable the swapping
322 * (data is not bytestream)
323 */
324 shadow_src_desc->byte_swap =
325 (((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
326 != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
327 shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
328 shadow_src_desc->nbytes = nbytes;
329
330 *src_desc = *shadow_src_desc;
331
332 src_ring->per_transfer_context[write_index] =
333 per_transfer_context;
334
335 /* Update Source Ring Write Index */
336 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
337
338 /* WORKAROUND */
339 if (!shadow_src_desc->gather) {
Houston Hoffman68e837e2015-12-04 12:57:24 -0800340 event_type = HIF_TX_DESC_POST;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800341 war_ce_src_ring_write_idx_set(scn, ctrl_addr,
342 write_index);
343 }
344
		/* src_ring->write_index hasn't been updated even though
		 * the register has already been written to.
347 */
Komal Seelambd7c51d2016-02-24 10:27:30 +0530348 hif_record_ce_desc_event(scn, CE_state->id, event_type,
Houston Hoffman68e837e2015-12-04 12:57:24 -0800349 (union ce_desc *) shadow_src_desc, per_transfer_context,
350 src_ring->write_index);
351
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800352 src_ring->write_index = write_index;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530353 status = QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800354 }
Houston Hoffman987ab442016-03-14 21:12:02 -0700355 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800356 return status;
357}
358
359int
360ce_send(struct CE_handle *copyeng,
361 void *per_transfer_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530362 qdf_dma_addr_t buffer,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800363 uint32_t nbytes,
364 uint32_t transfer_id,
365 uint32_t flags,
366 uint32_t user_flag)
367{
368 struct CE_state *CE_state = (struct CE_state *)copyeng;
369 int status;
370
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530371 qdf_spin_lock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800372 status = ce_send_nolock(copyeng, per_transfer_context, buffer, nbytes,
373 transfer_id, flags, user_flag);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530374 qdf_spin_unlock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800375
376 return status;
377}
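
/*
 * Illustrative sketch (assumption): a single-buffer send with a simple
 * flow-control check.  ce_send() takes ce_index_lock itself;
 * ce_send_nolock() is for callers that already hold that lock.
 * "handle_tx_error" is a hypothetical helper for caller-specific cleanup.
 *
 *	if (ce_send_entries_avail(copyeng) > 0) {
 *		status = ce_send(copyeng, (void *)nbuf, paddr, nbytes,
 *				 transfer_id, 0, 0);
 *		if (status != QDF_STATUS_SUCCESS)
 *			handle_tx_error(nbuf);
 *	}
 */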
378
379unsigned int ce_sendlist_sizeof(void)
380{
381 return sizeof(struct ce_sendlist);
382}
383
384void ce_sendlist_init(struct ce_sendlist *sendlist)
385{
386 struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
387 sl->num_items = 0;
388}
389
390int
391ce_sendlist_buf_add(struct ce_sendlist *sendlist,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530392 qdf_dma_addr_t buffer,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800393 uint32_t nbytes,
394 uint32_t flags,
395 uint32_t user_flags)
396{
397 struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
398 unsigned int num_items = sl->num_items;
399 struct ce_sendlist_item *item;
400
401 if (num_items >= CE_SENDLIST_ITEMS_MAX) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530402 QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
403 return QDF_STATUS_E_RESOURCES;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800404 }
405
406 item = &sl->item[num_items];
407 item->send_type = CE_SIMPLE_BUFFER_TYPE;
408 item->data = buffer;
409 item->u.nbytes = nbytes;
410 item->flags = flags;
411 item->user_flags = user_flags;
412 sl->num_items = num_items + 1;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530413 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800414}
415
416int
417ce_sendlist_send(struct CE_handle *copyeng,
418 void *per_transfer_context,
419 struct ce_sendlist *sendlist, unsigned int transfer_id)
420{
421 int status = -ENOMEM;
422 struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
423 struct CE_state *CE_state = (struct CE_state *)copyeng;
424 struct CE_ring_state *src_ring = CE_state->src_ring;
425 unsigned int nentries_mask = src_ring->nentries_mask;
426 unsigned int num_items = sl->num_items;
427 unsigned int sw_index;
428 unsigned int write_index;
429
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530430 QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800431
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530432 qdf_spin_lock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800433 sw_index = src_ring->sw_index;
434 write_index = src_ring->write_index;
435
436 if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
437 num_items) {
438 struct ce_sendlist_item *item;
439 int i;
440
441 /* handle all but the last item uniformly */
442 for (i = 0; i < num_items - 1; i++) {
443 item = &sl->item[i];
444 /* TBDXXX: Support extensible sendlist_types? */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530445 QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800446 status = ce_send_nolock(copyeng, CE_SENDLIST_ITEM_CTXT,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530447 (qdf_dma_addr_t) item->data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800448 item->u.nbytes, transfer_id,
449 item->flags | CE_SEND_FLAG_GATHER,
450 item->user_flags);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530451 QDF_ASSERT(status == QDF_STATUS_SUCCESS);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800452 }
453 /* provide valid context pointer for final item */
454 item = &sl->item[i];
455 /* TBDXXX: Support extensible sendlist_types? */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530456 QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800457 status = ce_send_nolock(copyeng, per_transfer_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530458 (qdf_dma_addr_t) item->data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800459 item->u.nbytes,
460 transfer_id, item->flags,
461 item->user_flags);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530462 QDF_ASSERT(status == QDF_STATUS_SUCCESS);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +0530463 QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
464 QDF_NBUF_TX_PKT_CE);
465 DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530466 QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +0530467 (uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
468 sizeof(((qdf_nbuf_t)per_transfer_context)->data)));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800469 } else {
470 /*
471 * Probably not worth the additional complexity to support
472 * partial sends with continuation or notification. We expect
473 * to use large rings and small sendlists. If we can't handle
474 * the entire request at once, punt it back to the caller.
475 */
476 }
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530477 qdf_spin_unlock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800478
479 return status;
480}
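
/*
 * Illustrative sketch (assumption): posting a two-fragment gather transfer.
 * ce_sendlist_send() marks every item except the last with
 * CE_SEND_FLAG_GATHER itself; the caller only fills in the list.
 *
 *	struct ce_sendlist sendlist;
 *
 *	ce_sendlist_init(&sendlist);
 *	ce_sendlist_buf_add(&sendlist, hdr_paddr, hdr_len, 0, 0);
 *	ce_sendlist_buf_add(&sendlist, frag_paddr, frag_len, 0, 0);
 *	ce_sendlist_send(copyeng, per_transfer_ctx, &sendlist, transfer_id);
 */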
481
482#ifdef WLAN_FEATURE_FASTPATH
483#ifdef QCA_WIFI_3_0
484static inline void
485ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
486 uint64_t dma_addr,
487 uint32_t user_flags)
488{
489 shadow_src_desc->buffer_addr_hi =
490 (uint32_t)((dma_addr >> 32) & 0x1F);
491 user_flags |= shadow_src_desc->buffer_addr_hi;
492 memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
493 sizeof(uint32_t));
494}
495#else
496static inline void
497ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
498 uint64_t dma_addr,
499 uint32_t user_flags)
500{
501}
502#endif
503
/**
 * ce_send_fast() - CE layer Tx buffer posting function
 * @copyeng: copy engine handle
 * @msdus: array of msdus to be sent
 * @num_msdus: number of msdus in the array
 * @transfer_id: transfer_id
 *
 * Assumption: called with an array of MSDUs.
 * Function:
 * For each msdu in the array
 * 1. Check the number of available entries
 * 2. Create source ring entries (allocated in consistent memory)
 * 3. Write the index to h/w
 *
 * Return: number of packets that could be sent
 */
520
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +0530521int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t *msdus,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800522 unsigned int num_msdus, unsigned int transfer_id)
523{
524 struct CE_state *ce_state = (struct CE_state *)copyeng;
Komal Seelam644263d2016-02-22 20:45:49 +0530525 struct hif_softc *scn = ce_state->scn;
Komal Seelam5584a7c2016-02-24 19:22:48 +0530526 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800527 struct CE_ring_state *src_ring = ce_state->src_ring;
528 u_int32_t ctrl_addr = ce_state->ctrl_addr;
529 unsigned int nentries_mask = src_ring->nentries_mask;
530 unsigned int write_index;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800531 unsigned int frag_len;
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +0530532 qdf_nbuf_t msdu;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800533 int i;
534 uint64_t dma_addr;
535 uint32_t user_flags = 0;
536
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530537 qdf_spin_lock_bh(&ce_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800538 write_index = src_ring->write_index;
539
540 /* 2 msdus per packet */
541 for (i = 0; i < num_msdus; i++) {
542 struct CE_src_desc *src_ring_base =
543 (struct CE_src_desc *)src_ring->base_addr_owner_space;
544 struct CE_src_desc *shadow_base =
545 (struct CE_src_desc *)src_ring->shadow_base;
546 struct CE_src_desc *src_desc =
547 CE_SRC_RING_TO_DESC(src_ring_base, write_index);
548 struct CE_src_desc *shadow_src_desc =
549 CE_SRC_RING_TO_DESC(shadow_base, write_index);
550
Komal Seelam644263d2016-02-22 20:45:49 +0530551 hif_pm_runtime_get_noresume(hif_hdl);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800552 msdu = msdus[i];
553
554 /*
555 * First fill out the ring descriptor for the HTC HTT frame
556 * header. These are uncached writes. Should we use a local
557 * structure instead?
558 */
		/* HTT/HTC header can be passed as an argument */
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +0530560 dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800561 shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
562 0xFFFFFFFF);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +0530563 user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800564 ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
565
566 shadow_src_desc->meta_data = transfer_id;
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +0530567 shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800568
569 /*
570 * HTC HTT header is a word stream, so byte swap if CE byte
571 * swap enabled
572 */
573 shadow_src_desc->byte_swap = ((ce_state->attr_flags &
574 CE_ATTR_BYTE_SWAP_DATA) != 0);
		/* No h/w write index update is needed for the first fragment yet */
576 shadow_src_desc->gather = 1;
577 *src_desc = *shadow_src_desc;
578
579 /* By default we could initialize the transfer context to this
580 * value
581 */
582 src_ring->per_transfer_context[write_index] =
583 CE_SENDLIST_ITEM_CTXT;
584
585 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
586
587 src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
588 shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
589 /*
590 * Now fill out the ring descriptor for the actual data
591 * packet
592 */
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +0530593 dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800594 shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
595 0xFFFFFFFF);
596 /*
597 * Clear packet offset for all but the first CE desc.
598 */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530599 user_flags &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800600 ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
601 shadow_src_desc->meta_data = transfer_id;
602
603 /* get actual packet length */
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +0530604 frag_len = qdf_nbuf_get_frag_len(msdu, 1);
Houston Hoffmana5e74c12015-09-02 18:06:28 -0700605
606 /* only read download_len once */
607 shadow_src_desc->nbytes = ce_state->download_len;
608 if (shadow_src_desc->nbytes > frag_len)
609 shadow_src_desc->nbytes = frag_len;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800610
611 /* Data packet is a byte stream, so disable byte swap */
612 shadow_src_desc->byte_swap = 0;
613 /* For the last one, gather is not set */
614 shadow_src_desc->gather = 0;
615 *src_desc = *shadow_src_desc;
616 src_ring->per_transfer_context[write_index] = msdu;
617 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
618 }
619
620 /* Write the final index to h/w one-shot */
621 if (i) {
622 src_ring->write_index = write_index;
Houston Hoffmanf4607852015-12-17 17:14:40 -0800623
Komal Seelam644263d2016-02-22 20:45:49 +0530624 if (hif_pm_runtime_get(hif_hdl) == 0) {
			/* Don't call WAR_XXX from here.
			 * Just call XXX instead; it has the required intelligence.
627 */
628 war_ce_src_ring_write_idx_set(scn, ctrl_addr,
629 write_index);
Komal Seelam644263d2016-02-22 20:45:49 +0530630 hif_pm_runtime_put(hif_hdl);
Houston Hoffmanf4607852015-12-17 17:14:40 -0800631 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800632 }
633
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530634 qdf_spin_unlock_bh(&ce_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800635
636 /*
637 * If all packets in the array are transmitted,
638 * i = num_msdus
639 * Temporarily add an ASSERT
640 */
641 ASSERT(i == num_msdus);
642 return i;
643}
644#endif /* WLAN_FEATURE_FASTPATH */
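
/*
 * Illustrative sketch (assumption): a fastpath caller hands ce_send_fast()
 * msdus whose fragment 0 is the HTC/HTT header and fragment 1 is the data,
 * so each msdu consumes two source-ring descriptors.  The function asserts
 * that everything was posted, so the return value equals num_msdus.
 *
 *	qdf_nbuf_t msdus[2] = { msdu0, msdu1 };
 *
 *	num_sent = ce_send_fast(ce_tx_hdl, msdus, 2, transfer_id);
 */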
645
Houston Hoffman4411ad42016-03-14 21:12:04 -0700646/**
647 * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
649 * @per_recv_context: virtual address of the nbuf
650 * @buffer: physical address of the nbuf
651 *
652 * Return: 0 if the buffer is enqueued
653 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800654int
655ce_recv_buf_enqueue(struct CE_handle *copyeng,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530656 void *per_recv_context, qdf_dma_addr_t buffer)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800657{
658 int status;
659 struct CE_state *CE_state = (struct CE_state *)copyeng;
660 struct CE_ring_state *dest_ring = CE_state->dest_ring;
661 uint32_t ctrl_addr = CE_state->ctrl_addr;
662 unsigned int nentries_mask = dest_ring->nentries_mask;
663 unsigned int write_index;
664 unsigned int sw_index;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800665 uint64_t dma_addr = buffer;
Komal Seelam644263d2016-02-22 20:45:49 +0530666 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800667
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530668 qdf_spin_lock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800669 write_index = dest_ring->write_index;
670 sw_index = dest_ring->sw_index;
671
Houston Hoffman4411ad42016-03-14 21:12:04 -0700672 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530673 qdf_spin_unlock_bh(&CE_state->ce_index_lock);
Houston Hoffman4411ad42016-03-14 21:12:04 -0700674 return -EIO;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800675 }
676
Manjunathappa Prakash7399f142016-04-13 23:38:16 -0700677 if ((CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) ||
678 (ce_is_fastpath_enabled((struct hif_opaque_softc *)scn) &&
679 CE_state->htt_rx_data &&
680 (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0))) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800681 struct CE_dest_desc *dest_ring_base =
Manjunathappa Prakash7399f142016-04-13 23:38:16 -0700682 (struct CE_dest_desc *)dest_ring->base_addr_owner_space;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800683 struct CE_dest_desc *dest_desc =
684 CE_DEST_RING_TO_DESC(dest_ring_base, write_index);
685
686 /* Update low 32 bit destination descriptor */
687 dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
688#ifdef QCA_WIFI_3_0
689 dest_desc->buffer_addr_hi =
690 (uint32_t)((dma_addr >> 32) & 0x1F);
691#endif
692 dest_desc->nbytes = 0;
693
694 dest_ring->per_transfer_context[write_index] =
695 per_recv_context;
696
Komal Seelambd7c51d2016-02-24 10:27:30 +0530697 hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_POST,
Houston Hoffman68e837e2015-12-04 12:57:24 -0800698 (union ce_desc *) dest_desc, per_recv_context,
699 write_index);
700
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800701 /* Update Destination Ring Write Index */
702 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
Manjunathappa Prakash7399f142016-04-13 23:38:16 -0700703 if (write_index != sw_index) {
704 CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
705 dest_ring->write_index = write_index;
706 }
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530707 status = QDF_STATUS_SUCCESS;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -0700708 } else
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530709 status = QDF_STATUS_E_FAILURE;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -0700710
Houston Hoffman4411ad42016-03-14 21:12:04 -0700711 Q_TARGET_ACCESS_END(scn);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530712 qdf_spin_unlock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800713 return status;
714}
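
/*
 * Illustrative sketch (assumption): the HIF layer typically keeps the
 * destination ring replenished by re-posting receive buffers until no
 * entries are left.  "alloc_and_map_rx_nbuf" and "unmap_and_free_rx_nbuf"
 * are hypothetical helpers for nbuf allocation and DMA mapping.
 *
 *	while (ce_recv_entries_avail(ce_rx_hdl) > 0) {
 *		qdf_nbuf_t nbuf = alloc_and_map_rx_nbuf(scn, &paddr);
 *
 *		if (!nbuf)
 *			break;
 *		if (ce_recv_buf_enqueue(ce_rx_hdl, (void *)nbuf, paddr) !=
 *		    QDF_STATUS_SUCCESS) {
 *			unmap_and_free_rx_nbuf(scn, nbuf);
 *			break;
 *		}
 *	}
 */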
715
716void
717ce_send_watermarks_set(struct CE_handle *copyeng,
718 unsigned int low_alert_nentries,
719 unsigned int high_alert_nentries)
720{
721 struct CE_state *CE_state = (struct CE_state *)copyeng;
722 uint32_t ctrl_addr = CE_state->ctrl_addr;
Komal Seelam644263d2016-02-22 20:45:49 +0530723 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800724
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800725 CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
726 CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800727}
728
729void
730ce_recv_watermarks_set(struct CE_handle *copyeng,
731 unsigned int low_alert_nentries,
732 unsigned int high_alert_nentries)
733{
734 struct CE_state *CE_state = (struct CE_state *)copyeng;
735 uint32_t ctrl_addr = CE_state->ctrl_addr;
Komal Seelam644263d2016-02-22 20:45:49 +0530736 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800737
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800738 CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
739 low_alert_nentries);
740 CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
741 high_alert_nentries);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800742}
743
744unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
745{
746 struct CE_state *CE_state = (struct CE_state *)copyeng;
747 struct CE_ring_state *src_ring = CE_state->src_ring;
748 unsigned int nentries_mask = src_ring->nentries_mask;
749 unsigned int sw_index;
750 unsigned int write_index;
751
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530752 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800753 sw_index = src_ring->sw_index;
754 write_index = src_ring->write_index;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530755 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800756
757 return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
758}
759
760unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
761{
762 struct CE_state *CE_state = (struct CE_state *)copyeng;
763 struct CE_ring_state *dest_ring = CE_state->dest_ring;
764 unsigned int nentries_mask = dest_ring->nentries_mask;
765 unsigned int sw_index;
766 unsigned int write_index;
767
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530768 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800769 sw_index = dest_ring->sw_index;
770 write_index = dest_ring->write_index;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530771 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800772
773 return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
774}
775
776/*
777 * Guts of ce_send_entries_done.
778 * The caller takes responsibility for any necessary locking.
779 */
780unsigned int
Komal Seelam644263d2016-02-22 20:45:49 +0530781ce_send_entries_done_nolock(struct hif_softc *scn,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800782 struct CE_state *CE_state)
783{
784 struct CE_ring_state *src_ring = CE_state->src_ring;
785 uint32_t ctrl_addr = CE_state->ctrl_addr;
786 unsigned int nentries_mask = src_ring->nentries_mask;
787 unsigned int sw_index;
788 unsigned int read_index;
789
790 sw_index = src_ring->sw_index;
791 read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
792
793 return CE_RING_DELTA(nentries_mask, sw_index, read_index);
794}
795
796unsigned int ce_send_entries_done(struct CE_handle *copyeng)
797{
798 struct CE_state *CE_state = (struct CE_state *)copyeng;
799 unsigned int nentries;
800
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530801 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800802 nentries = ce_send_entries_done_nolock(CE_state->scn, CE_state);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530803 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800804
805 return nentries;
806}
807
808/*
809 * Guts of ce_recv_entries_done.
810 * The caller takes responsibility for any necessary locking.
811 */
812unsigned int
Komal Seelam644263d2016-02-22 20:45:49 +0530813ce_recv_entries_done_nolock(struct hif_softc *scn,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800814 struct CE_state *CE_state)
815{
816 struct CE_ring_state *dest_ring = CE_state->dest_ring;
817 uint32_t ctrl_addr = CE_state->ctrl_addr;
818 unsigned int nentries_mask = dest_ring->nentries_mask;
819 unsigned int sw_index;
820 unsigned int read_index;
821
822 sw_index = dest_ring->sw_index;
823 read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
824
825 return CE_RING_DELTA(nentries_mask, sw_index, read_index);
826}
827
828unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
829{
830 struct CE_state *CE_state = (struct CE_state *)copyeng;
831 unsigned int nentries;
832
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530833 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800834 nentries = ce_recv_entries_done_nolock(CE_state->scn, CE_state);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530835 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800836
837 return nentries;
838}
839
840/* Debug support */
841void *ce_debug_cmplrn_context; /* completed recv next context */
842void *ce_debug_cnclsn_context; /* cancel send next context */
843void *ce_debug_rvkrn_context; /* revoke receive next context */
844void *ce_debug_cmplsn_context; /* completed send next context */
845
846/*
847 * Guts of ce_completed_recv_next.
848 * The caller takes responsibility for any necessary locking.
849 */
850int
851ce_completed_recv_next_nolock(struct CE_state *CE_state,
852 void **per_CE_contextp,
853 void **per_transfer_contextp,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530854 qdf_dma_addr_t *bufferp,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800855 unsigned int *nbytesp,
856 unsigned int *transfer_idp,
857 unsigned int *flagsp)
858{
859 int status;
860 struct CE_ring_state *dest_ring = CE_state->dest_ring;
861 unsigned int nentries_mask = dest_ring->nentries_mask;
862 unsigned int sw_index = dest_ring->sw_index;
Komal Seelambd7c51d2016-02-24 10:27:30 +0530863 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800864 struct CE_dest_desc *dest_ring_base =
865 (struct CE_dest_desc *)dest_ring->base_addr_owner_space;
866 struct CE_dest_desc *dest_desc =
867 CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
868 int nbytes;
869 struct CE_dest_desc dest_desc_info;
870 /*
871 * By copying the dest_desc_info element to local memory, we could
872 * avoid extra memory read from non-cachable memory.
873 */
874 dest_desc_info = *dest_desc;
875 nbytes = dest_desc_info.nbytes;
876 if (nbytes == 0) {
877 /*
878 * This closes a relatively unusual race where the Host
879 * sees the updated DRRI before the update to the
880 * corresponding descriptor has completed. We treat this
881 * as a descriptor that is not yet done.
882 */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530883 status = QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800884 goto done;
885 }
886
Komal Seelambd7c51d2016-02-24 10:27:30 +0530887 hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_COMPLETION,
Houston Hoffman68e837e2015-12-04 12:57:24 -0800888 (union ce_desc *) dest_desc,
889 dest_ring->per_transfer_context[sw_index],
890 sw_index);
891
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800892 dest_desc->nbytes = 0;
893
894 /* Return data from completed destination descriptor */
895 *bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
896 *nbytesp = nbytes;
897 *transfer_idp = dest_desc_info.meta_data;
898 *flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;
899
900 if (per_CE_contextp) {
901 *per_CE_contextp = CE_state->recv_context;
902 }
903
904 ce_debug_cmplrn_context = dest_ring->per_transfer_context[sw_index];
905 if (per_transfer_contextp) {
906 *per_transfer_contextp = ce_debug_cmplrn_context;
907 }
908 dest_ring->per_transfer_context[sw_index] = 0; /* sanity */
909
910 /* Update sw_index */
911 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
912 dest_ring->sw_index = sw_index;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530913 status = QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800914
915done:
916 return status;
917}
918
919int
920ce_completed_recv_next(struct CE_handle *copyeng,
921 void **per_CE_contextp,
922 void **per_transfer_contextp,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530923 qdf_dma_addr_t *bufferp,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800924 unsigned int *nbytesp,
925 unsigned int *transfer_idp, unsigned int *flagsp)
926{
927 struct CE_state *CE_state = (struct CE_state *)copyeng;
928 int status;
929
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530930 qdf_spin_lock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800931 status =
932 ce_completed_recv_next_nolock(CE_state, per_CE_contextp,
933 per_transfer_contextp, bufferp,
934 nbytesp, transfer_idp, flagsp);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530935 qdf_spin_unlock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800936
937 return status;
938}
939
940/* NB: Modeled after ce_completed_recv_next_nolock */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530941QDF_STATUS
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800942ce_revoke_recv_next(struct CE_handle *copyeng,
943 void **per_CE_contextp,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530944 void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800945{
946 struct CE_state *CE_state;
947 struct CE_ring_state *dest_ring;
948 unsigned int nentries_mask;
949 unsigned int sw_index;
950 unsigned int write_index;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530951 QDF_STATUS status;
Komal Seelam644263d2016-02-22 20:45:49 +0530952 struct hif_softc *scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800953
954 CE_state = (struct CE_state *)copyeng;
955 dest_ring = CE_state->dest_ring;
956 if (!dest_ring) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530957 return QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800958 }
959
960 scn = CE_state->scn;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530961 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800962 nentries_mask = dest_ring->nentries_mask;
963 sw_index = dest_ring->sw_index;
964 write_index = dest_ring->write_index;
965 if (write_index != sw_index) {
966 struct CE_dest_desc *dest_ring_base =
967 (struct CE_dest_desc *)dest_ring->
968 base_addr_owner_space;
969 struct CE_dest_desc *dest_desc =
970 CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
971
972 /* Return data from completed destination descriptor */
973 *bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);
974
975 if (per_CE_contextp) {
976 *per_CE_contextp = CE_state->recv_context;
977 }
978
979 ce_debug_rvkrn_context =
980 dest_ring->per_transfer_context[sw_index];
981 if (per_transfer_contextp) {
982 *per_transfer_contextp = ce_debug_rvkrn_context;
983 }
984 dest_ring->per_transfer_context[sw_index] = 0; /* sanity */
985
986 /* Update sw_index */
987 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
988 dest_ring->sw_index = sw_index;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530989 status = QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800990 } else {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530991 status = QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800992 }
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530993 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800994
995 return status;
996}
997
998/*
999 * Guts of ce_completed_send_next.
1000 * The caller takes responsibility for any necessary locking.
1001 */
1002int
1003ce_completed_send_next_nolock(struct CE_state *CE_state,
1004 void **per_CE_contextp,
1005 void **per_transfer_contextp,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301006 qdf_dma_addr_t *bufferp,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001007 unsigned int *nbytesp,
1008 unsigned int *transfer_idp,
1009 unsigned int *sw_idx,
1010 unsigned int *hw_idx,
1011 uint32_t *toeplitz_hash_result)
1012{
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301013 int status = QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001014 struct CE_ring_state *src_ring = CE_state->src_ring;
1015 uint32_t ctrl_addr = CE_state->ctrl_addr;
1016 unsigned int nentries_mask = src_ring->nentries_mask;
1017 unsigned int sw_index = src_ring->sw_index;
1018 unsigned int read_index;
Komal Seelam644263d2016-02-22 20:45:49 +05301019 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001020
1021 if (src_ring->hw_index == sw_index) {
1022 /*
1023 * The SW completion index has caught up with the cached
1024 * version of the HW completion index.
1025 * Update the cached HW completion index to see whether
1026 * the SW has really caught up to the HW, or if the cached
1027 * value of the HW index has become stale.
1028 */
Houston Hoffman2c32cf62016-03-14 21:12:00 -07001029 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
Houston Hoffman987ab442016-03-14 21:12:02 -07001030 return QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001031 src_ring->hw_index =
Houston Hoffman3d0cda82015-12-03 13:25:05 -08001032 CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
Houston Hoffman2c32cf62016-03-14 21:12:00 -07001033 if (Q_TARGET_ACCESS_END(scn) < 0)
Houston Hoffman987ab442016-03-14 21:12:02 -07001034 return QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001035 }
1036 read_index = src_ring->hw_index;
1037
1038 if (sw_idx)
1039 *sw_idx = sw_index;
1040
1041 if (hw_idx)
1042 *hw_idx = read_index;
1043
1044 if ((read_index != sw_index) && (read_index != 0xffffffff)) {
1045 struct CE_src_desc *shadow_base =
1046 (struct CE_src_desc *)src_ring->shadow_base;
1047 struct CE_src_desc *shadow_src_desc =
1048 CE_SRC_RING_TO_DESC(shadow_base, sw_index);
1049#ifdef QCA_WIFI_3_0
1050 struct CE_src_desc *src_ring_base =
1051 (struct CE_src_desc *)src_ring->base_addr_owner_space;
1052 struct CE_src_desc *src_desc =
1053 CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
1054#endif
Komal Seelambd7c51d2016-02-24 10:27:30 +05301055 hif_record_ce_desc_event(scn, CE_state->id,
1056 HIF_TX_DESC_COMPLETION,
Houston Hoffman68e837e2015-12-04 12:57:24 -08001057 (union ce_desc *) shadow_src_desc,
1058 src_ring->per_transfer_context[sw_index],
1059 sw_index);
1060
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001061 /* Return data from completed source descriptor */
1062 *bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
1063 *nbytesp = shadow_src_desc->nbytes;
1064 *transfer_idp = shadow_src_desc->meta_data;
1065#ifdef QCA_WIFI_3_0
1066 *toeplitz_hash_result = src_desc->toeplitz_hash_result;
1067#else
1068 *toeplitz_hash_result = 0;
1069#endif
1070 if (per_CE_contextp) {
1071 *per_CE_contextp = CE_state->send_context;
1072 }
1073
1074 ce_debug_cmplsn_context =
1075 src_ring->per_transfer_context[sw_index];
1076 if (per_transfer_contextp) {
1077 *per_transfer_contextp = ce_debug_cmplsn_context;
1078 }
1079 src_ring->per_transfer_context[sw_index] = 0; /* sanity */
1080
1081 /* Update sw_index */
1082 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1083 src_ring->sw_index = sw_index;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301084 status = QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001085 }
1086
1087 return status;
1088}
1089
1090/* NB: Modeled after ce_completed_send_next */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301091QDF_STATUS
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001092ce_cancel_send_next(struct CE_handle *copyeng,
1093 void **per_CE_contextp,
1094 void **per_transfer_contextp,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301095 qdf_dma_addr_t *bufferp,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001096 unsigned int *nbytesp,
1097 unsigned int *transfer_idp,
1098 uint32_t *toeplitz_hash_result)
1099{
1100 struct CE_state *CE_state;
1101 struct CE_ring_state *src_ring;
1102 unsigned int nentries_mask;
1103 unsigned int sw_index;
1104 unsigned int write_index;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301105 QDF_STATUS status;
Komal Seelam644263d2016-02-22 20:45:49 +05301106 struct hif_softc *scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001107
1108 CE_state = (struct CE_state *)copyeng;
1109 src_ring = CE_state->src_ring;
1110 if (!src_ring) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301111 return QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001112 }
1113
1114 scn = CE_state->scn;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301115 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001116 nentries_mask = src_ring->nentries_mask;
1117 sw_index = src_ring->sw_index;
1118 write_index = src_ring->write_index;
1119
1120 if (write_index != sw_index) {
1121 struct CE_src_desc *src_ring_base =
1122 (struct CE_src_desc *)src_ring->base_addr_owner_space;
1123 struct CE_src_desc *src_desc =
1124 CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
1125
1126 /* Return data from completed source descriptor */
1127 *bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
1128 *nbytesp = src_desc->nbytes;
1129 *transfer_idp = src_desc->meta_data;
1130#ifdef QCA_WIFI_3_0
1131 *toeplitz_hash_result = src_desc->toeplitz_hash_result;
1132#else
1133 *toeplitz_hash_result = 0;
1134#endif
1135
1136 if (per_CE_contextp) {
1137 *per_CE_contextp = CE_state->send_context;
1138 }
1139
1140 ce_debug_cnclsn_context =
1141 src_ring->per_transfer_context[sw_index];
1142 if (per_transfer_contextp) {
1143 *per_transfer_contextp = ce_debug_cnclsn_context;
1144 }
1145 src_ring->per_transfer_context[sw_index] = 0; /* sanity */
1146
1147 /* Update sw_index */
1148 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1149 src_ring->sw_index = sw_index;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301150 status = QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001151 } else {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301152 status = QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001153 }
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301154 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001155
1156 return status;
1157}
1158
1159/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
1160#define CE_WM_SHFT 1
1161
1162int
1163ce_completed_send_next(struct CE_handle *copyeng,
1164 void **per_CE_contextp,
1165 void **per_transfer_contextp,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301166 qdf_dma_addr_t *bufferp,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001167 unsigned int *nbytesp,
1168 unsigned int *transfer_idp,
1169 unsigned int *sw_idx,
1170 unsigned int *hw_idx,
1171 unsigned int *toeplitz_hash_result)
1172{
1173 struct CE_state *CE_state = (struct CE_state *)copyeng;
1174 int status;
1175
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301176 qdf_spin_lock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001177 status =
1178 ce_completed_send_next_nolock(CE_state, per_CE_contextp,
1179 per_transfer_contextp, bufferp,
1180 nbytesp, transfer_idp, sw_idx,
1181 hw_idx, toeplitz_hash_result);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301182 qdf_spin_unlock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001183
1184 return status;
1185}
1186
1187#ifdef ATH_11AC_TXCOMPACT
/* CE engine descriptor reap
 * Similar to ce_per_engine_service(); the only difference is that
 * ce_per_engine_service() handles both receive and reaping of completed
 * descriptors, whereas this function only reaps Tx completion descriptors.
 * It is called from the threshold reap poll routine
 * hif_send_complete_check(), so it must not contain any receive
 * functionality.
 */
1196
Komal Seelam644263d2016-02-22 20:45:49 +05301197void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001198{
1199 void *CE_context;
1200 void *transfer_context;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301201 qdf_dma_addr_t buf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001202 unsigned int nbytes;
1203 unsigned int id;
1204 unsigned int sw_idx, hw_idx;
1205 uint32_t toeplitz_hash_result;
Houston Hoffmana575ec22015-12-14 16:35:15 -08001206 struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001207
Houston Hoffmanbac94542016-03-14 21:11:59 -07001208 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1209 return;
1210
Komal Seelambd7c51d2016-02-24 10:27:30 +05301211 hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
Houston Hoffmana575ec22015-12-14 16:35:15 -08001212 NULL, NULL, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001213
1214 /* Since this function is called from both user context and
1215 * tasklet context the spinlock has to lock the bottom halves.
1216 * This fix assumes that ATH_11AC_TXCOMPACT flag is always
1217 * enabled in TX polling mode. If this is not the case, more
1218 * bottom halve spin lock changes are needed. Due to data path
1219 * performance concern, after internal discussion we've decided
1220 * to make minimum change, i.e., only address the issue occured
1221 * in this function. The possible negative effect of this minimum
1222 * change is that, in the future, if some other function will also
1223 * be opened to let the user context to use, those cases need to be
1224 * addressed by change spin_lock to spin_lock_bh also.
1225 */
1226
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301227 qdf_spin_lock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001228
1229 if (CE_state->send_cb) {
1230 {
1231 /* Pop completed send buffers and call the
1232 * registered send callback for each
1233 */
1234 while (ce_completed_send_next_nolock
1235 (CE_state, &CE_context,
1236 &transfer_context, &buf,
1237 &nbytes, &id, &sw_idx, &hw_idx,
1238 &toeplitz_hash_result) ==
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301239 QDF_STATUS_SUCCESS) {
Houston Hoffmana575ec22015-12-14 16:35:15 -08001240 if (ce_id != CE_HTT_H2T_MSG) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301241 qdf_spin_unlock_bh(
Houston Hoffman44b7e4a2015-09-03 17:01:22 -07001242 &CE_state->ce_index_lock);
1243 CE_state->send_cb(
1244 (struct CE_handle *)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001245 CE_state, CE_context,
1246 transfer_context, buf,
1247 nbytes, id, sw_idx, hw_idx,
1248 toeplitz_hash_result);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301249 qdf_spin_lock_bh(
Houston Hoffman44b7e4a2015-09-03 17:01:22 -07001250 &CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001251 } else {
1252 struct HIF_CE_pipe_info *pipe_info =
1253 (struct HIF_CE_pipe_info *)
1254 CE_context;
1255
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301256 qdf_spin_lock_bh(&pipe_info->
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001257 completion_freeq_lock);
1258 pipe_info->num_sends_allowed++;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301259 qdf_spin_unlock_bh(&pipe_info->
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001260 completion_freeq_lock);
1261 }
1262 }
1263 }
1264 }
1265
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301266 qdf_spin_unlock_bh(&CE_state->ce_index_lock);
Houston Hoffmana575ec22015-12-14 16:35:15 -08001267
Komal Seelambd7c51d2016-02-24 10:27:30 +05301268 hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
Houston Hoffmana575ec22015-12-14 16:35:15 -08001269 NULL, NULL, 0);
Houston Hoffmanbac94542016-03-14 21:11:59 -07001270 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001271}
1272
1273#endif /*ATH_11AC_TXCOMPACT */
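
/*
 * Illustrative sketch (assumption): under ATH_11AC_TXCOMPACT the send
 * completion poll path can reap the HTT H->T copy engine directly, e.g.:
 *
 *	ce_per_engine_servicereap(scn, CE_HTT_H2T_MSG);
 *
 * Receive processing still goes through the regular per-engine service
 * path; this entry point never touches a destination ring.
 */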
1274
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001275#ifdef WLAN_FEATURE_FASTPATH
1276
1277/**
1278 * ce_tx_completion() - reap off the CE source ring when CE completion happens
1279 * @ce_state: Handle to CE
1280 * @num_tx_cmpls: Number of completions handled
1281 *
1282 * API to reap off the CE source ring when CE completion happens:
1283 * Update number of src_ring entries based on number of completions.
1284 *
1285 * Return: None
1286 */
1287static void
1288ce_tx_completion(struct CE_state *ce_state, uint32_t num_tx_cmpls)
1289{
1290 struct CE_ring_state *src_ring = ce_state->src_ring;
1291 uint32_t nentries_mask = src_ring->nentries_mask;
1292
1293 ASSERT(num_tx_cmpls);
1294
1295 qdf_spin_lock(&ce_state->ce_index_lock);
1296
1297 /*
1298 * This locks the index manipulation of this CE with those done
1299 * in ce_send_fast().
1300 */
1301
1302 /*
1303 * Advance the s/w index:
1304 * This effectively simulates completing the CE ring descriptors
1305 */
1306 src_ring->sw_index = CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index,
1307 num_tx_cmpls);
1308 qdf_spin_unlock(&ce_state->ce_index_lock);
1309}
1310
1311/**
1312 * ce_fastpath_rx_handle() - Updates write_index and calls fastpath msg handler
1313 * @ce_state: handle to copy engine state
1314 * @cmpl_msdus: Rx msdus
1315 * @num_cmpls: number of Rx msdus
1316 * @ctrl_addr: CE control address
1317 *
1318 * Return: None
1319 */
1320static void ce_fastpath_rx_handle(struct CE_state *ce_state,
1321 qdf_nbuf_t *cmpl_msdus, uint32_t num_cmpls,
1322 uint32_t ctrl_addr)
1323{
1324 struct hif_softc *scn = ce_state->scn;
1325 struct CE_ring_state *dest_ring = ce_state->dest_ring;
1326 struct CE_state *ce_tx_cmpl_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
1327 uint32_t nentries_mask = dest_ring->nentries_mask;
1328 uint32_t tx_cmpls;
1329 uint32_t write_index;
1330
1331 tx_cmpls = (ce_state->fastpath_handler)(ce_state->context, cmpl_msdus,
1332 num_cmpls);
1333
1334 /* Update Destination Ring Write Index */
1335 write_index = dest_ring->write_index;
1336 write_index = CE_RING_IDX_ADD(nentries_mask, write_index, num_cmpls);
1337 CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
1338 dest_ring->write_index = write_index;
1339 ce_tx_completion(ce_tx_cmpl_state, tx_cmpls);
1340}
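
/*
 * Illustrative, compile-guarded sketch (not driver code): the
 * (ce_state->fastpath_handler)(context, cmpl_msdus, num_cmpls) call above
 * implies a handler that consumes an array of rx nbufs and returns the
 * number of HTT tx completions they carried, which the caller then reaps
 * from CE_HTT_H2T_MSG. The handler below is hypothetical; the real
 * prototype lives in the CE/HTT headers.
 */
#if 0
static uint32_t example_fastpath_handler(void *context,
					 qdf_nbuf_t *cmpl_msdus,
					 uint32_t num_cmpls)
{
	uint32_t i;
	uint32_t tx_cmpls = 0;

	for (i = 0; i < num_cmpls; i++) {
		/* parse cmpl_msdus[i] and count any tx completion records */
	}

	return tx_cmpls;
}
#endif /* illustrative example */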
1341
1342#define MSG_FLUSH_NUM 20
1343/**
1344 * ce_per_engine_service_fast() - CE handler routine to service fastpath messages
1345 * @scn: hif_context
 1346 * @ce_id: Copy engine ID
 1347 * Function:
 1348 * 1) Go through the CE ring, and find the completions
 1349 * 2) For valid completions retrieve context (nbuf) from per_transfer_context[]
 1350 * 3) Unmap buffer & accumulate in an array.
 1351 * 4) Call message handler when array is full or when exiting the handler
 1352 *
 1353 * Return: QDF_STATUS_SUCCESS
1354 */
1355
1356static int
1357ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
1358{
1359 struct CE_state *ce_state = scn->ce_id_to_state[ce_id];
1360 struct CE_ring_state *dest_ring = ce_state->dest_ring;
1361 struct CE_dest_desc *dest_ring_base =
1362 (struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1363
1364 uint32_t nentries_mask = dest_ring->nentries_mask;
1365 uint32_t sw_index = dest_ring->sw_index;
1366 uint32_t nbytes;
1367 qdf_nbuf_t nbuf;
1368 uint32_t paddr_lo;
1369 struct CE_dest_desc *dest_desc;
1370 uint32_t ce_int_status = (1 << ce_id);
1371 qdf_nbuf_t cmpl_msdus[MSG_FLUSH_NUM];
1372 uint32_t ctrl_addr = ce_state->ctrl_addr;
1373 uint32_t nbuf_cmpl_idx = 0;
1374
1375more_data:
1376 if (ce_int_status == (1 << ce_id)) {
1377 for (;;) {
1378
1379 dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base,
1380 sw_index);
1381
1382 /*
1383 * The following 2 reads are from non-cached memory
1384 */
1385 nbytes = dest_desc->nbytes;
1386
1387 /* If completion is invalid, break */
1388 if (qdf_unlikely(nbytes == 0))
1389 break;
1390
1391
1392 /*
1393 * Build the nbuf list from valid completions
1394 */
1395 nbuf = dest_ring->per_transfer_context[sw_index];
1396
1397 /*
1398 * No lock is needed here, since this is the only thread
1399 * that accesses the sw_index
1400 */
1401 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1402
1403 /*
1404 * CAREFUL : Uncached write, but still less expensive,
1405 * since most modern caches use "write-combining" to
1406 * flush multiple cache-writes all at once.
1407 */
1408 dest_desc->nbytes = 0;
1409
1410 /*
 1411 * Per our understanding this sync is not required on our
 1412 * platform, since we are doing the same cache invalidation
 1413 * operation on the same buffer twice in succession,
 1414 * without any modification to this buffer by the CPU in
 1415 * between.
 1416 * However, this code with 2 syncs in succession has
 1417 * been undergoing some testing at a customer site,
 1418 * and has shown no problems so far. We would
 1419 * like to confirm with the customer that this sync
 1420 * is really not required before we remove it
 1421 * completely.
1422 */
1423 paddr_lo = QDF_NBUF_CB_PADDR(nbuf);
1424
1425 OS_SYNC_SINGLE_FOR_CPU(scn->qdf_dev->dev, paddr_lo,
1426 (skb_end_pointer(nbuf) - (nbuf)->data),
1427 DMA_FROM_DEVICE);
1428 qdf_nbuf_put_tail(nbuf, nbytes);
1429
1430 qdf_assert_always(nbuf->data != NULL);
1431
1432 cmpl_msdus[nbuf_cmpl_idx++] = nbuf;
1433
1434 /*
 1435 * We are not posting the buffers back; instead
 1436 * we reuse the same buffers.
1437 */
1438 if (nbuf_cmpl_idx == MSG_FLUSH_NUM) {
1439 qdf_spin_unlock(&ce_state->ce_index_lock);
1440 ce_fastpath_rx_handle(ce_state, cmpl_msdus,
1441 MSG_FLUSH_NUM, ctrl_addr);
1442 qdf_spin_lock(&ce_state->ce_index_lock);
1443 nbuf_cmpl_idx = 0;
1444 }
1445
1446 }
1447
1448 /*
1449 * If there are not enough completions to fill the array,
1450 * just call the message handler here
1451 */
1452 if (nbuf_cmpl_idx) {
1453 qdf_spin_unlock(&ce_state->ce_index_lock);
1454 ce_fastpath_rx_handle(ce_state, cmpl_msdus,
1455 nbuf_cmpl_idx, ctrl_addr);
1456 qdf_spin_lock(&ce_state->ce_index_lock);
1457 nbuf_cmpl_idx = 0;
1458 }
1459 qdf_atomic_set(&ce_state->rx_pending, 0);
1460 dest_ring->sw_index = sw_index;
1461
1462 CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1463 HOST_IS_COPY_COMPLETE_MASK);
1464 }
1465 ce_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
1466 if (ce_int_status & CE_WATERMARK_MASK)
1467 goto more_data;
1468
1469 return QDF_STATUS_SUCCESS;
1470}
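
/*
 * Illustrative, compile-guarded sketch (not driver code): a condensed
 * restatement of the batching used above. Completions are accumulated
 * into a fixed array, flushed when MSG_FLUSH_NUM is reached, and the
 * remainder is flushed once the ring runs dry. The function pointers
 * stand in for the ring walk and for ce_fastpath_rx_handle().
 */
#if 0
static void example_batch_and_flush(qdf_nbuf_t (*next_completion)(void),
				    void (*flush)(qdf_nbuf_t *, uint32_t))
{
	qdf_nbuf_t batch[MSG_FLUSH_NUM];
	uint32_t n = 0;
	qdf_nbuf_t nbuf;

	while ((nbuf = next_completion()) != NULL) {
		batch[n++] = nbuf;
		if (n == MSG_FLUSH_NUM) {
			flush(batch, n);	/* full batch */
			n = 0;
		}
	}

	if (n)
		flush(batch, n);		/* partial batch on exit */
}
#endif /* illustrative example */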
1471
1472#else
1473static int
1474ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
1475{
1476 return QDF_STATUS_E_FAILURE;
1477}
1478#endif /* WLAN_FEATURE_FASTPATH */
1479
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001480/*
 1481 * Number of times to check for any pending tx/rx completion on
 1482 * a copy engine; this count should be big enough. Once we hit
 1483 * this threshold we will not check for any Tx/Rx completion in the
 1484 * same interrupt handling. Note that this threshold is only used
 1485 * for Rx interrupt processing; it can be used for Tx as well if we
 1486 * suspect any infinite loop in checking for pending Tx completion.
1487 */
1488#define CE_TXRX_COMP_CHECK_THRESHOLD 20
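
/*
 * Illustrative, compile-guarded sketch (not driver code): the bounded
 * re-check pattern this threshold enables. Keep going back for more
 * completions, but give up after CE_TXRX_COMP_CHECK_THRESHOLD passes so
 * a single interrupt cannot spin forever. The function pointers are
 * placeholders for the real completion checks.
 */
#if 0
static void example_bounded_recheck(bool (*more_work)(void),
				    void (*process)(void))
{
	unsigned int passes = 0;

	do {
		process();
	} while (more_work() && ++passes < CE_TXRX_COMP_CHECK_THRESHOLD);
}
#endif /* illustrative example */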
1489
1490/*
1491 * Guts of interrupt handler for per-engine interrupts on a particular CE.
1492 *
1493 * Invokes registered callbacks for recv_complete,
1494 * send_complete, and watermarks.
1495 *
1496 * Returns: number of messages processed
1497 */
1498
Komal Seelam644263d2016-02-22 20:45:49 +05301499int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001500{
1501 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1502 uint32_t ctrl_addr = CE_state->ctrl_addr;
1503 void *CE_context;
1504 void *transfer_context;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301505 qdf_dma_addr_t buf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001506 unsigned int nbytes;
1507 unsigned int id;
1508 unsigned int flags;
1509 uint32_t CE_int_status;
1510 unsigned int more_comp_cnt = 0;
1511 unsigned int more_snd_comp_cnt = 0;
1512 unsigned int sw_idx, hw_idx;
1513 uint32_t toeplitz_hash_result;
Komal Seelambd7c51d2016-02-24 10:27:30 +05301514 uint32_t mode = hif_get_conparam(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001515
1516 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1517 HIF_ERROR("[premature rc=0]\n");
1518 return 0; /* no work done */
1519 }
1520
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301521 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001522
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001523 /*
 1524 * The check below makes sure that the CE we are handling is a
 1525 * datapath CE and that fastpath is enabled.
1526 */
1527 if (ce_is_fastpath_handler_registered(CE_state))
1528 /* For datapath only Rx CEs */
1529 if (!ce_per_engine_service_fast(scn, CE_id)) {
1530 qdf_spin_unlock(&CE_state->ce_index_lock);
1531 return 0;
1532 }
1533
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001534 /* Clear force_break flag and re-initialize receive_count to 0 */
1535
1536 /* NAPI: scn variables- thread/multi-processing safety? */
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001537 CE_state->receive_count = 0;
Houston Hoffman18c7fc52015-09-02 11:44:42 -07001538 CE_state->force_break = 0;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001539more_completions:
1540 if (CE_state->recv_cb) {
1541
1542 /* Pop completed recv buffers and call
1543 * the registered recv callback for each
1544 */
1545 while (ce_completed_recv_next_nolock
1546 (CE_state, &CE_context, &transfer_context,
1547 &buf, &nbytes, &id, &flags) ==
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301548 QDF_STATUS_SUCCESS) {
1549 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001550 CE_state->recv_cb((struct CE_handle *)CE_state,
1551 CE_context, transfer_context, buf,
1552 nbytes, id, flags);
1553
1554 /*
1555 * EV #112693 -
1556 * [Peregrine][ES1][WB342][Win8x86][Performance]
1557 * BSoD_0x133 occurred in VHT80 UDP_DL
 1558 * Break out of the DPC by force if the number of loops in
 1559 * hif_pci_ce_recv_data reaches MAX_NUM_OF_RECEIVES,
 1560 * to avoid spending too much time in the
 1561 * DPC for each interrupt. Schedule another
 1562 * DPC to avoid data loss if the force-break action
 1563 * was taken. This currently applies to Windows
 1564 * only; Linux/MAC OS can extend it to their
 1565 * platform if necessary.
1566 */
1567
 1568 /* Break out of receive processing by
 1569 * force if force_break is set
1570 */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301571 if (qdf_unlikely(CE_state->force_break)) {
1572 qdf_atomic_set(&CE_state->rx_pending, 1);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001573 CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1574 HOST_IS_COPY_COMPLETE_MASK);
1575 if (Q_TARGET_ACCESS_END(scn) < 0)
1576 HIF_ERROR("<--[premature rc=%d]\n",
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001577 CE_state->receive_count);
1578 return CE_state->receive_count;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001579 }
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301580 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001581 }
1582 }
1583
1584 /*
 1585 * Attention: the while loop below may potentially loop forever
 1586 * during a send stress test.
 1587 * Resolve it the same way as the receive case (refer to EV #112693).
1588 */
1589
1590 if (CE_state->send_cb) {
1591 /* Pop completed send buffers and call
1592 * the registered send callback for each
1593 */
1594
1595#ifdef ATH_11AC_TXCOMPACT
1596 while (ce_completed_send_next_nolock
1597 (CE_state, &CE_context,
1598 &transfer_context, &buf, &nbytes,
1599 &id, &sw_idx, &hw_idx,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301600 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001601
1602 if (CE_id != CE_HTT_H2T_MSG ||
Komal Seelambd7c51d2016-02-24 10:27:30 +05301603 WLAN_IS_EPPING_ENABLED(mode)) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301604 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001605 CE_state->send_cb((struct CE_handle *)CE_state,
1606 CE_context, transfer_context,
1607 buf, nbytes, id, sw_idx,
1608 hw_idx, toeplitz_hash_result);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301609 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001610 } else {
1611 struct HIF_CE_pipe_info *pipe_info =
1612 (struct HIF_CE_pipe_info *)CE_context;
1613
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301614 qdf_spin_lock(&pipe_info->
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001615 completion_freeq_lock);
1616 pipe_info->num_sends_allowed++;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301617 qdf_spin_unlock(&pipe_info->
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001618 completion_freeq_lock);
1619 }
1620 }
1621#else /*ATH_11AC_TXCOMPACT */
1622 while (ce_completed_send_next_nolock
1623 (CE_state, &CE_context,
1624 &transfer_context, &buf, &nbytes,
1625 &id, &sw_idx, &hw_idx,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301626 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1627 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001628 CE_state->send_cb((struct CE_handle *)CE_state,
1629 CE_context, transfer_context, buf,
1630 nbytes, id, sw_idx, hw_idx,
1631 toeplitz_hash_result);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301632 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001633 }
1634#endif /*ATH_11AC_TXCOMPACT */
1635 }
1636
1637more_watermarks:
1638 if (CE_state->misc_cbs) {
1639 CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
1640 if (CE_int_status & CE_WATERMARK_MASK) {
1641 if (CE_state->watermark_cb) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301642 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001643 /* Convert HW IS bits to software flags */
1644 flags =
1645 (CE_int_status & CE_WATERMARK_MASK) >>
1646 CE_WM_SHFT;
1647
1648 CE_state->
1649 watermark_cb((struct CE_handle *)CE_state,
1650 CE_state->wm_context, flags);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301651 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001652 }
1653 }
1654 }
1655
1656 /*
1657 * Clear the misc interrupts (watermark) that were handled above,
1658 * and that will be checked again below.
1659 * Clear and check for copy-complete interrupts again, just in case
1660 * more copy completions happened while the misc interrupts were being
1661 * handled.
1662 */
1663 CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1664 CE_WATERMARK_MASK |
1665 HOST_IS_COPY_COMPLETE_MASK);
1666
1667 /*
1668 * Now that per-engine interrupts are cleared, verify that
1669 * no recv interrupts arrive while processing send interrupts,
1670 * and no recv or send interrupts happened while processing
 1671 * misc interrupts. Go back and check again. Keep checking until
1672 * we find no more events to process.
1673 */
1674 if (CE_state->recv_cb && ce_recv_entries_done_nolock(scn, CE_state)) {
Komal Seelambd7c51d2016-02-24 10:27:30 +05301675 if (WLAN_IS_EPPING_ENABLED(mode) ||
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001676 more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1677 goto more_completions;
1678 } else {
1679 HIF_ERROR(
1680 "%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1681 __func__, CE_state->dest_ring->nentries_mask,
1682 CE_state->dest_ring->sw_index,
1683 CE_DEST_RING_READ_IDX_GET(scn,
1684 CE_state->ctrl_addr));
1685 }
1686 }
1687
1688 if (CE_state->send_cb && ce_send_entries_done_nolock(scn, CE_state)) {
Komal Seelambd7c51d2016-02-24 10:27:30 +05301689 if (WLAN_IS_EPPING_ENABLED(mode) ||
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001690 more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1691 goto more_completions;
1692 } else {
1693 HIF_ERROR(
1694 "%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1695 __func__, CE_state->src_ring->nentries_mask,
1696 CE_state->src_ring->sw_index,
1697 CE_SRC_RING_READ_IDX_GET(scn,
1698 CE_state->ctrl_addr));
1699 }
1700 }
1701
1702 if (CE_state->misc_cbs) {
1703 CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
1704 if (CE_int_status & CE_WATERMARK_MASK) {
1705 if (CE_state->watermark_cb) {
1706 goto more_watermarks;
1707 }
1708 }
1709 }
1710
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301711 qdf_spin_unlock(&CE_state->ce_index_lock);
1712 qdf_atomic_set(&CE_state->rx_pending, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001713
1714 if (Q_TARGET_ACCESS_END(scn) < 0)
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001715 HIF_ERROR("<--[premature rc=%d]\n", CE_state->receive_count);
1716 return CE_state->receive_count;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001717}
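
/*
 * Illustrative, compile-guarded sketch (not driver code): a hypothetical
 * bottom-half caller of ce_per_engine_service(). It services one CE and
 * then uses ce_check_rx_pending() to decide whether to run again; the
 * rescheduling mechanism itself is platform specific and omitted.
 */
#if 0
static void example_ce_bottom_half(struct hif_softc *scn, int ce_id)
{
	int processed;

	/* returns the number of messages processed on this CE */
	processed = ce_per_engine_service(scn, ce_id);
	(void)processed;

	if (ce_check_rx_pending(scn, ce_id)) {
		/* more rx arrived while we were servicing; a real caller
		 * would reschedule its tasklet/DPC here */
	}
}
#endif /* illustrative example */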
1718
1719/*
1720 * Handler for per-engine interrupts on ALL active CEs.
1721 * This is used in cases where the system is sharing a
 1722 * single interrupt for all CEs.
1723 */
1724
Komal Seelam644263d2016-02-22 20:45:49 +05301725void ce_per_engine_service_any(int irq, struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001726{
1727 int CE_id;
1728 uint32_t intr_summary;
1729
Houston Hoffmanbac94542016-03-14 21:11:59 -07001730 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1731 return;
1732
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301733 if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001734 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1735 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301736 if (qdf_atomic_read(&CE_state->rx_pending)) {
1737 qdf_atomic_set(&CE_state->rx_pending, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001738 ce_per_engine_service(scn, CE_id);
1739 }
1740 }
1741
Houston Hoffmanbac94542016-03-14 21:11:59 -07001742 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001743 return;
1744 }
1745
1746 intr_summary = CE_INTERRUPT_SUMMARY(scn);
1747
1748 for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
1749 if (intr_summary & (1 << CE_id)) {
1750 intr_summary &= ~(1 << CE_id);
1751 } else {
1752 continue; /* no intr pending on this CE */
1753 }
1754
1755 ce_per_engine_service(scn, CE_id);
1756 }
1757
Houston Hoffmanbac94542016-03-14 21:11:59 -07001758 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001759}
1760
1761/*
1762 * Adjust interrupts for the copy complete handler.
1763 * If it's needed for either send or recv, then unmask
1764 * this interrupt; otherwise, mask it.
1765 *
1766 * Called with target_lock held.
1767 */
1768static void
1769ce_per_engine_handler_adjust(struct CE_state *CE_state,
1770 int disable_copy_compl_intr)
1771{
1772 uint32_t ctrl_addr = CE_state->ctrl_addr;
Komal Seelam644263d2016-02-22 20:45:49 +05301773 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001774
1775 CE_state->disable_copy_compl_intr = disable_copy_compl_intr;
Houston Hoffmanbac94542016-03-14 21:11:59 -07001776
1777 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1778 return;
1779
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001780 if ((!disable_copy_compl_intr) &&
1781 (CE_state->send_cb || CE_state->recv_cb)) {
1782 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1783 } else {
1784 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1785 }
1786
1787 if (CE_state->watermark_cb) {
1788 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1789 } else {
1790 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1791 }
Houston Hoffmanbac94542016-03-14 21:11:59 -07001792 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001793}
1794
 1795/* Iterate the CE_state list and disable the copy complete interrupt
1796 * if it has been registered already.
1797 */
Komal Seelam644263d2016-02-22 20:45:49 +05301798void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001799{
1800 int CE_id;
1801
Houston Hoffmanbac94542016-03-14 21:11:59 -07001802 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1803 return;
1804
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001805 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1806 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1807 uint32_t ctrl_addr = CE_state->ctrl_addr;
1808
1809 /* if the interrupt is currently enabled, disable it */
1810 if (!CE_state->disable_copy_compl_intr
1811 && (CE_state->send_cb || CE_state->recv_cb)) {
1812 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1813 }
1814
1815 if (CE_state->watermark_cb) {
1816 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1817 }
1818 }
Houston Hoffmanbac94542016-03-14 21:11:59 -07001819 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001820}
1821
Komal Seelam644263d2016-02-22 20:45:49 +05301822void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001823{
1824 int CE_id;
1825
Houston Hoffmanbac94542016-03-14 21:11:59 -07001826 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1827 return;
1828
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001829 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1830 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1831 uint32_t ctrl_addr = CE_state->ctrl_addr;
1832
1833 /*
1834 * If the CE is supposed to have copy complete interrupts
1835 * enabled (i.e. there a callback registered, and the
1836 * "disable" flag is not set), then re-enable the interrupt.
1837 */
1838 if (!CE_state->disable_copy_compl_intr
1839 && (CE_state->send_cb || CE_state->recv_cb)) {
1840 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1841 }
1842
1843 if (CE_state->watermark_cb) {
1844 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1845 }
1846 }
Houston Hoffmanbac94542016-03-14 21:11:59 -07001847 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001848}
1849
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001850/**
1851 * ce_send_cb_register(): register completion handler
1852 * @copyeng: CE_state representing the ce we are adding the behavior to
1853 * @fn_ptr: callback that the ce should use when processing tx completions
 1854 * @disable_interrupts: whether completion interrupts should be disabled
1855 *
1856 * Caller should guarantee that no transactions are in progress before
1857 * switching the callback function.
1858 *
1859 * Registers the send context before the fn pointer so that if the cb is valid
1860 * the context should be valid.
1861 *
1862 * Beware that currently this function will enable completion interrupts.
1863 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001864void
1865ce_send_cb_register(struct CE_handle *copyeng,
1866 ce_send_cb fn_ptr,
1867 void *ce_send_context, int disable_interrupts)
1868{
1869 struct CE_state *CE_state = (struct CE_state *)copyeng;
1870
Sanjay Devnani9ce15772015-11-12 14:08:57 -08001871 if (CE_state == NULL) {
1872 pr_err("%s: Error CE state = NULL\n", __func__);
1873 return;
1874 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001875 CE_state->send_context = ce_send_context;
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001876 CE_state->send_cb = fn_ptr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001877 ce_per_engine_handler_adjust(CE_state, disable_interrupts);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001878}
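
/*
 * Illustrative, compile-guarded sketch (not driver code): registering a
 * hypothetical send-completion callback. The argument list of
 * example_send_done() mirrors the CE_state->send_cb(...) dispatch in
 * ce_per_engine_service() above; the authoritative prototype is the
 * ce_send_cb typedef in the CE headers.
 */
#if 0
static void example_send_done(struct CE_handle *copyeng, void *context,
			      void *transfer_context, qdf_dma_addr_t buf,
			      unsigned int nbytes, unsigned int id,
			      unsigned int sw_idx, unsigned int hw_idx,
			      uint32_t toeplitz_hash_result)
{
	/* free or recycle transfer_context (typically an nbuf) here */
}

static void example_register_send_cb(struct CE_handle *copyeng,
				     void *my_context)
{
	/* last argument 0: leave copy complete interrupts enabled */
	ce_send_cb_register(copyeng, example_send_done, my_context, 0);
}
#endif /* illustrative example */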
1879
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001880/**
1881 * ce_recv_cb_register(): register completion handler
1882 * @copyeng: CE_state representing the ce we are adding the behavior to
1883 * @fn_ptr: callback that the ce should use when processing rx completions
 1884 * @disable_interrupts: whether completion interrupts should be disabled
 1885 *
 1886 * Registers the recv context before the fn pointer so that if the cb is valid
1887 * the context should be valid.
1888 *
1889 * Caller should guarantee that no transactions are in progress before
1890 * switching the callback function.
1891 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001892void
1893ce_recv_cb_register(struct CE_handle *copyeng,
1894 CE_recv_cb fn_ptr,
1895 void *CE_recv_context, int disable_interrupts)
1896{
1897 struct CE_state *CE_state = (struct CE_state *)copyeng;
1898
Sanjay Devnani9ce15772015-11-12 14:08:57 -08001899 if (CE_state == NULL) {
1900 pr_err("%s: ERROR CE state = NULL\n", __func__);
1901 return;
1902 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001903 CE_state->recv_context = CE_recv_context;
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001904 CE_state->recv_cb = fn_ptr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001905 ce_per_engine_handler_adjust(CE_state, disable_interrupts);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001906}
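
/*
 * Illustrative, compile-guarded sketch (not driver code): registering a
 * hypothetical receive-completion callback. The argument list of
 * example_recv_done() mirrors the CE_state->recv_cb(...) dispatch in
 * ce_per_engine_service() above; the authoritative prototype is the
 * CE_recv_cb typedef in the CE headers.
 */
#if 0
static void example_recv_done(struct CE_handle *copyeng, void *context,
			      void *transfer_context, qdf_dma_addr_t buf,
			      unsigned int nbytes, unsigned int id,
			      unsigned int flags)
{
	/* hand the received buffer (transfer_context) up the stack and
	 * post a replacement rx buffer to the copy engine */
}

static void example_register_recv_cb(struct CE_handle *copyeng,
				     void *my_context)
{
	ce_recv_cb_register(copyeng, example_recv_done, my_context, 0);
}
#endif /* illustrative example */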
1907
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001908/**
1909 * ce_watermark_cb_register(): register completion handler
1910 * @copyeng: CE_state representing the ce we are adding the behavior to
1911 * @fn_ptr: callback that the ce should use when processing watermark events
1912 *
1913 * Caller should guarantee that no watermark events are being processed before
1914 * switching the callback function.
1915 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001916void
1917ce_watermark_cb_register(struct CE_handle *copyeng,
1918 CE_watermark_cb fn_ptr, void *CE_wm_context)
1919{
1920 struct CE_state *CE_state = (struct CE_state *)copyeng;
1921
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001922 CE_state->watermark_cb = fn_ptr;
1923 CE_state->wm_context = CE_wm_context;
1924 ce_per_engine_handler_adjust(CE_state, 0);
1925 if (fn_ptr) {
1926 CE_state->misc_cbs = 1;
1927 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001928}
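
/*
 * Illustrative, compile-guarded sketch (not driver code): registering a
 * hypothetical watermark callback. The flags delivered to the callback
 * are the hardware watermark status bits shifted down by CE_WM_SHFT,
 * exactly as built in ce_per_engine_service() above; the authoritative
 * prototype is the CE_watermark_cb typedef in the CE headers.
 */
#if 0
static void example_watermark_event(struct CE_handle *copyeng,
				    void *wm_context, unsigned int flags)
{
	/* inspect flags to see which src/dest high/low watermark fired */
}

static void example_register_watermark_cb(struct CE_handle *copyeng,
					  void *wm_context)
{
	ce_watermark_cb_register(copyeng, example_watermark_event,
				 wm_context);
}
#endif /* illustrative example */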
1929
1930#ifdef WLAN_FEATURE_FASTPATH
1931/**
 1932 * ce_pkt_dl_len_set() - set the HTT packet download length
1933 * @hif_sc: HIF context
1934 * @pkt_download_len: download length
1935 *
1936 * Return: None
1937 */
1938void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
1939{
Komal Seelam644263d2016-02-22 20:45:49 +05301940 struct hif_softc *sc = (struct hif_softc *)(hif_sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001941 struct CE_state *ce_state = sc->ce_id_to_state[CE_HTT_H2T_MSG];
1942
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301943 qdf_assert_always(ce_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001944
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001945 ce_state->download_len = pkt_download_len;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001946
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301947 qdf_print("%s CE %d Pkt download length %d", __func__,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001948 ce_state->id, ce_state->download_len);
1949}
1950#else
1951void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
1952{
1953}
1954#endif /* WLAN_FEATURE_FASTPATH */
1955
Komal Seelam644263d2016-02-22 20:45:49 +05301956bool ce_get_rx_pending(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001957{
1958 int CE_id;
1959
1960 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1961 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301962 if (qdf_atomic_read(&CE_state->rx_pending))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001963 return true;
1964 }
1965
1966 return false;
1967}
1968
1969/**
 1970 * ce_check_rx_pending() - check whether rx is pending on the given copy engine
Komal Seelam644263d2016-02-22 20:45:49 +05301971 * @scn: hif_softc
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001972 * @ce_id: ce_id
1973 *
1974 * Return: bool
1975 */
Komal Seelam644263d2016-02-22 20:45:49 +05301976bool ce_check_rx_pending(struct hif_softc *scn, int ce_id)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001977{
1978 struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301979 if (qdf_atomic_read(&CE_state->rx_pending))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001980 return true;
1981 else
1982 return false;
1983}
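
/*
 * Illustrative, compile-guarded sketch (not driver code): a hypothetical
 * quiesce check built on the queries above. Poll until no copy engine
 * reports pending rx, bounded by an arbitrary retry count; real code
 * would sleep or yield between polls.
 */
#if 0
static bool example_rx_quiesced(struct hif_softc *scn)
{
	int retries = 100;	/* arbitrary bound for this sketch */

	while (ce_get_rx_pending(scn) && --retries)
		;		/* real code would sleep/yield here */

	return retries != 0;
}
#endif /* illustrative example */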
Houston Hoffman8ed92e52015-09-02 14:49:48 -07001984
1985/**
1986 * ce_enable_msi(): write the msi configuration to the target
1987 * @scn: hif context
 1988 * @CE_id: which copy engine will be configured for msi interrupts
1989 * @msi_addr_lo: Hardware will write to this address to generate an interrupt
1990 * @msi_addr_hi: Hardware will write to this address to generate an interrupt
1991 * @msi_data: Hardware will write this data to generate an interrupt
1992 *
1993 * should be done in the initialization sequence so no locking would be needed
1994 */
Komal Seelam644263d2016-02-22 20:45:49 +05301995void ce_enable_msi(struct hif_softc *scn, unsigned int CE_id,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001996 uint32_t msi_addr_lo, uint32_t msi_addr_hi,
1997 uint32_t msi_data)
1998{
1999#ifdef WLAN_ENABLE_QCA6180
2000 struct CE_state *CE_state;
2001 A_target_id_t targid;
2002 u_int32_t ctrl_addr;
2003 uint32_t tmp;
2004
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002005 CE_state = scn->ce_id_to_state[CE_id];
2006 if (!CE_state) {
2007 HIF_ERROR("%s: error - CE_state = NULL", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002008 return;
2009 }
2010 targid = TARGID(sc);
2011 ctrl_addr = CE_state->ctrl_addr;
2012 CE_MSI_ADDR_LOW_SET(scn, ctrl_addr, msi_addr_lo);
2013 CE_MSI_ADDR_HIGH_SET(scn, ctrl_addr, msi_addr_hi);
2014 CE_MSI_DATA_SET(scn, ctrl_addr, msi_data);
2015 tmp = CE_CTRL_REGISTER1_GET(scn, ctrl_addr);
2016 tmp |= (1 << CE_MSI_ENABLE_BIT);
2017 CE_CTRL_REGISTER1_SET(scn, ctrl_addr, tmp);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002018#endif
2019}
2020
2021#ifdef IPA_OFFLOAD
Leo Changd85f78d2015-11-13 10:55:34 -08002022/**
2023 * ce_ipa_get_resource() - get uc resource on copyengine
2024 * @ce: copyengine context
2025 * @ce_sr_base_paddr: copyengine source ring base physical address
2026 * @ce_sr_ring_size: copyengine source ring size
2027 * @ce_reg_paddr: copyengine register physical address
2028 *
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002029 * Copy engine should release resource to micro controller
2030 * Micro controller needs
Leo Changd85f78d2015-11-13 10:55:34 -08002031 * - Copy engine source descriptor base address
2032 * - Copy engine source descriptor size
 2033 * - PCI BAR address to access copy engine register
2034 *
2035 * Return: None
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002036 */
2037void ce_ipa_get_resource(struct CE_handle *ce,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302038 qdf_dma_addr_t *ce_sr_base_paddr,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002039 uint32_t *ce_sr_ring_size,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302040 qdf_dma_addr_t *ce_reg_paddr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002041{
2042 struct CE_state *CE_state = (struct CE_state *)ce;
2043 uint32_t ring_loop;
2044 struct CE_src_desc *ce_desc;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302045 qdf_dma_addr_t phy_mem_base;
Komal Seelam644263d2016-02-22 20:45:49 +05302046 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002047
2048 if (CE_RUNNING != CE_state->state) {
2049 *ce_sr_base_paddr = 0;
2050 *ce_sr_ring_size = 0;
2051 return;
2052 }
2053
2054 /* Update default value for descriptor */
2055 for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
2056 ring_loop++) {
2057 ce_desc = (struct CE_src_desc *)
2058 ((char *)CE_state->src_ring->base_addr_owner_space +
2059 ring_loop * (sizeof(struct CE_src_desc)));
2060 CE_IPA_RING_INIT(ce_desc);
2061 }
2062
2063 /* Get BAR address */
2064 hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
2065
Leo Changd85f78d2015-11-13 10:55:34 -08002066 *ce_sr_base_paddr = CE_state->src_ring->base_addr_CE_space;
2067 *ce_sr_ring_size = (uint32_t) (CE_state->src_ring->nentries *
2068 sizeof(struct CE_src_desc));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002069 *ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
2070 SR_WR_INDEX_ADDRESS;
2071 return;
2072}
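
/*
 * Illustrative, compile-guarded sketch (not driver code): a hypothetical
 * caller fetching the source-ring base address, ring size and register
 * address that the IPA microcontroller needs. A zero base address means
 * the copy engine was not in the CE_RUNNING state.
 */
#if 0
static void example_ipa_query(struct CE_handle *ce)
{
	qdf_dma_addr_t sr_base_paddr = 0;
	uint32_t sr_ring_size = 0;
	qdf_dma_addr_t reg_paddr = 0;

	ce_ipa_get_resource(ce, &sr_base_paddr, &sr_ring_size, &reg_paddr);

	if (!sr_base_paddr) {
		/* CE not running; nothing to hand to the IPA microcode */
	}
}
#endif /* illustrative example */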
2073#endif /* IPA_OFFLOAD */
2074