/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include "hif.h"
#include "hif_io32.h"
#include "ce_api.h"
#include "ce_main.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "qdf_lock.h"
#include "regtable.h"
#include "epping_main.h"
#include "hif_main.h"
#include "hif_debug.h"

#ifdef IPA_OFFLOAD
#ifdef QCA_WIFI_3_0
#define CE_IPA_RING_INIT(ce_desc) \
        do { \
                ce_desc->gather = 0; \
                ce_desc->enable_11h = 0; \
                ce_desc->meta_data_low = 0; \
                ce_desc->packet_result_offset = 64; \
                ce_desc->toeplitz_hash_enable = 0; \
                ce_desc->addr_y_search_disable = 0; \
                ce_desc->addr_x_search_disable = 0; \
                ce_desc->misc_int_disable = 0; \
                ce_desc->target_int_disable = 0; \
                ce_desc->host_int_disable = 0; \
                ce_desc->dest_byte_swap = 0; \
                ce_desc->byte_swap = 0; \
                ce_desc->type = 2; \
                ce_desc->tx_classify = 1; \
                ce_desc->buffer_addr_hi = 0; \
                ce_desc->meta_data = 0; \
                ce_desc->nbytes = 128; \
        } while (0)
#else
#define CE_IPA_RING_INIT(ce_desc) \
        do { \
                ce_desc->byte_swap = 0; \
                ce_desc->nbytes = 60; \
                ce_desc->gather = 0; \
        } while (0)
#endif /* QCA_WIFI_3_0 */
#endif /* IPA_OFFLOAD */

static int war1_allow_sleep;
/* io32 write workaround */
static int hif_ce_war1;

#ifdef CONFIG_SLUB_DEBUG_ON

/**
 * struct hif_ce_desc_event - structure for detailing a ce event
 * @type: what the event was
 * @time: when it happened
 * @descriptor: descriptor enqueued or dequeued
 * @memory: virtual address that was used
 * @index: location of the descriptor in the ce ring
 */
struct hif_ce_desc_event {
        uint16_t index;
        enum hif_ce_event_type type;
        uint64_t time;
        union ce_desc descriptor;
        void *memory;
};

/* max history to record per copy engine */
#define HIF_CE_HISTORY_MAX 512
qdf_atomic_t hif_ce_desc_history_index[CE_COUNT_MAX];
struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];


/**
 * get_next_record_index() - get the next record index
 * @table_index: atomic index variable to increment
 * @array_size: array size of the circular buffer
 *
 * Increment the atomic index and reserve the value.
 * Takes care of buffer wrap.
 * Guaranteed to be thread safe as long as fewer than array_size contexts
 * try to access the array.  If there are more than array_size contexts
 * trying to access the array, full locking of the recording process would
 * be needed to have sane logging.
 */
static int get_next_record_index(qdf_atomic_t *table_index, int array_size)
{
        int record_index = qdf_atomic_inc_return(table_index);
        if (record_index == array_size)
                qdf_atomic_sub(array_size, table_index);

        while (record_index >= array_size)
                record_index -= array_size;
        return record_index;
}

/**
 * hif_record_ce_desc_event() - record ce descriptor events
 * @scn: hif_softc
 * @ce_id: which ce the event is occurring on
 * @type: what happened
 * @descriptor: pointer to the descriptor posted/completed
 * @memory: virtual address of buffer related to the descriptor
 * @index: index that the descriptor was/will be at.
 */
void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
                              enum hif_ce_event_type type,
                              union ce_desc *descriptor,
                              void *memory, int index)
{
        int record_index = get_next_record_index(
                        &hif_ce_desc_history_index[ce_id], HIF_CE_HISTORY_MAX);

        struct hif_ce_desc_event *event =
                &hif_ce_desc_history[ce_id][record_index];
        event->type = type;
        event->time = qdf_get_monotonic_boottime();

        if (descriptor != NULL)
                event->descriptor = *descriptor;
        else
                memset(&event->descriptor, 0, sizeof(union ce_desc));
        event->memory = memory;
        event->index = index;
}

/**
 * ce_init_ce_desc_event_log() - initialize the ce event log
 * @ce_id: copy engine id for which we are initializing the log
 * @size: size of array to dedicate
 *
 * Currently the passed size is ignored in favor of a precompiled value.
 */
void ce_init_ce_desc_event_log(int ce_id, int size)
{
        qdf_atomic_init(&hif_ce_desc_history_index[ce_id]);
}
#else
void hif_record_ce_desc_event(struct hif_softc *scn,
                int ce_id, enum hif_ce_event_type type,
                union ce_desc *descriptor, void *memory,
                int index)
{
}

inline void ce_init_ce_desc_event_log(int ce_id, int size)
{
}
#endif

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers.  When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem, than should be needed.  These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation.  There are per-CopyEngine recv, send, and watermark
 * contexts.  These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked.  There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 * The Target TX hash result is returned via toeplitz_hash_result.
 */

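/*
 * Illustrative sketch only (not part of the driver flow): a caller that
 * owns a CE handle typically posts a gather send through the sendlist API
 * defined below.  The handle, DMA addresses and lengths here are
 * placeholders.
 *
 *      struct ce_sendlist sendlist;
 *
 *      ce_sendlist_init(&sendlist);
 *      ce_sendlist_buf_add(&sendlist, frag0_paddr, frag0_len, 0, 0);
 *      ce_sendlist_buf_add(&sendlist, frag1_paddr, frag1_len, 0, 0);
 *      ce_sendlist_send(copyeng, per_transfer_ctxt, &sendlist, transfer_id);
 *
 * The per-transfer context is echoed back through the send-completion
 * callback once the hardware has consumed the descriptors.
 */
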
/*
 * Guts of ce_send, used by both ce_send and ce_sendlist_send.
 * The caller takes responsibility for any needed locking.
 */
int
ce_completed_send_next_nolock(struct CE_state *CE_state,
                              void **per_CE_contextp,
                              void **per_transfer_contextp,
                              qdf_dma_addr_t *bufferp,
                              unsigned int *nbytesp,
                              unsigned int *transfer_idp,
                              unsigned int *sw_idx, unsigned int *hw_idx,
                              uint32_t *toeplitz_hash_result);

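/*
 * war_ce_src_ring_write_idx_set() - write the source ring write index,
 * applying the io32 write workaround (hif_ce_war1) when it is enabled.
 */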
void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
                                   u32 ctrl_addr, unsigned int write_index)
{
        if (hif_ce_war1) {
                void __iomem *indicator_addr;

                indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;

                if (!war1_allow_sleep
                    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
                        hif_write32_mb(indicator_addr,
                                       (CDC_WAR_MAGIC_STR | write_index));
                } else {
                        unsigned long irq_flags;
                        local_irq_save(irq_flags);
                        hif_write32_mb(indicator_addr, 1);

                        /*
                         * PCIE write waits for ACK in IPQ8K, there is no
                         * need to read back value.
                         */
                        (void)hif_read32_mb(indicator_addr);
                        (void)hif_read32_mb(indicator_addr); /* conservative */

                        CE_SRC_RING_WRITE_IDX_SET(scn,
                                                  ctrl_addr, write_index);

                        hif_write32_mb(indicator_addr, 0);
                        local_irq_restore(irq_flags);
                }
        } else
                CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
}

int
ce_send_nolock(struct CE_handle *copyeng,
               void *per_transfer_context,
               qdf_dma_addr_t buffer,
               uint32_t nbytes,
               uint32_t transfer_id,
               uint32_t flags,
               uint32_t user_flags)
{
        int status;
        struct CE_state *CE_state = (struct CE_state *)copyeng;
        struct CE_ring_state *src_ring = CE_state->src_ring;
        uint32_t ctrl_addr = CE_state->ctrl_addr;
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int sw_index = src_ring->sw_index;
        unsigned int write_index = src_ring->write_index;
        uint64_t dma_addr = buffer;
        struct hif_softc *scn = CE_state->scn;

        if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
                return QDF_STATUS_E_FAILURE;
        if (unlikely(CE_RING_DELTA(nentries_mask,
                                write_index, sw_index - 1) <= 0)) {
                OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
                Q_TARGET_ACCESS_END(scn);
                return QDF_STATUS_E_FAILURE;
        }
        {
                enum hif_ce_event_type event_type = HIF_TX_GATHER_DESC_POST;
                struct CE_src_desc *src_ring_base =
                        (struct CE_src_desc *)src_ring->base_addr_owner_space;
                struct CE_src_desc *shadow_base =
                        (struct CE_src_desc *)src_ring->shadow_base;
                struct CE_src_desc *src_desc =
                        CE_SRC_RING_TO_DESC(src_ring_base, write_index);
                struct CE_src_desc *shadow_src_desc =
                        CE_SRC_RING_TO_DESC(shadow_base, write_index);

                /* Update low 32 bits source descriptor address */
                shadow_src_desc->buffer_addr =
                        (uint32_t)(dma_addr & 0xFFFFFFFF);
#ifdef QCA_WIFI_3_0
                shadow_src_desc->buffer_addr_hi =
                        (uint32_t)((dma_addr >> 32) & 0x1F);
                user_flags |= shadow_src_desc->buffer_addr_hi;
                memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
                       sizeof(uint32_t));
#endif
                shadow_src_desc->meta_data = transfer_id;

                /*
                 * Set the swap bit if:
                 * typical sends on this CE are swapped (host is big-endian)
                 * and this send doesn't disable the swapping
                 * (data is not bytestream)
                 */
                shadow_src_desc->byte_swap =
                        (((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
                         != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
                shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
                shadow_src_desc->nbytes = nbytes;

                *src_desc = *shadow_src_desc;

                src_ring->per_transfer_context[write_index] =
                        per_transfer_context;

                /* Update Source Ring Write Index */
                write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

                /* WORKAROUND */
                if (!shadow_src_desc->gather) {
                        event_type = HIF_TX_DESC_POST;
                        war_ce_src_ring_write_idx_set(scn, ctrl_addr,
                                                      write_index);
                }

                /* src_ring->write_index hasn't been updated even though
                 * the register has already been written to.
                 */
                hif_record_ce_desc_event(scn, CE_state->id, event_type,
                        (union ce_desc *) shadow_src_desc,
                        per_transfer_context,
                        src_ring->write_index);

                src_ring->write_index = write_index;
                status = QDF_STATUS_SUCCESS;
        }
        Q_TARGET_ACCESS_END(scn);
        return status;
}

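/*
 * ce_send() - locked wrapper around ce_send_nolock(); takes the per-CE
 * index lock so callers need no external locking.
 */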
int
ce_send(struct CE_handle *copyeng,
        void *per_transfer_context,
        qdf_dma_addr_t buffer,
        uint32_t nbytes,
        uint32_t transfer_id,
        uint32_t flags,
        uint32_t user_flag)
{
        struct CE_state *CE_state = (struct CE_state *)copyeng;
        int status;

        qdf_spin_lock_bh(&CE_state->ce_index_lock);
        status = ce_send_nolock(copyeng, per_transfer_context, buffer, nbytes,
                        transfer_id, flags, user_flag);
        qdf_spin_unlock_bh(&CE_state->ce_index_lock);

        return status;
}

unsigned int ce_sendlist_sizeof(void)
{
        return sizeof(struct ce_sendlist);
}

void ce_sendlist_init(struct ce_sendlist *sendlist)
{
        struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
        sl->num_items = 0;
}

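/*
 * ce_sendlist_buf_add() - append one simple buffer (DMA address, length,
 * flags) to a sendlist; fails with QDF_STATUS_E_RESOURCES once the list
 * already holds CE_SENDLIST_ITEMS_MAX entries.
 */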
int
ce_sendlist_buf_add(struct ce_sendlist *sendlist,
                    qdf_dma_addr_t buffer,
                    uint32_t nbytes,
                    uint32_t flags,
                    uint32_t user_flags)
{
        struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
        unsigned int num_items = sl->num_items;
        struct ce_sendlist_item *item;

        if (num_items >= CE_SENDLIST_ITEMS_MAX) {
                QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
                return QDF_STATUS_E_RESOURCES;
        }

        item = &sl->item[num_items];
        item->send_type = CE_SIMPLE_BUFFER_TYPE;
        item->data = buffer;
        item->u.nbytes = nbytes;
        item->flags = flags;
        item->user_flags = user_flags;
        sl->num_items = num_items + 1;
        return QDF_STATUS_SUCCESS;
}

int
ce_sendlist_send(struct CE_handle *copyeng,
                 void *per_transfer_context,
                 struct ce_sendlist *sendlist, unsigned int transfer_id)
{
        int status = -ENOMEM;
        struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
        struct CE_state *CE_state = (struct CE_state *)copyeng;
        struct CE_ring_state *src_ring = CE_state->src_ring;
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int num_items = sl->num_items;
        unsigned int sw_index;
        unsigned int write_index;

        QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

        qdf_spin_lock_bh(&CE_state->ce_index_lock);
        sw_index = src_ring->sw_index;
        write_index = src_ring->write_index;

        if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
            num_items) {
                struct ce_sendlist_item *item;
                int i;

                /* handle all but the last item uniformly */
                for (i = 0; i < num_items - 1; i++) {
                        item = &sl->item[i];
                        /* TBDXXX: Support extensible sendlist_types? */
                        QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
                        status = ce_send_nolock(copyeng, CE_SENDLIST_ITEM_CTXT,
                                (qdf_dma_addr_t) item->data,
                                item->u.nbytes, transfer_id,
                                item->flags | CE_SEND_FLAG_GATHER,
                                item->user_flags);
                        QDF_ASSERT(status == QDF_STATUS_SUCCESS);
                }
                /* provide valid context pointer for final item */
                item = &sl->item[i];
                /* TBDXXX: Support extensible sendlist_types? */
                QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
                status = ce_send_nolock(copyeng, per_transfer_context,
                                        (qdf_dma_addr_t) item->data,
                                        item->u.nbytes,
                                        transfer_id, item->flags,
                                        item->user_flags);
                QDF_ASSERT(status == QDF_STATUS_SUCCESS);
                QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
                                        QDF_NBUF_TX_PKT_CE);
                DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
                        QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
                        (uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
                        sizeof(((qdf_nbuf_t)per_transfer_context)->data)));
        } else {
                /*
                 * Probably not worth the additional complexity to support
                 * partial sends with continuation or notification.  We expect
                 * to use large rings and small sendlists.  If we can't handle
                 * the entire request at once, punt it back to the caller.
                 */
        }
        qdf_spin_unlock_bh(&CE_state->ce_index_lock);

        return status;
}

#ifdef WLAN_FEATURE_FASTPATH
#ifdef QCA_WIFI_3_0
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
                      uint64_t dma_addr,
                      uint32_t user_flags)
{
        shadow_src_desc->buffer_addr_hi =
                (uint32_t)((dma_addr >> 32) & 0x1F);
        user_flags |= shadow_src_desc->buffer_addr_hi;
        memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
               sizeof(uint32_t));
}
#else
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
                      uint64_t dma_addr,
                      uint32_t user_flags)
{
}
#endif

/**
 * ce_send_fast() - CE layer Tx buffer posting function
 * @copyeng: copy engine handle
 * @msdus: array of msdus to be sent
 * @num_msdus: number of msdus in the array
 * @transfer_id: transfer_id
 *
 * Assumption: called with an array of MSDUs.
 * Function:
 * For each msdu in the array
 * 1. Check no. of available entries
 * 2. Create src ring entries (allocated in consistent memory)
 * 3. Write index to h/w
 *
 * Return: No. of packets that could be sent
 */

int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t *msdus,
                 unsigned int num_msdus, unsigned int transfer_id)
{
        struct CE_state *ce_state = (struct CE_state *)copyeng;
        struct hif_softc *scn = ce_state->scn;
        struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
        struct CE_ring_state *src_ring = ce_state->src_ring;
        u_int32_t ctrl_addr = ce_state->ctrl_addr;
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int write_index;
        unsigned int sw_index;
        unsigned int frag_len;
        qdf_nbuf_t msdu;
        int i;
        uint64_t dma_addr;
        uint32_t user_flags;

        qdf_spin_lock_bh(&ce_state->ce_index_lock);
        Q_TARGET_ACCESS_BEGIN(scn);

        src_ring->sw_index = CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
        write_index = src_ring->write_index;
        sw_index = src_ring->sw_index;

        if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1)
                         < (SLOTS_PER_DATAPATH_TX * num_msdus))) {
                HIF_ERROR("Source ring full, required %d, available %d",
                          (SLOTS_PER_DATAPATH_TX * num_msdus),
                          CE_RING_DELTA(nentries_mask, write_index, sw_index - 1));
                OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
                Q_TARGET_ACCESS_END(scn);
                qdf_spin_unlock_bh(&ce_state->ce_index_lock);
                return 0;
        }

        /* 2 msdus per packet */
        for (i = 0; i < num_msdus; i++) {
                struct CE_src_desc *src_ring_base =
                        (struct CE_src_desc *)src_ring->base_addr_owner_space;
                struct CE_src_desc *shadow_base =
                        (struct CE_src_desc *)src_ring->shadow_base;
                struct CE_src_desc *src_desc =
                        CE_SRC_RING_TO_DESC(src_ring_base, write_index);
                struct CE_src_desc *shadow_src_desc =
                        CE_SRC_RING_TO_DESC(shadow_base, write_index);

                hif_pm_runtime_get_noresume(hif_hdl);
                msdu = msdus[i];

                /*
                 * First fill out the ring descriptor for the HTC HTT frame
                 * header.  These are uncached writes.  Should we use a local
                 * structure instead?
                 */
                /* HTT/HTC header can be passed as an argument */
                dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
                shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
                                                          0xFFFFFFFF);
                user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
                ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);

                shadow_src_desc->meta_data = transfer_id;
                shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);

                /*
                 * HTC HTT header is a word stream, so byte swap if CE byte
                 * swap enabled
                 */
                shadow_src_desc->byte_swap = ((ce_state->attr_flags &
                                        CE_ATTR_BYTE_SWAP_DATA) != 0);
                /* For the first fragment, the write index is not pushed to
                 * hardware yet
                 */
                shadow_src_desc->gather = 1;
                *src_desc = *shadow_src_desc;

                /* By default we could initialize the transfer context to this
                 * value
                 */
                src_ring->per_transfer_context[write_index] =
                        CE_SENDLIST_ITEM_CTXT;

                write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

                src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
                shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
                /*
                 * Now fill out the ring descriptor for the actual data
                 * packet
                 */
                dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
                shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
                                                          0xFFFFFFFF);
                /*
                 * Clear packet offset for all but the first CE desc.
                 */
                user_flags &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
                ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
                shadow_src_desc->meta_data = transfer_id;

                /* get actual packet length */
                frag_len = qdf_nbuf_get_frag_len(msdu, 1);

                /* only read download_len once */
                shadow_src_desc->nbytes = ce_state->download_len;
                if (shadow_src_desc->nbytes > frag_len)
                        shadow_src_desc->nbytes = frag_len;

                /* Data packet is a byte stream, so disable byte swap */
                shadow_src_desc->byte_swap = 0;
                /* For the last one, gather is not set */
                shadow_src_desc->gather = 0;
                *src_desc = *shadow_src_desc;
                src_ring->per_transfer_context[write_index] = msdu;
                write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
        }

        /* Write the final index to h/w one-shot */
        if (i) {
                src_ring->write_index = write_index;

                if (hif_pm_runtime_get(hif_hdl) == 0) {
                        /* Don't call WAR_XXX from here
                         * Just call XXX instead, that has the required
                         * intelligence
                         */
                        war_ce_src_ring_write_idx_set(scn, ctrl_addr,
                                                      write_index);
                        hif_pm_runtime_put(hif_hdl);
                }
        }

        Q_TARGET_ACCESS_END(scn);
        qdf_spin_unlock_bh(&ce_state->ce_index_lock);

        /*
         * If all packets in the array are transmitted,
         * i = num_msdus
         * Temporarily add an ASSERT
         */
        ASSERT(i == num_msdus);
        return i;
}

/**
 * ce_is_fastpath_enabled() - returns true if fastpath mode is enabled
 * @scn: Handle to HIF context
 *
 * Return: true if fastpath is enabled else false.
 */
static bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
        return scn->fastpath_mode_on;
}

/**
 * ce_is_fastpath_handler_registered() - return true for datapath CEs and if
 * fastpath is enabled.
 * @ce_state: handle to copy engine
 *
 * Return: true if fastpath handler is registered for datapath CE.
 */
static bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
{
        if (ce_state->fastpath_handler)
                return true;
        else
                return false;
}


#else
static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
        return false;
}

static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
{
        return false;
}
#endif /* WLAN_FEATURE_FASTPATH */

/**
 * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: 0 if the buffer is enqueued
 */
int
ce_recv_buf_enqueue(struct CE_handle *copyeng,
                    void *per_recv_context, qdf_dma_addr_t buffer)
{
        int status;
        struct CE_state *CE_state = (struct CE_state *)copyeng;
        struct CE_ring_state *dest_ring = CE_state->dest_ring;
        uint32_t ctrl_addr = CE_state->ctrl_addr;
        unsigned int nentries_mask = dest_ring->nentries_mask;
        unsigned int write_index;
        unsigned int sw_index;
        uint64_t dma_addr = buffer;
        struct hif_softc *scn = CE_state->scn;

        qdf_spin_lock_bh(&CE_state->ce_index_lock);
        write_index = dest_ring->write_index;
        sw_index = dest_ring->sw_index;

        if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
                qdf_spin_unlock_bh(&CE_state->ce_index_lock);
                return -EIO;
        }

        if ((CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) ||
            (ce_is_fastpath_enabled(scn) && CE_state->htt_rx_data)) {
                struct CE_dest_desc *dest_ring_base =
                        (struct CE_dest_desc *)dest_ring->base_addr_owner_space;
                struct CE_dest_desc *dest_desc =
                        CE_DEST_RING_TO_DESC(dest_ring_base, write_index);

                /* Update low 32 bit destination descriptor */
                dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
#ifdef QCA_WIFI_3_0
                dest_desc->buffer_addr_hi =
                        (uint32_t)((dma_addr >> 32) & 0x1F);
#endif
                dest_desc->nbytes = 0;

                dest_ring->per_transfer_context[write_index] =
                        per_recv_context;

                hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_POST,
                                (union ce_desc *) dest_desc, per_recv_context,
                                write_index);

                /* Update Destination Ring Write Index */
                write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
                if (write_index != sw_index) {
                        CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
                        dest_ring->write_index = write_index;
                }
                status = QDF_STATUS_SUCCESS;
        } else
                status = QDF_STATUS_E_FAILURE;

        Q_TARGET_ACCESS_END(scn);
        qdf_spin_unlock_bh(&CE_state->ce_index_lock);
        return status;
}

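/*
 * ce_send_watermarks_set() / ce_recv_watermarks_set() - program the low and
 * high watermark thresholds of the source and destination rings, used to
 * trigger watermark interrupts.
 */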
void
ce_send_watermarks_set(struct CE_handle *copyeng,
                       unsigned int low_alert_nentries,
                       unsigned int high_alert_nentries)
{
        struct CE_state *CE_state = (struct CE_state *)copyeng;
        uint32_t ctrl_addr = CE_state->ctrl_addr;
        struct hif_softc *scn = CE_state->scn;

        CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
        CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
}

void
ce_recv_watermarks_set(struct CE_handle *copyeng,
                       unsigned int low_alert_nentries,
                       unsigned int high_alert_nentries)
{
        struct CE_state *CE_state = (struct CE_state *)copyeng;
        uint32_t ctrl_addr = CE_state->ctrl_addr;
        struct hif_softc *scn = CE_state->scn;

        CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
                                 low_alert_nentries);
        CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
                                  high_alert_nentries);
}

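/*
 * ce_send_entries_avail() / ce_recv_entries_avail() - number of ring slots
 * currently free for new descriptors, sampled under the per-CE index lock.
 */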
unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
{
        struct CE_state *CE_state = (struct CE_state *)copyeng;
        struct CE_ring_state *src_ring = CE_state->src_ring;
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int sw_index;
        unsigned int write_index;

        qdf_spin_lock(&CE_state->ce_index_lock);
        sw_index = src_ring->sw_index;
        write_index = src_ring->write_index;
        qdf_spin_unlock(&CE_state->ce_index_lock);

        return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}

unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
{
        struct CE_state *CE_state = (struct CE_state *)copyeng;
        struct CE_ring_state *dest_ring = CE_state->dest_ring;
        unsigned int nentries_mask = dest_ring->nentries_mask;
        unsigned int sw_index;
        unsigned int write_index;

        qdf_spin_lock(&CE_state->ce_index_lock);
        sw_index = dest_ring->sw_index;
        write_index = dest_ring->write_index;
        qdf_spin_unlock(&CE_state->ce_index_lock);

        return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}

/*
 * Guts of ce_send_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
unsigned int
ce_send_entries_done_nolock(struct hif_softc *scn,
                            struct CE_state *CE_state)
{
        struct CE_ring_state *src_ring = CE_state->src_ring;
        uint32_t ctrl_addr = CE_state->ctrl_addr;
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int sw_index;
        unsigned int read_index;

        sw_index = src_ring->sw_index;
        read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);

        return CE_RING_DELTA(nentries_mask, sw_index, read_index);
}

unsigned int ce_send_entries_done(struct CE_handle *copyeng)
{
        struct CE_state *CE_state = (struct CE_state *)copyeng;
        unsigned int nentries;

        qdf_spin_lock(&CE_state->ce_index_lock);
        nentries = ce_send_entries_done_nolock(CE_state->scn, CE_state);
        qdf_spin_unlock(&CE_state->ce_index_lock);

        return nentries;
}

/*
 * Guts of ce_recv_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
unsigned int
ce_recv_entries_done_nolock(struct hif_softc *scn,
                            struct CE_state *CE_state)
{
        struct CE_ring_state *dest_ring = CE_state->dest_ring;
        uint32_t ctrl_addr = CE_state->ctrl_addr;
        unsigned int nentries_mask = dest_ring->nentries_mask;
        unsigned int sw_index;
        unsigned int read_index;

        sw_index = dest_ring->sw_index;
        read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);

        return CE_RING_DELTA(nentries_mask, sw_index, read_index);
}

unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
{
        struct CE_state *CE_state = (struct CE_state *)copyeng;
        unsigned int nentries;

        qdf_spin_lock(&CE_state->ce_index_lock);
        nentries = ce_recv_entries_done_nolock(CE_state->scn, CE_state);
        qdf_spin_unlock(&CE_state->ce_index_lock);

        return nentries;
}

/* Debug support */
void *ce_debug_cmplrn_context;  /* completed recv next context */
void *ce_debug_cnclsn_context;  /* cancel send next context */
void *ce_debug_rvkrn_context;   /* revoke receive next context */
void *ce_debug_cmplsn_context;  /* completed send next context */

/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
int
ce_completed_recv_next_nolock(struct CE_state *CE_state,
                              void **per_CE_contextp,
                              void **per_transfer_contextp,
                              qdf_dma_addr_t *bufferp,
                              unsigned int *nbytesp,
                              unsigned int *transfer_idp,
                              unsigned int *flagsp)
{
        int status;
        struct CE_ring_state *dest_ring = CE_state->dest_ring;
        unsigned int nentries_mask = dest_ring->nentries_mask;
        unsigned int sw_index = dest_ring->sw_index;
        struct hif_softc *scn = CE_state->scn;
        struct CE_dest_desc *dest_ring_base =
                (struct CE_dest_desc *)dest_ring->base_addr_owner_space;
        struct CE_dest_desc *dest_desc =
                CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
        int nbytes;
        struct CE_dest_desc dest_desc_info;
        /*
         * By copying the dest_desc_info element to local memory, we could
         * avoid extra memory reads from non-cacheable memory.
         */
        dest_desc_info = *dest_desc;
        nbytes = dest_desc_info.nbytes;
        if (nbytes == 0) {
                /*
                 * This closes a relatively unusual race where the Host
                 * sees the updated DRRI before the update to the
                 * corresponding descriptor has completed.  We treat this
                 * as a descriptor that is not yet done.
                 */
                status = QDF_STATUS_E_FAILURE;
                goto done;
        }

        hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_COMPLETION,
                        (union ce_desc *) dest_desc,
                        dest_ring->per_transfer_context[sw_index],
                        sw_index);

        dest_desc->nbytes = 0;

        /* Return data from completed destination descriptor */
        *bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
        *nbytesp = nbytes;
        *transfer_idp = dest_desc_info.meta_data;
        *flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

        if (per_CE_contextp) {
                *per_CE_contextp = CE_state->recv_context;
        }

        ce_debug_cmplrn_context = dest_ring->per_transfer_context[sw_index];
        if (per_transfer_contextp) {
                *per_transfer_contextp = ce_debug_cmplrn_context;
        }
        dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */

        /* Update sw_index */
        sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
        dest_ring->sw_index = sw_index;
        status = QDF_STATUS_SUCCESS;

done:
        return status;
}

int
ce_completed_recv_next(struct CE_handle *copyeng,
                       void **per_CE_contextp,
                       void **per_transfer_contextp,
                       qdf_dma_addr_t *bufferp,
                       unsigned int *nbytesp,
                       unsigned int *transfer_idp, unsigned int *flagsp)
{
        struct CE_state *CE_state = (struct CE_state *)copyeng;
        int status;

        qdf_spin_lock_bh(&CE_state->ce_index_lock);
        status =
                ce_completed_recv_next_nolock(CE_state, per_CE_contextp,
                                              per_transfer_contextp, bufferp,
                                              nbytesp, transfer_idp, flagsp);
        qdf_spin_unlock_bh(&CE_state->ce_index_lock);

        return status;
}

/* NB: Modeled after ce_completed_recv_next_nolock */
QDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
                    void **per_CE_contextp,
                    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
{
        struct CE_state *CE_state;
        struct CE_ring_state *dest_ring;
        unsigned int nentries_mask;
        unsigned int sw_index;
        unsigned int write_index;
        QDF_STATUS status;
        struct hif_softc *scn;

        CE_state = (struct CE_state *)copyeng;
        dest_ring = CE_state->dest_ring;
        if (!dest_ring) {
                return QDF_STATUS_E_FAILURE;
        }

        scn = CE_state->scn;
        qdf_spin_lock(&CE_state->ce_index_lock);
        nentries_mask = dest_ring->nentries_mask;
        sw_index = dest_ring->sw_index;
        write_index = dest_ring->write_index;
        if (write_index != sw_index) {
                struct CE_dest_desc *dest_ring_base =
                        (struct CE_dest_desc *)dest_ring->
                        base_addr_owner_space;
                struct CE_dest_desc *dest_desc =
                        CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);

                /* Return data from completed destination descriptor */
                *bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);

                if (per_CE_contextp) {
                        *per_CE_contextp = CE_state->recv_context;
                }

                ce_debug_rvkrn_context =
                        dest_ring->per_transfer_context[sw_index];
                if (per_transfer_contextp) {
                        *per_transfer_contextp = ce_debug_rvkrn_context;
                }
                dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */

                /* Update sw_index */
                sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
                dest_ring->sw_index = sw_index;
                status = QDF_STATUS_SUCCESS;
        } else {
                status = QDF_STATUS_E_FAILURE;
        }
        qdf_spin_unlock(&CE_state->ce_index_lock);

        return status;
}

/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
int
ce_completed_send_next_nolock(struct CE_state *CE_state,
                              void **per_CE_contextp,
                              void **per_transfer_contextp,
                              qdf_dma_addr_t *bufferp,
                              unsigned int *nbytesp,
                              unsigned int *transfer_idp,
                              unsigned int *sw_idx,
                              unsigned int *hw_idx,
                              uint32_t *toeplitz_hash_result)
{
        int status = QDF_STATUS_E_FAILURE;
        struct CE_ring_state *src_ring = CE_state->src_ring;
        uint32_t ctrl_addr = CE_state->ctrl_addr;
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int sw_index = src_ring->sw_index;
        unsigned int read_index;
        struct hif_softc *scn = CE_state->scn;

        if (src_ring->hw_index == sw_index) {
                /*
                 * The SW completion index has caught up with the cached
                 * version of the HW completion index.
                 * Update the cached HW completion index to see whether
                 * the SW has really caught up to the HW, or if the cached
                 * value of the HW index has become stale.
                 */
                if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
                        return QDF_STATUS_E_FAILURE;
                src_ring->hw_index =
                        CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
                if (Q_TARGET_ACCESS_END(scn) < 0)
                        return QDF_STATUS_E_FAILURE;
        }
        read_index = src_ring->hw_index;

        if (sw_idx)
                *sw_idx = sw_index;

        if (hw_idx)
                *hw_idx = read_index;

        if ((read_index != sw_index) && (read_index != 0xffffffff)) {
                struct CE_src_desc *shadow_base =
                        (struct CE_src_desc *)src_ring->shadow_base;
                struct CE_src_desc *shadow_src_desc =
                        CE_SRC_RING_TO_DESC(shadow_base, sw_index);
#ifdef QCA_WIFI_3_0
                struct CE_src_desc *src_ring_base =
                        (struct CE_src_desc *)src_ring->base_addr_owner_space;
                struct CE_src_desc *src_desc =
                        CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
#endif
                hif_record_ce_desc_event(scn, CE_state->id,
                                HIF_TX_DESC_COMPLETION,
                                (union ce_desc *) shadow_src_desc,
                                src_ring->per_transfer_context[sw_index],
                                sw_index);

                /* Return data from completed source descriptor */
                *bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
                *nbytesp = shadow_src_desc->nbytes;
                *transfer_idp = shadow_src_desc->meta_data;
#ifdef QCA_WIFI_3_0
                *toeplitz_hash_result = src_desc->toeplitz_hash_result;
#else
                *toeplitz_hash_result = 0;
#endif
                if (per_CE_contextp) {
                        *per_CE_contextp = CE_state->send_context;
                }

                ce_debug_cmplsn_context =
                        src_ring->per_transfer_context[sw_index];
                if (per_transfer_contextp) {
                        *per_transfer_contextp = ce_debug_cmplsn_context;
                }
                src_ring->per_transfer_context[sw_index] = 0;  /* sanity */

                /* Update sw_index */
                sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
                src_ring->sw_index = sw_index;
                status = QDF_STATUS_SUCCESS;
        }

        return status;
}

/* NB: Modeled after ce_completed_send_next */
QDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
                    void **per_CE_contextp,
                    void **per_transfer_contextp,
                    qdf_dma_addr_t *bufferp,
                    unsigned int *nbytesp,
                    unsigned int *transfer_idp,
                    uint32_t *toeplitz_hash_result)
{
        struct CE_state *CE_state;
        struct CE_ring_state *src_ring;
        unsigned int nentries_mask;
        unsigned int sw_index;
        unsigned int write_index;
        QDF_STATUS status;
        struct hif_softc *scn;

        CE_state = (struct CE_state *)copyeng;
        src_ring = CE_state->src_ring;
        if (!src_ring) {
                return QDF_STATUS_E_FAILURE;
        }

        scn = CE_state->scn;
        qdf_spin_lock(&CE_state->ce_index_lock);
        nentries_mask = src_ring->nentries_mask;
        sw_index = src_ring->sw_index;
        write_index = src_ring->write_index;

        if (write_index != sw_index) {
                struct CE_src_desc *src_ring_base =
                        (struct CE_src_desc *)src_ring->base_addr_owner_space;
                struct CE_src_desc *src_desc =
                        CE_SRC_RING_TO_DESC(src_ring_base, sw_index);

                /* Return data from completed source descriptor */
                *bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
                *nbytesp = src_desc->nbytes;
                *transfer_idp = src_desc->meta_data;
#ifdef QCA_WIFI_3_0
                *toeplitz_hash_result = src_desc->toeplitz_hash_result;
#else
                *toeplitz_hash_result = 0;
#endif

                if (per_CE_contextp) {
                        *per_CE_contextp = CE_state->send_context;
                }

                ce_debug_cnclsn_context =
                        src_ring->per_transfer_context[sw_index];
                if (per_transfer_contextp) {
                        *per_transfer_contextp = ce_debug_cnclsn_context;
                }
                src_ring->per_transfer_context[sw_index] = 0;  /* sanity */

                /* Update sw_index */
                sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
                src_ring->sw_index = sw_index;
                status = QDF_STATUS_SUCCESS;
        } else {
                status = QDF_STATUS_E_FAILURE;
        }
        qdf_spin_unlock(&CE_state->ce_index_lock);

        return status;
}

/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
#define CE_WM_SHFT 1

int
ce_completed_send_next(struct CE_handle *copyeng,
                       void **per_CE_contextp,
                       void **per_transfer_contextp,
                       qdf_dma_addr_t *bufferp,
                       unsigned int *nbytesp,
                       unsigned int *transfer_idp,
                       unsigned int *sw_idx,
                       unsigned int *hw_idx,
                       unsigned int *toeplitz_hash_result)
{
        struct CE_state *CE_state = (struct CE_state *)copyeng;
        int status;

        qdf_spin_lock_bh(&CE_state->ce_index_lock);
        status =
                ce_completed_send_next_nolock(CE_state, per_CE_contextp,
                                              per_transfer_contextp, bufferp,
                                              nbytesp, transfer_idp, sw_idx,
                                              hw_idx, toeplitz_hash_result);
        qdf_spin_unlock_bh(&CE_state->ce_index_lock);

        return status;
}

#ifdef ATH_11AC_TXCOMPACT
/* CE engine descriptor reap
 * Similar to ce_per_engine_service(); the only difference is that
 * ce_per_engine_service() does receive and reaping of completed
 * descriptors, while this function only handles reaping of Tx complete
 * descriptors.
 * The function is called from the threshold reap poll routine
 * hif_send_complete_check(), so it should not contain receive
 * functionality within it.
 */

void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
{
        void *CE_context;
        void *transfer_context;
        qdf_dma_addr_t buf;
        unsigned int nbytes;
        unsigned int id;
        unsigned int sw_idx, hw_idx;
        uint32_t toeplitz_hash_result;
        struct CE_state *CE_state = scn->ce_id_to_state[ce_id];

        if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
                return;

        hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
                        NULL, NULL, 0);

        /* Since this function is called from both user context and
         * tasklet context the spinlock has to lock the bottom halves.
         * This fix assumes that ATH_11AC_TXCOMPACT flag is always
         * enabled in TX polling mode.  If this is not the case, more
         * bottom half spin lock changes are needed.  Due to data path
         * performance concerns, after internal discussion we decided
         * to make the minimum change, i.e., only address the issue that
         * occurs in this function.  The possible negative effect of this
         * minimum change is that, in the future, if some other function
         * is also opened up for use from user context, those cases need
         * to be addressed by changing spin_lock to spin_lock_bh as well.
         */

        qdf_spin_lock_bh(&CE_state->ce_index_lock);

        if (CE_state->send_cb) {
                {
                        /* Pop completed send buffers and call the
                         * registered send callback for each
                         */
                        while (ce_completed_send_next_nolock
                                        (CE_state, &CE_context,
                                        &transfer_context, &buf,
                                        &nbytes, &id, &sw_idx, &hw_idx,
                                        &toeplitz_hash_result) ==
                                        QDF_STATUS_SUCCESS) {
                                if (ce_id != CE_HTT_H2T_MSG) {
                                        qdf_spin_unlock_bh(
                                                &CE_state->ce_index_lock);
                                        CE_state->send_cb(
                                                (struct CE_handle *)
                                                CE_state, CE_context,
                                                transfer_context, buf,
                                                nbytes, id, sw_idx, hw_idx,
                                                toeplitz_hash_result);
                                        qdf_spin_lock_bh(
                                                &CE_state->ce_index_lock);
                                } else {
                                        struct HIF_CE_pipe_info *pipe_info =
                                                (struct HIF_CE_pipe_info *)
                                                CE_context;

                                        qdf_spin_lock_bh(&pipe_info->
                                                completion_freeq_lock);
                                        pipe_info->num_sends_allowed++;
                                        qdf_spin_unlock_bh(&pipe_info->
                                                completion_freeq_lock);
                                }
                        }
                }
        }

        qdf_spin_unlock_bh(&CE_state->ce_index_lock);

        hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
                        NULL, NULL, 0);
        Q_TARGET_ACCESS_END(scn);
}

#endif /*ATH_11AC_TXCOMPACT */

Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001325/*
 1326 * Number of times to check for any pending Tx/Rx completion on
 1327 * a copy engine; this count should be big enough. Once we hit
 1328 * this threshold we will not check for any Tx/Rx completion in the
 1329 * same interrupt handling pass. Note that this threshold is only
 1330 * used for Rx interrupt processing; it can be used for Tx as well
 1331 * if we suspect an infinite loop in checking for pending Tx completions.
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001332 */
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001333#define CE_TXRX_COMP_CHECK_THRESHOLD 20
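
/*
 * A minimal sketch of the bounded re-check pattern this threshold supports;
 * example_bounded_recheck(), example_process_completions() and
 * example_work_pending() are hypothetical placeholders, the real loops are
 * in ce_per_engine_service() and ce_per_engine_service_fast() below.
 */
#if 0	/* illustration only, not compiled */
static void example_bounded_recheck(void)
{
	unsigned int more_comp_cnt = 0;

more_data:
	example_process_completions();
	if (example_work_pending()) {
		if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD)
			goto more_data;
		/* else give up and report a potential infinite loop */
	}
}
#endif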
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001334
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001335#ifdef WLAN_FEATURE_FASTPATH
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001336/**
1337 * ce_fastpath_rx_handle() - Updates write_index and calls fastpath msg handler
1338 * @ce_state: handle to copy engine state
1339 * @cmpl_msdus: Rx msdus
1340 * @num_cmpls: number of Rx msdus
1341 * @ctrl_addr: CE control address
1342 *
1343 * Return: None
1344 */
1345static void ce_fastpath_rx_handle(struct CE_state *ce_state,
1346 qdf_nbuf_t *cmpl_msdus, uint32_t num_cmpls,
1347 uint32_t ctrl_addr)
1348{
1349 struct hif_softc *scn = ce_state->scn;
1350 struct CE_ring_state *dest_ring = ce_state->dest_ring;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001351 uint32_t nentries_mask = dest_ring->nentries_mask;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001352 uint32_t write_index;
1353
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001354 (ce_state->fastpath_handler)(ce_state->context, cmpl_msdus, num_cmpls);
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001355
1356 /* Update Destination Ring Write Index */
1357 write_index = dest_ring->write_index;
1358 write_index = CE_RING_IDX_ADD(nentries_mask, write_index, num_cmpls);
1359 CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
1360 dest_ring->write_index = write_index;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001361}
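
/*
 * A minimal sketch of a handler with the calling convention used by
 * ce_fastpath_rx_handle() above, i.e. (context, msdu array, count).
 * example_fastpath_handler() and example_process_rx_msdu() are hypothetical
 * placeholders; the real handler is registered through
 * ce_state->fastpath_handler by the datapath layer.
 */
#if 0	/* illustration only, not compiled */
static void example_fastpath_handler(void *context, qdf_nbuf_t *cmpl_msdus,
				     uint32_t num_cmpls)
{
	uint32_t i;

	/* hand each completed Rx msdu to the upper-layer processing */
	for (i = 0; i < num_cmpls; i++)
		example_process_rx_msdu(context, cmpl_msdus[i]);
}
#endif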
1362
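/*
 * Number of Rx completions batched in cmpl_msdus[] before they are handed
 * to the fastpath message handler.
 */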
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001363#define MSG_FLUSH_NUM 6
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001364/**
1365 * ce_per_engine_service_fast() - CE handler routine to service fastpath messages
1366 * @scn: hif_context
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001367 * @ce_id: Copy engine ID
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001368 * 1) Go through the CE ring, and find the completions
 1369 * 2) For valid completions retrieve context (nbuf) from per_transfer_context[]
1370 * 3) Unmap buffer & accumulate in an array.
1371 * 4) Call message handler when array is full or when exiting the handler
1372 *
1373 * Return: void
1374 */
1375
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001376static void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001377{
1378 struct CE_state *ce_state = scn->ce_id_to_state[ce_id];
1379 struct CE_ring_state *dest_ring = ce_state->dest_ring;
1380 struct CE_dest_desc *dest_ring_base =
1381 (struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1382
1383 uint32_t nentries_mask = dest_ring->nentries_mask;
1384 uint32_t sw_index = dest_ring->sw_index;
1385 uint32_t nbytes;
1386 qdf_nbuf_t nbuf;
1387 uint32_t paddr_lo;
1388 struct CE_dest_desc *dest_desc;
1389 uint32_t ce_int_status = (1 << ce_id);
1390 qdf_nbuf_t cmpl_msdus[MSG_FLUSH_NUM];
1391 uint32_t ctrl_addr = ce_state->ctrl_addr;
1392 uint32_t nbuf_cmpl_idx = 0;
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001393 unsigned int more_comp_cnt = 0;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001394
1395more_data:
1396 if (ce_int_status == (1 << ce_id)) {
1397 for (;;) {
1398
1399 dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base,
1400 sw_index);
1401
1402 /*
1403 * The following 2 reads are from non-cached memory
1404 */
1405 nbytes = dest_desc->nbytes;
1406
1407 /* If completion is invalid, break */
1408 if (qdf_unlikely(nbytes == 0))
1409 break;
1410
1411
1412 /*
1413 * Build the nbuf list from valid completions
1414 */
1415 nbuf = dest_ring->per_transfer_context[sw_index];
1416
1417 /*
1418 * No lock is needed here, since this is the only thread
1419 * that accesses the sw_index
1420 */
1421 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1422
1423 /*
1424 * CAREFUL : Uncached write, but still less expensive,
1425 * since most modern caches use "write-combining" to
1426 * flush multiple cache-writes all at once.
1427 */
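			/*
			 * Zeroing nbytes marks this descriptor as consumed,
			 * so the nbytes == 0 check above treats the slot as
			 * "no completion" the next time around the ring.
			 */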
1428 dest_desc->nbytes = 0;
1429
1430 /*
 1431 * Per our understanding this is not required on our
 1432 * platform, since we are doing the same cache invalidation
 1433 * operation on the same buffer twice in succession,
 1434 * without any modification to this buffer by the CPU in
 1435 * between.
 1436 * However, this code with 2 syncs in succession has
 1437 * been undergoing some testing at a customer site,
 1438 * and has shown no problems so far. We would like to
 1439 * confirm with the customer that this line is really
 1440 * not required before we remove it
 1441 * completely.
1442 */
1443 paddr_lo = QDF_NBUF_CB_PADDR(nbuf);
1444
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001445 qdf_mem_dma_sync_single_for_cpu(scn->qdf_dev,
1446 paddr_lo,
1447 (skb_end_pointer(nbuf) - (nbuf)->data),
1448 DMA_FROM_DEVICE);
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001449 qdf_nbuf_put_tail(nbuf, nbytes);
1450
1451 qdf_assert_always(nbuf->data != NULL);
1452
1453 cmpl_msdus[nbuf_cmpl_idx++] = nbuf;
1454
1455 /*
 1456 * We are not posting the buffers back; instead
 1457 * we reuse the buffers.
1458 */
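			/*
			 * ce_index_lock is dropped across the handler call
			 * below so the fastpath message handler does not run
			 * with the CE index lock held.
			 */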
1459 if (nbuf_cmpl_idx == MSG_FLUSH_NUM) {
1460 qdf_spin_unlock(&ce_state->ce_index_lock);
1461 ce_fastpath_rx_handle(ce_state, cmpl_msdus,
1462 MSG_FLUSH_NUM, ctrl_addr);
1463 qdf_spin_lock(&ce_state->ce_index_lock);
1464 nbuf_cmpl_idx = 0;
1465 }
1466
1467 }
1468
1469 /*
1470 * If there are not enough completions to fill the array,
1471 * just call the message handler here
1472 */
1473 if (nbuf_cmpl_idx) {
1474 qdf_spin_unlock(&ce_state->ce_index_lock);
1475 ce_fastpath_rx_handle(ce_state, cmpl_msdus,
1476 nbuf_cmpl_idx, ctrl_addr);
1477 qdf_spin_lock(&ce_state->ce_index_lock);
1478 nbuf_cmpl_idx = 0;
1479 }
1480 qdf_atomic_set(&ce_state->rx_pending, 0);
1481 dest_ring->sw_index = sw_index;
1482
1483 CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1484 HOST_IS_COPY_COMPLETE_MASK);
1485 }
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001486 if (ce_recv_entries_done_nolock(scn, ce_state)) {
1487 if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1488 goto more_data;
1489 } else {
1490 HIF_ERROR("%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1491 __func__, nentries_mask,
1492 ce_state->dest_ring->sw_index,
1493 CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr));
1494 }
1495 }
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001496}
1497
1498#else
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001499static void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001500{
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001501}
1502#endif /* WLAN_FEATURE_FASTPATH */
1503
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001504/*
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001505 * Guts of interrupt handler for per-engine interrupts on a particular CE.
1506 *
1507 * Invokes registered callbacks for recv_complete,
1508 * send_complete, and watermarks.
1509 *
1510 * Returns: number of messages processed
1511 */
1512
Komal Seelam644263d2016-02-22 20:45:49 +05301513int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001514{
1515 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1516 uint32_t ctrl_addr = CE_state->ctrl_addr;
1517 void *CE_context;
1518 void *transfer_context;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301519 qdf_dma_addr_t buf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001520 unsigned int nbytes;
1521 unsigned int id;
1522 unsigned int flags;
1523 uint32_t CE_int_status;
1524 unsigned int more_comp_cnt = 0;
1525 unsigned int more_snd_comp_cnt = 0;
1526 unsigned int sw_idx, hw_idx;
1527 uint32_t toeplitz_hash_result;
Komal Seelambd7c51d2016-02-24 10:27:30 +05301528 uint32_t mode = hif_get_conparam(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001529
1530 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1531 HIF_ERROR("[premature rc=0]\n");
1532 return 0; /* no work done */
1533 }
1534
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301535 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001536
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001537 /*
 1538 * With the check below we make sure the CE we are handling is a
 1539 * datapath CE and fastpath is enabled.
1540 */
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001541 if (ce_is_fastpath_handler_registered(CE_state)) {
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001542 /* For datapath only Rx CEs */
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001543 ce_per_engine_service_fast(scn, CE_id);
1544 qdf_spin_unlock(&CE_state->ce_index_lock);
1545 return CE_state->receive_count;
1546 }
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001547
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001548 /* Clear force_break flag and re-initialize receive_count to 0 */
1549
1550 /* NAPI: scn variables- thread/multi-processing safety? */
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001551 CE_state->receive_count = 0;
Houston Hoffman18c7fc52015-09-02 11:44:42 -07001552 CE_state->force_break = 0;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001553more_completions:
1554 if (CE_state->recv_cb) {
1555
1556 /* Pop completed recv buffers and call
1557 * the registered recv callback for each
1558 */
1559 while (ce_completed_recv_next_nolock
1560 (CE_state, &CE_context, &transfer_context,
1561 &buf, &nbytes, &id, &flags) ==
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301562 QDF_STATUS_SUCCESS) {
1563 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001564 CE_state->recv_cb((struct CE_handle *)CE_state,
1565 CE_context, transfer_context, buf,
1566 nbytes, id, flags);
1567
1568 /*
1569 * EV #112693 -
1570 * [Peregrine][ES1][WB342][Win8x86][Performance]
1571 * BSoD_0x133 occurred in VHT80 UDP_DL
1572 * Break out DPC by force if number of loops in
1573 * hif_pci_ce_recv_data reaches MAX_NUM_OF_RECEIVES
 1574 * to avoid spending too much time in the
 1575 * DPC for each interrupt handled. Schedule another
 1576 * DPC to avoid data loss if a force-break action was
 1577 * taken. This currently applies to Windows OS
 1578 * only; Linux/Mac OS can extend it to their
 1579 * platforms if necessary.
1580 */
1581
1582 /* Break the receive processes by
1583 * force if force_break set up
1584 */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301585 if (qdf_unlikely(CE_state->force_break)) {
1586 qdf_atomic_set(&CE_state->rx_pending, 1);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001587 CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1588 HOST_IS_COPY_COMPLETE_MASK);
1589 if (Q_TARGET_ACCESS_END(scn) < 0)
1590 HIF_ERROR("<--[premature rc=%d]\n",
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001591 CE_state->receive_count);
1592 return CE_state->receive_count;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001593 }
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301594 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001595 }
1596 }
1597
1598 /*
 1599 * Attention: we may experience a potential infinite loop in the
 1600 * while loop below during a send stress test.
 1601 * Resolve it the same way as the receive case (refer to EV #112693).
1602 */
1603
1604 if (CE_state->send_cb) {
1605 /* Pop completed send buffers and call
1606 * the registered send callback for each
1607 */
1608
1609#ifdef ATH_11AC_TXCOMPACT
1610 while (ce_completed_send_next_nolock
1611 (CE_state, &CE_context,
1612 &transfer_context, &buf, &nbytes,
1613 &id, &sw_idx, &hw_idx,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301614 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001615
1616 if (CE_id != CE_HTT_H2T_MSG ||
Komal Seelambd7c51d2016-02-24 10:27:30 +05301617 WLAN_IS_EPPING_ENABLED(mode)) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301618 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001619 CE_state->send_cb((struct CE_handle *)CE_state,
1620 CE_context, transfer_context,
1621 buf, nbytes, id, sw_idx,
1622 hw_idx, toeplitz_hash_result);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301623 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001624 } else {
1625 struct HIF_CE_pipe_info *pipe_info =
1626 (struct HIF_CE_pipe_info *)CE_context;
1627
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301628 qdf_spin_lock(&pipe_info->
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001629 completion_freeq_lock);
1630 pipe_info->num_sends_allowed++;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301631 qdf_spin_unlock(&pipe_info->
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001632 completion_freeq_lock);
1633 }
1634 }
1635#else /*ATH_11AC_TXCOMPACT */
1636 while (ce_completed_send_next_nolock
1637 (CE_state, &CE_context,
1638 &transfer_context, &buf, &nbytes,
1639 &id, &sw_idx, &hw_idx,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301640 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1641 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001642 CE_state->send_cb((struct CE_handle *)CE_state,
1643 CE_context, transfer_context, buf,
1644 nbytes, id, sw_idx, hw_idx,
1645 toeplitz_hash_result);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301646 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001647 }
1648#endif /*ATH_11AC_TXCOMPACT */
1649 }
1650
1651more_watermarks:
1652 if (CE_state->misc_cbs) {
1653 CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
1654 if (CE_int_status & CE_WATERMARK_MASK) {
1655 if (CE_state->watermark_cb) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301656 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001657 /* Convert HW IS bits to software flags */
1658 flags =
1659 (CE_int_status & CE_WATERMARK_MASK) >>
1660 CE_WM_SHFT;
1661
1662 CE_state->
1663 watermark_cb((struct CE_handle *)CE_state,
1664 CE_state->wm_context, flags);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301665 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001666 }
1667 }
1668 }
1669
1670 /*
1671 * Clear the misc interrupts (watermark) that were handled above,
1672 * and that will be checked again below.
1673 * Clear and check for copy-complete interrupts again, just in case
1674 * more copy completions happened while the misc interrupts were being
1675 * handled.
1676 */
1677 CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1678 CE_WATERMARK_MASK |
1679 HOST_IS_COPY_COMPLETE_MASK);
1680
1681 /*
1682 * Now that per-engine interrupts are cleared, verify that
 1683 * no recv interrupts arrived while processing send interrupts,
 1684 * and no recv or send interrupts arrived while processing
 1685 * misc interrupts. Go back and check again. Keep checking until
1686 * we find no more events to process.
1687 */
1688 if (CE_state->recv_cb && ce_recv_entries_done_nolock(scn, CE_state)) {
Komal Seelambd7c51d2016-02-24 10:27:30 +05301689 if (WLAN_IS_EPPING_ENABLED(mode) ||
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001690 more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1691 goto more_completions;
1692 } else {
1693 HIF_ERROR(
1694 "%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1695 __func__, CE_state->dest_ring->nentries_mask,
1696 CE_state->dest_ring->sw_index,
1697 CE_DEST_RING_READ_IDX_GET(scn,
1698 CE_state->ctrl_addr));
1699 }
1700 }
1701
1702 if (CE_state->send_cb && ce_send_entries_done_nolock(scn, CE_state)) {
Komal Seelambd7c51d2016-02-24 10:27:30 +05301703 if (WLAN_IS_EPPING_ENABLED(mode) ||
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001704 more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1705 goto more_completions;
1706 } else {
1707 HIF_ERROR(
1708 "%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1709 __func__, CE_state->src_ring->nentries_mask,
1710 CE_state->src_ring->sw_index,
1711 CE_SRC_RING_READ_IDX_GET(scn,
1712 CE_state->ctrl_addr));
1713 }
1714 }
1715
1716 if (CE_state->misc_cbs) {
1717 CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
1718 if (CE_int_status & CE_WATERMARK_MASK) {
1719 if (CE_state->watermark_cb) {
1720 goto more_watermarks;
1721 }
1722 }
1723 }
1724
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301725 qdf_spin_unlock(&CE_state->ce_index_lock);
1726 qdf_atomic_set(&CE_state->rx_pending, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001727
1728 if (Q_TARGET_ACCESS_END(scn) < 0)
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001729 HIF_ERROR("<--[premature rc=%d]\n", CE_state->receive_count);
1730 return CE_state->receive_count;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001731}
1732
1733/*
1734 * Handler for per-engine interrupts on ALL active CEs.
1735 * This is used in cases where the system is sharing a
 1736 * single interrupt for all CEs
1737 */
1738
Komal Seelam644263d2016-02-22 20:45:49 +05301739void ce_per_engine_service_any(int irq, struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001740{
1741 int CE_id;
1742 uint32_t intr_summary;
1743
Houston Hoffmanbac94542016-03-14 21:11:59 -07001744 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1745 return;
1746
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301747 if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001748 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1749 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301750 if (qdf_atomic_read(&CE_state->rx_pending)) {
1751 qdf_atomic_set(&CE_state->rx_pending, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001752 ce_per_engine_service(scn, CE_id);
1753 }
1754 }
1755
Houston Hoffmanbac94542016-03-14 21:11:59 -07001756 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001757 return;
1758 }
1759
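	/*
	 * intr_summary holds one bit per copy engine; bit n set means CE n
	 * has an interrupt pending (see the (1 << CE_id) test below).
	 */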
1760 intr_summary = CE_INTERRUPT_SUMMARY(scn);
1761
1762 for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
1763 if (intr_summary & (1 << CE_id)) {
1764 intr_summary &= ~(1 << CE_id);
1765 } else {
1766 continue; /* no intr pending on this CE */
1767 }
1768
1769 ce_per_engine_service(scn, CE_id);
1770 }
1771
Houston Hoffmanbac94542016-03-14 21:11:59 -07001772 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001773}
1774
1775/*
1776 * Adjust interrupts for the copy complete handler.
1777 * If it's needed for either send or recv, then unmask
1778 * this interrupt; otherwise, mask it.
1779 *
1780 * Called with target_lock held.
1781 */
1782static void
1783ce_per_engine_handler_adjust(struct CE_state *CE_state,
1784 int disable_copy_compl_intr)
1785{
1786 uint32_t ctrl_addr = CE_state->ctrl_addr;
Komal Seelam644263d2016-02-22 20:45:49 +05301787 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001788
1789 CE_state->disable_copy_compl_intr = disable_copy_compl_intr;
Houston Hoffmanbac94542016-03-14 21:11:59 -07001790
1791 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1792 return;
1793
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001794 if ((!disable_copy_compl_intr) &&
1795 (CE_state->send_cb || CE_state->recv_cb)) {
1796 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1797 } else {
1798 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1799 }
1800
1801 if (CE_state->watermark_cb) {
1802 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1803 } else {
1804 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1805 }
Houston Hoffmanbac94542016-03-14 21:11:59 -07001806 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001807}
1808
1809/* Iterate the CE_state list and disable the compl interrupt
1810 * if it has been registered already.
1811 */
Komal Seelam644263d2016-02-22 20:45:49 +05301812void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001813{
1814 int CE_id;
1815
Houston Hoffmanbac94542016-03-14 21:11:59 -07001816 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1817 return;
1818
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001819 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1820 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1821 uint32_t ctrl_addr = CE_state->ctrl_addr;
1822
1823 /* if the interrupt is currently enabled, disable it */
1824 if (!CE_state->disable_copy_compl_intr
1825 && (CE_state->send_cb || CE_state->recv_cb)) {
1826 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1827 }
1828
1829 if (CE_state->watermark_cb) {
1830 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1831 }
1832 }
Houston Hoffmanbac94542016-03-14 21:11:59 -07001833 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001834}
1835
Komal Seelam644263d2016-02-22 20:45:49 +05301836void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001837{
1838 int CE_id;
1839
Houston Hoffmanbac94542016-03-14 21:11:59 -07001840 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1841 return;
1842
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001843 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1844 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1845 uint32_t ctrl_addr = CE_state->ctrl_addr;
1846
1847 /*
1848 * If the CE is supposed to have copy complete interrupts
 1849 * enabled (i.e. there is a callback registered, and the
1850 * "disable" flag is not set), then re-enable the interrupt.
1851 */
1852 if (!CE_state->disable_copy_compl_intr
1853 && (CE_state->send_cb || CE_state->recv_cb)) {
1854 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1855 }
1856
1857 if (CE_state->watermark_cb) {
1858 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1859 }
1860 }
Houston Hoffmanbac94542016-03-14 21:11:59 -07001861 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001862}
1863
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001864/**
1865 * ce_send_cb_register(): register completion handler
1866 * @copyeng: CE_state representing the ce we are adding the behavior to
1867 * @fn_ptr: callback that the ce should use when processing tx completions
 1868 * @disable_interrupts: whether completion interrupts should be disabled
1869 *
1870 * Caller should guarantee that no transactions are in progress before
1871 * switching the callback function.
1872 *
1873 * Registers the send context before the fn pointer so that if the cb is valid
1874 * the context should be valid.
1875 *
1876 * Beware that currently this function will enable completion interrupts.
1877 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001878void
1879ce_send_cb_register(struct CE_handle *copyeng,
1880 ce_send_cb fn_ptr,
1881 void *ce_send_context, int disable_interrupts)
1882{
1883 struct CE_state *CE_state = (struct CE_state *)copyeng;
1884
Sanjay Devnani9ce15772015-11-12 14:08:57 -08001885 if (CE_state == NULL) {
1886 pr_err("%s: Error CE state = NULL\n", __func__);
1887 return;
1888 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001889 CE_state->send_context = ce_send_context;
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001890 CE_state->send_cb = fn_ptr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001891 ce_per_engine_handler_adjust(CE_state, disable_interrupts);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001892}
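
/*
 * A minimal usage sketch for the registration call above;
 * example_send_done() and example_register_send_cb() are hypothetical
 * placeholders, the real callbacks are registered by the upper HIF/HTC
 * layers.
 */
#if 0	/* illustration only, not compiled */
static void example_send_done(struct CE_handle *copyeng, void *ce_context,
			      void *transfer_context, qdf_dma_addr_t buf,
			      unsigned int nbytes, unsigned int id,
			      unsigned int sw_index, unsigned int hw_index,
			      uint32_t toeplitz_hash_result)
{
	/* reclaim transfer_context, e.g. free or recycle the sent nbuf */
}

static void example_register_send_cb(struct CE_handle *copyeng)
{
	/* last argument 0: leave the copy-complete interrupt enabled */
	ce_send_cb_register(copyeng, example_send_done, NULL, 0);
}
#endif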
1893
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001894/**
1895 * ce_recv_cb_register(): register completion handler
1896 * @copyeng: CE_state representing the ce we are adding the behavior to
1897 * @fn_ptr: callback that the ce should use when processing rx completions
 1898 * @disable_interrupts: whether completion interrupts should be disabled
1899 *
1900 * Registers the send context before the fn pointer so that if the cb is valid
1901 * the context should be valid.
1902 *
1903 * Caller should guarantee that no transactions are in progress before
1904 * switching the callback function.
1905 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001906void
1907ce_recv_cb_register(struct CE_handle *copyeng,
1908 CE_recv_cb fn_ptr,
1909 void *CE_recv_context, int disable_interrupts)
1910{
1911 struct CE_state *CE_state = (struct CE_state *)copyeng;
1912
Sanjay Devnani9ce15772015-11-12 14:08:57 -08001913 if (CE_state == NULL) {
1914 pr_err("%s: ERROR CE state = NULL\n", __func__);
1915 return;
1916 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001917 CE_state->recv_context = CE_recv_context;
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001918 CE_state->recv_cb = fn_ptr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001919 ce_per_engine_handler_adjust(CE_state, disable_interrupts);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001920}
1921
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001922/**
1923 * ce_watermark_cb_register(): register completion handler
1924 * @copyeng: CE_state representing the ce we are adding the behavior to
1925 * @fn_ptr: callback that the ce should use when processing watermark events
1926 *
1927 * Caller should guarantee that no watermark events are being processed before
1928 * switching the callback function.
1929 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001930void
1931ce_watermark_cb_register(struct CE_handle *copyeng,
1932 CE_watermark_cb fn_ptr, void *CE_wm_context)
1933{
1934 struct CE_state *CE_state = (struct CE_state *)copyeng;
1935
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001936 CE_state->watermark_cb = fn_ptr;
1937 CE_state->wm_context = CE_wm_context;
1938 ce_per_engine_handler_adjust(CE_state, 0);
1939 if (fn_ptr) {
1940 CE_state->misc_cbs = 1;
1941 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001942}
1943
1944#ifdef WLAN_FEATURE_FASTPATH
1945/**
 1946 * ce_pkt_dl_len_set(): set the HTT packet download length
1947 * @hif_sc: HIF context
1948 * @pkt_download_len: download length
1949 *
1950 * Return: None
1951 */
1952void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
1953{
Komal Seelam644263d2016-02-22 20:45:49 +05301954 struct hif_softc *sc = (struct hif_softc *)(hif_sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001955 struct CE_state *ce_state = sc->ce_id_to_state[CE_HTT_H2T_MSG];
1956
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301957 qdf_assert_always(ce_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001958
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001959 ce_state->download_len = pkt_download_len;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001960
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301961 qdf_print("%s CE %d Pkt download length %d", __func__,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001962 ce_state->id, ce_state->download_len);
1963}
1964#else
1965void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
1966{
1967}
1968#endif /* WLAN_FEATURE_FASTPATH */
1969
Komal Seelam644263d2016-02-22 20:45:49 +05301970bool ce_get_rx_pending(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001971{
1972 int CE_id;
1973
1974 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1975 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301976 if (qdf_atomic_read(&CE_state->rx_pending))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001977 return true;
1978 }
1979
1980 return false;
1981}
1982
1983/**
1984 * ce_check_rx_pending() - ce_check_rx_pending
Houston Hoffmaneb2516c2016-04-01 12:53:50 -07001985 * @CE_state: context of the copy engine to check
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001986 *
Houston Hoffmaneb2516c2016-04-01 12:53:50 -07001987 * Return: true if the per_engine_service
1988 * didn't process all the rx descriptors.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001989 */
Houston Hoffmaneb2516c2016-04-01 12:53:50 -07001990bool ce_check_rx_pending(struct CE_state *CE_state)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001991{
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301992 if (qdf_atomic_read(&CE_state->rx_pending))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001993 return true;
1994 else
1995 return false;
1996}
Houston Hoffman8ed92e52015-09-02 14:49:48 -07001997
1998/**
1999 * ce_enable_msi(): write the msi configuration to the target
2000 * @scn: hif context
 2001 * @CE_id: which copy engine will be configured for MSI interrupts
2002 * @msi_addr_lo: Hardware will write to this address to generate an interrupt
2003 * @msi_addr_hi: Hardware will write to this address to generate an interrupt
2004 * @msi_data: Hardware will write this data to generate an interrupt
2005 *
 2006 * This should be done during the initialization sequence, so no locking is needed.
2007 */
Komal Seelam644263d2016-02-22 20:45:49 +05302008void ce_enable_msi(struct hif_softc *scn, unsigned int CE_id,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002009 uint32_t msi_addr_lo, uint32_t msi_addr_hi,
2010 uint32_t msi_data)
2011{
2012#ifdef WLAN_ENABLE_QCA6180
2013 struct CE_state *CE_state;
2014 A_target_id_t targid;
2015 u_int32_t ctrl_addr;
2016 uint32_t tmp;
2017
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002018 CE_state = scn->ce_id_to_state[CE_id];
2019 if (!CE_state) {
2020 HIF_ERROR("%s: error - CE_state = NULL", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002021 return;
2022 }
 2023	targid = TARGID(scn);
2024 ctrl_addr = CE_state->ctrl_addr;
2025 CE_MSI_ADDR_LOW_SET(scn, ctrl_addr, msi_addr_lo);
2026 CE_MSI_ADDR_HIGH_SET(scn, ctrl_addr, msi_addr_hi);
2027 CE_MSI_DATA_SET(scn, ctrl_addr, msi_data);
2028 tmp = CE_CTRL_REGISTER1_GET(scn, ctrl_addr);
2029 tmp |= (1 << CE_MSI_ENABLE_BIT);
2030 CE_CTRL_REGISTER1_SET(scn, ctrl_addr, tmp);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002031#endif
2032}
2033
2034#ifdef IPA_OFFLOAD
Leo Changd85f78d2015-11-13 10:55:34 -08002035/**
2036 * ce_ipa_get_resource() - get uc resource on copyengine
2037 * @ce: copyengine context
2038 * @ce_sr_base_paddr: copyengine source ring base physical address
2039 * @ce_sr_ring_size: copyengine source ring size
2040 * @ce_reg_paddr: copyengine register physical address
2041 *
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002042 * The copy engine should release these resources to the micro controller.
 2043 * The micro controller needs:
Leo Changd85f78d2015-11-13 10:55:34 -08002044 * - Copy engine source descriptor base address
2045 * - Copy engine source descriptor size
 2046 * - PCI BAR address to access the copy engine register
2047 *
2048 * Return: None
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002049 */
2050void ce_ipa_get_resource(struct CE_handle *ce,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302051 qdf_dma_addr_t *ce_sr_base_paddr,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002052 uint32_t *ce_sr_ring_size,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302053 qdf_dma_addr_t *ce_reg_paddr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002054{
2055 struct CE_state *CE_state = (struct CE_state *)ce;
2056 uint32_t ring_loop;
2057 struct CE_src_desc *ce_desc;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302058 qdf_dma_addr_t phy_mem_base;
Komal Seelam644263d2016-02-22 20:45:49 +05302059 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002060
2061 if (CE_RUNNING != CE_state->state) {
2062 *ce_sr_base_paddr = 0;
2063 *ce_sr_ring_size = 0;
2064 return;
2065 }
2066
2067 /* Update default value for descriptor */
2068 for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
2069 ring_loop++) {
2070 ce_desc = (struct CE_src_desc *)
2071 ((char *)CE_state->src_ring->base_addr_owner_space +
2072 ring_loop * (sizeof(struct CE_src_desc)));
2073 CE_IPA_RING_INIT(ce_desc);
2074 }
2075
2076 /* Get BAR address */
2077 hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
2078
Leo Changd85f78d2015-11-13 10:55:34 -08002079 *ce_sr_base_paddr = CE_state->src_ring->base_addr_CE_space;
2080 *ce_sr_ring_size = (uint32_t) (CE_state->src_ring->nentries *
2081 sizeof(struct CE_src_desc));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002082 *ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
2083 SR_WR_INDEX_ADDRESS;
2084 return;
2085}
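
/*
 * A minimal usage sketch for ce_ipa_get_resource(); example_setup_uc_ce()
 * is a hypothetical placeholder, the real caller lives in the IPA offload
 * glue code.
 */
#if 0	/* illustration only, not compiled */
static void example_setup_uc_ce(struct CE_handle *ce)
{
	qdf_dma_addr_t sr_base_paddr, reg_paddr;
	uint32_t sr_ring_size;

	ce_ipa_get_resource(ce, &sr_base_paddr, &sr_ring_size, &reg_paddr);
	/* pass sr_base_paddr, sr_ring_size and reg_paddr to the uC */
}
#endif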
2086#endif /* IPA_OFFLOAD */
2087