/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include "hif.h"
#include "hif_io32.h"
#include "ce_api.h"
#include "ce_main.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "qdf_lock.h"
#include "regtable.h"
#include "hif_main.h"
#include "hif_debug.h"

#ifdef IPA_OFFLOAD
#ifdef QCA_WIFI_3_0
#define CE_IPA_RING_INIT(ce_desc) \
	do { \
		ce_desc->gather = 0; \
		ce_desc->enable_11h = 0; \
		ce_desc->meta_data_low = 0; \
		ce_desc->packet_result_offset = 64; \
		ce_desc->toeplitz_hash_enable = 0; \
		ce_desc->addr_y_search_disable = 0; \
		ce_desc->addr_x_search_disable = 0; \
		ce_desc->misc_int_disable = 0; \
		ce_desc->target_int_disable = 0; \
		ce_desc->host_int_disable = 0; \
		ce_desc->dest_byte_swap = 0; \
		ce_desc->byte_swap = 0; \
		ce_desc->type = 2; \
		ce_desc->tx_classify = 1; \
		ce_desc->buffer_addr_hi = 0; \
		ce_desc->meta_data = 0; \
		ce_desc->nbytes = 128; \
	} while (0)
#else
#define CE_IPA_RING_INIT(ce_desc) \
	do { \
		ce_desc->byte_swap = 0; \
		ce_desc->nbytes = 60; \
		ce_desc->gather = 0; \
	} while (0)
#endif /* QCA_WIFI_3_0 */
#endif /* IPA_OFFLOAD */

static int war1_allow_sleep;
/* io32 write workaround */
static int hif_ce_war1;

#ifdef CONFIG_SLUB_DEBUG_ON

/**
 * struct hif_ce_desc_event - structure for detailing a ce event
 * @type: what the event was
 * @time: when it happened
 * @descriptor: descriptor enqueued or dequeued
 * @memory: virtual address that was used
 * @index: location of the descriptor in the ce ring
 */
struct hif_ce_desc_event {
	uint16_t index;
	enum hif_ce_event_type type;
	uint64_t time;
	union ce_desc descriptor;
	void *memory;
};

/* max history to record per copy engine */
#define HIF_CE_HISTORY_MAX 512
qdf_atomic_t hif_ce_desc_history_index[CE_COUNT_MAX];
struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];


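/*
 * Illustrative sketch (kept out of the build) of walking one CE's history
 * ring, e.g. from a debug dump routine.  The debug_print() helper used
 * below is a placeholder, not an API defined in this file.
 */
#if 0
static void example_dump_ce_history(int ce_id)
{
	int i;

	for (i = 0; i < HIF_CE_HISTORY_MAX; i++) {
		struct hif_ce_desc_event *event =
			&hif_ce_desc_history[ce_id][i];

		if (event->time == 0)
			continue;	/* slot never written */

		debug_print("ce%d[%u] type %d time %llu vaddr %p",
			    ce_id, event->index, event->type,
			    (unsigned long long)event->time, event->memory);
	}
}
#endif
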
/**
 * get_next_record_index() - get the next record index
 * @table_index: atomic index variable to increment
 * @array_size: array size of the circular buffer
 *
 * Increment the atomic index and reserve the value.
 * Takes care of buffer wrap.
 * Guaranteed to be thread safe as long as fewer than array_size contexts
 * try to access the array. If there are more than array_size contexts
 * trying to access the array, full locking of the recording process would
 * be needed to have sane logging.
 */
static int get_next_record_index(qdf_atomic_t *table_index, int array_size)
{
	int record_index = qdf_atomic_inc_return(table_index);
	if (record_index == array_size)
		qdf_atomic_sub(array_size, table_index);

	while (record_index >= array_size)
		record_index -= array_size;
	return record_index;
}

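/*
 * Worked example of the wrap-around above, with HIF_CE_HISTORY_MAX == 512:
 * when the shared counter increments from 511 to 512, only the caller that
 * observes exactly 512 subtracts 512 back from the shared counter, so
 * concurrent callers cannot double-subtract.  Every caller then reduces its
 * private copy modulo the array size, so a reserved value of 512 maps back
 * to slot 0 and 513 maps to slot 1.
 */
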
/**
 * hif_record_ce_desc_event() - record ce descriptor events
 * @scn: hif_softc
 * @ce_id: which ce is the event occurring on
 * @type: what happened
 * @descriptor: pointer to the descriptor posted/completed
 * @memory: virtual address of buffer related to the descriptor
 * @index: index that the descriptor was/will be at.
 */
void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
				enum hif_ce_event_type type,
				union ce_desc *descriptor,
				void *memory, int index)
{
	int record_index = get_next_record_index(
			&hif_ce_desc_history_index[ce_id], HIF_CE_HISTORY_MAX);

	struct hif_ce_desc_event *event =
		&hif_ce_desc_history[ce_id][record_index];
	event->type = type;
	event->time = qdf_get_monotonic_boottime();

	if (descriptor != NULL)
		event->descriptor = *descriptor;
	else
		memset(&event->descriptor, 0, sizeof(union ce_desc));
	event->memory = memory;
	event->index = index;
}

/**
 * ce_init_ce_desc_event_log() - initialize the ce event log
 * @ce_id: copy engine id for which we are initializing the log
 * @size: size of array to dedicate
 *
 * Currently the passed size is ignored in favor of a precompiled value.
 */
void ce_init_ce_desc_event_log(int ce_id, int size)
{
	qdf_atomic_init(&hif_ce_desc_history_index[ce_id]);
}
#else
void hif_record_ce_desc_event(struct hif_softc *scn,
		int ce_id, enum hif_ce_event_type type,
		union ce_desc *descriptor, void *memory,
		int index)
{
}

inline void ce_init_ce_desc_event_log(int ce_id, int size)
{
}
#endif

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem -- than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 * The Target TX hash result is returned as toeplitz_hash_result.
 */

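/*
 * Illustrative sketch (kept out of the build) of how a HIF-level caller
 * typically drives one copy engine with the per-transfer contexts described
 * above.  The handle, buffers and transfer id passed in are assumed to have
 * been set up elsewhere; none of the names below are defined in this file.
 */
#if 0
static void example_ce_usage(struct CE_handle *ce_hdl,
			     void *rx_nbuf, qdf_dma_addr_t rx_paddr,
			     void *tx_nbuf, qdf_dma_addr_t tx_paddr,
			     uint32_t tx_len, uint32_t transfer_id)
{
	void *xfer_ctx = NULL;
	qdf_dma_addr_t buf;
	unsigned int nbytes, id, sw_idx, hw_idx, hash;

	/* Destination side: keep anonymous receive buffers posted. */
	ce_recv_buf_enqueue(ce_hdl, rx_nbuf, rx_paddr);

	/* Source side: post a single buffer for transmission. */
	ce_send(ce_hdl, tx_nbuf, tx_paddr, tx_len, transfer_id,
		0 /* flags */, 0 /* user_flags */);

	/* Later (e.g. from the CE tasklet), reap completed sends. */
	while (ce_completed_send_next(ce_hdl, NULL, &xfer_ctx, &buf, &nbytes,
				      &id, &sw_idx, &hw_idx, &hash) ==
	       QDF_STATUS_SUCCESS) {
		/* xfer_ctx is echoed back exactly as passed to ce_send(). */
	}
}
#endif
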
/*
 * Guts of ce_send, used by both ce_send and ce_sendlist_send.
 * The caller takes responsibility for any needed locking.
 */
int
ce_completed_send_next_nolock(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      qdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *sw_idx, unsigned int *hw_idx,
			      uint32_t *toeplitz_hash_result);

void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
				   u32 ctrl_addr, unsigned int write_index)
{
	if (hif_ce_war1) {
		void __iomem *indicator_addr;

		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;

		if (!war1_allow_sleep
		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
			hif_write32_mb(indicator_addr,
				       (CDC_WAR_MAGIC_STR | write_index));
		} else {
			unsigned long irq_flags;
			local_irq_save(irq_flags);
			hif_write32_mb(indicator_addr, 1);

			/*
			 * PCIE write waits for ACK in IPQ8K, there is no
			 * need to read back value.
			 */
			(void)hif_read32_mb(indicator_addr);
			(void)hif_read32_mb(indicator_addr); /* conservative */

			CE_SRC_RING_WRITE_IDX_SET(scn,
						  ctrl_addr, write_index);

			hif_write32_mb(indicator_addr, 0);
			local_irq_restore(irq_flags);
		}
	} else
		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
}
263
264int
265ce_send_nolock(struct CE_handle *copyeng,
266 void *per_transfer_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530267 qdf_dma_addr_t buffer,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800268 uint32_t nbytes,
269 uint32_t transfer_id,
270 uint32_t flags,
271 uint32_t user_flags)
272{
273 int status;
274 struct CE_state *CE_state = (struct CE_state *)copyeng;
275 struct CE_ring_state *src_ring = CE_state->src_ring;
276 uint32_t ctrl_addr = CE_state->ctrl_addr;
277 unsigned int nentries_mask = src_ring->nentries_mask;
278 unsigned int sw_index = src_ring->sw_index;
279 unsigned int write_index = src_ring->write_index;
280 uint64_t dma_addr = buffer;
Komal Seelam644263d2016-02-22 20:45:49 +0530281 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800282
Houston Hoffman2c32cf62016-03-14 21:12:00 -0700283 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
Houston Hoffman987ab442016-03-14 21:12:02 -0700284 return QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800285 if (unlikely(CE_RING_DELTA(nentries_mask,
286 write_index, sw_index - 1) <= 0)) {
287 OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
Houston Hoffman987ab442016-03-14 21:12:02 -0700288 Q_TARGET_ACCESS_END(scn);
289 return QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800290 }
291 {
Houston Hoffman68e837e2015-12-04 12:57:24 -0800292 enum hif_ce_event_type event_type = HIF_TX_GATHER_DESC_POST;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800293 struct CE_src_desc *src_ring_base =
294 (struct CE_src_desc *)src_ring->base_addr_owner_space;
295 struct CE_src_desc *shadow_base =
296 (struct CE_src_desc *)src_ring->shadow_base;
297 struct CE_src_desc *src_desc =
298 CE_SRC_RING_TO_DESC(src_ring_base, write_index);
299 struct CE_src_desc *shadow_src_desc =
300 CE_SRC_RING_TO_DESC(shadow_base, write_index);
301
302 /* Update low 32 bits source descriptor address */
303 shadow_src_desc->buffer_addr =
304 (uint32_t)(dma_addr & 0xFFFFFFFF);
305#ifdef QCA_WIFI_3_0
306 shadow_src_desc->buffer_addr_hi =
307 (uint32_t)((dma_addr >> 32) & 0x1F);
308 user_flags |= shadow_src_desc->buffer_addr_hi;
309 memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
310 sizeof(uint32_t));
311#endif
312 shadow_src_desc->meta_data = transfer_id;
313
314 /*
315 * Set the swap bit if:
316 * typical sends on this CE are swapped (host is big-endian)
317 * and this send doesn't disable the swapping
318 * (data is not bytestream)
319 */
320 shadow_src_desc->byte_swap =
321 (((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
322 != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
323 shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
324 shadow_src_desc->nbytes = nbytes;
325
326 *src_desc = *shadow_src_desc;
327
328 src_ring->per_transfer_context[write_index] =
329 per_transfer_context;
330
331 /* Update Source Ring Write Index */
332 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
333
334 /* WORKAROUND */
335 if (!shadow_src_desc->gather) {
Houston Hoffman68e837e2015-12-04 12:57:24 -0800336 event_type = HIF_TX_DESC_POST;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800337 war_ce_src_ring_write_idx_set(scn, ctrl_addr,
338 write_index);
339 }
340
		/* src_ring->write_index hasn't been updated even though
		 * the register has already been written to.
		 */
Komal Seelambd7c51d2016-02-24 10:27:30 +0530344 hif_record_ce_desc_event(scn, CE_state->id, event_type,
Houston Hoffman68e837e2015-12-04 12:57:24 -0800345 (union ce_desc *) shadow_src_desc, per_transfer_context,
346 src_ring->write_index);
347
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800348 src_ring->write_index = write_index;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530349 status = QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800350 }
Houston Hoffman987ab442016-03-14 21:12:02 -0700351 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800352 return status;
353}
354
355int
356ce_send(struct CE_handle *copyeng,
357 void *per_transfer_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530358 qdf_dma_addr_t buffer,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800359 uint32_t nbytes,
360 uint32_t transfer_id,
361 uint32_t flags,
362 uint32_t user_flag)
363{
364 struct CE_state *CE_state = (struct CE_state *)copyeng;
365 int status;
366
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530367 qdf_spin_lock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800368 status = ce_send_nolock(copyeng, per_transfer_context, buffer, nbytes,
369 transfer_id, flags, user_flag);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530370 qdf_spin_unlock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800371
372 return status;
373}
374
375unsigned int ce_sendlist_sizeof(void)
376{
377 return sizeof(struct ce_sendlist);
378}
379
380void ce_sendlist_init(struct ce_sendlist *sendlist)
381{
382 struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
383 sl->num_items = 0;
384}
385
386int
387ce_sendlist_buf_add(struct ce_sendlist *sendlist,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530388 qdf_dma_addr_t buffer,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800389 uint32_t nbytes,
390 uint32_t flags,
391 uint32_t user_flags)
392{
393 struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
394 unsigned int num_items = sl->num_items;
395 struct ce_sendlist_item *item;
396
397 if (num_items >= CE_SENDLIST_ITEMS_MAX) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530398 QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
399 return QDF_STATUS_E_RESOURCES;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800400 }
401
402 item = &sl->item[num_items];
403 item->send_type = CE_SIMPLE_BUFFER_TYPE;
404 item->data = buffer;
405 item->u.nbytes = nbytes;
406 item->flags = flags;
407 item->user_flags = user_flags;
408 sl->num_items = num_items + 1;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530409 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800410}
411
412int
413ce_sendlist_send(struct CE_handle *copyeng,
414 void *per_transfer_context,
415 struct ce_sendlist *sendlist, unsigned int transfer_id)
416{
417 int status = -ENOMEM;
418 struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
419 struct CE_state *CE_state = (struct CE_state *)copyeng;
420 struct CE_ring_state *src_ring = CE_state->src_ring;
421 unsigned int nentries_mask = src_ring->nentries_mask;
422 unsigned int num_items = sl->num_items;
423 unsigned int sw_index;
424 unsigned int write_index;
425
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530426 QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800427
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530428 qdf_spin_lock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800429 sw_index = src_ring->sw_index;
430 write_index = src_ring->write_index;
431
432 if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
433 num_items) {
434 struct ce_sendlist_item *item;
435 int i;
436
437 /* handle all but the last item uniformly */
438 for (i = 0; i < num_items - 1; i++) {
439 item = &sl->item[i];
440 /* TBDXXX: Support extensible sendlist_types? */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530441 QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800442 status = ce_send_nolock(copyeng, CE_SENDLIST_ITEM_CTXT,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530443 (qdf_dma_addr_t) item->data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800444 item->u.nbytes, transfer_id,
445 item->flags | CE_SEND_FLAG_GATHER,
446 item->user_flags);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530447 QDF_ASSERT(status == QDF_STATUS_SUCCESS);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800448 }
449 /* provide valid context pointer for final item */
450 item = &sl->item[i];
451 /* TBDXXX: Support extensible sendlist_types? */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530452 QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800453 status = ce_send_nolock(copyeng, per_transfer_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530454 (qdf_dma_addr_t) item->data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800455 item->u.nbytes,
456 transfer_id, item->flags,
457 item->user_flags);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530458 QDF_ASSERT(status == QDF_STATUS_SUCCESS);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +0530459 QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
460 QDF_NBUF_TX_PKT_CE);
461 DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530462 QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +0530463 (uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
464 sizeof(((qdf_nbuf_t)per_transfer_context)->data)));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800465 } else {
466 /*
467 * Probably not worth the additional complexity to support
468 * partial sends with continuation or notification. We expect
469 * to use large rings and small sendlists. If we can't handle
470 * the entire request at once, punt it back to the caller.
471 */
472 }
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530473 qdf_spin_unlock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800474
475 return status;
476}
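
/*
 * Illustrative sketch (kept out of the build) of the sendlist path above:
 * two fragments are gathered by hardware into a single destination buffer
 * and complete with a single interrupt.  The buffers, lengths and transfer
 * id below are placeholders supplied by the caller.
 */
#if 0
static int example_gather_send(struct CE_handle *ce_hdl,
			       qdf_dma_addr_t hdr_paddr, uint32_t hdr_len,
			       qdf_dma_addr_t payload_paddr,
			       uint32_t payload_len, void *tx_ctx)
{
	struct ce_sendlist sendlist;

	ce_sendlist_init(&sendlist);
	ce_sendlist_buf_add(&sendlist, hdr_paddr, hdr_len, 0, 0);
	ce_sendlist_buf_add(&sendlist, payload_paddr, payload_len, 0, 0);

	/* All items except the last are posted with CE_SEND_FLAG_GATHER. */
	return ce_sendlist_send(ce_hdl, tx_ctx, &sendlist, 0 /* transfer_id */);
}
#endif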
477
478#ifdef WLAN_FEATURE_FASTPATH
479#ifdef QCA_WIFI_3_0
480static inline void
481ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
482 uint64_t dma_addr,
483 uint32_t user_flags)
484{
485 shadow_src_desc->buffer_addr_hi =
486 (uint32_t)((dma_addr >> 32) & 0x1F);
487 user_flags |= shadow_src_desc->buffer_addr_hi;
488 memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
489 sizeof(uint32_t));
490}
491#else
492static inline void
493ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
494 uint64_t dma_addr,
495 uint32_t user_flags)
496{
497}
498#endif
499
Houston Hoffman735bb8d2016-04-27 18:25:20 -0700500#define SLOTS_PER_DATAPATH_TX 2
501
/**
 * ce_send_fast() - CE layer Tx buffer posting function
 * @copyeng: copy engine handle
 * @msdus: array of msdus to be sent
 * @num_msdus: number of msdus in the array
 * @transfer_id: transfer_id
 *
 * Assumption: called with an array of MSDUs.
 * Function:
 * For each msdu in the array
 * 1. Check the number of available entries
 * 2. Create src ring entries (allocated in consistent memory)
 * 3. Write the index to h/w
 *
 * Return: number of packets that could be sent
 */
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +0530518int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t *msdus,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800519 unsigned int num_msdus, unsigned int transfer_id)
520{
521 struct CE_state *ce_state = (struct CE_state *)copyeng;
Komal Seelam644263d2016-02-22 20:45:49 +0530522 struct hif_softc *scn = ce_state->scn;
Komal Seelam5584a7c2016-02-24 19:22:48 +0530523 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800524 struct CE_ring_state *src_ring = ce_state->src_ring;
525 u_int32_t ctrl_addr = ce_state->ctrl_addr;
526 unsigned int nentries_mask = src_ring->nentries_mask;
527 unsigned int write_index;
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -0700528 unsigned int sw_index;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800529 unsigned int frag_len;
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +0530530 qdf_nbuf_t msdu;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800531 int i;
532 uint64_t dma_addr;
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -0700533 uint32_t user_flags;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800534
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530535 qdf_spin_lock_bh(&ce_state->ce_index_lock);
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -0700536 Q_TARGET_ACCESS_BEGIN(scn);
537
538 src_ring->sw_index = CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800539 write_index = src_ring->write_index;
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -0700540 sw_index = src_ring->sw_index;
541
Houston Hoffmanfa260aa2016-04-26 16:14:13 -0700542 hif_record_ce_desc_event(scn, ce_state->id,
543 FAST_TX_SOFTWARE_INDEX_UPDATE,
544 NULL, NULL, write_index);
545
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -0700546 if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1)
547 < (SLOTS_PER_DATAPATH_TX * num_msdus))) {
548 HIF_ERROR("Source ring full, required %d, available %d",
549 (SLOTS_PER_DATAPATH_TX * num_msdus),
550 CE_RING_DELTA(nentries_mask, write_index, sw_index - 1));
551 OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
552 Q_TARGET_ACCESS_END(scn);
553 qdf_spin_unlock_bh(&ce_state->ce_index_lock);
554 return 0;
555 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800556
557 /* 2 msdus per packet */
558 for (i = 0; i < num_msdus; i++) {
559 struct CE_src_desc *src_ring_base =
560 (struct CE_src_desc *)src_ring->base_addr_owner_space;
561 struct CE_src_desc *shadow_base =
562 (struct CE_src_desc *)src_ring->shadow_base;
563 struct CE_src_desc *src_desc =
564 CE_SRC_RING_TO_DESC(src_ring_base, write_index);
565 struct CE_src_desc *shadow_src_desc =
566 CE_SRC_RING_TO_DESC(shadow_base, write_index);
567
Komal Seelam644263d2016-02-22 20:45:49 +0530568 hif_pm_runtime_get_noresume(hif_hdl);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800569 msdu = msdus[i];
570
571 /*
572 * First fill out the ring descriptor for the HTC HTT frame
573 * header. These are uncached writes. Should we use a local
574 * structure instead?
575 */
		/* HTT/HTC header can be passed as an argument */
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +0530577 dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800578 shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
579 0xFFFFFFFF);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +0530580 user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800581 ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
582
583 shadow_src_desc->meta_data = transfer_id;
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +0530584 shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800585
586 /*
587 * HTC HTT header is a word stream, so byte swap if CE byte
588 * swap enabled
589 */
590 shadow_src_desc->byte_swap = ((ce_state->attr_flags &
591 CE_ATTR_BYTE_SWAP_DATA) != 0);
592 /* For the first one, it still does not need to write */
593 shadow_src_desc->gather = 1;
594 *src_desc = *shadow_src_desc;
595
596 /* By default we could initialize the transfer context to this
597 * value
598 */
599 src_ring->per_transfer_context[write_index] =
600 CE_SENDLIST_ITEM_CTXT;
601
602 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
603
604 src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
605 shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
606 /*
607 * Now fill out the ring descriptor for the actual data
608 * packet
609 */
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +0530610 dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800611 shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
612 0xFFFFFFFF);
613 /*
614 * Clear packet offset for all but the first CE desc.
615 */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530616 user_flags &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800617 ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
618 shadow_src_desc->meta_data = transfer_id;
619
620 /* get actual packet length */
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +0530621 frag_len = qdf_nbuf_get_frag_len(msdu, 1);
Houston Hoffmana5e74c12015-09-02 18:06:28 -0700622
623 /* only read download_len once */
624 shadow_src_desc->nbytes = ce_state->download_len;
625 if (shadow_src_desc->nbytes > frag_len)
626 shadow_src_desc->nbytes = frag_len;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800627
628 /* Data packet is a byte stream, so disable byte swap */
629 shadow_src_desc->byte_swap = 0;
630 /* For the last one, gather is not set */
631 shadow_src_desc->gather = 0;
632 *src_desc = *shadow_src_desc;
633 src_ring->per_transfer_context[write_index] = msdu;
634 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
635 }
636
637 /* Write the final index to h/w one-shot */
638 if (i) {
639 src_ring->write_index = write_index;
Houston Hoffmanf4607852015-12-17 17:14:40 -0800640
Komal Seelam644263d2016-02-22 20:45:49 +0530641 if (hif_pm_runtime_get(hif_hdl) == 0) {
Houston Hoffmanfa260aa2016-04-26 16:14:13 -0700642 hif_record_ce_desc_event(scn, ce_state->id,
643 FAST_TX_WRITE_INDEX_UPDATE,
644 NULL, NULL, write_index);
645
			/* Don't call WAR_XXX from here
			 * Just call XXX instead, that has the required
			 * intelligence.
			 */
649 war_ce_src_ring_write_idx_set(scn, ctrl_addr,
650 write_index);
Komal Seelam644263d2016-02-22 20:45:49 +0530651 hif_pm_runtime_put(hif_hdl);
Houston Hoffmanf4607852015-12-17 17:14:40 -0800652 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800653 }
654
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -0700655 Q_TARGET_ACCESS_END(scn);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530656 qdf_spin_unlock_bh(&ce_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800657
658 /*
659 * If all packets in the array are transmitted,
660 * i = num_msdus
661 * Temporarily add an ASSERT
662 */
663 ASSERT(i == num_msdus);
664 return i;
665}
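
/*
 * Illustrative sketch (kept out of the build) of the fastpath Tx call above.
 * Each msdu in the array is expected to carry two mapped fragments
 * (fragment 0: HTC/HTT header, fragment 1: data), matching the two
 * descriptor slots consumed per packet.  The nbuf array below is a
 * placeholder prepared by the caller.
 */
#if 0
static void example_fastpath_tx(struct CE_handle *ce_hdl,
				qdf_nbuf_t *msdu_array,
				unsigned int num_msdus)
{
	int sent;

	/* Returns the number of msdus actually posted to the source ring. */
	sent = ce_send_fast(ce_hdl, msdu_array, num_msdus,
			    0 /* transfer_id */);
	if (sent != num_msdus) {
		/* Source ring was full; the caller keeps ownership of the
		 * unposted msdus and may retry later.
		 */
	}
}
#endif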
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -0700666
667/**
668 * ce_is_fastpath_enabled() - returns true if fastpath mode is enabled
669 * @scn: Handle to HIF context
670 *
671 * Return: true if fastpath is enabled else false.
672 */
673static bool ce_is_fastpath_enabled(struct hif_softc *scn)
674{
675 return scn->fastpath_mode_on;
676}
677
678/**
679 * ce_is_fastpath_handler_registered() - return true for datapath CEs and if
680 * fastpath is enabled.
681 * @ce_state: handle to copy engine
682 *
683 * Return: true if fastpath handler is registered for datapath CE.
684 */
685static bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
686{
687 if (ce_state->fastpath_handler)
688 return true;
689 else
690 return false;
691}
692
693
694#else
695static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
696{
697 return false;
698}
699
700static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
701{
702 return false;
703}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800704#endif /* WLAN_FEATURE_FASTPATH */
705
/**
 * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: 0 if the buffer is enqueued
 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800714int
715ce_recv_buf_enqueue(struct CE_handle *copyeng,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530716 void *per_recv_context, qdf_dma_addr_t buffer)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800717{
718 int status;
719 struct CE_state *CE_state = (struct CE_state *)copyeng;
720 struct CE_ring_state *dest_ring = CE_state->dest_ring;
721 uint32_t ctrl_addr = CE_state->ctrl_addr;
722 unsigned int nentries_mask = dest_ring->nentries_mask;
723 unsigned int write_index;
724 unsigned int sw_index;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800725 uint64_t dma_addr = buffer;
Komal Seelam644263d2016-02-22 20:45:49 +0530726 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800727
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530728 qdf_spin_lock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800729 write_index = dest_ring->write_index;
730 sw_index = dest_ring->sw_index;
731
Houston Hoffman4411ad42016-03-14 21:12:04 -0700732 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530733 qdf_spin_unlock_bh(&CE_state->ce_index_lock);
Houston Hoffman4411ad42016-03-14 21:12:04 -0700734 return -EIO;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800735 }
736
Manjunathappa Prakash7399f142016-04-13 23:38:16 -0700737 if ((CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) ||
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -0700738 (ce_is_fastpath_enabled(scn) && CE_state->htt_rx_data)) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800739 struct CE_dest_desc *dest_ring_base =
Manjunathappa Prakash7399f142016-04-13 23:38:16 -0700740 (struct CE_dest_desc *)dest_ring->base_addr_owner_space;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800741 struct CE_dest_desc *dest_desc =
742 CE_DEST_RING_TO_DESC(dest_ring_base, write_index);
743
744 /* Update low 32 bit destination descriptor */
745 dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
746#ifdef QCA_WIFI_3_0
747 dest_desc->buffer_addr_hi =
748 (uint32_t)((dma_addr >> 32) & 0x1F);
749#endif
750 dest_desc->nbytes = 0;
751
752 dest_ring->per_transfer_context[write_index] =
753 per_recv_context;
754
Komal Seelambd7c51d2016-02-24 10:27:30 +0530755 hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_POST,
Houston Hoffman68e837e2015-12-04 12:57:24 -0800756 (union ce_desc *) dest_desc, per_recv_context,
757 write_index);
758
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800759 /* Update Destination Ring Write Index */
760 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
Manjunathappa Prakash7399f142016-04-13 23:38:16 -0700761 if (write_index != sw_index) {
762 CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
763 dest_ring->write_index = write_index;
764 }
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530765 status = QDF_STATUS_SUCCESS;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -0700766 } else
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530767 status = QDF_STATUS_E_FAILURE;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -0700768
Houston Hoffman4411ad42016-03-14 21:12:04 -0700769 Q_TARGET_ACCESS_END(scn);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530770 qdf_spin_unlock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800771 return status;
772}
773
774void
775ce_send_watermarks_set(struct CE_handle *copyeng,
776 unsigned int low_alert_nentries,
777 unsigned int high_alert_nentries)
778{
779 struct CE_state *CE_state = (struct CE_state *)copyeng;
780 uint32_t ctrl_addr = CE_state->ctrl_addr;
Komal Seelam644263d2016-02-22 20:45:49 +0530781 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800782
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800783 CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
784 CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800785}
786
787void
788ce_recv_watermarks_set(struct CE_handle *copyeng,
789 unsigned int low_alert_nentries,
790 unsigned int high_alert_nentries)
791{
792 struct CE_state *CE_state = (struct CE_state *)copyeng;
793 uint32_t ctrl_addr = CE_state->ctrl_addr;
Komal Seelam644263d2016-02-22 20:45:49 +0530794 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800795
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800796 CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
797 low_alert_nentries);
798 CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
799 high_alert_nentries);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800800}
801
802unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
803{
804 struct CE_state *CE_state = (struct CE_state *)copyeng;
805 struct CE_ring_state *src_ring = CE_state->src_ring;
806 unsigned int nentries_mask = src_ring->nentries_mask;
807 unsigned int sw_index;
808 unsigned int write_index;
809
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530810 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800811 sw_index = src_ring->sw_index;
812 write_index = src_ring->write_index;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530813 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800814
815 return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
816}
817
818unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
819{
820 struct CE_state *CE_state = (struct CE_state *)copyeng;
821 struct CE_ring_state *dest_ring = CE_state->dest_ring;
822 unsigned int nentries_mask = dest_ring->nentries_mask;
823 unsigned int sw_index;
824 unsigned int write_index;
825
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530826 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800827 sw_index = dest_ring->sw_index;
828 write_index = dest_ring->write_index;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530829 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800830
831 return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
832}
833
834/*
835 * Guts of ce_send_entries_done.
836 * The caller takes responsibility for any necessary locking.
837 */
838unsigned int
Komal Seelam644263d2016-02-22 20:45:49 +0530839ce_send_entries_done_nolock(struct hif_softc *scn,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800840 struct CE_state *CE_state)
841{
842 struct CE_ring_state *src_ring = CE_state->src_ring;
843 uint32_t ctrl_addr = CE_state->ctrl_addr;
844 unsigned int nentries_mask = src_ring->nentries_mask;
845 unsigned int sw_index;
846 unsigned int read_index;
847
848 sw_index = src_ring->sw_index;
849 read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
850
851 return CE_RING_DELTA(nentries_mask, sw_index, read_index);
852}
853
854unsigned int ce_send_entries_done(struct CE_handle *copyeng)
855{
856 struct CE_state *CE_state = (struct CE_state *)copyeng;
857 unsigned int nentries;
858
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530859 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800860 nentries = ce_send_entries_done_nolock(CE_state->scn, CE_state);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530861 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800862
863 return nentries;
864}
865
866/*
867 * Guts of ce_recv_entries_done.
868 * The caller takes responsibility for any necessary locking.
869 */
870unsigned int
Komal Seelam644263d2016-02-22 20:45:49 +0530871ce_recv_entries_done_nolock(struct hif_softc *scn,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800872 struct CE_state *CE_state)
873{
874 struct CE_ring_state *dest_ring = CE_state->dest_ring;
875 uint32_t ctrl_addr = CE_state->ctrl_addr;
876 unsigned int nentries_mask = dest_ring->nentries_mask;
877 unsigned int sw_index;
878 unsigned int read_index;
879
880 sw_index = dest_ring->sw_index;
881 read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
882
883 return CE_RING_DELTA(nentries_mask, sw_index, read_index);
884}
885
886unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
887{
888 struct CE_state *CE_state = (struct CE_state *)copyeng;
889 unsigned int nentries;
890
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530891 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800892 nentries = ce_recv_entries_done_nolock(CE_state->scn, CE_state);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530893 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800894
895 return nentries;
896}
897
898/* Debug support */
899void *ce_debug_cmplrn_context; /* completed recv next context */
900void *ce_debug_cnclsn_context; /* cancel send next context */
901void *ce_debug_rvkrn_context; /* revoke receive next context */
902void *ce_debug_cmplsn_context; /* completed send next context */
903
904/*
905 * Guts of ce_completed_recv_next.
906 * The caller takes responsibility for any necessary locking.
907 */
908int
909ce_completed_recv_next_nolock(struct CE_state *CE_state,
910 void **per_CE_contextp,
911 void **per_transfer_contextp,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530912 qdf_dma_addr_t *bufferp,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800913 unsigned int *nbytesp,
914 unsigned int *transfer_idp,
915 unsigned int *flagsp)
916{
917 int status;
918 struct CE_ring_state *dest_ring = CE_state->dest_ring;
919 unsigned int nentries_mask = dest_ring->nentries_mask;
920 unsigned int sw_index = dest_ring->sw_index;
Komal Seelambd7c51d2016-02-24 10:27:30 +0530921 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800922 struct CE_dest_desc *dest_ring_base =
923 (struct CE_dest_desc *)dest_ring->base_addr_owner_space;
924 struct CE_dest_desc *dest_desc =
925 CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
926 int nbytes;
927 struct CE_dest_desc dest_desc_info;
	/*
	 * By copying the dest_desc_info element to local memory, we avoid
	 * extra memory reads from non-cacheable memory.
	 */
932 dest_desc_info = *dest_desc;
933 nbytes = dest_desc_info.nbytes;
934 if (nbytes == 0) {
935 /*
936 * This closes a relatively unusual race where the Host
937 * sees the updated DRRI before the update to the
938 * corresponding descriptor has completed. We treat this
939 * as a descriptor that is not yet done.
940 */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530941 status = QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800942 goto done;
943 }
944
Komal Seelambd7c51d2016-02-24 10:27:30 +0530945 hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_COMPLETION,
Houston Hoffman68e837e2015-12-04 12:57:24 -0800946 (union ce_desc *) dest_desc,
947 dest_ring->per_transfer_context[sw_index],
948 sw_index);
949
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800950 dest_desc->nbytes = 0;
951
952 /* Return data from completed destination descriptor */
953 *bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
954 *nbytesp = nbytes;
955 *transfer_idp = dest_desc_info.meta_data;
956 *flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;
957
958 if (per_CE_contextp) {
959 *per_CE_contextp = CE_state->recv_context;
960 }
961
962 ce_debug_cmplrn_context = dest_ring->per_transfer_context[sw_index];
963 if (per_transfer_contextp) {
964 *per_transfer_contextp = ce_debug_cmplrn_context;
965 }
966 dest_ring->per_transfer_context[sw_index] = 0; /* sanity */
967
968 /* Update sw_index */
969 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
970 dest_ring->sw_index = sw_index;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530971 status = QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800972
973done:
974 return status;
975}
976
977int
978ce_completed_recv_next(struct CE_handle *copyeng,
979 void **per_CE_contextp,
980 void **per_transfer_contextp,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530981 qdf_dma_addr_t *bufferp,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800982 unsigned int *nbytesp,
983 unsigned int *transfer_idp, unsigned int *flagsp)
984{
985 struct CE_state *CE_state = (struct CE_state *)copyeng;
986 int status;
987
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530988 qdf_spin_lock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800989 status =
990 ce_completed_recv_next_nolock(CE_state, per_CE_contextp,
991 per_transfer_contextp, bufferp,
992 nbytesp, transfer_idp, flagsp);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530993 qdf_spin_unlock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800994
995 return status;
996}
997
998/* NB: Modeled after ce_completed_recv_next_nolock */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530999QDF_STATUS
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001000ce_revoke_recv_next(struct CE_handle *copyeng,
1001 void **per_CE_contextp,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301002 void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001003{
1004 struct CE_state *CE_state;
1005 struct CE_ring_state *dest_ring;
1006 unsigned int nentries_mask;
1007 unsigned int sw_index;
1008 unsigned int write_index;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301009 QDF_STATUS status;
Komal Seelam644263d2016-02-22 20:45:49 +05301010 struct hif_softc *scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001011
1012 CE_state = (struct CE_state *)copyeng;
1013 dest_ring = CE_state->dest_ring;
1014 if (!dest_ring) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301015 return QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001016 }
1017
1018 scn = CE_state->scn;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301019 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001020 nentries_mask = dest_ring->nentries_mask;
1021 sw_index = dest_ring->sw_index;
1022 write_index = dest_ring->write_index;
1023 if (write_index != sw_index) {
1024 struct CE_dest_desc *dest_ring_base =
1025 (struct CE_dest_desc *)dest_ring->
1026 base_addr_owner_space;
1027 struct CE_dest_desc *dest_desc =
1028 CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
1029
1030 /* Return data from completed destination descriptor */
1031 *bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);
1032
1033 if (per_CE_contextp) {
1034 *per_CE_contextp = CE_state->recv_context;
1035 }
1036
1037 ce_debug_rvkrn_context =
1038 dest_ring->per_transfer_context[sw_index];
1039 if (per_transfer_contextp) {
1040 *per_transfer_contextp = ce_debug_rvkrn_context;
1041 }
1042 dest_ring->per_transfer_context[sw_index] = 0; /* sanity */
1043
1044 /* Update sw_index */
1045 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1046 dest_ring->sw_index = sw_index;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301047 status = QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001048 } else {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301049 status = QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001050 }
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301051 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001052
1053 return status;
1054}
1055
1056/*
1057 * Guts of ce_completed_send_next.
1058 * The caller takes responsibility for any necessary locking.
1059 */
1060int
1061ce_completed_send_next_nolock(struct CE_state *CE_state,
1062 void **per_CE_contextp,
1063 void **per_transfer_contextp,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301064 qdf_dma_addr_t *bufferp,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001065 unsigned int *nbytesp,
1066 unsigned int *transfer_idp,
1067 unsigned int *sw_idx,
1068 unsigned int *hw_idx,
1069 uint32_t *toeplitz_hash_result)
1070{
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301071 int status = QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001072 struct CE_ring_state *src_ring = CE_state->src_ring;
1073 uint32_t ctrl_addr = CE_state->ctrl_addr;
1074 unsigned int nentries_mask = src_ring->nentries_mask;
1075 unsigned int sw_index = src_ring->sw_index;
1076 unsigned int read_index;
Komal Seelam644263d2016-02-22 20:45:49 +05301077 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001078
1079 if (src_ring->hw_index == sw_index) {
1080 /*
1081 * The SW completion index has caught up with the cached
1082 * version of the HW completion index.
1083 * Update the cached HW completion index to see whether
1084 * the SW has really caught up to the HW, or if the cached
1085 * value of the HW index has become stale.
1086 */
Houston Hoffman2c32cf62016-03-14 21:12:00 -07001087 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
Houston Hoffman987ab442016-03-14 21:12:02 -07001088 return QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001089 src_ring->hw_index =
Houston Hoffman3d0cda82015-12-03 13:25:05 -08001090 CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
Houston Hoffman2c32cf62016-03-14 21:12:00 -07001091 if (Q_TARGET_ACCESS_END(scn) < 0)
Houston Hoffman987ab442016-03-14 21:12:02 -07001092 return QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001093 }
1094 read_index = src_ring->hw_index;
1095
1096 if (sw_idx)
1097 *sw_idx = sw_index;
1098
1099 if (hw_idx)
1100 *hw_idx = read_index;
1101
1102 if ((read_index != sw_index) && (read_index != 0xffffffff)) {
1103 struct CE_src_desc *shadow_base =
1104 (struct CE_src_desc *)src_ring->shadow_base;
1105 struct CE_src_desc *shadow_src_desc =
1106 CE_SRC_RING_TO_DESC(shadow_base, sw_index);
1107#ifdef QCA_WIFI_3_0
1108 struct CE_src_desc *src_ring_base =
1109 (struct CE_src_desc *)src_ring->base_addr_owner_space;
1110 struct CE_src_desc *src_desc =
1111 CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
1112#endif
Komal Seelambd7c51d2016-02-24 10:27:30 +05301113 hif_record_ce_desc_event(scn, CE_state->id,
1114 HIF_TX_DESC_COMPLETION,
Houston Hoffman68e837e2015-12-04 12:57:24 -08001115 (union ce_desc *) shadow_src_desc,
1116 src_ring->per_transfer_context[sw_index],
1117 sw_index);
1118
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001119 /* Return data from completed source descriptor */
1120 *bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
1121 *nbytesp = shadow_src_desc->nbytes;
1122 *transfer_idp = shadow_src_desc->meta_data;
1123#ifdef QCA_WIFI_3_0
1124 *toeplitz_hash_result = src_desc->toeplitz_hash_result;
1125#else
1126 *toeplitz_hash_result = 0;
1127#endif
1128 if (per_CE_contextp) {
1129 *per_CE_contextp = CE_state->send_context;
1130 }
1131
1132 ce_debug_cmplsn_context =
1133 src_ring->per_transfer_context[sw_index];
1134 if (per_transfer_contextp) {
1135 *per_transfer_contextp = ce_debug_cmplsn_context;
1136 }
1137 src_ring->per_transfer_context[sw_index] = 0; /* sanity */
1138
1139 /* Update sw_index */
1140 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1141 src_ring->sw_index = sw_index;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301142 status = QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001143 }
1144
1145 return status;
1146}
1147
1148/* NB: Modeled after ce_completed_send_next */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301149QDF_STATUS
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001150ce_cancel_send_next(struct CE_handle *copyeng,
1151 void **per_CE_contextp,
1152 void **per_transfer_contextp,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301153 qdf_dma_addr_t *bufferp,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001154 unsigned int *nbytesp,
1155 unsigned int *transfer_idp,
1156 uint32_t *toeplitz_hash_result)
1157{
1158 struct CE_state *CE_state;
1159 struct CE_ring_state *src_ring;
1160 unsigned int nentries_mask;
1161 unsigned int sw_index;
1162 unsigned int write_index;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301163 QDF_STATUS status;
Komal Seelam644263d2016-02-22 20:45:49 +05301164 struct hif_softc *scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001165
1166 CE_state = (struct CE_state *)copyeng;
1167 src_ring = CE_state->src_ring;
1168 if (!src_ring) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301169 return QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001170 }
1171
1172 scn = CE_state->scn;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301173 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001174 nentries_mask = src_ring->nentries_mask;
1175 sw_index = src_ring->sw_index;
1176 write_index = src_ring->write_index;
1177
1178 if (write_index != sw_index) {
1179 struct CE_src_desc *src_ring_base =
1180 (struct CE_src_desc *)src_ring->base_addr_owner_space;
1181 struct CE_src_desc *src_desc =
1182 CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
1183
1184 /* Return data from completed source descriptor */
1185 *bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
1186 *nbytesp = src_desc->nbytes;
1187 *transfer_idp = src_desc->meta_data;
1188#ifdef QCA_WIFI_3_0
1189 *toeplitz_hash_result = src_desc->toeplitz_hash_result;
1190#else
1191 *toeplitz_hash_result = 0;
1192#endif
1193
1194 if (per_CE_contextp) {
1195 *per_CE_contextp = CE_state->send_context;
1196 }
1197
1198 ce_debug_cnclsn_context =
1199 src_ring->per_transfer_context[sw_index];
1200 if (per_transfer_contextp) {
1201 *per_transfer_contextp = ce_debug_cnclsn_context;
1202 }
1203 src_ring->per_transfer_context[sw_index] = 0; /* sanity */
1204
1205 /* Update sw_index */
1206 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1207 src_ring->sw_index = sw_index;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301208 status = QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001209 } else {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301210 status = QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001211 }
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301212 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001213
1214 return status;
1215}
1216
1217/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
1218#define CE_WM_SHFT 1
1219
1220int
1221ce_completed_send_next(struct CE_handle *copyeng,
1222 void **per_CE_contextp,
1223 void **per_transfer_contextp,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301224 qdf_dma_addr_t *bufferp,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001225 unsigned int *nbytesp,
1226 unsigned int *transfer_idp,
1227 unsigned int *sw_idx,
1228 unsigned int *hw_idx,
1229 unsigned int *toeplitz_hash_result)
1230{
1231 struct CE_state *CE_state = (struct CE_state *)copyeng;
1232 int status;
1233
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301234 qdf_spin_lock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001235 status =
1236 ce_completed_send_next_nolock(CE_state, per_CE_contextp,
1237 per_transfer_contextp, bufferp,
1238 nbytesp, transfer_idp, sw_idx,
1239 hw_idx, toeplitz_hash_result);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301240 qdf_spin_unlock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001241
1242 return status;
1243}
1244
#ifdef ATH_11AC_TXCOMPACT
/* CE engine descriptor reap
 * Similar to ce_per_engine_service; the only difference is that
 * ce_per_engine_service does receive and reaping of completed descriptors,
 * while this function only handles reaping of Tx complete descriptors.
 * The function is called from the threshold reap poll routine
 * hif_send_complete_check, so it should not contain receive functionality
 * within it.
 */
1254
Komal Seelam644263d2016-02-22 20:45:49 +05301255void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001256{
1257 void *CE_context;
1258 void *transfer_context;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301259 qdf_dma_addr_t buf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001260 unsigned int nbytes;
1261 unsigned int id;
1262 unsigned int sw_idx, hw_idx;
1263 uint32_t toeplitz_hash_result;
Houston Hoffmana575ec22015-12-14 16:35:15 -08001264 struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001265
Houston Hoffmanbac94542016-03-14 21:11:59 -07001266 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1267 return;
1268
Komal Seelambd7c51d2016-02-24 10:27:30 +05301269 hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
Houston Hoffmana575ec22015-12-14 16:35:15 -08001270 NULL, NULL, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001271
	/* Since this function is called from both user context and
	 * tasklet context, the spinlock has to lock out the bottom halves.
	 * This fix assumes that the ATH_11AC_TXCOMPACT flag is always
	 * enabled in TX polling mode. If this is not the case, more
	 * bottom-half spinlock changes are needed. Due to data path
	 * performance concerns, after internal discussion we've decided
	 * to make a minimum change, i.e., only address the issue that occurs
	 * in this function. The possible negative effect of this minimum
	 * change is that, in the future, if some other function is also
	 * opened up for use from user context, those cases need to be
	 * addressed by changing spin_lock to spin_lock_bh as well.
	 */
1284
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301285 qdf_spin_lock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001286
1287 if (CE_state->send_cb) {
1288 {
1289 /* Pop completed send buffers and call the
1290 * registered send callback for each
1291 */
1292 while (ce_completed_send_next_nolock
1293 (CE_state, &CE_context,
1294 &transfer_context, &buf,
1295 &nbytes, &id, &sw_idx, &hw_idx,
1296 &toeplitz_hash_result) ==
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301297 QDF_STATUS_SUCCESS) {
Houston Hoffmana575ec22015-12-14 16:35:15 -08001298 if (ce_id != CE_HTT_H2T_MSG) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301299 qdf_spin_unlock_bh(
Houston Hoffman44b7e4a2015-09-03 17:01:22 -07001300 &CE_state->ce_index_lock);
1301 CE_state->send_cb(
1302 (struct CE_handle *)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001303 CE_state, CE_context,
1304 transfer_context, buf,
1305 nbytes, id, sw_idx, hw_idx,
1306 toeplitz_hash_result);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301307 qdf_spin_lock_bh(
Houston Hoffman44b7e4a2015-09-03 17:01:22 -07001308 &CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001309 } else {
1310 struct HIF_CE_pipe_info *pipe_info =
1311 (struct HIF_CE_pipe_info *)
1312 CE_context;
1313
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301314 qdf_spin_lock_bh(&pipe_info->
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001315 completion_freeq_lock);
1316 pipe_info->num_sends_allowed++;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301317 qdf_spin_unlock_bh(&pipe_info->
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001318 completion_freeq_lock);
1319 }
1320 }
1321 }
1322 }
1323
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301324 qdf_spin_unlock_bh(&CE_state->ce_index_lock);
Houston Hoffmana575ec22015-12-14 16:35:15 -08001325
Komal Seelambd7c51d2016-02-24 10:27:30 +05301326 hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
Houston Hoffmana575ec22015-12-14 16:35:15 -08001327 NULL, NULL, 0);
Houston Hoffmanbac94542016-03-14 21:11:59 -07001328 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001329}
1330
1331#endif /*ATH_11AC_TXCOMPACT */
1332
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001333/*
 1334 * Number of times to check for any pending tx/rx completion on
 1335 * a copy engine; this count should be big enough. Once we hit
 1336 * this threshold we will not check for any Tx/Rx completion in the
 1337 * same interrupt handling pass. Note that this threshold is only
 1338 * used for Rx interrupt processing; it can be used for Tx as well
 1339 * if we suspect an infinite loop in checking for pending Tx completions.
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001340 */
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001341#define CE_TXRX_COMP_CHECK_THRESHOLD 20
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001342
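/*
 * Illustrative sketch only, kept out of the build with #if 0 (the
 * function name is hypothetical): the bounded re-check pattern that
 * CE_TXRX_COMP_CHECK_THRESHOLD enforces. The real loops live in
 * ce_per_engine_service() and ce_per_engine_service_fast() below.
 */
#if 0
static void example_bounded_recheck(struct hif_softc *scn,
				    struct CE_state *CE_state)
{
	unsigned int more_comp_cnt = 0;

more_completions:
	/* ... pop and handle completed descriptors here ... */

	if (ce_recv_entries_done_nolock(scn, CE_state)) {
		if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD)
			goto more_completions;
		/* else give up and flag a potential infinite loop */
	}
}
#endif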
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001343#ifdef WLAN_FEATURE_FASTPATH
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001344/**
1345 * ce_fastpath_rx_handle() - Updates write_index and calls fastpath msg handler
1346 * @ce_state: handle to copy engine state
1347 * @cmpl_msdus: Rx msdus
1348 * @num_cmpls: number of Rx msdus
1349 * @ctrl_addr: CE control address
1350 *
1351 * Return: None
1352 */
1353static void ce_fastpath_rx_handle(struct CE_state *ce_state,
1354 qdf_nbuf_t *cmpl_msdus, uint32_t num_cmpls,
1355 uint32_t ctrl_addr)
1356{
1357 struct hif_softc *scn = ce_state->scn;
1358 struct CE_ring_state *dest_ring = ce_state->dest_ring;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001359 uint32_t nentries_mask = dest_ring->nentries_mask;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001360 uint32_t write_index;
1361
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001362 (ce_state->fastpath_handler)(ce_state->context, cmpl_msdus, num_cmpls);
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001363
1364 /* Update Destination Ring Write Index */
1365 write_index = dest_ring->write_index;
1366 write_index = CE_RING_IDX_ADD(nentries_mask, write_index, num_cmpls);
Houston Hoffmanfa260aa2016-04-26 16:14:13 -07001367
1368 hif_record_ce_desc_event(scn, ce_state->id,
1369 FAST_RX_WRITE_INDEX_UPDATE,
1370 NULL, NULL, write_index);
1371
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001372 CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
1373 dest_ring->write_index = write_index;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001374}
1375
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001376#define MSG_FLUSH_NUM 6
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001377/**
1378 * ce_per_engine_service_fast() - CE handler routine to service fastpath messages
1379 * @scn: hif_context
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001380 * @ce_id: Copy engine ID
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001381 * 1) Go through the CE ring, and find the completions
1382 * 2) For valid completions retrieve context (nbuf) for per_transfer_context[]
1383 * 3) Unmap buffer & accumulate in an array.
1384 * 4) Call message handler when array is full or when exiting the handler
1385 *
1386 * Return: void
1387 */
1388
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001389static void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001390{
1391 struct CE_state *ce_state = scn->ce_id_to_state[ce_id];
1392 struct CE_ring_state *dest_ring = ce_state->dest_ring;
1393 struct CE_dest_desc *dest_ring_base =
1394 (struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1395
1396 uint32_t nentries_mask = dest_ring->nentries_mask;
1397 uint32_t sw_index = dest_ring->sw_index;
1398 uint32_t nbytes;
1399 qdf_nbuf_t nbuf;
1400 uint32_t paddr_lo;
1401 struct CE_dest_desc *dest_desc;
1402 uint32_t ce_int_status = (1 << ce_id);
1403 qdf_nbuf_t cmpl_msdus[MSG_FLUSH_NUM];
1404 uint32_t ctrl_addr = ce_state->ctrl_addr;
1405 uint32_t nbuf_cmpl_idx = 0;
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001406 unsigned int more_comp_cnt = 0;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001407
1408more_data:
1409 if (ce_int_status == (1 << ce_id)) {
1410 for (;;) {
1411
1412 dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base,
1413 sw_index);
1414
1415 /*
1416 * The following 2 reads are from non-cached memory
1417 */
1418 nbytes = dest_desc->nbytes;
1419
1420 /* If completion is invalid, break */
1421 if (qdf_unlikely(nbytes == 0))
1422 break;
1423
1424
1425 /*
1426 * Build the nbuf list from valid completions
1427 */
1428 nbuf = dest_ring->per_transfer_context[sw_index];
1429
1430 /*
1431 * No lock is needed here, since this is the only thread
1432 * that accesses the sw_index
1433 */
1434 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1435
1436 /*
1437 * CAREFUL : Uncached write, but still less expensive,
1438 * since most modern caches use "write-combining" to
1439 * flush multiple cache-writes all at once.
1440 */
1441 dest_desc->nbytes = 0;
1442
1443 /*
 1444			 * Per our understanding this is not required on our
 1445			 * platform, since we are doing the same cache
 1446			 * invalidation operation on the same buffer twice in
 1447			 * succession, without any modification to this buffer
 1448			 * by the CPU in between.
 1449			 * However, this code with 2 syncs in succession has
 1450			 * been undergoing some testing at a customer site,
 1451			 * and has shown no problems so far. We would like to
 1452			 * confirm with the customer that this line is really
 1453			 * not required, before we remove this line
 1454			 * completely.
1455 */
1456 paddr_lo = QDF_NBUF_CB_PADDR(nbuf);
1457
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001458 qdf_mem_dma_sync_single_for_cpu(scn->qdf_dev,
1459 paddr_lo,
1460 (skb_end_pointer(nbuf) - (nbuf)->data),
1461 DMA_FROM_DEVICE);
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001462 qdf_nbuf_put_tail(nbuf, nbytes);
1463
1464 qdf_assert_always(nbuf->data != NULL);
1465
1466 cmpl_msdus[nbuf_cmpl_idx++] = nbuf;
1467
1468 /*
 1469			 * we are not posting the buffers back; instead we
 1470			 * reuse the buffers
1471 */
1472 if (nbuf_cmpl_idx == MSG_FLUSH_NUM) {
Houston Hoffmanfa260aa2016-04-26 16:14:13 -07001473 hif_record_ce_desc_event(scn, ce_state->id,
1474 FAST_RX_SOFTWARE_INDEX_UPDATE,
1475 NULL, NULL, sw_index);
1476 dest_ring->sw_index = sw_index;
1477
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001478 qdf_spin_unlock(&ce_state->ce_index_lock);
1479 ce_fastpath_rx_handle(ce_state, cmpl_msdus,
1480 MSG_FLUSH_NUM, ctrl_addr);
1481 qdf_spin_lock(&ce_state->ce_index_lock);
1482 nbuf_cmpl_idx = 0;
1483 }
1484
1485 }
1486
Houston Hoffmanfa260aa2016-04-26 16:14:13 -07001487 hif_record_ce_desc_event(scn, ce_state->id,
1488 FAST_RX_SOFTWARE_INDEX_UPDATE,
1489 NULL, NULL, sw_index);
1490
1491 dest_ring->sw_index = sw_index;
1492
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001493 /*
1494 * If there are not enough completions to fill the array,
1495 * just call the message handler here
1496 */
1497 if (nbuf_cmpl_idx) {
1498 qdf_spin_unlock(&ce_state->ce_index_lock);
1499 ce_fastpath_rx_handle(ce_state, cmpl_msdus,
1500 nbuf_cmpl_idx, ctrl_addr);
1501 qdf_spin_lock(&ce_state->ce_index_lock);
1502 nbuf_cmpl_idx = 0;
1503 }
1504 qdf_atomic_set(&ce_state->rx_pending, 0);
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001505 CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1506 HOST_IS_COPY_COMPLETE_MASK);
1507 }
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001508 if (ce_recv_entries_done_nolock(scn, ce_state)) {
1509 if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1510 goto more_data;
1511 } else {
1512 HIF_ERROR("%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1513 __func__, nentries_mask,
1514 ce_state->dest_ring->sw_index,
1515 CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr));
1516 }
1517 }
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001518}
1519
1520#else
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001521static void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001522{
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001523}
1524#endif /* WLAN_FEATURE_FASTPATH */
1525
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001526/*
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001527 * Guts of interrupt handler for per-engine interrupts on a particular CE.
1528 *
1529 * Invokes registered callbacks for recv_complete,
1530 * send_complete, and watermarks.
1531 *
1532 * Returns: number of messages processed
1533 */
1534
Komal Seelam644263d2016-02-22 20:45:49 +05301535int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001536{
1537 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1538 uint32_t ctrl_addr = CE_state->ctrl_addr;
1539 void *CE_context;
1540 void *transfer_context;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301541 qdf_dma_addr_t buf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001542 unsigned int nbytes;
1543 unsigned int id;
1544 unsigned int flags;
1545 uint32_t CE_int_status;
1546 unsigned int more_comp_cnt = 0;
1547 unsigned int more_snd_comp_cnt = 0;
1548 unsigned int sw_idx, hw_idx;
1549 uint32_t toeplitz_hash_result;
Komal Seelambd7c51d2016-02-24 10:27:30 +05301550 uint32_t mode = hif_get_conparam(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001551
1552 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1553 HIF_ERROR("[premature rc=0]\n");
1554 return 0; /* no work done */
1555 }
1556
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301557 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001558
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001559 /*
1560 * With below check we make sure CE we are handling is datapath CE and
1561 * fastpath is enabled.
1562 */
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001563 if (ce_is_fastpath_handler_registered(CE_state)) {
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001564 /* For datapath only Rx CEs */
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001565 ce_per_engine_service_fast(scn, CE_id);
1566 qdf_spin_unlock(&CE_state->ce_index_lock);
1567 return CE_state->receive_count;
1568 }
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001569
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001570 /* Clear force_break flag and re-initialize receive_count to 0 */
1571
1572 /* NAPI: scn variables- thread/multi-processing safety? */
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001573 CE_state->receive_count = 0;
Houston Hoffman18c7fc52015-09-02 11:44:42 -07001574 CE_state->force_break = 0;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001575more_completions:
1576 if (CE_state->recv_cb) {
1577
1578 /* Pop completed recv buffers and call
1579 * the registered recv callback for each
1580 */
1581 while (ce_completed_recv_next_nolock
1582 (CE_state, &CE_context, &transfer_context,
1583 &buf, &nbytes, &id, &flags) ==
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301584 QDF_STATUS_SUCCESS) {
1585 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001586 CE_state->recv_cb((struct CE_handle *)CE_state,
1587 CE_context, transfer_context, buf,
1588 nbytes, id, flags);
1589
1590 /*
1591 * EV #112693 -
1592 * [Peregrine][ES1][WB342][Win8x86][Performance]
1593 * BSoD_0x133 occurred in VHT80 UDP_DL
 1594			 * Break out of the DPC by force if the number of loops
 1595			 * in hif_pci_ce_recv_data reaches MAX_NUM_OF_RECEIVES,
 1596			 * to avoid spending too much time in the DPC for each
 1597			 * interrupt. Schedule another DPC to avoid data loss
 1598			 * if the force-break action was taken. Currently this
 1599			 * applies to Windows OS only; Linux/Mac OS can extend
 1600			 * the same approach to their own platforms if
 1601			 * necessary.
1602 */
1603
1604 /* Break the receive processes by
1605 * force if force_break set up
1606 */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301607 if (qdf_unlikely(CE_state->force_break)) {
1608 qdf_atomic_set(&CE_state->rx_pending, 1);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001609 if (Q_TARGET_ACCESS_END(scn) < 0)
1610 HIF_ERROR("<--[premature rc=%d]\n",
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001611 CE_state->receive_count);
1612 return CE_state->receive_count;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001613 }
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301614 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001615 }
1616 }
1617
1618 /*
 1619	 * Attention: We may experience a potential infinite loop in the
 1620	 * while loop below during a send stress test.
 1621	 * Resolve it the same way as the receive case (refer to EV #112693).
1622 */
1623
1624 if (CE_state->send_cb) {
1625 /* Pop completed send buffers and call
1626 * the registered send callback for each
1627 */
1628
1629#ifdef ATH_11AC_TXCOMPACT
1630 while (ce_completed_send_next_nolock
1631 (CE_state, &CE_context,
1632 &transfer_context, &buf, &nbytes,
1633 &id, &sw_idx, &hw_idx,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301634 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001635
1636 if (CE_id != CE_HTT_H2T_MSG ||
Houston Hoffman75ef5a52016-04-14 17:15:49 -07001637 QDF_IS_EPPING_ENABLED(mode)) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301638 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001639 CE_state->send_cb((struct CE_handle *)CE_state,
1640 CE_context, transfer_context,
1641 buf, nbytes, id, sw_idx,
1642 hw_idx, toeplitz_hash_result);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301643 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001644 } else {
1645 struct HIF_CE_pipe_info *pipe_info =
1646 (struct HIF_CE_pipe_info *)CE_context;
1647
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301648 qdf_spin_lock(&pipe_info->
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001649 completion_freeq_lock);
1650 pipe_info->num_sends_allowed++;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301651 qdf_spin_unlock(&pipe_info->
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001652 completion_freeq_lock);
1653 }
1654 }
1655#else /*ATH_11AC_TXCOMPACT */
1656 while (ce_completed_send_next_nolock
1657 (CE_state, &CE_context,
1658 &transfer_context, &buf, &nbytes,
1659 &id, &sw_idx, &hw_idx,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301660 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1661 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001662 CE_state->send_cb((struct CE_handle *)CE_state,
1663 CE_context, transfer_context, buf,
1664 nbytes, id, sw_idx, hw_idx,
1665 toeplitz_hash_result);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301666 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001667 }
1668#endif /*ATH_11AC_TXCOMPACT */
1669 }
1670
1671more_watermarks:
1672 if (CE_state->misc_cbs) {
1673 CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
1674 if (CE_int_status & CE_WATERMARK_MASK) {
1675 if (CE_state->watermark_cb) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301676 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001677 /* Convert HW IS bits to software flags */
1678 flags =
1679 (CE_int_status & CE_WATERMARK_MASK) >>
1680 CE_WM_SHFT;
1681
1682 CE_state->
1683 watermark_cb((struct CE_handle *)CE_state,
1684 CE_state->wm_context, flags);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301685 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001686 }
1687 }
1688 }
1689
1690 /*
1691 * Clear the misc interrupts (watermark) that were handled above,
1692 * and that will be checked again below.
1693 * Clear and check for copy-complete interrupts again, just in case
1694 * more copy completions happened while the misc interrupts were being
1695 * handled.
1696 */
1697 CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1698 CE_WATERMARK_MASK |
1699 HOST_IS_COPY_COMPLETE_MASK);
1700
1701 /*
1702 * Now that per-engine interrupts are cleared, verify that
1703 * no recv interrupts arrive while processing send interrupts,
1704 * and no recv or send interrupts happened while processing
 1705	 * misc interrupts. Go back and check again. Keep checking until
1706 * we find no more events to process.
1707 */
1708 if (CE_state->recv_cb && ce_recv_entries_done_nolock(scn, CE_state)) {
Houston Hoffman75ef5a52016-04-14 17:15:49 -07001709 if (QDF_IS_EPPING_ENABLED(mode) ||
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001710 more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1711 goto more_completions;
1712 } else {
1713 HIF_ERROR(
1714 "%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1715 __func__, CE_state->dest_ring->nentries_mask,
1716 CE_state->dest_ring->sw_index,
1717 CE_DEST_RING_READ_IDX_GET(scn,
1718 CE_state->ctrl_addr));
1719 }
1720 }
1721
1722 if (CE_state->send_cb && ce_send_entries_done_nolock(scn, CE_state)) {
Houston Hoffman75ef5a52016-04-14 17:15:49 -07001723 if (QDF_IS_EPPING_ENABLED(mode) ||
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001724 more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1725 goto more_completions;
1726 } else {
1727 HIF_ERROR(
1728 "%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1729 __func__, CE_state->src_ring->nentries_mask,
1730 CE_state->src_ring->sw_index,
1731 CE_SRC_RING_READ_IDX_GET(scn,
1732 CE_state->ctrl_addr));
1733 }
1734 }
1735
1736 if (CE_state->misc_cbs) {
1737 CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
1738 if (CE_int_status & CE_WATERMARK_MASK) {
1739 if (CE_state->watermark_cb) {
1740 goto more_watermarks;
1741 }
1742 }
1743 }
1744
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301745 qdf_spin_unlock(&CE_state->ce_index_lock);
1746 qdf_atomic_set(&CE_state->rx_pending, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001747
1748 if (Q_TARGET_ACCESS_END(scn) < 0)
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001749 HIF_ERROR("<--[premature rc=%d]\n", CE_state->receive_count);
1750 return CE_state->receive_count;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001751}
1752
1753/*
1754 * Handler for per-engine interrupts on ALL active CEs.
1755 * This is used in cases where the system is sharing a
 1756 * single interrupt for all CEs.
1757 */
1758
Komal Seelam644263d2016-02-22 20:45:49 +05301759void ce_per_engine_service_any(int irq, struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001760{
1761 int CE_id;
1762 uint32_t intr_summary;
1763
Houston Hoffmanbac94542016-03-14 21:11:59 -07001764 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1765 return;
1766
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301767 if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001768 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1769 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301770 if (qdf_atomic_read(&CE_state->rx_pending)) {
1771 qdf_atomic_set(&CE_state->rx_pending, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001772 ce_per_engine_service(scn, CE_id);
1773 }
1774 }
1775
Houston Hoffmanbac94542016-03-14 21:11:59 -07001776 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001777 return;
1778 }
1779
1780 intr_summary = CE_INTERRUPT_SUMMARY(scn);
1781
1782 for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
1783 if (intr_summary & (1 << CE_id)) {
1784 intr_summary &= ~(1 << CE_id);
1785 } else {
1786 continue; /* no intr pending on this CE */
1787 }
1788
1789 ce_per_engine_service(scn, CE_id);
1790 }
1791
Houston Hoffmanbac94542016-03-14 21:11:59 -07001792 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001793}
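/*
 * Illustrative sketch only, kept out of the build with #if 0 (the
 * function name is hypothetical): a bottom half that shares one
 * interrupt line across all copy engines would typically dispatch
 * into ce_per_engine_service_any() as shown here.
 */
#if 0
static void example_shared_irq_bottom_half(int irq, struct hif_softc *scn)
{
	/* services every CE that has an interrupt pending on the line */
	ce_per_engine_service_any(irq, scn);
}
#endif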
1794
1795/*
1796 * Adjust interrupts for the copy complete handler.
1797 * If it's needed for either send or recv, then unmask
1798 * this interrupt; otherwise, mask it.
1799 *
1800 * Called with target_lock held.
1801 */
1802static void
1803ce_per_engine_handler_adjust(struct CE_state *CE_state,
1804 int disable_copy_compl_intr)
1805{
1806 uint32_t ctrl_addr = CE_state->ctrl_addr;
Komal Seelam644263d2016-02-22 20:45:49 +05301807 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001808
1809 CE_state->disable_copy_compl_intr = disable_copy_compl_intr;
Houston Hoffmanbac94542016-03-14 21:11:59 -07001810
1811 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1812 return;
1813
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001814 if ((!disable_copy_compl_intr) &&
1815 (CE_state->send_cb || CE_state->recv_cb)) {
1816 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1817 } else {
1818 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1819 }
1820
1821 if (CE_state->watermark_cb) {
1822 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1823 } else {
1824 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1825 }
Houston Hoffmanbac94542016-03-14 21:11:59 -07001826 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001827}
1828
 1829/* Iterate the CE_state list and disable the compl interrupt
1830 * if it has been registered already.
1831 */
Komal Seelam644263d2016-02-22 20:45:49 +05301832void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001833{
1834 int CE_id;
1835
Houston Hoffmanbac94542016-03-14 21:11:59 -07001836 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1837 return;
1838
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001839 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1840 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1841 uint32_t ctrl_addr = CE_state->ctrl_addr;
1842
1843 /* if the interrupt is currently enabled, disable it */
1844 if (!CE_state->disable_copy_compl_intr
1845 && (CE_state->send_cb || CE_state->recv_cb)) {
1846 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1847 }
1848
1849 if (CE_state->watermark_cb) {
1850 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1851 }
1852 }
Houston Hoffmanbac94542016-03-14 21:11:59 -07001853 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001854}
1855
Komal Seelam644263d2016-02-22 20:45:49 +05301856void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001857{
1858 int CE_id;
1859
Houston Hoffmanbac94542016-03-14 21:11:59 -07001860 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1861 return;
1862
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001863 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1864 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1865 uint32_t ctrl_addr = CE_state->ctrl_addr;
1866
1867 /*
1868 * If the CE is supposed to have copy complete interrupts
1869 * enabled (i.e. there a callback registered, and the
1870 * "disable" flag is not set), then re-enable the interrupt.
1871 */
1872 if (!CE_state->disable_copy_compl_intr
1873 && (CE_state->send_cb || CE_state->recv_cb)) {
1874 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1875 }
1876
1877 if (CE_state->watermark_cb) {
1878 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1879 }
1880 }
Houston Hoffmanbac94542016-03-14 21:11:59 -07001881 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001882}
1883
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001884/**
1885 * ce_send_cb_register(): register completion handler
1886 * @copyeng: CE_state representing the ce we are adding the behavior to
1887 * @fn_ptr: callback that the ce should use when processing tx completions
 1888 * @disable_interrupts: whether the copy complete interrupt should be disabled
1889 *
1890 * Caller should guarantee that no transactions are in progress before
1891 * switching the callback function.
1892 *
1893 * Registers the send context before the fn pointer so that if the cb is valid
1894 * the context should be valid.
1895 *
1896 * Beware that currently this function will enable completion interrupts.
1897 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001898void
1899ce_send_cb_register(struct CE_handle *copyeng,
1900 ce_send_cb fn_ptr,
1901 void *ce_send_context, int disable_interrupts)
1902{
1903 struct CE_state *CE_state = (struct CE_state *)copyeng;
1904
Sanjay Devnani9ce15772015-11-12 14:08:57 -08001905 if (CE_state == NULL) {
1906 pr_err("%s: Error CE state = NULL\n", __func__);
1907 return;
1908 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001909 CE_state->send_context = ce_send_context;
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001910 CE_state->send_cb = fn_ptr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001911 ce_per_engine_handler_adjust(CE_state, disable_interrupts);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001912}
1913
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001914/**
1915 * ce_recv_cb_register(): register completion handler
1916 * @copyeng: CE_state representing the ce we are adding the behavior to
1917 * @fn_ptr: callback that the ce should use when processing rx completions
 1918 * @disable_interrupts: whether the copy complete interrupt should be disabled
1919 *
 1920 * Registers the recv context before the fn pointer so that if the cb is valid
1921 * the context should be valid.
1922 *
1923 * Caller should guarantee that no transactions are in progress before
1924 * switching the callback function.
1925 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001926void
1927ce_recv_cb_register(struct CE_handle *copyeng,
1928 CE_recv_cb fn_ptr,
1929 void *CE_recv_context, int disable_interrupts)
1930{
1931 struct CE_state *CE_state = (struct CE_state *)copyeng;
1932
Sanjay Devnani9ce15772015-11-12 14:08:57 -08001933 if (CE_state == NULL) {
1934 pr_err("%s: ERROR CE state = NULL\n", __func__);
1935 return;
1936 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001937 CE_state->recv_context = CE_recv_context;
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001938 CE_state->recv_cb = fn_ptr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001939 ce_per_engine_handler_adjust(CE_state, disable_interrupts);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001940}
1941
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001942/**
1943 * ce_watermark_cb_register(): register completion handler
1944 * @copyeng: CE_state representing the ce we are adding the behavior to
1945 * @fn_ptr: callback that the ce should use when processing watermark events
1946 *
1947 * Caller should guarantee that no watermark events are being processed before
1948 * switching the callback function.
1949 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001950void
1951ce_watermark_cb_register(struct CE_handle *copyeng,
1952 CE_watermark_cb fn_ptr, void *CE_wm_context)
1953{
1954 struct CE_state *CE_state = (struct CE_state *)copyeng;
1955
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001956 CE_state->watermark_cb = fn_ptr;
1957 CE_state->wm_context = CE_wm_context;
1958 ce_per_engine_handler_adjust(CE_state, 0);
1959 if (fn_ptr) {
1960 CE_state->misc_cbs = 1;
1961 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001962}
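/*
 * Illustrative sketch only, kept out of the build with #if 0: how a
 * client might pair the three registration calls above to wire up
 * send, receive and watermark handling on one copy engine. The
 * example_* names are hypothetical callbacks/contexts, not part of
 * this driver.
 */
#if 0
static void example_register_ce_callbacks(struct CE_handle *copyeng,
					  void *send_ctx, void *recv_ctx,
					  void *wm_ctx)
{
	/* Tx completions; 0 leaves the copy complete interrupt enabled */
	ce_send_cb_register(copyeng, example_send_done, send_ctx, 0);

	/* Rx completions */
	ce_recv_cb_register(copyeng, example_recv_done, recv_ctx, 0);

	/* high/low watermark events; also sets misc_cbs internally */
	ce_watermark_cb_register(copyeng, example_watermark, wm_ctx);
}
#endif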
1963
1964#ifdef WLAN_FEATURE_FASTPATH
1965/**
 1966 * ce_pkt_dl_len_set() - set the HTT packet download length
1967 * @hif_sc: HIF context
1968 * @pkt_download_len: download length
1969 *
1970 * Return: None
1971 */
1972void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
1973{
Komal Seelam644263d2016-02-22 20:45:49 +05301974 struct hif_softc *sc = (struct hif_softc *)(hif_sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001975 struct CE_state *ce_state = sc->ce_id_to_state[CE_HTT_H2T_MSG];
1976
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301977 qdf_assert_always(ce_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001978
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001979 ce_state->download_len = pkt_download_len;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001980
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301981 qdf_print("%s CE %d Pkt download length %d", __func__,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001982 ce_state->id, ce_state->download_len);
1983}
1984#else
1985void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
1986{
1987}
1988#endif /* WLAN_FEATURE_FASTPATH */
1989
Komal Seelam644263d2016-02-22 20:45:49 +05301990bool ce_get_rx_pending(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001991{
1992 int CE_id;
1993
1994 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1995 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301996 if (qdf_atomic_read(&CE_state->rx_pending))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001997 return true;
1998 }
1999
2000 return false;
2001}
2002
2003/**
 2004 * ce_check_rx_pending() - check if rx processing is pending on a copy engine
Houston Hoffmaneb2516c2016-04-01 12:53:50 -07002005 * @CE_state: context of the copy engine to check
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002006 *
Houston Hoffmaneb2516c2016-04-01 12:53:50 -07002007 * Return: true if per_engine_service
2008 * didn't process all the rx descriptors.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002009 */
Houston Hoffmaneb2516c2016-04-01 12:53:50 -07002010bool ce_check_rx_pending(struct CE_state *CE_state)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002011{
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302012 if (qdf_atomic_read(&CE_state->rx_pending))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002013 return true;
2014 else
2015 return false;
2016}
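/*
 * Illustrative sketch only, kept out of the build with #if 0 (the
 * function name is hypothetical): how a polling or suspend path might
 * use the helpers above to decide whether rx work is still outstanding.
 */
#if 0
static bool example_any_rx_outstanding(struct hif_softc *scn,
				       struct CE_state *CE_state)
{
	/* either check one copy engine ... */
	if (ce_check_rx_pending(CE_state))
		return true;

	/* ... or scan every copy engine on this hif instance */
	return ce_get_rx_pending(scn);
}
#endif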
Houston Hoffman8ed92e52015-09-02 14:49:48 -07002017
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002018#ifdef IPA_OFFLOAD
Leo Changd85f78d2015-11-13 10:55:34 -08002019/**
2020 * ce_ipa_get_resource() - get uc resource on copyengine
2021 * @ce: copyengine context
2022 * @ce_sr_base_paddr: copyengine source ring base physical address
2023 * @ce_sr_ring_size: copyengine source ring size
2024 * @ce_reg_paddr: copyengine register physical address
2025 *
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002026 * The copy engine should release resources to the micro controller.
 2027 * The micro controller needs:
Leo Changd85f78d2015-11-13 10:55:34 -08002028 * - Copy engine source descriptor base address
2029 * - Copy engine source descriptor size
 2030 * - PCI BAR address to access copy engine register
2031 *
2032 * Return: None
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002033 */
2034void ce_ipa_get_resource(struct CE_handle *ce,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302035 qdf_dma_addr_t *ce_sr_base_paddr,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002036 uint32_t *ce_sr_ring_size,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302037 qdf_dma_addr_t *ce_reg_paddr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002038{
2039 struct CE_state *CE_state = (struct CE_state *)ce;
2040 uint32_t ring_loop;
2041 struct CE_src_desc *ce_desc;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302042 qdf_dma_addr_t phy_mem_base;
Komal Seelam644263d2016-02-22 20:45:49 +05302043 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002044
2045 if (CE_RUNNING != CE_state->state) {
2046 *ce_sr_base_paddr = 0;
2047 *ce_sr_ring_size = 0;
2048 return;
2049 }
2050
2051 /* Update default value for descriptor */
2052 for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
2053 ring_loop++) {
2054 ce_desc = (struct CE_src_desc *)
2055 ((char *)CE_state->src_ring->base_addr_owner_space +
2056 ring_loop * (sizeof(struct CE_src_desc)));
2057 CE_IPA_RING_INIT(ce_desc);
2058 }
2059
2060 /* Get BAR address */
2061 hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
2062
Leo Changd85f78d2015-11-13 10:55:34 -08002063 *ce_sr_base_paddr = CE_state->src_ring->base_addr_CE_space;
2064 *ce_sr_ring_size = (uint32_t) (CE_state->src_ring->nentries *
2065 sizeof(struct CE_src_desc));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002066 *ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
2067 SR_WR_INDEX_ADDRESS;
2068 return;
2069}
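/*
 * Illustrative sketch only, kept out of the build with #if 0 (the
 * function name is hypothetical): an IPA uC setup path querying the
 * source ring resources exported by ce_ipa_get_resource() above.
 */
#if 0
static void example_ipa_uc_setup(struct CE_handle *ce)
{
	qdf_dma_addr_t sr_base_paddr;
	qdf_dma_addr_t reg_paddr;
	uint32_t sr_ring_size;

	ce_ipa_get_resource(ce, &sr_base_paddr, &sr_ring_size, &reg_paddr);
	/* hand sr_base_paddr, sr_ring_size and reg_paddr to the uC driver */
}
#endif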
2070#endif /* IPA_OFFLOAD */
2071