/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include "hif.h"
#include "hif_io32.h"
#include "ce_api.h"
#include "ce_main.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "qdf_lock.h"
#include "regtable.h"
#include "hif_main.h"
#include "hif_debug.h"

#ifdef IPA_OFFLOAD
#ifdef QCA_WIFI_3_0
#define CE_IPA_RING_INIT(ce_desc) \
	do { \
		ce_desc->gather = 0; \
		ce_desc->enable_11h = 0; \
		ce_desc->meta_data_low = 0; \
		ce_desc->packet_result_offset = 64; \
		ce_desc->toeplitz_hash_enable = 0; \
		ce_desc->addr_y_search_disable = 0; \
		ce_desc->addr_x_search_disable = 0; \
		ce_desc->misc_int_disable = 0; \
		ce_desc->target_int_disable = 0; \
		ce_desc->host_int_disable = 0; \
		ce_desc->dest_byte_swap = 0; \
		ce_desc->byte_swap = 0; \
		ce_desc->type = 2; \
		ce_desc->tx_classify = 1; \
		ce_desc->buffer_addr_hi = 0; \
		ce_desc->meta_data = 0; \
		ce_desc->nbytes = 128; \
	} while (0)
#else
#define CE_IPA_RING_INIT(ce_desc) \
	do { \
		ce_desc->byte_swap = 0; \
		ce_desc->nbytes = 60; \
		ce_desc->gather = 0; \
	} while (0)
#endif /* QCA_WIFI_3_0 */
#endif /* IPA_OFFLOAD */

static int war1_allow_sleep;
/* io32 write workaround */
static int hif_ce_war1;

#ifdef CONFIG_SLUB_DEBUG_ON

/**
 * struct hif_ce_desc_event - structure for detailing a ce event
 * @type: what the event was
 * @time: when it happened
 * @descriptor: descriptor enqueued or dequeued
 * @memory: virtual address that was used
 * @index: location of the descriptor in the ce ring
 */
struct hif_ce_desc_event {
	uint16_t index;
	enum hif_ce_event_type type;
	uint64_t time;
	union ce_desc descriptor;
	void *memory;
};

/* max history to record per copy engine */
#define HIF_CE_HISTORY_MAX 512
qdf_atomic_t hif_ce_desc_history_index[CE_COUNT_MAX];
struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];


/**
 * get_next_record_index() - get the next record index
 * @table_index: atomic index variable to increment
 * @array_size: array size of the circular buffer
 *
 * Increment the atomic index and reserve the value.
 * Takes care of buffer wrap.
 * Guaranteed to be thread safe as long as fewer than array_size contexts
 * try to access the array. If there are more than array_size contexts
 * trying to access the array, full locking of the recording process would
 * be needed to have sane logging.
 */
static int get_next_record_index(qdf_atomic_t *table_index, int array_size)
{
	int record_index = qdf_atomic_inc_return(table_index);
	if (record_index == array_size)
		qdf_atomic_sub(array_size, table_index);

	while (record_index >= array_size)
		record_index -= array_size;
	return record_index;
}
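
/*
 * Illustrative sketch (not part of the driver): reserving a history slot
 * with the helper above, the way the recording code below does it:
 *
 *	int idx = get_next_record_index(&hif_ce_desc_history_index[ce_id],
 *					HIF_CE_HISTORY_MAX);
 *	hif_ce_desc_history[ce_id][idx].type = HIF_TX_DESC_POST;
 *
 * With HIF_CE_HISTORY_MAX = 512, a return value of 512 triggers the
 * qdf_atomic_sub() above and the local value reduces to slot 0, so up to
 * array_size concurrent callers always land on distinct slots.
 */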

/**
 * hif_record_ce_desc_event() - record ce descriptor events
 * @scn: hif_softc
 * @ce_id: which ce is the event occurring on
 * @type: what happened
 * @descriptor: pointer to the descriptor posted/completed
 * @memory: virtual address of buffer related to the descriptor
 * @index: index that the descriptor was/will be at.
 */
void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
				enum hif_ce_event_type type,
				union ce_desc *descriptor,
				void *memory, int index)
{
	int record_index = get_next_record_index(
			&hif_ce_desc_history_index[ce_id], HIF_CE_HISTORY_MAX);

	struct hif_ce_desc_event *event =
		&hif_ce_desc_history[ce_id][record_index];
	event->type = type;
	event->time = qdf_get_monotonic_boottime();

	if (descriptor != NULL)
		event->descriptor = *descriptor;
	else
		memset(&event->descriptor, 0, sizeof(union ce_desc));
	event->memory = memory;
	event->index = index;
}

/**
 * ce_init_ce_desc_event_log() - initialize the ce event log
 * @ce_id: copy engine id for which we are initializing the log
 * @size: size of array to dedicate
 *
 * Currently the passed size is ignored in favor of a precompiled value.
 */
void ce_init_ce_desc_event_log(int ce_id, int size)
{
	qdf_atomic_init(&hif_ce_desc_history_index[ce_id]);
}
#else
void hif_record_ce_desc_event(struct hif_softc *scn,
		int ce_id, enum hif_ce_event_type type,
		union ce_desc *descriptor, void *memory,
		int index)
{
}

inline void ce_init_ce_desc_event_log(int ce_id, int size)
{
}
#endif

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem -- than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 * Target TX hash result: toeplitz_hash_result
 */

/*
 * Guts of ce_send, used by both ce_send and ce_sendlist_send.
 * The caller takes responsibility for any needed locking.
 */
int
ce_completed_send_next_nolock(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      qdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *sw_idx, unsigned int *hw_idx,
			      uint32_t *toeplitz_hash_result);

void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
				   u32 ctrl_addr, unsigned int write_index)
{
	if (hif_ce_war1) {
		void __iomem *indicator_addr;

		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;

		if (!war1_allow_sleep
		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
			hif_write32_mb(indicator_addr,
				       (CDC_WAR_MAGIC_STR | write_index));
		} else {
			unsigned long irq_flags;
			local_irq_save(irq_flags);
			hif_write32_mb(indicator_addr, 1);

			/*
			 * PCIE write waits for ACK in IPQ8K, there is no
			 * need to read back value.
			 */
			(void)hif_read32_mb(indicator_addr);
			(void)hif_read32_mb(indicator_addr); /* conservative */

			CE_SRC_RING_WRITE_IDX_SET(scn,
						  ctrl_addr, write_index);

			hif_write32_mb(indicator_addr, 0);
			local_irq_restore(irq_flags);
		}
	} else
		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
}

int
ce_send_nolock(struct CE_handle *copyeng,
			   void *per_transfer_context,
			   qdf_dma_addr_t buffer,
			   uint32_t nbytes,
			   uint32_t transfer_id,
			   uint32_t flags,
			   uint32_t user_flags)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return QDF_STATUS_E_FAILURE;
	if (unlikely(CE_RING_DELTA(nentries_mask,
				write_index, sw_index - 1) <= 0)) {
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		Q_TARGET_ACCESS_END(scn);
		return QDF_STATUS_E_FAILURE;
	}
	{
		enum hif_ce_event_type event_type = HIF_TX_GATHER_DESC_POST;
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		/* Update low 32 bits source descriptor address */
		shadow_src_desc->buffer_addr =
			(uint32_t)(dma_addr & 0xFFFFFFFF);
#ifdef QCA_WIFI_3_0
		shadow_src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0x1F);
		user_flags |= shadow_src_desc->buffer_addr_hi;
		memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
		       sizeof(uint32_t));
#endif
		shadow_src_desc->meta_data = transfer_id;

		/*
		 * Set the swap bit if:
		 * typical sends on this CE are swapped (host is big-endian)
		 * and this send doesn't disable the swapping
		 * (data is not bytestream)
		 */
		shadow_src_desc->byte_swap =
			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
			 != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
		shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
		shadow_src_desc->nbytes = nbytes;

		*src_desc = *shadow_src_desc;

		src_ring->per_transfer_context[write_index] =
			per_transfer_context;

		/* Update Source Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		/* WORKAROUND */
		if (!shadow_src_desc->gather) {
			event_type = HIF_TX_DESC_POST;
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);
		}

		/* src_ring->write_index hasn't been updated even though
		 * the register has already been written to.
		 */
		hif_record_ce_desc_event(scn, CE_state->id, event_type,
			(union ce_desc *) shadow_src_desc, per_transfer_context,
			src_ring->write_index);

		src_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	}
	Q_TARGET_ACCESS_END(scn);
	return status;
}

int
ce_send(struct CE_handle *copyeng,
		void *per_transfer_context,
		qdf_dma_addr_t buffer,
		uint32_t nbytes,
		uint32_t transfer_id,
		uint32_t flags,
		uint32_t user_flag)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	int status;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	status = ce_send_nolock(copyeng, per_transfer_context, buffer, nbytes,
			transfer_id, flags, user_flag);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

unsigned int ce_sendlist_sizeof(void)
{
	return sizeof(struct ce_sendlist);
}

void ce_sendlist_init(struct ce_sendlist *sendlist)
{
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	sl->num_items = 0;
}

int
ce_sendlist_buf_add(struct ce_sendlist *sendlist,
					qdf_dma_addr_t buffer,
					uint32_t nbytes,
					uint32_t flags,
					uint32_t user_flags)
{
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	unsigned int num_items = sl->num_items;
	struct ce_sendlist_item *item;

	if (num_items >= CE_SENDLIST_ITEMS_MAX) {
		QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
		return QDF_STATUS_E_RESOURCES;
	}

	item = &sl->item[num_items];
	item->send_type = CE_SIMPLE_BUFFER_TYPE;
	item->data = buffer;
	item->u.nbytes = nbytes;
	item->flags = flags;
	item->user_flags = user_flags;
	sl->num_items = num_items + 1;
	return QDF_STATUS_SUCCESS;
}

int
ce_sendlist_send(struct CE_handle *copyeng,
		 void *per_transfer_context,
		 struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	int status = -ENOMEM;
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int num_items = sl->num_items;
	unsigned int sw_index;
	unsigned int write_index;

	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
	    num_items) {
		struct ce_sendlist_item *item;
		int i;

		/* handle all but the last item uniformly */
		for (i = 0; i < num_items - 1; i++) {
			item = &sl->item[i];
			/* TBDXXX: Support extensible sendlist_types? */
			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
			status = ce_send_nolock(copyeng, CE_SENDLIST_ITEM_CTXT,
				(qdf_dma_addr_t) item->data,
				item->u.nbytes, transfer_id,
				item->flags | CE_SEND_FLAG_GATHER,
				item->user_flags);
			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		}
		/* provide valid context pointer for final item */
		item = &sl->item[i];
		/* TBDXXX: Support extensible sendlist_types? */
		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
		status = ce_send_nolock(copyeng, per_transfer_context,
					(qdf_dma_addr_t) item->data,
					item->u.nbytes,
					transfer_id, item->flags,
					item->user_flags);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
					QDF_NBUF_TX_PKT_CE);
		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
			(uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
			sizeof(((qdf_nbuf_t)per_transfer_context)->data)));
	} else {
		/*
		 * Probably not worth the additional complexity to support
		 * partial sends with continuation or notification. We expect
		 * to use large rings and small sendlists. If we can't handle
		 * the entire request at once, punt it back to the caller.
		 */
	}
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}
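
/*
 * Illustrative sketch (not part of the driver): building and posting a
 * two-fragment gather send with the sendlist helpers above:
 *
 *	struct ce_sendlist sendlist;
 *
 *	ce_sendlist_init(&sendlist);
 *	ce_sendlist_buf_add(&sendlist, hdr_paddr, hdr_len, 0, 0);
 *	ce_sendlist_buf_add(&sendlist, data_paddr, data_len, 0, 0);
 *	ce_sendlist_send(copyeng, (void *)nbuf, &sendlist, transfer_id);
 *
 * All but the last item are posted with CE_SEND_FLAG_GATHER, so the target
 * sees the fragments as one logical transfer; hdr_paddr, data_paddr, nbuf
 * and transfer_id are placeholders for the caller's own values.
 */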

#ifdef WLAN_FEATURE_FASTPATH
#ifdef QCA_WIFI_3_0
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
		      uint64_t dma_addr,
		      uint32_t user_flags)
{
	shadow_src_desc->buffer_addr_hi =
		(uint32_t)((dma_addr >> 32) & 0x1F);
	user_flags |= shadow_src_desc->buffer_addr_hi;
	memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
	       sizeof(uint32_t));
}
#else
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
		      uint64_t dma_addr,
		      uint32_t user_flags)
{
}
#endif

/**
 * ce_send_fast() - CE layer Tx buffer posting function
 * @copyeng: copy engine handle
 * @msdus: array of msdus to be sent
 * @num_msdus: number of msdus in the array
 * @transfer_id: transfer_id
 *
 * Assumption: Called with an array of MSDUs.
 * Function:
 * For each msdu in the array
 * 1. Check no. of available entries
 * 2. Create src ring entries (allocated in consistent memory)
 * 3. Write index to h/w
 *
 * Return: No. of packets that could be sent
 */

int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t *msdus,
		 unsigned int num_msdus, unsigned int transfer_id)
{
	struct CE_state *ce_state = (struct CE_state *)copyeng;
	struct hif_softc *scn = ce_state->scn;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct CE_ring_state *src_ring = ce_state->src_ring;
	u_int32_t ctrl_addr = ce_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	unsigned int frag_len;
	qdf_nbuf_t msdu;
	int i;
	uint64_t dma_addr;
	uint32_t user_flags;

	qdf_spin_lock_bh(&ce_state->ce_index_lock);
	Q_TARGET_ACCESS_BEGIN(scn);

	src_ring->sw_index = CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
	write_index = src_ring->write_index;
	sw_index = src_ring->sw_index;

	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1)
			 < (SLOTS_PER_DATAPATH_TX * num_msdus))) {
		HIF_ERROR("Source ring full, required %d, available %d",
			  (SLOTS_PER_DATAPATH_TX * num_msdus),
			  CE_RING_DELTA(nentries_mask, write_index, sw_index - 1));
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		Q_TARGET_ACCESS_END(scn);
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
		return 0;
	}

	/* 2 msdus per packet */
	for (i = 0; i < num_msdus; i++) {
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		hif_pm_runtime_get_noresume(hif_hdl);
		msdu = msdus[i];

		/*
		 * First fill out the ring descriptor for the HTC HTT frame
		 * header. These are uncached writes. Should we use a local
		 * structure instead?
		 */
		/* HTT/HTC header can be passed as an argument */
		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);

		shadow_src_desc->meta_data = transfer_id;
		shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);

		/*
		 * HTC HTT header is a word stream, so byte swap if CE byte
		 * swap enabled
		 */
		shadow_src_desc->byte_swap = ((ce_state->attr_flags &
					CE_ATTR_BYTE_SWAP_DATA) != 0);
		/* For the first one, it still does not need to write */
		shadow_src_desc->gather = 1;
		*src_desc = *shadow_src_desc;

		/* By default we could initialize the transfer context to this
		 * value
		 */
		src_ring->per_transfer_context[write_index] =
			CE_SENDLIST_ITEM_CTXT;

		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
		/*
		 * Now fill out the ring descriptor for the actual data
		 * packet
		 */
		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		/*
		 * Clear packet offset for all but the first CE desc.
		 */
		user_flags &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
		shadow_src_desc->meta_data = transfer_id;

		/* get actual packet length */
		frag_len = qdf_nbuf_get_frag_len(msdu, 1);

		/* only read download_len once */
		shadow_src_desc->nbytes = ce_state->download_len;
		if (shadow_src_desc->nbytes > frag_len)
			shadow_src_desc->nbytes = frag_len;

		/* Data packet is a byte stream, so disable byte swap */
		shadow_src_desc->byte_swap = 0;
		/* For the last one, gather is not set */
		shadow_src_desc->gather = 0;
		*src_desc = *shadow_src_desc;
		src_ring->per_transfer_context[write_index] = msdu;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	}

	/* Write the final index to h/w one-shot */
	if (i) {
		src_ring->write_index = write_index;

		if (hif_pm_runtime_get(hif_hdl) == 0) {
			/* Don't call WAR_XXX from here
			 * Just call XXX instead, that has the reqd. intel
			 */
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);
			hif_pm_runtime_put(hif_hdl);
		}
	}

	Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_bh(&ce_state->ce_index_lock);

	/*
	 * If all packets in the array are transmitted,
	 * i = num_msdus
	 * Temporarily add an ASSERT
	 */
	ASSERT(i == num_msdus);
	return i;
}
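
/*
 * Illustrative sketch (not part of the driver): the fastpath TX path hands
 * ce_send_fast() an array of already-mapped nbufs; fragment 0 is expected
 * to hold the HTC/HTT header and fragment 1 the data:
 *
 *	qdf_nbuf_t msdus[2] = { msdu0, msdu1 };
 *	unsigned int sent;
 *
 *	sent = ce_send_fast(ce_tx_hdl, msdus, 2, transfer_id);
 *	if (sent != 2)
 *		;	// ring was full; nothing was posted, caller keeps the nbufs
 *
 * msdu0/msdu1 and ce_tx_hdl are placeholders; the real caller lives in the
 * HTT/HIF layers.
 */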

/**
 * ce_is_fastpath_enabled() - returns true if fastpath mode is enabled
 * @scn: Handle to HIF context
 *
 * Return: true if fastpath is enabled else false.
 */
static bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return scn->fastpath_mode_on;
}

/**
 * ce_is_fastpath_handler_registered() - return true if a fastpath handler
 * is registered for this (datapath) CE.
 * @ce_state: handle to copy engine
 *
 * Return: true if a fastpath handler is registered for the datapath CE.
 */
static bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
{
	if (ce_state->fastpath_handler)
		return true;
	else
		return false;
}


#else
static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return false;
}

static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
{
	return false;
}
#endif /* WLAN_FEATURE_FASTPATH */

/**
 * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: 0 if the buffer is enqueued
 */
int
ce_recv_buf_enqueue(struct CE_handle *copyeng,
		    void *per_recv_context, qdf_dma_addr_t buffer)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return -EIO;
	}

	if ((CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) ||
	    (ce_is_fastpath_enabled(scn) && CE_state->htt_rx_data)) {
		struct CE_dest_desc *dest_ring_base =
			(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
		struct CE_dest_desc *dest_desc =
			CE_DEST_RING_TO_DESC(dest_ring_base, write_index);

		/* Update low 32 bit destination descriptor */
		dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
#ifdef QCA_WIFI_3_0
		dest_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0x1F);
#endif
		dest_desc->nbytes = 0;

		dest_ring->per_transfer_context[write_index] =
			per_recv_context;

		hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_POST,
				(union ce_desc *) dest_desc, per_recv_context,
				write_index);

		/* Update Destination Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
		if (write_index != sw_index) {
			CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
			dest_ring->write_index = write_index;
		}
		status = QDF_STATUS_SUCCESS;
	} else
		status = QDF_STATUS_E_FAILURE;

	Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
	return status;
}
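
/*
 * Illustrative sketch (not part of the driver): keeping the destination
 * ring primed with receive buffers, as a HIF pipe typically does:
 *
 *	while (ce_recv_entries_avail(copyeng) > 0) {
 *		qdf_nbuf_t nbuf = alloc_and_map_rx_nbuf();	// placeholder
 *
 *		if (ce_recv_buf_enqueue(copyeng, (void *)nbuf,
 *					qdf_nbuf_get_frag_paddr(nbuf, 0)))
 *			break;	// ring full or target access failed
 *	}
 *
 * alloc_and_map_rx_nbuf() is a stand-in for the caller's own
 * allocation/DMA-mapping helper.
 */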

void
ce_send_watermarks_set(struct CE_handle *copyeng,
		       unsigned int low_alert_nentries,
		       unsigned int high_alert_nentries)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
}

void
ce_recv_watermarks_set(struct CE_handle *copyeng,
		       unsigned int low_alert_nentries,
		       unsigned int high_alert_nentries)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
				low_alert_nentries);
	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
				high_alert_nentries);
}

unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;

	qdf_spin_lock(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}
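
/*
 * Worked example (assuming CE_RING_DELTA() computes the ring distance
 * (to - from) & mask, which is how it is used throughout this file): with
 * a 512-entry ring, nentries_mask = 511, write_index = 500 and
 * sw_index = 10, the call above yields (10 - 1 - 500) & 511 = 21 free
 * source-ring slots. The delta is taken against sw_index - 1 so that one
 * slot is always kept unused, letting a full ring be distinguished from
 * an empty one.
 */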

unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;

	qdf_spin_lock(&CE_state->ce_index_lock);
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}

/*
 * Guts of ce_send_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
unsigned int
ce_send_entries_done_nolock(struct hif_softc *scn,
			    struct CE_state *CE_state)
{
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int read_index;

	sw_index = src_ring->sw_index;
	read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);

	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
}

unsigned int ce_send_entries_done(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int nentries;

	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries = ce_send_entries_done_nolock(CE_state->scn, CE_state);
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return nentries;
}

/*
 * Guts of ce_recv_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
unsigned int
ce_recv_entries_done_nolock(struct hif_softc *scn,
			    struct CE_state *CE_state)
{
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int read_index;

	sw_index = dest_ring->sw_index;
	read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);

	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
}

unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int nentries;

	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries = ce_recv_entries_done_nolock(CE_state->scn, CE_state);
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return nentries;
}

/* Debug support */
void *ce_debug_cmplrn_context;	/* completed recv next context */
void *ce_debug_cnclsn_context;	/* cancel send next context */
void *ce_debug_rvkrn_context;	/* revoke receive next context */
void *ce_debug_cmplsn_context;	/* completed send next context */

/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
int
ce_completed_recv_next_nolock(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      qdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *flagsp)
{
	int status;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct CE_dest_desc *dest_ring_base =
		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
	struct CE_dest_desc *dest_desc =
		CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
	int nbytes;
	struct CE_dest_desc dest_desc_info;
	/*
	 * By copying the dest_desc_info element to local memory, we could
	 * avoid extra memory read from non-cacheable memory.
	 */
	dest_desc_info = *dest_desc;
	nbytes = dest_desc_info.nbytes;
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_COMPLETION,
			(union ce_desc *) dest_desc,
			dest_ring->per_transfer_context[sw_index],
			sw_index);

	dest_desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
	*nbytesp = nbytes;
	*transfer_idp = dest_desc_info.meta_data;
	*flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

	if (per_CE_contextp) {
		*per_CE_contextp = CE_state->recv_context;
	}

	ce_debug_cmplrn_context = dest_ring->per_transfer_context[sw_index];
	if (per_transfer_contextp) {
		*per_transfer_contextp = ce_debug_cmplrn_context;
	}
	dest_ring->per_transfer_context[sw_index] = 0;	/* sanity */

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	status = QDF_STATUS_SUCCESS;

done:
	return status;
}

int
ce_completed_recv_next(struct CE_handle *copyeng,
		       void **per_CE_contextp,
		       void **per_transfer_contextp,
		       qdf_dma_addr_t *bufferp,
		       unsigned int *nbytesp,
		       unsigned int *transfer_idp, unsigned int *flagsp)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	int status;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	status =
		ce_completed_recv_next_nolock(CE_state, per_CE_contextp,
					      per_transfer_contextp, bufferp,
					      nbytesp, transfer_idp, flagsp);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

/* NB: Modeled after ce_completed_recv_next_nolock */
QDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state;
	struct CE_ring_state *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	QDF_STATUS status;
	struct hif_softc *scn;

	CE_state = (struct CE_state *)copyeng;
	dest_ring = CE_state->dest_ring;
	if (!dest_ring) {
		return QDF_STATUS_E_FAILURE;
	}

	scn = CE_state->scn;
	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct CE_dest_desc *dest_ring_base =
			(struct CE_dest_desc *)dest_ring->
			base_addr_owner_space;
		struct CE_dest_desc *dest_desc =
			CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);

		if (per_CE_contextp) {
			*per_CE_contextp = CE_state->recv_context;
		}

		ce_debug_rvkrn_context =
			dest_ring->per_transfer_context[sw_index];
		if (per_transfer_contextp) {
			*per_transfer_contextp = ce_debug_rvkrn_context;
		}
		dest_ring->per_transfer_context[sw_index] = 0;	/* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	} else {
		status = QDF_STATUS_E_FAILURE;
	}
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return status;
}

/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
int
ce_completed_send_next_nolock(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      qdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *sw_idx,
			      unsigned int *hw_idx,
			      uint32_t *toeplitz_hash_result)
{
	int status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int read_index;
	struct hif_softc *scn = CE_state->scn;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return QDF_STATUS_E_FAILURE;
		src_ring->hw_index =
			CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return QDF_STATUS_E_FAILURE;
	}
	read_index = src_ring->hw_index;

	if (sw_idx)
		*sw_idx = sw_index;

	if (hw_idx)
		*hw_idx = read_index;

	if ((read_index != sw_index) && (read_index != 0xffffffff)) {
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, sw_index);
#ifdef QCA_WIFI_3_0
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
#endif
		hif_record_ce_desc_event(scn, CE_state->id,
				HIF_TX_DESC_COMPLETION,
				(union ce_desc *) shadow_src_desc,
				src_ring->per_transfer_context[sw_index],
				sw_index);

		/* Return data from completed source descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
		*nbytesp = shadow_src_desc->nbytes;
		*transfer_idp = shadow_src_desc->meta_data;
#ifdef QCA_WIFI_3_0
		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
#else
		*toeplitz_hash_result = 0;
#endif
		if (per_CE_contextp) {
			*per_CE_contextp = CE_state->send_context;
		}

		ce_debug_cmplsn_context =
			src_ring->per_transfer_context[sw_index];
		if (per_transfer_contextp) {
			*per_transfer_contextp = ce_debug_cmplsn_context;
		}
		src_ring->per_transfer_context[sw_index] = 0;	/* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}

	return status;
}

/* NB: Modeled after ce_completed_send_next */
QDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp,
		    unsigned int *nbytesp,
		    unsigned int *transfer_idp,
		    uint32_t *toeplitz_hash_result)
{
	struct CE_state *CE_state;
	struct CE_ring_state *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	QDF_STATUS status;
	struct hif_softc *scn;

	CE_state = (struct CE_state *)copyeng;
	src_ring = CE_state->src_ring;
	if (!src_ring) {
		return QDF_STATUS_E_FAILURE;
	}

	scn = CE_state->scn;
	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);

		/* Return data from completed source descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
#ifdef QCA_WIFI_3_0
		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
#else
		*toeplitz_hash_result = 0;
#endif

		if (per_CE_contextp) {
			*per_CE_contextp = CE_state->send_context;
		}

		ce_debug_cnclsn_context =
			src_ring->per_transfer_context[sw_index];
		if (per_transfer_contextp) {
			*per_transfer_contextp = ce_debug_cnclsn_context;
		}
		src_ring->per_transfer_context[sw_index] = 0;	/* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	} else {
		status = QDF_STATUS_E_FAILURE;
	}
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return status;
}

/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
#define CE_WM_SHFT 1

int
ce_completed_send_next(struct CE_handle *copyeng,
		       void **per_CE_contextp,
		       void **per_transfer_contextp,
		       qdf_dma_addr_t *bufferp,
		       unsigned int *nbytesp,
		       unsigned int *transfer_idp,
		       unsigned int *sw_idx,
		       unsigned int *hw_idx,
		       unsigned int *toeplitz_hash_result)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	int status;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	status =
		ce_completed_send_next_nolock(CE_state, per_CE_contextp,
					      per_transfer_contextp, bufferp,
					      nbytesp, transfer_idp, sw_idx,
					      hw_idx, toeplitz_hash_result);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

#ifdef ATH_11AC_TXCOMPACT
/* CE engine descriptor reap
 * Similar to ce_per_engine_service; the only difference is that
 * ce_per_engine_service does receive and reaping of completed descriptors,
 * while this function only handles reaping of Tx complete descriptors.
 * The function is called from the threshold reap poll routine
 * hif_send_complete_check, so it should not contain receive functionality
 * within it.
 */

void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
{
	void *CE_context;
	void *transfer_context;
	qdf_dma_addr_t buf;
	unsigned int nbytes;
	unsigned int id;
	unsigned int sw_idx, hw_idx;
	uint32_t toeplitz_hash_result;
	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
			NULL, NULL, 0);

	/* Since this function is called from both user context and
	 * tasklet context the spinlock has to lock the bottom halves.
	 * This fix assumes that ATH_11AC_TXCOMPACT flag is always
	 * enabled in TX polling mode. If this is not the case, more
	 * bottom-half spin lock changes are needed. Due to data path
	 * performance concerns, after internal discussion we've decided
	 * to make the minimum change, i.e., only address the issue that
	 * occurred in this function. The possible negative effect of this
	 * minimum change is that, in the future, if some other function is
	 * also opened up to user context, those cases will need to be
	 * addressed by changing spin_lock to spin_lock_bh as well.
	 */

	qdf_spin_lock_bh(&CE_state->ce_index_lock);

	if (CE_state->send_cb) {
		{
			/* Pop completed send buffers and call the
			 * registered send callback for each
			 */
			while (ce_completed_send_next_nolock
				 (CE_state, &CE_context,
				  &transfer_context, &buf,
				  &nbytes, &id, &sw_idx, &hw_idx,
				  &toeplitz_hash_result) ==
				  QDF_STATUS_SUCCESS) {
				if (ce_id != CE_HTT_H2T_MSG) {
					qdf_spin_unlock_bh(
						&CE_state->ce_index_lock);
					CE_state->send_cb(
						(struct CE_handle *)
						CE_state, CE_context,
						transfer_context, buf,
						nbytes, id, sw_idx, hw_idx,
						toeplitz_hash_result);
					qdf_spin_lock_bh(
						&CE_state->ce_index_lock);
				} else {
					struct HIF_CE_pipe_info *pipe_info =
						(struct HIF_CE_pipe_info *)
						CE_context;

					qdf_spin_lock_bh(&pipe_info->
						completion_freeq_lock);
					pipe_info->num_sends_allowed++;
					qdf_spin_unlock_bh(&pipe_info->
						completion_freeq_lock);
				}
			}
		}
	}

	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
			NULL, NULL, 0);
	Q_TARGET_ACCESS_END(scn);
}

#endif /*ATH_11AC_TXCOMPACT */

Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001324/*
 1325 * Number of times to check for any pending Tx/Rx completion on
 1326 * a copy engine; this count should be big enough. Once we hit
 1327 * this threshold we stop checking for any Tx/Rx completion in the
 1328 * same interrupt handling pass. Note that this threshold is only
 1329 * used for Rx interrupt processing; it can be used for Tx as well
 1330 * if we suspect an infinite loop in checking for pending Tx completion.
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001331 */
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001332#define CE_TXRX_COMP_CHECK_THRESHOLD 20
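/*
 * A minimal sketch of how this threshold bounds the re-check loop; the
 * authoritative versions are in ce_per_engine_service_fast() and
 * ce_per_engine_service() below:
 *
 *	unsigned int more_comp_cnt = 0;
 * more_data:
 *	... reap completions ...
 *	if (ce_recv_entries_done_nolock(scn, ce_state)) {
 *		if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD)
 *			goto more_data;
 *		else
 *			HIF_ERROR("Potential infinite loop detected");
 *	}
 */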
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001333
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001334#ifdef WLAN_FEATURE_FASTPATH
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001335/**
1336 * ce_fastpath_rx_handle() - Updates write_index and calls fastpath msg handler
1337 * @ce_state: handle to copy engine state
1338 * @cmpl_msdus: Rx msdus
1339 * @num_cmpls: number of Rx msdus
1340 * @ctrl_addr: CE control address
1341 *
1342 * Return: None
1343 */
1344static void ce_fastpath_rx_handle(struct CE_state *ce_state,
1345 qdf_nbuf_t *cmpl_msdus, uint32_t num_cmpls,
1346 uint32_t ctrl_addr)
1347{
1348 struct hif_softc *scn = ce_state->scn;
1349 struct CE_ring_state *dest_ring = ce_state->dest_ring;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001350 uint32_t nentries_mask = dest_ring->nentries_mask;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001351 uint32_t write_index;
1352
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001353 (ce_state->fastpath_handler)(ce_state->context, cmpl_msdus, num_cmpls);
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001354
1355 /* Update Destination Ring Write Index */
1356 write_index = dest_ring->write_index;
1357 write_index = CE_RING_IDX_ADD(nentries_mask, write_index, num_cmpls);
1358 CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
1359 dest_ring->write_index = write_index;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001360}
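/*
 * A short worked example of the index update above, assuming the usual
 * power-of-two ring where nentries_mask == nentries - 1 (e.g. a 512-entry
 * ring, mask 0x1ff): with write_index == 510 and num_cmpls == 6,
 * CE_RING_IDX_ADD() yields (510 + 6) & 0x1ff == 4, i.e. the write index
 * wraps past the end of the ring before being programmed into the
 * destination ring write index register.
 */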
1361
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001362#define MSG_FLUSH_NUM 6
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001363/**
1364 * ce_per_engine_service_fast() - CE handler routine to service fastpath messages
1365 * @scn: hif_context
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001366 * @ce_id: Copy engine ID
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001367 * 1) Go through the CE ring, and find the completions
1368 * 2) For valid completions retrieve context (nbuf) for per_transfer_context[]
1369 * 3) Unmap buffer & accumulate in an array.
1370 * 4) Call message handler when array is full or when exiting the handler
1371 *
1372 * Return: void
1373 */
1374
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001375static void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001376{
1377 struct CE_state *ce_state = scn->ce_id_to_state[ce_id];
1378 struct CE_ring_state *dest_ring = ce_state->dest_ring;
1379 struct CE_dest_desc *dest_ring_base =
1380 (struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1381
1382 uint32_t nentries_mask = dest_ring->nentries_mask;
1383 uint32_t sw_index = dest_ring->sw_index;
1384 uint32_t nbytes;
1385 qdf_nbuf_t nbuf;
1386 uint32_t paddr_lo;
1387 struct CE_dest_desc *dest_desc;
1388 uint32_t ce_int_status = (1 << ce_id);
1389 qdf_nbuf_t cmpl_msdus[MSG_FLUSH_NUM];
1390 uint32_t ctrl_addr = ce_state->ctrl_addr;
1391 uint32_t nbuf_cmpl_idx = 0;
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001392 unsigned int more_comp_cnt = 0;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001393
1394more_data:
1395 if (ce_int_status == (1 << ce_id)) {
1396 for (;;) {
1397
1398 dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base,
1399 sw_index);
1400
1401 /*
1402 * The following 2 reads are from non-cached memory
1403 */
1404 nbytes = dest_desc->nbytes;
1405
1406 /* If completion is invalid, break */
1407 if (qdf_unlikely(nbytes == 0))
1408 break;
1409
1410
1411 /*
1412 * Build the nbuf list from valid completions
1413 */
1414 nbuf = dest_ring->per_transfer_context[sw_index];
1415
1416 /*
1417 * No lock is needed here, since this is the only thread
1418 * that accesses the sw_index
1419 */
1420 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1421
1422 /*
1423 * CAREFUL : Uncached write, but still less expensive,
1424 * since most modern caches use "write-combining" to
1425 * flush multiple cache-writes all at once.
1426 */
1427 dest_desc->nbytes = 0;
1428
1429 /*
 1430 * Per our understanding this sync is not required on our
 1431 * platform, since we perform the same cache invalidation
 1432 * operation on the same buffer twice in succession,
 1433 * without any modification to the buffer by the CPU in
 1434 * between.
 1435 * However, this code with two syncs in succession has
 1436 * been undergoing testing at a customer site and has
 1437 * shown no problems so far. We would like the customer
 1438 * to confirm that this sync is really not required
 1439 * before we remove it
 1440 * completely.
1441 */
1442 paddr_lo = QDF_NBUF_CB_PADDR(nbuf);
1443
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001444 qdf_mem_dma_sync_single_for_cpu(scn->qdf_dev,
1445 paddr_lo,
1446 (skb_end_pointer(nbuf) - (nbuf)->data),
1447 DMA_FROM_DEVICE);
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001448 qdf_nbuf_put_tail(nbuf, nbytes);
1449
1450 qdf_assert_always(nbuf->data != NULL);
1451
1452 cmpl_msdus[nbuf_cmpl_idx++] = nbuf;
1453
1454 /*
 1455 * We are not posting the buffers back; instead
 1456 * we reuse the buffers.
1457 */
1458 if (nbuf_cmpl_idx == MSG_FLUSH_NUM) {
1459 qdf_spin_unlock(&ce_state->ce_index_lock);
1460 ce_fastpath_rx_handle(ce_state, cmpl_msdus,
1461 MSG_FLUSH_NUM, ctrl_addr);
1462 qdf_spin_lock(&ce_state->ce_index_lock);
1463 nbuf_cmpl_idx = 0;
1464 }
1465
1466 }
1467
1468 /*
1469 * If there are not enough completions to fill the array,
1470 * just call the message handler here
1471 */
1472 if (nbuf_cmpl_idx) {
1473 qdf_spin_unlock(&ce_state->ce_index_lock);
1474 ce_fastpath_rx_handle(ce_state, cmpl_msdus,
1475 nbuf_cmpl_idx, ctrl_addr);
1476 qdf_spin_lock(&ce_state->ce_index_lock);
1477 nbuf_cmpl_idx = 0;
1478 }
1479 qdf_atomic_set(&ce_state->rx_pending, 0);
1480 dest_ring->sw_index = sw_index;
1481
1482 CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1483 HOST_IS_COPY_COMPLETE_MASK);
1484 }
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001485 if (ce_recv_entries_done_nolock(scn, ce_state)) {
1486 if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1487 goto more_data;
1488 } else {
1489 HIF_ERROR("%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1490 __func__, nentries_mask,
1491 ce_state->dest_ring->sw_index,
1492 CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr));
1493 }
1494 }
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001495}
1496
1497#else
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001498static void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001499{
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001500}
1501#endif /* WLAN_FEATURE_FASTPATH */
1502
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001503/*
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001504 * Guts of interrupt handler for per-engine interrupts on a particular CE.
1505 *
1506 * Invokes registered callbacks for recv_complete,
1507 * send_complete, and watermarks.
1508 *
1509 * Returns: number of messages processed
1510 */
1511
Komal Seelam644263d2016-02-22 20:45:49 +05301512int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001513{
1514 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1515 uint32_t ctrl_addr = CE_state->ctrl_addr;
1516 void *CE_context;
1517 void *transfer_context;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301518 qdf_dma_addr_t buf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001519 unsigned int nbytes;
1520 unsigned int id;
1521 unsigned int flags;
1522 uint32_t CE_int_status;
1523 unsigned int more_comp_cnt = 0;
1524 unsigned int more_snd_comp_cnt = 0;
1525 unsigned int sw_idx, hw_idx;
1526 uint32_t toeplitz_hash_result;
Komal Seelambd7c51d2016-02-24 10:27:30 +05301527 uint32_t mode = hif_get_conparam(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001528
1529 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1530 HIF_ERROR("[premature rc=0]\n");
1531 return 0; /* no work done */
1532 }
1533
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301534 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001535
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001536 /*
 1537 * The check below makes sure that the CE we are handling is a datapath
 1538 * CE and that fastpath is enabled.
1539 */
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001540 if (ce_is_fastpath_handler_registered(CE_state)) {
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001541 /* For datapath only Rx CEs */
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001542 ce_per_engine_service_fast(scn, CE_id);
1543 qdf_spin_unlock(&CE_state->ce_index_lock);
1544 return CE_state->receive_count;
1545 }
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001546
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001547 /* Clear force_break flag and re-initialize receive_count to 0 */
1548
1549 /* NAPI: scn variables- thread/multi-processing safety? */
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001550 CE_state->receive_count = 0;
Houston Hoffman18c7fc52015-09-02 11:44:42 -07001551 CE_state->force_break = 0;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001552more_completions:
1553 if (CE_state->recv_cb) {
1554
1555 /* Pop completed recv buffers and call
1556 * the registered recv callback for each
1557 */
1558 while (ce_completed_recv_next_nolock
1559 (CE_state, &CE_context, &transfer_context,
1560 &buf, &nbytes, &id, &flags) ==
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301561 QDF_STATUS_SUCCESS) {
1562 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001563 CE_state->recv_cb((struct CE_handle *)CE_state,
1564 CE_context, transfer_context, buf,
1565 nbytes, id, flags);
1566
1567 /*
1568 * EV #112693 -
1569 * [Peregrine][ES1][WB342][Win8x86][Performance]
1570 * BSoD_0x133 occurred in VHT80 UDP_DL
 1571 * Break out of the DPC by force if the number of loops in
 1572 * hif_pci_ce_recv_data reaches MAX_NUM_OF_RECEIVES,
 1573 * to avoid spending too long in the DPC for each
 1574 * interrupt. Schedule another DPC to avoid data loss
 1575 * if we had taken the force-break action. This is
 1576 * currently applied to Windows OS only; Linux/Mac OS
 1577 * can extend it to their platforms
 1578 * if necessary.
1579 */
1580
1581 /* Break the receive processes by
1582 * force if force_break set up
1583 */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301584 if (qdf_unlikely(CE_state->force_break)) {
1585 qdf_atomic_set(&CE_state->rx_pending, 1);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001586 if (Q_TARGET_ACCESS_END(scn) < 0)
1587 HIF_ERROR("<--[premature rc=%d]\n",
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001588 CE_state->receive_count);
1589 return CE_state->receive_count;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001590 }
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301591 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001592 }
1593 }
1594
1595 /*
 1596 * Attention: we may experience a potential infinite loop in the
 1597 * while loop below during a send stress test.
 1598 * Resolve it the same way as the receive case (refer to EV #112693).
1599 */
1600
1601 if (CE_state->send_cb) {
1602 /* Pop completed send buffers and call
1603 * the registered send callback for each
1604 */
1605
1606#ifdef ATH_11AC_TXCOMPACT
1607 while (ce_completed_send_next_nolock
1608 (CE_state, &CE_context,
1609 &transfer_context, &buf, &nbytes,
1610 &id, &sw_idx, &hw_idx,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301611 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001612
1613 if (CE_id != CE_HTT_H2T_MSG ||
Houston Hoffman75ef5a52016-04-14 17:15:49 -07001614 QDF_IS_EPPING_ENABLED(mode)) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301615 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001616 CE_state->send_cb((struct CE_handle *)CE_state,
1617 CE_context, transfer_context,
1618 buf, nbytes, id, sw_idx,
1619 hw_idx, toeplitz_hash_result);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301620 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001621 } else {
1622 struct HIF_CE_pipe_info *pipe_info =
1623 (struct HIF_CE_pipe_info *)CE_context;
1624
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301625 qdf_spin_lock(&pipe_info->
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001626 completion_freeq_lock);
1627 pipe_info->num_sends_allowed++;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301628 qdf_spin_unlock(&pipe_info->
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001629 completion_freeq_lock);
1630 }
1631 }
1632#else /*ATH_11AC_TXCOMPACT */
1633 while (ce_completed_send_next_nolock
1634 (CE_state, &CE_context,
1635 &transfer_context, &buf, &nbytes,
1636 &id, &sw_idx, &hw_idx,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301637 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1638 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001639 CE_state->send_cb((struct CE_handle *)CE_state,
1640 CE_context, transfer_context, buf,
1641 nbytes, id, sw_idx, hw_idx,
1642 toeplitz_hash_result);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301643 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001644 }
1645#endif /*ATH_11AC_TXCOMPACT */
1646 }
1647
1648more_watermarks:
1649 if (CE_state->misc_cbs) {
1650 CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
1651 if (CE_int_status & CE_WATERMARK_MASK) {
1652 if (CE_state->watermark_cb) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301653 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001654 /* Convert HW IS bits to software flags */
1655 flags =
1656 (CE_int_status & CE_WATERMARK_MASK) >>
1657 CE_WM_SHFT;
1658
1659 CE_state->
1660 watermark_cb((struct CE_handle *)CE_state,
1661 CE_state->wm_context, flags);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301662 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001663 }
1664 }
1665 }
1666
1667 /*
1668 * Clear the misc interrupts (watermark) that were handled above,
1669 * and that will be checked again below.
1670 * Clear and check for copy-complete interrupts again, just in case
1671 * more copy completions happened while the misc interrupts were being
1672 * handled.
1673 */
1674 CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1675 CE_WATERMARK_MASK |
1676 HOST_IS_COPY_COMPLETE_MASK);
1677
1678 /*
1679 * Now that per-engine interrupts are cleared, verify that
1680 * no recv interrupts arrive while processing send interrupts,
1681 * and no recv or send interrupts happened while processing
 1682 * misc interrupts. Go back and check again. Keep checking until
1683 * we find no more events to process.
1684 */
1685 if (CE_state->recv_cb && ce_recv_entries_done_nolock(scn, CE_state)) {
Houston Hoffman75ef5a52016-04-14 17:15:49 -07001686 if (QDF_IS_EPPING_ENABLED(mode) ||
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001687 more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1688 goto more_completions;
1689 } else {
1690 HIF_ERROR(
1691 "%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1692 __func__, CE_state->dest_ring->nentries_mask,
1693 CE_state->dest_ring->sw_index,
1694 CE_DEST_RING_READ_IDX_GET(scn,
1695 CE_state->ctrl_addr));
1696 }
1697 }
1698
1699 if (CE_state->send_cb && ce_send_entries_done_nolock(scn, CE_state)) {
Houston Hoffman75ef5a52016-04-14 17:15:49 -07001700 if (QDF_IS_EPPING_ENABLED(mode) ||
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001701 more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1702 goto more_completions;
1703 } else {
1704 HIF_ERROR(
1705 "%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1706 __func__, CE_state->src_ring->nentries_mask,
1707 CE_state->src_ring->sw_index,
1708 CE_SRC_RING_READ_IDX_GET(scn,
1709 CE_state->ctrl_addr));
1710 }
1711 }
1712
1713 if (CE_state->misc_cbs) {
1714 CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
1715 if (CE_int_status & CE_WATERMARK_MASK) {
1716 if (CE_state->watermark_cb) {
1717 goto more_watermarks;
1718 }
1719 }
1720 }
1721
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301722 qdf_spin_unlock(&CE_state->ce_index_lock);
1723 qdf_atomic_set(&CE_state->rx_pending, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001724
1725 if (Q_TARGET_ACCESS_END(scn) < 0)
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001726 HIF_ERROR("<--[premature rc=%d]\n", CE_state->receive_count);
1727 return CE_state->receive_count;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001728}
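/*
 * A minimal caller sketch (hypothetical call site, for illustration only;
 * the real dispatch lives in the CE tasklet/IRQ handling code): each per-CE
 * bottom half simply services its own engine, and the return value reports
 * how many receive completions were handled.
 *
 *	int handled = ce_per_engine_service(scn, ce_id);
 */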
1729
1730/*
1731 * Handler for per-engine interrupts on ALL active CEs.
1732 * This is used in cases where the system is sharing a
 1733 * single interrupt for all CEs.
1734 */
1735
Komal Seelam644263d2016-02-22 20:45:49 +05301736void ce_per_engine_service_any(int irq, struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001737{
1738 int CE_id;
1739 uint32_t intr_summary;
1740
Houston Hoffmanbac94542016-03-14 21:11:59 -07001741 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1742 return;
1743
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301744 if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001745 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1746 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301747 if (qdf_atomic_read(&CE_state->rx_pending)) {
1748 qdf_atomic_set(&CE_state->rx_pending, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001749 ce_per_engine_service(scn, CE_id);
1750 }
1751 }
1752
Houston Hoffmanbac94542016-03-14 21:11:59 -07001753 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001754 return;
1755 }
1756
1757 intr_summary = CE_INTERRUPT_SUMMARY(scn);
1758
1759 for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
1760 if (intr_summary & (1 << CE_id)) {
1761 intr_summary &= ~(1 << CE_id);
1762 } else {
1763 continue; /* no intr pending on this CE */
1764 }
1765
1766 ce_per_engine_service(scn, CE_id);
1767 }
1768
Houston Hoffmanbac94542016-03-14 21:11:59 -07001769 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001770}
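/*
 * Worked example of the summary walk above: if CE_INTERRUPT_SUMMARY()
 * returns 0x0a (bits 1 and 3 set), the loop clears each bit as it goes and
 * calls ce_per_engine_service() only for CE 1 and CE 3; all other copy
 * engines are skipped for this invocation.
 */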
1771
1772/*
1773 * Adjust interrupts for the copy complete handler.
1774 * If it's needed for either send or recv, then unmask
1775 * this interrupt; otherwise, mask it.
1776 *
1777 * Called with target_lock held.
1778 */
1779static void
1780ce_per_engine_handler_adjust(struct CE_state *CE_state,
1781 int disable_copy_compl_intr)
1782{
1783 uint32_t ctrl_addr = CE_state->ctrl_addr;
Komal Seelam644263d2016-02-22 20:45:49 +05301784 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001785
1786 CE_state->disable_copy_compl_intr = disable_copy_compl_intr;
Houston Hoffmanbac94542016-03-14 21:11:59 -07001787
1788 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1789 return;
1790
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001791 if ((!disable_copy_compl_intr) &&
1792 (CE_state->send_cb || CE_state->recv_cb)) {
1793 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1794 } else {
1795 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1796 }
1797
1798 if (CE_state->watermark_cb) {
1799 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1800 } else {
1801 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1802 }
Houston Hoffmanbac94542016-03-14 21:11:59 -07001803 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001804}
1805
 1806/* Iterate the CE_state list and disable the copy complete interrupt
 1807 * if it has been registered already.
1808 */
Komal Seelam644263d2016-02-22 20:45:49 +05301809void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001810{
1811 int CE_id;
1812
Houston Hoffmanbac94542016-03-14 21:11:59 -07001813 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1814 return;
1815
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001816 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1817 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1818 uint32_t ctrl_addr = CE_state->ctrl_addr;
1819
1820 /* if the interrupt is currently enabled, disable it */
1821 if (!CE_state->disable_copy_compl_intr
1822 && (CE_state->send_cb || CE_state->recv_cb)) {
1823 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1824 }
1825
1826 if (CE_state->watermark_cb) {
1827 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1828 }
1829 }
Houston Hoffmanbac94542016-03-14 21:11:59 -07001830 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001831}
1832
Komal Seelam644263d2016-02-22 20:45:49 +05301833void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001834{
1835 int CE_id;
1836
Houston Hoffmanbac94542016-03-14 21:11:59 -07001837 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1838 return;
1839
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001840 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1841 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1842 uint32_t ctrl_addr = CE_state->ctrl_addr;
1843
1844 /*
1845 * If the CE is supposed to have copy complete interrupts
 1846 * enabled (i.e. there is a callback registered, and the
1847 * "disable" flag is not set), then re-enable the interrupt.
1848 */
1849 if (!CE_state->disable_copy_compl_intr
1850 && (CE_state->send_cb || CE_state->recv_cb)) {
1851 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1852 }
1853
1854 if (CE_state->watermark_cb) {
1855 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1856 }
1857 }
Houston Hoffmanbac94542016-03-14 21:11:59 -07001858 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001859}
1860
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001861/**
1862 * ce_send_cb_register(): register completion handler
1863 * @copyeng: CE_state representing the ce we are adding the behavior to
1864 * @fn_ptr: callback that the ce should use when processing tx completions
 1865 * @disable_interrupts: whether the copy complete interrupt should be disabled
1866 *
1867 * Caller should guarantee that no transactions are in progress before
1868 * switching the callback function.
1869 *
 1870 * The send context is registered before the fn pointer so that, if the cb
 1871 * is seen as valid, the context is valid as well.
1872 *
1873 * Beware that currently this function will enable completion interrupts.
1874 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001875void
1876ce_send_cb_register(struct CE_handle *copyeng,
1877 ce_send_cb fn_ptr,
1878 void *ce_send_context, int disable_interrupts)
1879{
1880 struct CE_state *CE_state = (struct CE_state *)copyeng;
1881
Sanjay Devnani9ce15772015-11-12 14:08:57 -08001882 if (CE_state == NULL) {
1883 pr_err("%s: Error CE state = NULL\n", __func__);
1884 return;
1885 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001886 CE_state->send_context = ce_send_context;
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001887 CE_state->send_cb = fn_ptr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001888 ce_per_engine_handler_adjust(CE_state, disable_interrupts);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001889}
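/*
 * A minimal registration sketch (the handler and pipe names are assumptions
 * shown for illustration only): the HIF layer registers one send completion
 * handler per pipe before any transfers are posted on the CE.
 *
 *	ce_send_cb_register(pipe_info->ce_hdl,
 *			    hif_pci_ce_send_done,
 *			    pipe_info, disable_interrupts);
 */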
1890
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001891/**
1892 * ce_recv_cb_register(): register completion handler
1893 * @copyeng: CE_state representing the ce we are adding the behavior to
1894 * @fn_ptr: callback that the ce should use when processing rx completions
 1895 * @disable_interrupts: whether the copy complete interrupt should be disabled
1896 *
 1897 * The recv context is registered before the fn pointer so that, if the cb
 1898 * is seen as valid, the context is valid as well.
1899 *
1900 * Caller should guarantee that no transactions are in progress before
1901 * switching the callback function.
1902 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001903void
1904ce_recv_cb_register(struct CE_handle *copyeng,
1905 CE_recv_cb fn_ptr,
1906 void *CE_recv_context, int disable_interrupts)
1907{
1908 struct CE_state *CE_state = (struct CE_state *)copyeng;
1909
Sanjay Devnani9ce15772015-11-12 14:08:57 -08001910 if (CE_state == NULL) {
1911 pr_err("%s: ERROR CE state = NULL\n", __func__);
1912 return;
1913 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001914 CE_state->recv_context = CE_recv_context;
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001915 CE_state->recv_cb = fn_ptr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001916 ce_per_engine_handler_adjust(CE_state, disable_interrupts);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001917}
1918
Houston Hoffmana837c9a2015-09-03 12:47:01 -07001919/**
1920 * ce_watermark_cb_register(): register completion handler
1921 * @copyeng: CE_state representing the ce we are adding the behavior to
1922 * @fn_ptr: callback that the ce should use when processing watermark events
1923 *
1924 * Caller should guarantee that no watermark events are being processed before
1925 * switching the callback function.
1926 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001927void
1928ce_watermark_cb_register(struct CE_handle *copyeng,
1929 CE_watermark_cb fn_ptr, void *CE_wm_context)
1930{
1931 struct CE_state *CE_state = (struct CE_state *)copyeng;
1932
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001933 CE_state->watermark_cb = fn_ptr;
1934 CE_state->wm_context = CE_wm_context;
1935 ce_per_engine_handler_adjust(CE_state, 0);
1936 if (fn_ptr) {
1937 CE_state->misc_cbs = 1;
1938 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001939}
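/*
 * A minimal callback sketch (hypothetical handler name): the flags argument
 * carries the watermark status bits shifted down by CE_WM_SHFT, exactly as
 * computed in ce_per_engine_service() above.
 *
 *	static void wm_cb_sketch(struct CE_handle *copyeng, void *ctx,
 *				 unsigned int flags)
 *	{
 *		... inspect flags (src/dest high/low watermark bits) ...
 *	}
 *
 *	ce_watermark_cb_register(copyeng, wm_cb_sketch, my_wm_context);
 */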
1940
1941#ifdef WLAN_FEATURE_FASTPATH
1942/**
 1943 * ce_pkt_dl_len_set(): set the HTT packet download length
1944 * @hif_sc: HIF context
1945 * @pkt_download_len: download length
1946 *
1947 * Return: None
1948 */
1949void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
1950{
Komal Seelam644263d2016-02-22 20:45:49 +05301951 struct hif_softc *sc = (struct hif_softc *)(hif_sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001952 struct CE_state *ce_state = sc->ce_id_to_state[CE_HTT_H2T_MSG];
1953
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301954 qdf_assert_always(ce_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001955
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001956 ce_state->download_len = pkt_download_len;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001957
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301958 qdf_print("%s CE %d Pkt download length %d", __func__,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001959 ce_state->id, ce_state->download_len);
1960}
1961#else
1962void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
1963{
1964}
1965#endif /* WLAN_FEATURE_FASTPATH */
1966
Komal Seelam644263d2016-02-22 20:45:49 +05301967bool ce_get_rx_pending(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001968{
1969 int CE_id;
1970
1971 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1972 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301973 if (qdf_atomic_read(&CE_state->rx_pending))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001974 return true;
1975 }
1976
1977 return false;
1978}
1979
1980/**
1981 * ce_check_rx_pending() - ce_check_rx_pending
Houston Hoffmaneb2516c2016-04-01 12:53:50 -07001982 * @CE_state: context of the copy engine to check
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001983 *
Houston Hoffmaneb2516c2016-04-01 12:53:50 -07001984 * Return: true if per_engine_service
 1985 * did not process all the rx descriptors.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001986 */
Houston Hoffmaneb2516c2016-04-01 12:53:50 -07001987bool ce_check_rx_pending(struct CE_state *CE_state)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001988{
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301989 if (qdf_atomic_read(&CE_state->rx_pending))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001990 return true;
1991 else
1992 return false;
1993}
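/*
 * A minimal polling sketch (hypothetical caller, for illustration): a NAPI
 * style poller can use this after servicing an engine to decide whether to
 * reschedule itself instead of re-enabling interrupts.
 *
 *	ce_per_engine_service(scn, ce_id);
 *	if (ce_check_rx_pending(CE_state))
 *		... reschedule the poll/tasklet ...
 *	else
 *		... re-enable the CE interrupt ...
 */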
Houston Hoffman8ed92e52015-09-02 14:49:48 -07001994
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001995#ifdef IPA_OFFLOAD
Leo Changd85f78d2015-11-13 10:55:34 -08001996/**
1997 * ce_ipa_get_resource() - get uc resource on copyengine
1998 * @ce: copyengine context
1999 * @ce_sr_base_paddr: copyengine source ring base physical address
2000 * @ce_sr_ring_size: copyengine source ring size
2001 * @ce_reg_paddr: copyengine register physical address
2002 *
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002003 * The copy engine should release these resources to the micro controller.
 2004 * The micro controller needs:
Leo Changd85f78d2015-11-13 10:55:34 -08002005 * - Copy engine source descriptor base address
2006 * - Copy engine source descriptor size
 2007 * - PCI BAR address to access the copy engine register
2008 *
2009 * Return: None
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002010 */
2011void ce_ipa_get_resource(struct CE_handle *ce,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302012 qdf_dma_addr_t *ce_sr_base_paddr,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002013 uint32_t *ce_sr_ring_size,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302014 qdf_dma_addr_t *ce_reg_paddr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002015{
2016 struct CE_state *CE_state = (struct CE_state *)ce;
2017 uint32_t ring_loop;
2018 struct CE_src_desc *ce_desc;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302019 qdf_dma_addr_t phy_mem_base;
Komal Seelam644263d2016-02-22 20:45:49 +05302020 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002021
2022 if (CE_RUNNING != CE_state->state) {
2023 *ce_sr_base_paddr = 0;
2024 *ce_sr_ring_size = 0;
2025 return;
2026 }
2027
2028 /* Update default value for descriptor */
2029 for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
2030 ring_loop++) {
2031 ce_desc = (struct CE_src_desc *)
2032 ((char *)CE_state->src_ring->base_addr_owner_space +
2033 ring_loop * (sizeof(struct CE_src_desc)));
2034 CE_IPA_RING_INIT(ce_desc);
2035 }
2036
2037 /* Get BAR address */
2038 hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
2039
Leo Changd85f78d2015-11-13 10:55:34 -08002040 *ce_sr_base_paddr = CE_state->src_ring->base_addr_CE_space;
2041 *ce_sr_ring_size = (uint32_t) (CE_state->src_ring->nentries *
2042 sizeof(struct CE_src_desc));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002043 *ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
2044 SR_WR_INDEX_ADDRESS;
2045 return;
2046}
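/*
 * A minimal caller sketch (variable names are illustrative only): the IPA
 * uC setup path retrieves the three items listed above in a single call.
 *
 *	qdf_dma_addr_t sr_base, reg_paddr;
 *	uint32_t sr_size;
 *
 *	ce_ipa_get_resource(ce_hdl, &sr_base, &sr_size, &reg_paddr);
 */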
2047#endif /* IPA_OFFLOAD */
2048