/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include "hif.h"
#include "hif_io32.h"
#include "ce_api.h"
#include "ce_main.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "qdf_lock.h"
#include "regtable.h"
#include "hif_main.h"
#include "hif_debug.h"

#ifdef IPA_OFFLOAD
#ifdef QCA_WIFI_3_0
#define CE_IPA_RING_INIT(ce_desc) \
	do { \
		ce_desc->gather = 0; \
		ce_desc->enable_11h = 0; \
		ce_desc->meta_data_low = 0; \
		ce_desc->packet_result_offset = 64; \
		ce_desc->toeplitz_hash_enable = 0; \
		ce_desc->addr_y_search_disable = 0; \
		ce_desc->addr_x_search_disable = 0; \
		ce_desc->misc_int_disable = 0; \
		ce_desc->target_int_disable = 0; \
		ce_desc->host_int_disable = 0; \
		ce_desc->dest_byte_swap = 0; \
		ce_desc->byte_swap = 0; \
		ce_desc->type = 2; \
		ce_desc->tx_classify = 1; \
		ce_desc->buffer_addr_hi = 0; \
		ce_desc->meta_data = 0; \
		ce_desc->nbytes = 128; \
	} while (0)
#else
#define CE_IPA_RING_INIT(ce_desc) \
	do { \
		ce_desc->byte_swap = 0; \
		ce_desc->nbytes = 60; \
		ce_desc->gather = 0; \
	} while (0)
#endif /* QCA_WIFI_3_0 */
#endif /* IPA_OFFLOAD */

static int war1_allow_sleep;
/* io32 write workaround */
static int hif_ce_war1;

/**
 * hif_ce_war_disable() - disable ce war globally
 */
void hif_ce_war_disable(void)
{
	hif_ce_war1 = 0;
}

/**
 * hif_ce_war_enable() - enable ce war globally
 */
void hif_ce_war_enable(void)
{
	hif_ce_war1 = 1;
}

#ifdef CONFIG_SLUB_DEBUG_ON

/**
 * struct hif_ce_desc_event - structure for detailing a ce event
 * @type: what the event was
 * @time: when it happened
 * @descriptor: descriptor enqueued or dequeued
 * @memory: virtual address that was used
 * @index: location of the descriptor in the ce ring
 */
struct hif_ce_desc_event {
	uint16_t index;
	enum hif_ce_event_type type;
	uint64_t time;
	union ce_desc descriptor;
	void *memory;
};

/* max history to record per copy engine */
#define HIF_CE_HISTORY_MAX 512
qdf_atomic_t hif_ce_desc_history_index[CE_COUNT_MAX];
struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];


/**
 * get_next_record_index() - get the next record index
 * @table_index: atomic index variable to increment
 * @array_size: array size of the circular buffer
 *
 * Increment the atomic index and reserve the value.
 * Takes care of buffer wrap.
 * Guaranteed to be thread safe as long as fewer than array_size contexts
 * try to access the array. If there are more than array_size contexts
 * trying to access the array, full locking of the recording process would
 * be needed to have sane logging.
 */
static int get_next_record_index(qdf_atomic_t *table_index, int array_size)
{
	int record_index = qdf_atomic_inc_return(table_index);

	if (record_index == array_size)
		qdf_atomic_sub(array_size, table_index);

	while (record_index >= array_size)
		record_index -= array_size;
	return record_index;
}
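
/*
 * Worked example of the wrap handling above (illustrative only, not part
 * of the original code; the numbers are assumed): with array_size = 512,
 * the context whose qdf_atomic_inc_return() returns exactly 512 subtracts
 * 512 from the shared index, and its own record_index is folded back to 0
 * by the while loop. Contexts that raced ahead and saw 513, 514, ...
 * before that subtraction are likewise reduced locally (513 -> 1, 514 -> 2),
 * which is why up to array_size concurrent callers can log without a lock.
 */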

/**
 * hif_record_ce_desc_event() - record ce descriptor events
 * @scn: hif_softc
 * @ce_id: which ce is the event occurring on
 * @type: what happened
 * @descriptor: pointer to the descriptor posted/completed
 * @memory: virtual address of buffer related to the descriptor
 * @index: index that the descriptor was/will be at.
 */
void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
			      enum hif_ce_event_type type,
			      union ce_desc *descriptor,
			      void *memory, int index)
{
	int record_index = get_next_record_index(
			&hif_ce_desc_history_index[ce_id], HIF_CE_HISTORY_MAX);

	struct hif_ce_desc_event *event =
		&hif_ce_desc_history[ce_id][record_index];
	event->type = type;
	event->time = qdf_get_monotonic_boottime();

	if (descriptor != NULL)
		event->descriptor = *descriptor;
	else
		memset(&event->descriptor, 0, sizeof(union ce_desc));
	event->memory = memory;
	event->index = index;
}

/**
 * ce_init_ce_desc_event_log() - initialize the ce event log
 * @ce_id: copy engine id for which we are initializing the log
 * @size: size of array to dedicate
 *
 * Currently the passed size is ignored in favor of a precompiled value.
 */
void ce_init_ce_desc_event_log(int ce_id, int size)
{
	qdf_atomic_init(&hif_ce_desc_history_index[ce_id]);
}
#else
void hif_record_ce_desc_event(struct hif_softc *scn,
			      int ce_id, enum hif_ce_event_type type,
			      union ce_desc *descriptor, void *memory,
			      int index)
{
}

inline void ce_init_ce_desc_event_log(int ce_id, int size)
{
}
#endif

/**
 * hif_ce_service_should_yield() - return true if the service is hogging the cpu
 * @scn: hif context
 * @ce_state: context of the copy engine being serviced
 *
 * Return: true if the service should yield
 */
bool hif_ce_service_should_yield(struct hif_softc *scn,
				 struct CE_state *ce_state)
{
	bool yield = qdf_system_time_after_eq(qdf_system_ticks(),
					      ce_state->ce_service_yield_time) ||
		hif_max_num_receives_reached(scn, ce_state->receive_count);
	return yield;
}

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem -- than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 * Target TX hash result: toeplitz_hash_result
 */
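
/*
 * Minimal host-side usage sketch for the send path described above
 * (illustrative only, not part of the original code): the CE handle, the
 * per-transfer context and the DMA-mapped buffer are assumed to come from
 * the normal CE/HIF setup paths, and error handling is elided.
 *
 *	// Post one simple (non-gather) buffer on the source ring.
 *	ce_send(ce_hdl, my_nbuf, nbuf_paddr, nbytes, transfer_id, 0, 0);
 *
 *	// Later, e.g. from the send-completion service path, reap the
 *	// finished descriptor; the per-transfer context posted above is
 *	// echoed back through per_transfer_contextp.
 *	ce_completed_send_next(ce_hdl, NULL, (void **)&my_nbuf, &nbuf_paddr,
 *			       &nbytes, &transfer_id, &sw_idx, &hw_idx,
 *			       &toeplitz_hash);
 */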

/*
 * Guts of ce_send, used by both ce_send and ce_sendlist_send.
 * The caller takes responsibility for any needed locking.
 */
int
ce_completed_send_next_nolock(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      qdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *sw_idx, unsigned int *hw_idx,
			      uint32_t *toeplitz_hash_result);

void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
				   u32 ctrl_addr, unsigned int write_index)
{
	if (hif_ce_war1) {
		void __iomem *indicator_addr;

		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;

		if (!war1_allow_sleep
		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
			hif_write32_mb(indicator_addr,
				       (CDC_WAR_MAGIC_STR | write_index));
		} else {
			unsigned long irq_flags;

			local_irq_save(irq_flags);
			hif_write32_mb(indicator_addr, 1);

			/*
			 * PCIE write waits for ACK in IPQ8K, there is no
			 * need to read back value.
			 */
			(void)hif_read32_mb(indicator_addr);
			(void)hif_read32_mb(indicator_addr); /* conservative */

			CE_SRC_RING_WRITE_IDX_SET(scn,
						  ctrl_addr, write_index);

			hif_write32_mb(indicator_addr, 0);
			local_irq_restore(irq_flags);
		}
	} else {
		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
	}
}

int
ce_send_nolock(struct CE_handle *copyeng,
	       void *per_transfer_context,
	       qdf_dma_addr_t buffer,
	       uint32_t nbytes,
	       uint32_t transfer_id,
	       uint32_t flags,
	       uint32_t user_flags)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return QDF_STATUS_E_FAILURE;
	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		Q_TARGET_ACCESS_END(scn);
		return QDF_STATUS_E_FAILURE;
	}
	{
		enum hif_ce_event_type event_type = HIF_TX_GATHER_DESC_POST;
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		/* Update low 32 bits source descriptor address */
		shadow_src_desc->buffer_addr =
			(uint32_t)(dma_addr & 0xFFFFFFFF);
#ifdef QCA_WIFI_3_0
		shadow_src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0x1F);
		user_flags |= shadow_src_desc->buffer_addr_hi;
		memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
		       sizeof(uint32_t));
#endif
		shadow_src_desc->target_int_disable = 0;
		shadow_src_desc->host_int_disable = 0;

		shadow_src_desc->meta_data = transfer_id;

		/*
		 * Set the swap bit if:
		 * typical sends on this CE are swapped (host is big-endian)
		 * and this send doesn't disable the swapping
		 * (data is not bytestream)
		 */
		shadow_src_desc->byte_swap =
			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
		shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
		shadow_src_desc->nbytes = nbytes;

		*src_desc = *shadow_src_desc;

		src_ring->per_transfer_context[write_index] =
			per_transfer_context;

		/* Update Source Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		/* WORKAROUND */
		if (!shadow_src_desc->gather) {
			event_type = HIF_TX_DESC_POST;
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);
		}

		/* src_ring->write_index hasn't been updated even though
		 * the register has already been written to.
		 */
		hif_record_ce_desc_event(scn, CE_state->id, event_type,
			(union ce_desc *)shadow_src_desc, per_transfer_context,
			src_ring->write_index);

		src_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	}
	Q_TARGET_ACCESS_END(scn);
	return status;
}

int
ce_send(struct CE_handle *copyeng,
	void *per_transfer_context,
	qdf_dma_addr_t buffer,
	uint32_t nbytes,
	uint32_t transfer_id,
	uint32_t flags,
	uint32_t user_flag)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	int status;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	status = ce_send_nolock(copyeng, per_transfer_context, buffer, nbytes,
				transfer_id, flags, user_flag);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

unsigned int ce_sendlist_sizeof(void)
{
	return sizeof(struct ce_sendlist);
}

void ce_sendlist_init(struct ce_sendlist *sendlist)
{
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	sl->num_items = 0;
}

int
ce_sendlist_buf_add(struct ce_sendlist *sendlist,
		    qdf_dma_addr_t buffer,
		    uint32_t nbytes,
		    uint32_t flags,
		    uint32_t user_flags)
{
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	unsigned int num_items = sl->num_items;
	struct ce_sendlist_item *item;

	if (num_items >= CE_SENDLIST_ITEMS_MAX) {
		QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
		return QDF_STATUS_E_RESOURCES;
	}

	item = &sl->item[num_items];
	item->send_type = CE_SIMPLE_BUFFER_TYPE;
	item->data = buffer;
	item->u.nbytes = nbytes;
	item->flags = flags;
	item->user_flags = user_flags;
	sl->num_items = num_items + 1;
	return QDF_STATUS_SUCCESS;
}

int
ce_sendlist_send(struct CE_handle *copyeng,
		 void *per_transfer_context,
		 struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	int status = -ENOMEM;
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int num_items = sl->num_items;
	unsigned int sw_index;
	unsigned int write_index;

	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
	    num_items) {
		struct ce_sendlist_item *item;
		int i;

		/* handle all but the last item uniformly */
		for (i = 0; i < num_items - 1; i++) {
			item = &sl->item[i];
			/* TBDXXX: Support extensible sendlist_types? */
			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
			status = ce_send_nolock(copyeng, CE_SENDLIST_ITEM_CTXT,
				(qdf_dma_addr_t) item->data,
				item->u.nbytes, transfer_id,
				item->flags | CE_SEND_FLAG_GATHER,
				item->user_flags);
			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		}
		/* provide valid context pointer for final item */
		item = &sl->item[i];
		/* TBDXXX: Support extensible sendlist_types? */
		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
		status = ce_send_nolock(copyeng, per_transfer_context,
					(qdf_dma_addr_t) item->data,
					item->u.nbytes,
					transfer_id, item->flags,
					item->user_flags);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
					     QDF_NBUF_TX_PKT_CE);
		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
			(uint8_t *)&(((qdf_nbuf_t)per_transfer_context)->data),
			sizeof(((qdf_nbuf_t)per_transfer_context)->data),
			QDF_TX));
	} else {
		/*
		 * Probably not worth the additional complexity to support
		 * partial sends with continuation or notification. We expect
		 * to use large rings and small sendlists. If we can't handle
		 * the entire request at once, punt it back to the caller.
		 */
	}
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}
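
/*
 * Sketch of a gather send through the sendlist API above (illustrative
 * only, not part of the original code; the CE handle, DMA addresses and
 * lengths are assumed to come from the caller's own setup):
 *
 *	struct ce_sendlist sl;
 *
 *	ce_sendlist_init(&sl);
 *	// e.g. fragment 0 = HTC/HTT header, fragment 1 = payload
 *	ce_sendlist_buf_add(&sl, hdr_paddr, hdr_len, 0, 0);
 *	ce_sendlist_buf_add(&sl, payload_paddr, payload_len, 0, 0);
 *	// ce_sendlist_send() posts all but the last item with
 *	// CE_SEND_FLAG_GATHER, so hardware delivers the list as a single
 *	// destination buffer with a single interrupt.
 *	ce_sendlist_send(ce_hdl, per_transfer_ctxt, &sl, transfer_id);
 */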

#ifdef WLAN_FEATURE_FASTPATH
#ifdef QCA_WIFI_3_0
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
		      uint64_t dma_addr,
		      uint32_t user_flags)
{
	shadow_src_desc->buffer_addr_hi =
		(uint32_t)((dma_addr >> 32) & 0x1F);
	user_flags |= shadow_src_desc->buffer_addr_hi;
	memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
	       sizeof(uint32_t));
}
#else
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
		      uint64_t dma_addr,
		      uint32_t user_flags)
{
}
#endif

#define SLOTS_PER_DATAPATH_TX 2

/**
 * ce_send_fast() - CE layer Tx buffer posting function
 * @copyeng: copy engine handle
 * @msdu: msdu to be sent
 * @transfer_id: transfer_id
 * @download_len: packet download length
 *
 * Assumption: Called with a single MSDU
 * Function:
 * 1. Check no. of available entries
 * 2. Create src ring entries (allocated in consistent memory)
 * 3. Write index to h/w
 *
 * Return: No. of packets that could be sent
 */
int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
		 unsigned int transfer_id, uint32_t download_len)
{
	struct CE_state *ce_state = (struct CE_state *)copyeng;
	struct hif_softc *scn = ce_state->scn;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct CE_ring_state *src_ring = ce_state->src_ring;
	u_int32_t ctrl_addr = ce_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	unsigned int frag_len;
	uint64_t dma_addr;
	uint32_t user_flags;

	qdf_spin_lock_bh(&ce_state->ce_index_lock);
	Q_TARGET_ACCESS_BEGIN(scn);

	src_ring->sw_index = CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
	write_index = src_ring->write_index;
	sw_index = src_ring->sw_index;

	hif_record_ce_desc_event(scn, ce_state->id,
				 FAST_TX_SOFTWARE_INDEX_UPDATE,
				 NULL, NULL, write_index);

	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1)
			 < SLOTS_PER_DATAPATH_TX)) {
		HIF_ERROR("Source ring full, required %d, available %d",
			  SLOTS_PER_DATAPATH_TX,
			  CE_RING_DELTA(nentries_mask, write_index,
					sw_index - 1));
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		Q_TARGET_ACCESS_END(scn);
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
		return 0;
	}

	{
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		hif_pm_runtime_get_noresume(hif_hdl);

		/*
		 * First fill out the ring descriptor for the HTC HTT frame
		 * header. These are uncached writes. Should we use a local
		 * structure instead?
		 */
		/* HTT/HTC header can be passed as an argument */
		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
		shadow_src_desc->meta_data = transfer_id;
		shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);
		download_len -= shadow_src_desc->nbytes;
		/*
		 * HTC HTT header is a word stream, so byte swap if CE byte
		 * swap enabled
		 */
		shadow_src_desc->byte_swap = ((ce_state->attr_flags &
					       CE_ATTR_BYTE_SWAP_DATA) != 0);
		/* For the first one, it still does not need to write */
		shadow_src_desc->gather = 1;
		*src_desc = *shadow_src_desc;
		/* By default we could initialize the transfer context to this
		 * value
		 */
		src_ring->per_transfer_context[write_index] =
			CE_SENDLIST_ITEM_CTXT;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
		/*
		 * Now fill out the ring descriptor for the actual data
		 * packet
		 */
		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		/*
		 * Clear packet offset for all but the first CE desc.
		 */
		user_flags &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
		shadow_src_desc->meta_data = transfer_id;

		/* get actual packet length */
		frag_len = qdf_nbuf_get_frag_len(msdu, 1);

		/* download remaining bytes of payload */
		shadow_src_desc->nbytes = download_len;
		if (shadow_src_desc->nbytes > frag_len)
			shadow_src_desc->nbytes = frag_len;

		/* Data packet is a byte stream, so disable byte swap */
		shadow_src_desc->byte_swap = 0;
		/* For the last one, gather is not set */
		shadow_src_desc->gather = 0;
		*src_desc = *shadow_src_desc;
		src_ring->per_transfer_context[write_index] = msdu;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		DPTRACE(qdf_dp_trace(msdu,
			QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
			qdf_nbuf_data_addr(msdu),
			sizeof(qdf_nbuf_data(msdu)), QDF_TX));
	}

	src_ring->write_index = write_index;

	if (hif_pm_runtime_get(hif_hdl) == 0) {
		hif_record_ce_desc_event(scn, ce_state->id,
					 FAST_TX_WRITE_INDEX_UPDATE,
					 NULL, NULL, write_index);

		/* Don't call WAR_XXX from here
		 * Just call XXX instead, that has the reqd. intel
		 */
		war_ce_src_ring_write_idx_set(scn, ctrl_addr,
					      write_index);
		hif_pm_runtime_put(hif_hdl);
	}


	Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_bh(&ce_state->ce_index_lock);

	/* sent 1 packet */
	return 1;
}

/**
 * ce_is_fastpath_enabled() - returns true if fastpath mode is enabled
 * @scn: Handle to HIF context
 *
 * Return: true if fastpath is enabled else false.
 */
static bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return scn->fastpath_mode_on;
}

/**
 * ce_is_fastpath_handler_registered() - return true for datapath CEs and if
 * fastpath is enabled.
 * @ce_state: handle to copy engine
 *
 * Return: true if fastpath handler is registered for datapath CE.
 */
static bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
{
	if (ce_state->fastpath_handler)
		return true;
	else
		return false;
}


#else
static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return false;
}

static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
{
	return false;
}
#endif /* WLAN_FEATURE_FASTPATH */

#ifndef AH_NEED_TX_DATA_SWAP
#define AH_NEED_TX_DATA_SWAP 0
#endif

/**
 * ce_batch_send() - sends a bunch of msdus at once
 * @ce_tx_hdl : pointer to CE handle
 * @msdu : list of msdus to be sent
 * @transfer_id : transfer id
 * @len : Downloaded length
 * @sendhead : sendhead
 *
 * Assumption : Called with an array of MSDUs
 * Function:
 * For each msdu in the array
 * 1. Send each msdu
 * 2. Increment write index accordingly.
 *
 * Return: list of msdus not sent
 */
qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
			 uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
{
	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
	struct hif_softc *scn = ce_state->scn;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	u_int32_t ctrl_addr = ce_state->ctrl_addr;
	/* A_target_id_t targid = TARGID(scn);*/

	uint32_t nentries_mask = src_ring->nentries_mask;
	uint32_t sw_index, write_index;

	struct CE_src_desc *src_desc_base =
		(struct CE_src_desc *)src_ring->base_addr_owner_space;
	uint32_t *src_desc;

	struct CE_src_desc lsrc_desc = {0};
	int deltacount = 0;
	qdf_nbuf_t freelist = NULL, hfreelist = NULL, tempnext;

	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	deltacount = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);

	while (msdu) {
		tempnext = qdf_nbuf_next(msdu);

		if (deltacount < 2) {
			if (sendhead)
				return msdu;
			qdf_print("Out of descriptor\n");
			src_ring->write_index = write_index;
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);

			sw_index = src_ring->sw_index;
			write_index = src_ring->write_index;

			deltacount = CE_RING_DELTA(nentries_mask, write_index,
						   sw_index - 1);
			if (freelist == NULL) {
				freelist = msdu;
				hfreelist = msdu;
			} else {
				qdf_nbuf_set_next(freelist, msdu);
				freelist = msdu;
			}
			qdf_nbuf_set_next(msdu, NULL);
			msdu = tempnext;
			continue;
		}

		src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base,
							   write_index);

		src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);

		lsrc_desc.meta_data = transfer_id;
		if (len > msdu->len)
			len = msdu->len;
		lsrc_desc.nbytes = len;
		/* Data packet is a byte stream, so disable byte swap */
		lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
		lsrc_desc.gather = 0; /* For the last one, gather is not set */

		src_desc[1] = ((uint32_t *)&lsrc_desc)[1];


		src_ring->per_transfer_context[write_index] = msdu;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		if (sendhead)
			break;
		qdf_nbuf_set_next(msdu, NULL);
		msdu = tempnext;

	}


	src_ring->write_index = write_index;
	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);

	return hfreelist;
}

/**
 * ce_update_tx_ring() - Advance sw index.
 * @ce_tx_hdl : pointer to CE handle
 * @num_htt_cmpls : htt completions received.
 *
 * Function:
 * Increment the value of sw index of src ring
 * according to number of htt completions
 * received.
 *
 * Return: void
 */
void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
{
	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	uint32_t nentries_mask = src_ring->nentries_mask;
	/*
	 * Advance the s/w index:
	 * This effectively simulates completing the CE ring descriptors
	 */
	src_ring->sw_index =
		CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index,
				num_htt_cmpls);
}

/**
 * ce_send_single() - sends a single msdu
 * @ce_tx_hdl : pointer to CE handle
 * @msdu : msdu to be sent
 * @transfer_id : transfer id
 * @len : Downloaded length
 *
 * Function:
 * 1. Send one msdu
 * 2. Increment write index of src ring accordingly.
 *
 * Return: int: CE sent status
 */
int ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
		   uint32_t transfer_id, u_int32_t len)
{
	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
	struct hif_softc *scn = ce_state->scn;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	uint32_t ctrl_addr = ce_state->ctrl_addr;
	/*A_target_id_t targid = TARGID(scn);*/

	uint32_t nentries_mask = src_ring->nentries_mask;
	uint32_t sw_index, write_index;

	struct CE_src_desc *src_desc_base =
		(struct CE_src_desc *)src_ring->base_addr_owner_space;
	uint32_t *src_desc;

	struct CE_src_desc lsrc_desc = {0};

	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index,
				       sw_index - 1) < 1)) {
		/* ol_tx_stats_inc_ring_error(sc->scn->pdev_txrx_handle, 1); */
		qdf_print("ce send fail %d %d %d\n", nentries_mask,
			  write_index, sw_index);
		return 1;
	}

	src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, write_index);

	src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);

	lsrc_desc.meta_data = transfer_id;
	lsrc_desc.nbytes = len;
	/* Data packet is a byte stream, so disable byte swap */
	lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
	lsrc_desc.gather = 0; /* For the last one, gather is not set */

	src_desc[1] = ((uint32_t *)&lsrc_desc)[1];


	src_ring->per_transfer_context[write_index] = msdu;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	src_ring->write_index = write_index;
	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);

	return QDF_STATUS_SUCCESS;
}

/**
 * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: 0 if the buffer is enqueued
 */
int
ce_recv_buf_enqueue(struct CE_handle *copyeng,
		    void *per_recv_context, qdf_dma_addr_t buffer)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return -EIO;
	}

	if ((CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) ||
	    (ce_is_fastpath_enabled(scn) && CE_state->htt_rx_data)) {
		struct CE_dest_desc *dest_ring_base =
			(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
		struct CE_dest_desc *dest_desc =
			CE_DEST_RING_TO_DESC(dest_ring_base, write_index);

		/* Update low 32 bit destination descriptor */
		dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
#ifdef QCA_WIFI_3_0
		dest_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0x1F);
#endif
		dest_desc->nbytes = 0;

		dest_ring->per_transfer_context[write_index] =
			per_recv_context;

		hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_POST,
			(union ce_desc *)dest_desc, per_recv_context,
			write_index);

		/* Update Destination Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
		if (write_index != sw_index) {
			CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
			dest_ring->write_index = write_index;
		}
		status = QDF_STATUS_SUCCESS;
	} else
		status = QDF_STATUS_E_FAILURE;

	Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
	return status;
}
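
/*
 * Sketch of the receive side of the API above (illustrative only, not part
 * of the original code; allocating and DMA-mapping the rx nbuf is the
 * caller's responsibility):
 *
 *	// Keep the destination ring stocked with anonymous receive buffers.
 *	ce_recv_buf_enqueue(ce_hdl, (void *)rx_nbuf, rx_nbuf_paddr);
 *
 *	// On a receive interrupt, reap completed buffers; the nbuf posted
 *	// above is echoed back through per_transfer_contextp together with
 *	// the byte count written by the target.
 *	while (ce_completed_recv_next(ce_hdl, NULL, (void **)&rx_nbuf,
 *				      &rx_nbuf_paddr, &nbytes, &transfer_id,
 *				      &flags) == QDF_STATUS_SUCCESS) {
 *		// hand the filled buffer up and post a replacement
 *	}
 */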

void
ce_send_watermarks_set(struct CE_handle *copyeng,
		       unsigned int low_alert_nentries,
		       unsigned int high_alert_nentries)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
}

void
ce_recv_watermarks_set(struct CE_handle *copyeng,
		       unsigned int low_alert_nentries,
		       unsigned int high_alert_nentries)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
				 low_alert_nentries);
	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
				  high_alert_nentries);
}

unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;

	qdf_spin_lock(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}

unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;

	qdf_spin_lock(&CE_state->ce_index_lock);
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}

/*
 * Guts of ce_send_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
unsigned int
ce_send_entries_done_nolock(struct hif_softc *scn,
			    struct CE_state *CE_state)
{
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int read_index;

	sw_index = src_ring->sw_index;
	read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);

	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
}

unsigned int ce_send_entries_done(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int nentries;

	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries = ce_send_entries_done_nolock(CE_state->scn, CE_state);
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return nentries;
}

/*
 * Guts of ce_recv_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
unsigned int
ce_recv_entries_done_nolock(struct hif_softc *scn,
			    struct CE_state *CE_state)
{
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int read_index;

	sw_index = dest_ring->sw_index;
	read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);

	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
}

unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int nentries;

	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries = ce_recv_entries_done_nolock(CE_state->scn, CE_state);
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return nentries;
}

/* Debug support */
void *ce_debug_cmplrn_context;  /* completed recv next context */
void *ce_debug_cnclsn_context;  /* cancel send next context */
void *ce_debug_rvkrn_context;   /* revoke receive next context */
void *ce_debug_cmplsn_context;  /* completed send next context */

/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
int
ce_completed_recv_next_nolock(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      qdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *flagsp)
{
	int status;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct CE_dest_desc *dest_ring_base =
		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
	struct CE_dest_desc *dest_desc =
		CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
	int nbytes;
	struct CE_dest_desc dest_desc_info;
	/*
	 * By copying the dest_desc_info element to local memory, we could
	 * avoid extra memory read from non-cacheable memory.
	 */
	dest_desc_info = *dest_desc;
	nbytes = dest_desc_info.nbytes;
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_COMPLETION,
			(union ce_desc *)dest_desc,
			dest_ring->per_transfer_context[sw_index],
			sw_index);

	dest_desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
	*nbytesp = nbytes;
	*transfer_idp = dest_desc_info.meta_data;
	*flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

	if (per_CE_contextp) {
		*per_CE_contextp = CE_state->recv_context;
	}

	ce_debug_cmplrn_context = dest_ring->per_transfer_context[sw_index];
	if (per_transfer_contextp) {
		*per_transfer_contextp = ce_debug_cmplrn_context;
	}
	dest_ring->per_transfer_context[sw_index] = 0; /* sanity */

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	status = QDF_STATUS_SUCCESS;

done:
	return status;
}

int
ce_completed_recv_next(struct CE_handle *copyeng,
		       void **per_CE_contextp,
		       void **per_transfer_contextp,
		       qdf_dma_addr_t *bufferp,
		       unsigned int *nbytesp,
		       unsigned int *transfer_idp, unsigned int *flagsp)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	int status;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	status =
		ce_completed_recv_next_nolock(CE_state, per_CE_contextp,
					      per_transfer_contextp, bufferp,
					      nbytesp, transfer_idp, flagsp);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

/* NB: Modeled after ce_completed_recv_next_nolock */
QDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state;
	struct CE_ring_state *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	QDF_STATUS status;
	struct hif_softc *scn;

	CE_state = (struct CE_state *)copyeng;
	dest_ring = CE_state->dest_ring;
	if (!dest_ring) {
		return QDF_STATUS_E_FAILURE;
	}

	scn = CE_state->scn;
	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct CE_dest_desc *dest_ring_base =
			(struct CE_dest_desc *)dest_ring->
			base_addr_owner_space;
		struct CE_dest_desc *dest_desc =
			CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);

		if (per_CE_contextp) {
			*per_CE_contextp = CE_state->recv_context;
		}

		ce_debug_rvkrn_context =
			dest_ring->per_transfer_context[sw_index];
		if (per_transfer_contextp) {
			*per_transfer_contextp = ce_debug_rvkrn_context;
		}
		dest_ring->per_transfer_context[sw_index] = 0; /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	} else {
		status = QDF_STATUS_E_FAILURE;
	}
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return status;
}

/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
int
ce_completed_send_next_nolock(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      qdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *sw_idx,
			      unsigned int *hw_idx,
			      uint32_t *toeplitz_hash_result)
{
	int status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int read_index;
	struct hif_softc *scn = CE_state->scn;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return QDF_STATUS_E_FAILURE;
		src_ring->hw_index =
			CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return QDF_STATUS_E_FAILURE;
	}
	read_index = src_ring->hw_index;

	if (sw_idx)
		*sw_idx = sw_index;

	if (hw_idx)
		*hw_idx = read_index;

	if ((read_index != sw_index) && (read_index != 0xffffffff)) {
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, sw_index);
#ifdef QCA_WIFI_3_0
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
#endif
		hif_record_ce_desc_event(scn, CE_state->id,
				HIF_TX_DESC_COMPLETION,
				(union ce_desc *)shadow_src_desc,
				src_ring->per_transfer_context[sw_index],
				sw_index);

		/* Return data from completed source descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
		*nbytesp = shadow_src_desc->nbytes;
		*transfer_idp = shadow_src_desc->meta_data;
#ifdef QCA_WIFI_3_0
		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
#else
		*toeplitz_hash_result = 0;
#endif
		if (per_CE_contextp) {
			*per_CE_contextp = CE_state->send_context;
		}

		ce_debug_cmplsn_context =
			src_ring->per_transfer_context[sw_index];
		if (per_transfer_contextp) {
			*per_transfer_contextp = ce_debug_cmplsn_context;
		}
		src_ring->per_transfer_context[sw_index] = 0; /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}

	return status;
}

/* NB: Modeled after ce_completed_send_next */
QDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp,
		    qdf_dma_addr_t *bufferp,
		    unsigned int *nbytesp,
		    unsigned int *transfer_idp,
		    uint32_t *toeplitz_hash_result)
{
	struct CE_state *CE_state;
	struct CE_ring_state *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	QDF_STATUS status;
	struct hif_softc *scn;

	CE_state = (struct CE_state *)copyeng;
	src_ring = CE_state->src_ring;
	if (!src_ring) {
		return QDF_STATUS_E_FAILURE;
	}

	scn = CE_state->scn;
	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);

		/* Return data from completed source descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
#ifdef QCA_WIFI_3_0
		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
#else
		*toeplitz_hash_result = 0;
#endif

		if (per_CE_contextp) {
			*per_CE_contextp = CE_state->send_context;
		}

		ce_debug_cnclsn_context =
			src_ring->per_transfer_context[sw_index];
		if (per_transfer_contextp) {
			*per_transfer_contextp = ce_debug_cnclsn_context;
		}
		src_ring->per_transfer_context[sw_index] = 0; /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	} else {
		status = QDF_STATUS_E_FAILURE;
	}
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return status;
}

/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
#define CE_WM_SHFT 1

int
ce_completed_send_next(struct CE_handle *copyeng,
		       void **per_CE_contextp,
		       void **per_transfer_contextp,
		       qdf_dma_addr_t *bufferp,
		       unsigned int *nbytesp,
		       unsigned int *transfer_idp,
		       unsigned int *sw_idx,
		       unsigned int *hw_idx,
		       unsigned int *toeplitz_hash_result)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	int status;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	status =
		ce_completed_send_next_nolock(CE_state, per_CE_contextp,
					      per_transfer_contextp, bufferp,
					      nbytesp, transfer_idp, sw_idx,
					      hw_idx, toeplitz_hash_result);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301463 qdf_spin_unlock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001464
1465 return status;
1466}
1467
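/*
 * Usage sketch (illustrative only; the caller context and variable
 * names are hypothetical): a HIF layer that owns a CE handle (copyeng)
 * can drain all completed sends by looping until
 * ce_completed_send_next() stops returning QDF_STATUS_SUCCESS, then
 * releasing each per-transfer context (typically an nbuf) it gets back.
 *
 *	void *ce_ctx, *xfer_ctx;
 *	qdf_dma_addr_t buf;
 *	unsigned int nbytes, id, sw_idx, hw_idx, hash;
 *
 *	while (ce_completed_send_next(copyeng, &ce_ctx, &xfer_ctx, &buf,
 *				      &nbytes, &id, &sw_idx, &hw_idx,
 *				      &hash) == QDF_STATUS_SUCCESS)
 *		hif_example_release(xfer_ctx);
 *
 * hif_example_release() stands in for whatever unmap/free step the
 * caller performs on the returned per-transfer context.
 */
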
1468#ifdef ATH_11AC_TXCOMPACT
1469/* CE engine descriptor reap
1470 * Similar to ce_per_engine_service; the only difference is that
1471 * ce_per_engine_service does both receive and reaping of completed
1472 * descriptors, whereas this function only reaps Tx completion descriptors.
1473 * It is called from the threshold reap poll routine
1474 * hif_send_complete_check, so it must not contain any receive
1475 * functionality.
1476 */
1477
Komal Seelam644263d2016-02-22 20:45:49 +05301478void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001479{
1480 void *CE_context;
1481 void *transfer_context;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301482 qdf_dma_addr_t buf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001483 unsigned int nbytes;
1484 unsigned int id;
1485 unsigned int sw_idx, hw_idx;
1486 uint32_t toeplitz_hash_result;
Houston Hoffmana575ec22015-12-14 16:35:15 -08001487 struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001488
Houston Hoffmanbac94542016-03-14 21:11:59 -07001489 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1490 return;
1491
Komal Seelambd7c51d2016-02-24 10:27:30 +05301492 hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
Houston Hoffmana575ec22015-12-14 16:35:15 -08001493 NULL, NULL, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001494
1495 /* Since this function is called from both user context and
1496 * tasklet context, the spinlock has to disable bottom halves.
1497 * This fix assumes that the ATH_11AC_TXCOMPACT flag is always
1498 * enabled in TX polling mode. If this is not the case, more
1499 * bottom-half spinlock changes are needed. Due to data path
1500 * performance concerns, after internal discussion we decided
1501 * to make the minimum change, i.e., only address the issue that
1502 * occurred in this function. The possible downside of this
1503 * minimum change is that, in the future, if some other function
1504 * is also opened up for use from user context, those cases will
1505 * need to be addressed by changing spin_lock to spin_lock_bh as well.
1506 */
1507
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301508 qdf_spin_lock_bh(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001509
1510 if (CE_state->send_cb) {
1511 {
1512 /* Pop completed send buffers and call the
1513 * registered send callback for each
1514 */
1515 while (ce_completed_send_next_nolock
1516 (CE_state, &CE_context,
1517 &transfer_context, &buf,
1518 &nbytes, &id, &sw_idx, &hw_idx,
1519 &toeplitz_hash_result) ==
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301520 QDF_STATUS_SUCCESS) {
Houston Hoffmana575ec22015-12-14 16:35:15 -08001521 if (ce_id != CE_HTT_H2T_MSG) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301522 qdf_spin_unlock_bh(
Houston Hoffman44b7e4a2015-09-03 17:01:22 -07001523 &CE_state->ce_index_lock);
1524 CE_state->send_cb(
1525 (struct CE_handle *)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001526 CE_state, CE_context,
1527 transfer_context, buf,
1528 nbytes, id, sw_idx, hw_idx,
1529 toeplitz_hash_result);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301530 qdf_spin_lock_bh(
Houston Hoffman44b7e4a2015-09-03 17:01:22 -07001531 &CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001532 } else {
1533 struct HIF_CE_pipe_info *pipe_info =
1534 (struct HIF_CE_pipe_info *)
1535 CE_context;
1536
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301537 qdf_spin_lock_bh(&pipe_info->
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001538 completion_freeq_lock);
1539 pipe_info->num_sends_allowed++;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301540 qdf_spin_unlock_bh(&pipe_info->
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001541 completion_freeq_lock);
1542 }
1543 }
1544 }
1545 }
1546
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301547 qdf_spin_unlock_bh(&CE_state->ce_index_lock);
Houston Hoffmana575ec22015-12-14 16:35:15 -08001548
Komal Seelambd7c51d2016-02-24 10:27:30 +05301549 hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
Houston Hoffmana575ec22015-12-14 16:35:15 -08001550 NULL, NULL, 0);
Houston Hoffmanbac94542016-03-14 21:11:59 -07001551 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001552}
1553
1554#endif /*ATH_11AC_TXCOMPACT */
1555
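/*
 * Usage sketch (illustrative, meaningful only in ATH_11AC_TXCOMPACT
 * builds): the periodic send-complete poll can reap Tx completions on
 * the HTT host-to-target engine without touching the Rx path. The
 * calling context shown here is hypothetical.
 *
 *	if (scn->ce_id_to_state[CE_HTT_H2T_MSG])
 *		ce_per_engine_servicereap(scn, CE_HTT_H2T_MSG);
 */
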
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001556/*
1557 * Number of times to check for any pending tx/rx completion on
1558 * a copy engine; this count should be big enough. Once we hit
1559 * this threshold we will not check for any Tx/Rx completion in the
1560 * same interrupt handling pass. Note that this threshold is only
1561 * used for Rx interrupt processing; it can be used for Tx as well
1562 * if we suspect an infinite loop in checking for pending Tx completion.
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001563 */
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001564#define CE_TXRX_COMP_CHECK_THRESHOLD 20
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001565
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001566#ifdef WLAN_FEATURE_FASTPATH
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001567/**
1568 * ce_fastpath_rx_handle() - Updates write_index and calls fastpath msg handler
1569 * @ce_state: handle to copy engine state
1570 * @cmpl_msdus: Rx msdus
1571 * @num_cmpls: number of Rx msdus
1572 * @ctrl_addr: CE control address
1573 *
1574 * Return: None
1575 */
1576static void ce_fastpath_rx_handle(struct CE_state *ce_state,
1577 qdf_nbuf_t *cmpl_msdus, uint32_t num_cmpls,
1578 uint32_t ctrl_addr)
1579{
1580 struct hif_softc *scn = ce_state->scn;
1581 struct CE_ring_state *dest_ring = ce_state->dest_ring;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001582 uint32_t nentries_mask = dest_ring->nentries_mask;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001583 uint32_t write_index;
1584
Houston Hoffman53641652016-04-29 18:24:32 -07001585 qdf_spin_unlock(&ce_state->ce_index_lock);
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001586 (ce_state->fastpath_handler)(ce_state->context, cmpl_msdus, num_cmpls);
Houston Hoffman53641652016-04-29 18:24:32 -07001587 qdf_spin_lock(&ce_state->ce_index_lock);
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001588
1589 /* Update Destination Ring Write Index */
1590 write_index = dest_ring->write_index;
1591 write_index = CE_RING_IDX_ADD(nentries_mask, write_index, num_cmpls);
Houston Hoffmanfa260aa2016-04-26 16:14:13 -07001592
1593 hif_record_ce_desc_event(scn, ce_state->id,
1594 FAST_RX_WRITE_INDEX_UPDATE,
1595 NULL, NULL, write_index);
1596
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001597 CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
1598 dest_ring->write_index = write_index;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001599}
1600
Houston Hoffman56e0d702016-05-05 17:48:06 -07001601#define MSG_FLUSH_NUM 32
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001602/**
1603 * ce_per_engine_service_fast() - CE handler routine to service fastpath messages
1604 * @scn: hif_context
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001605 * @ce_id: Copy engine ID
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001606 * 1) Go through the CE ring, and find the completions
1607 * 2) For valid completions retrieve context (nbuf) for per_transfer_context[]
1608 * 3) Unmap buffer & accumulate in an array.
1609 * 4) Call message handler when array is full or when exiting the handler
1610 *
1611 * Return: void
1612 */
1613
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001614static void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001615{
1616 struct CE_state *ce_state = scn->ce_id_to_state[ce_id];
1617 struct CE_ring_state *dest_ring = ce_state->dest_ring;
1618 struct CE_dest_desc *dest_ring_base =
1619 (struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1620
1621 uint32_t nentries_mask = dest_ring->nentries_mask;
1622 uint32_t sw_index = dest_ring->sw_index;
1623 uint32_t nbytes;
1624 qdf_nbuf_t nbuf;
Houston Hoffman53641652016-04-29 18:24:32 -07001625 dma_addr_t paddr;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001626 struct CE_dest_desc *dest_desc;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001627 qdf_nbuf_t cmpl_msdus[MSG_FLUSH_NUM];
1628 uint32_t ctrl_addr = ce_state->ctrl_addr;
1629 uint32_t nbuf_cmpl_idx = 0;
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001630 unsigned int more_comp_cnt = 0;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001631
1632more_data:
Houston Hoffman3b252aa2016-04-29 17:53:59 -07001633 for (;;) {
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001634
Houston Hoffman3b252aa2016-04-29 17:53:59 -07001635 dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base,
1636 sw_index);
Houston Hoffmanfa260aa2016-04-26 16:14:13 -07001637
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001638 /*
Houston Hoffman3b252aa2016-04-29 17:53:59 -07001639 * The following read of the descriptor is from non-cached memory
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001640 */
Houston Hoffman3b252aa2016-04-29 17:53:59 -07001641 nbytes = dest_desc->nbytes;
1642
1643 /* If completion is invalid, break */
1644 if (qdf_unlikely(nbytes == 0))
1645 break;
1646
1647
1648 /*
1649 * Build the nbuf list from valid completions
1650 */
1651 nbuf = dest_ring->per_transfer_context[sw_index];
1652
1653 /*
1654 * No lock is needed here, since this is the only thread
1655 * that accesses the sw_index
1656 */
1657 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1658
1659 /*
1660 * CAREFUL : Uncached write, but still less expensive,
1661 * since most modern caches use "write-combining" to
1662 * flush multiple cache-writes all at once.
1663 */
1664 dest_desc->nbytes = 0;
1665
1666 /*
1667 * Per our understanding this is not required on our
1668 * platform, since we are doing the same cache invalidation
1669 * operation on the same buffer twice in succession,
1670 * without any modification to this buffer by the CPU in
1671 * between.
1672 * However, this code with two syncs in succession has
1673 * been undergoing some testing at a customer site,
1674 * and has shown no problems so far. We would
1675 * like the customer to confirm that this sync
1676 * is really not required, before we remove this line
1677 * completely.
1678 */
Houston Hoffman53641652016-04-29 18:24:32 -07001679 paddr = QDF_NBUF_CB_PADDR(nbuf);
Houston Hoffman3b252aa2016-04-29 17:53:59 -07001680
Houston Hoffman53641652016-04-29 18:24:32 -07001681 qdf_mem_dma_sync_single_for_cpu(scn->qdf_dev, paddr,
Houston Hoffman3b252aa2016-04-29 17:53:59 -07001682 (skb_end_pointer(nbuf) - (nbuf)->data),
1683 DMA_FROM_DEVICE);
1684
1685 qdf_nbuf_put_tail(nbuf, nbytes);
1686
1687 qdf_assert_always(nbuf->data != NULL);
1688
1689 cmpl_msdus[nbuf_cmpl_idx++] = nbuf;
1690
1691 /*
1692 * we are not posting the buffers back; instead we
1693 * are reusing the buffers
1694 */
1695 if (nbuf_cmpl_idx == MSG_FLUSH_NUM) {
1696 hif_record_ce_desc_event(scn, ce_state->id,
1697 FAST_RX_SOFTWARE_INDEX_UPDATE,
1698 NULL, NULL, sw_index);
1699 dest_ring->sw_index = sw_index;
1700
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001701 ce_fastpath_rx_handle(ce_state, cmpl_msdus,
Houston Hoffman3b252aa2016-04-29 17:53:59 -07001702 MSG_FLUSH_NUM, ctrl_addr);
Houston Hoffman53641652016-04-29 18:24:32 -07001703
1704 ce_state->receive_count += MSG_FLUSH_NUM;
1705 if (qdf_unlikely(hif_ce_service_should_yield(
1706 scn, ce_state))) {
1707 ce_state->force_break = 1;
1708 qdf_atomic_set(&ce_state->rx_pending, 1);
1709 return;
1710 }
1711
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001712 nbuf_cmpl_idx = 0;
Houston Hoffman53641652016-04-29 18:24:32 -07001713 more_comp_cnt = 0;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001714 }
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001715 }
Houston Hoffman3b252aa2016-04-29 17:53:59 -07001716
1717 hif_record_ce_desc_event(scn, ce_state->id,
1718 FAST_RX_SOFTWARE_INDEX_UPDATE,
1719 NULL, NULL, sw_index);
1720
1721 dest_ring->sw_index = sw_index;
1722
1723 /*
1724 * If there are not enough completions to fill the array,
1725 * just call the message handler here
1726 */
1727 if (nbuf_cmpl_idx) {
Houston Hoffman3b252aa2016-04-29 17:53:59 -07001728 ce_fastpath_rx_handle(ce_state, cmpl_msdus,
1729 nbuf_cmpl_idx, ctrl_addr);
Houston Hoffman53641652016-04-29 18:24:32 -07001730
1731 ce_state->receive_count += nbuf_cmpl_idx;
1732 if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
1733 ce_state->force_break = 1;
1734 qdf_atomic_set(&ce_state->rx_pending, 1);
1735 return;
1736 }
1737
1738 /* check for more packets after upper layer processing */
Houston Hoffman3b252aa2016-04-29 17:53:59 -07001739 nbuf_cmpl_idx = 0;
Houston Hoffman53641652016-04-29 18:24:32 -07001740 more_comp_cnt = 0;
1741 goto more_data;
Houston Hoffman3b252aa2016-04-29 17:53:59 -07001742 }
1743 qdf_atomic_set(&ce_state->rx_pending, 0);
1744 CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1745 HOST_IS_COPY_COMPLETE_MASK);
1746
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001747 if (ce_recv_entries_done_nolock(scn, ce_state)) {
1748 if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1749 goto more_data;
1750 } else {
1751 HIF_ERROR("%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1752 __func__, nentries_mask,
1753 ce_state->dest_ring->sw_index,
1754 CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr));
1755 }
1756 }
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001757}
1758
1759#else
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001760static void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001761{
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001762}
1763#endif /* WLAN_FEATURE_FASTPATH */
1764
Houston Hoffman05652722016-04-29 16:58:59 -07001765#define CE_PER_ENGINE_SERVICE_MAX_TIME_JIFFIES 2
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001766/*
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001767 * Guts of interrupt handler for per-engine interrupts on a particular CE.
1768 *
1769 * Invokes registered callbacks for recv_complete,
1770 * send_complete, and watermarks.
1771 *
1772 * Returns: number of messages processed
1773 */
Komal Seelam644263d2016-02-22 20:45:49 +05301774int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001775{
1776 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1777 uint32_t ctrl_addr = CE_state->ctrl_addr;
1778 void *CE_context;
1779 void *transfer_context;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301780 qdf_dma_addr_t buf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001781 unsigned int nbytes;
1782 unsigned int id;
1783 unsigned int flags;
1784 uint32_t CE_int_status;
1785 unsigned int more_comp_cnt = 0;
1786 unsigned int more_snd_comp_cnt = 0;
1787 unsigned int sw_idx, hw_idx;
1788 uint32_t toeplitz_hash_result;
Komal Seelambd7c51d2016-02-24 10:27:30 +05301789 uint32_t mode = hif_get_conparam(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001790
Houston Hoffman85925072016-05-06 17:02:18 -07001791 if (hif_is_nss_wifi_enabled(scn) && (CE_state->htt_rx_data))
1792 return CE_state->receive_count;
1793
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001794 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1795 HIF_ERROR("[premature rc=0]\n");
1796 return 0; /* no work done */
1797 }
1798
Houston Hoffman53641652016-04-29 18:24:32 -07001799 /* Clear force_break flag and re-initialize receive_count to 0 */
1800 CE_state->receive_count = 0;
1801 CE_state->force_break = 0;
1802 CE_state->ce_service_yield_time = qdf_system_ticks() +
1803 CE_PER_ENGINE_SERVICE_MAX_TIME_JIFFIES;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001804
Houston Hoffman53641652016-04-29 18:24:32 -07001805
1806 qdf_spin_lock(&CE_state->ce_index_lock);
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001807 /*
1808 * With below check we make sure CE we are handling is datapath CE and
1809 * fastpath is enabled.
1810 */
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001811 if (ce_is_fastpath_handler_registered(CE_state)) {
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001812 /* For datapath only Rx CEs */
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001813 ce_per_engine_service_fast(scn, CE_id);
Houston Hoffmanc8993b52016-05-12 21:05:15 -07001814 goto unlock_end;
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001815 }
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001816
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001817more_completions:
1818 if (CE_state->recv_cb) {
1819
1820 /* Pop completed recv buffers and call
1821 * the registered recv callback for each
1822 */
1823 while (ce_completed_recv_next_nolock
1824 (CE_state, &CE_context, &transfer_context,
1825 &buf, &nbytes, &id, &flags) ==
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301826 QDF_STATUS_SUCCESS) {
1827 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001828 CE_state->recv_cb((struct CE_handle *)CE_state,
1829 CE_context, transfer_context, buf,
1830 nbytes, id, flags);
1831
1832 /*
1833 * EV #112693 -
1834 * [Peregrine][ES1][WB342][Win8x86][Performance]
1835 * BSoD_0x133 occurred in VHT80 UDP_DL
1836 * Break out of the DPC by force if the number of loops in
1837 * hif_pci_ce_recv_data reaches MAX_NUM_OF_RECEIVES,
1838 * to avoid spending too much time in the
1839 * DPC for each interrupt. Schedule another
1840 * DPC to avoid data loss if we had taken a
1841 * force-break action. This currently applies to Windows
1842 * OS only; Linux/Mac OS can extend it to their
1843 * platforms if necessary.
1844 */
1845
1846 /* Break the receive processing by
1847 * force if force_break is set
1848 */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301849 if (qdf_unlikely(CE_state->force_break)) {
1850 qdf_atomic_set(&CE_state->rx_pending, 1);
Houston Hoffmanc8993b52016-05-12 21:05:15 -07001851 goto target_access_end;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001852 }
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301853 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001854 }
1855 }
1856
1857 /*
1858 * Attention: the while loop below may turn into an infinite
1859 * loop during a send stress test.
1860 * Resolve it the same way as the receive case (refer to EV #112693).
1861 */
1862
1863 if (CE_state->send_cb) {
1864 /* Pop completed send buffers and call
1865 * the registered send callback for each
1866 */
1867
1868#ifdef ATH_11AC_TXCOMPACT
1869 while (ce_completed_send_next_nolock
1870 (CE_state, &CE_context,
1871 &transfer_context, &buf, &nbytes,
1872 &id, &sw_idx, &hw_idx,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301873 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001874
1875 if (CE_id != CE_HTT_H2T_MSG ||
Houston Hoffman75ef5a52016-04-14 17:15:49 -07001876 QDF_IS_EPPING_ENABLED(mode)) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301877 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001878 CE_state->send_cb((struct CE_handle *)CE_state,
1879 CE_context, transfer_context,
1880 buf, nbytes, id, sw_idx,
1881 hw_idx, toeplitz_hash_result);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301882 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001883 } else {
1884 struct HIF_CE_pipe_info *pipe_info =
1885 (struct HIF_CE_pipe_info *)CE_context;
1886
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301887 qdf_spin_lock(&pipe_info->
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001888 completion_freeq_lock);
1889 pipe_info->num_sends_allowed++;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301890 qdf_spin_unlock(&pipe_info->
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001891 completion_freeq_lock);
1892 }
1893 }
1894#else /*ATH_11AC_TXCOMPACT */
1895 while (ce_completed_send_next_nolock
1896 (CE_state, &CE_context,
1897 &transfer_context, &buf, &nbytes,
1898 &id, &sw_idx, &hw_idx,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301899 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1900 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001901 CE_state->send_cb((struct CE_handle *)CE_state,
1902 CE_context, transfer_context, buf,
1903 nbytes, id, sw_idx, hw_idx,
1904 toeplitz_hash_result);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301905 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001906 }
1907#endif /*ATH_11AC_TXCOMPACT */
1908 }
1909
1910more_watermarks:
1911 if (CE_state->misc_cbs) {
1912 CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
1913 if (CE_int_status & CE_WATERMARK_MASK) {
1914 if (CE_state->watermark_cb) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301915 qdf_spin_unlock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001916 /* Convert HW IS bits to software flags */
1917 flags =
1918 (CE_int_status & CE_WATERMARK_MASK) >>
1919 CE_WM_SHFT;
1920
1921 CE_state->
1922 watermark_cb((struct CE_handle *)CE_state,
1923 CE_state->wm_context, flags);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301924 qdf_spin_lock(&CE_state->ce_index_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001925 }
1926 }
1927 }
1928
1929 /*
1930 * Clear the misc interrupts (watermark) that were handled above,
1931 * and that will be checked again below.
1932 * Clear and check for copy-complete interrupts again, just in case
1933 * more copy completions happened while the misc interrupts were being
1934 * handled.
1935 */
1936 CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1937 CE_WATERMARK_MASK |
1938 HOST_IS_COPY_COMPLETE_MASK);
1939
1940 /*
1941 * Now that per-engine interrupts are cleared, verify that
1942 * no recv interrupts arrive while processing send interrupts,
1943 * and no recv or send interrupts happened while processing
1944 * misc interrupts. Go back and check again. Keep checking until
1945 * we find no more events to process.
1946 */
1947 if (CE_state->recv_cb && ce_recv_entries_done_nolock(scn, CE_state)) {
Houston Hoffman75ef5a52016-04-14 17:15:49 -07001948 if (QDF_IS_EPPING_ENABLED(mode) ||
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001949 more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1950 goto more_completions;
1951 } else {
1952 HIF_ERROR(
1953 "%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1954 __func__, CE_state->dest_ring->nentries_mask,
1955 CE_state->dest_ring->sw_index,
1956 CE_DEST_RING_READ_IDX_GET(scn,
1957 CE_state->ctrl_addr));
1958 }
1959 }
1960
1961 if (CE_state->send_cb && ce_send_entries_done_nolock(scn, CE_state)) {
Houston Hoffman75ef5a52016-04-14 17:15:49 -07001962 if (QDF_IS_EPPING_ENABLED(mode) ||
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001963 more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1964 goto more_completions;
1965 } else {
1966 HIF_ERROR(
1967 "%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1968 __func__, CE_state->src_ring->nentries_mask,
1969 CE_state->src_ring->sw_index,
1970 CE_SRC_RING_READ_IDX_GET(scn,
1971 CE_state->ctrl_addr));
1972 }
1973 }
1974
1975 if (CE_state->misc_cbs) {
1976 CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
1977 if (CE_int_status & CE_WATERMARK_MASK) {
1978 if (CE_state->watermark_cb) {
1979 goto more_watermarks;
1980 }
1981 }
1982 }
1983
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301984 qdf_atomic_set(&CE_state->rx_pending, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001985
Houston Hoffmanc8993b52016-05-12 21:05:15 -07001986unlock_end:
1987 qdf_spin_unlock(&CE_state->ce_index_lock);
1988target_access_end:
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001989 if (Q_TARGET_ACCESS_END(scn) < 0)
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001990 HIF_ERROR("<--[premature rc=%d]\n", CE_state->receive_count);
1991 return CE_state->receive_count;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001992}
1993
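/*
 * Usage sketch (illustrative): a per-CE bottom half would typically
 * call ce_per_engine_service() and re-arm itself if rx work is still
 * pending after the engine yielded. hif_example_bottom_half() and
 * hif_example_reschedule() are hypothetical, not part of this driver.
 *
 *	static void hif_example_bottom_half(struct hif_softc *scn, int ce_id)
 *	{
 *		ce_per_engine_service(scn, ce_id);
 *
 *		if (ce_check_rx_pending(scn->ce_id_to_state[ce_id]))
 *			hif_example_reschedule(scn, ce_id);
 *	}
 */
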
1994/*
1995 * Handler for per-engine interrupts on ALL active CEs.
1996 * This is used in cases where the system is sharing a
1997 * single interrupt for all CEs.
1998 */
1999
Komal Seelam644263d2016-02-22 20:45:49 +05302000void ce_per_engine_service_any(int irq, struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002001{
2002 int CE_id;
2003 uint32_t intr_summary;
2004
Houston Hoffmanbac94542016-03-14 21:11:59 -07002005 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2006 return;
2007
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302008 if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002009 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
2010 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302011 if (qdf_atomic_read(&CE_state->rx_pending)) {
2012 qdf_atomic_set(&CE_state->rx_pending, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002013 ce_per_engine_service(scn, CE_id);
2014 }
2015 }
2016
Houston Hoffmanbac94542016-03-14 21:11:59 -07002017 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002018 return;
2019 }
2020
2021 intr_summary = CE_INTERRUPT_SUMMARY(scn);
2022
2023 for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
2024 if (intr_summary & (1 << CE_id)) {
2025 intr_summary &= ~(1 << CE_id);
2026 } else {
2027 continue; /* no intr pending on this CE */
2028 }
2029
2030 ce_per_engine_service(scn, CE_id);
2031 }
2032
Houston Hoffmanbac94542016-03-14 21:11:59 -07002033 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002034}
2035
2036/*
2037 * Adjust interrupts for the copy complete handler.
2038 * If it's needed for either send or recv, then unmask
2039 * this interrupt; otherwise, mask it.
2040 *
2041 * Called with target_lock held.
2042 */
2043static void
2044ce_per_engine_handler_adjust(struct CE_state *CE_state,
2045 int disable_copy_compl_intr)
2046{
2047 uint32_t ctrl_addr = CE_state->ctrl_addr;
Komal Seelam644263d2016-02-22 20:45:49 +05302048 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002049
2050 CE_state->disable_copy_compl_intr = disable_copy_compl_intr;
Houston Hoffmanbac94542016-03-14 21:11:59 -07002051
2052 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2053 return;
2054
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002055 if ((!disable_copy_compl_intr) &&
2056 (CE_state->send_cb || CE_state->recv_cb)) {
2057 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
2058 } else {
2059 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
2060 }
2061
2062 if (CE_state->watermark_cb) {
2063 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
2064 } else {
2065 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
2066 }
Houston Hoffmanbac94542016-03-14 21:11:59 -07002067 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002068}
2069
2070/*Iterate the CE_state list and disable the compl interrupt
2071 * if it has been registered already.
2072 */
Komal Seelam644263d2016-02-22 20:45:49 +05302073void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002074{
2075 int CE_id;
2076
Houston Hoffmanbac94542016-03-14 21:11:59 -07002077 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2078 return;
2079
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002080 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
2081 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
2082 uint32_t ctrl_addr = CE_state->ctrl_addr;
2083
2084 /* if the interrupt is currently enabled, disable it */
2085 if (!CE_state->disable_copy_compl_intr
2086 && (CE_state->send_cb || CE_state->recv_cb)) {
2087 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
2088 }
2089
2090 if (CE_state->watermark_cb) {
2091 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
2092 }
2093 }
Houston Hoffmanbac94542016-03-14 21:11:59 -07002094 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002095}
2096
Komal Seelam644263d2016-02-22 20:45:49 +05302097void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002098{
2099 int CE_id;
2100
Houston Hoffmanbac94542016-03-14 21:11:59 -07002101 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2102 return;
2103
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002104 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
2105 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
2106 uint32_t ctrl_addr = CE_state->ctrl_addr;
2107
2108 /*
2109 * If the CE is supposed to have copy complete interrupts
2110 * enabled (i.e. there is a callback registered, and the
2111 * "disable" flag is not set), then re-enable the interrupt.
2112 */
2113 if (!CE_state->disable_copy_compl_intr
2114 && (CE_state->send_cb || CE_state->recv_cb)) {
2115 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
2116 }
2117
2118 if (CE_state->watermark_cb) {
2119 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
2120 }
2121 }
Houston Hoffmanbac94542016-03-14 21:11:59 -07002122 Q_TARGET_ACCESS_END(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002123}
2124
Houston Hoffmana837c9a2015-09-03 12:47:01 -07002125/**
2126 * ce_send_cb_register(): register completion handler
2127 * @copyeng: CE_state representing the ce we are adding the behavior to
2128 * @fn_ptr: callback that the ce should use when processing tx completions
2129 * @disable_interrupts: whether the copy complete interrupt should be disabled.
2130 *
2131 * Caller should guarantee that no transactions are in progress before
2132 * switching the callback function.
2133 *
2134 * Registers the send context before the fn pointer so that if the cb is valid
2135 * the context should be valid.
2136 *
2137 * Beware that currently this function will enable completion interrupts.
2138 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002139void
2140ce_send_cb_register(struct CE_handle *copyeng,
2141 ce_send_cb fn_ptr,
2142 void *ce_send_context, int disable_interrupts)
2143{
2144 struct CE_state *CE_state = (struct CE_state *)copyeng;
2145
Sanjay Devnani9ce15772015-11-12 14:08:57 -08002146 if (CE_state == NULL) {
2147 pr_err("%s: Error CE state = NULL\n", __func__);
2148 return;
2149 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002150 CE_state->send_context = ce_send_context;
Houston Hoffmana837c9a2015-09-03 12:47:01 -07002151 CE_state->send_cb = fn_ptr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002152 ce_per_engine_handler_adjust(CE_state, disable_interrupts);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002153}
2154
Houston Hoffmana837c9a2015-09-03 12:47:01 -07002155/**
2156 * ce_recv_cb_register(): register completion handler
2157 * @copyeng: CE_state representing the ce we are adding the behavior to
2158 * @fn_ptr: callback that the ce should use when processing rx completions
2159 * @disable_interrupts: whether the copy complete interrupt should be disabled.
2160 *
2161 * Registers the send context before the fn pointer so that if the cb is valid
2162 * the context should be valid.
2163 *
2164 * Caller should guarantee that no transactions are in progress before
2165 * switching the callback function.
2166 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002167void
2168ce_recv_cb_register(struct CE_handle *copyeng,
2169 CE_recv_cb fn_ptr,
2170 void *CE_recv_context, int disable_interrupts)
2171{
2172 struct CE_state *CE_state = (struct CE_state *)copyeng;
2173
Sanjay Devnani9ce15772015-11-12 14:08:57 -08002174 if (CE_state == NULL) {
2175 pr_err("%s: ERROR CE state = NULL\n", __func__);
2176 return;
2177 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002178 CE_state->recv_context = CE_recv_context;
Houston Hoffmana837c9a2015-09-03 12:47:01 -07002179 CE_state->recv_cb = fn_ptr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002180 ce_per_engine_handler_adjust(CE_state, disable_interrupts);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002181}
2182
Houston Hoffmana837c9a2015-09-03 12:47:01 -07002183/**
2184 * ce_watermark_cb_register(): register completion handler
2185 * @copyeng: CE_state representing the ce we are adding the behavior to
2186 * @fn_ptr: callback that the ce should use when processing watermark events
2187 *
2188 * Caller should guarantee that no watermark events are being processed before
2189 * switching the callback function.
2190 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002191void
2192ce_watermark_cb_register(struct CE_handle *copyeng,
2193 CE_watermark_cb fn_ptr, void *CE_wm_context)
2194{
2195 struct CE_state *CE_state = (struct CE_state *)copyeng;
2196
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002197 CE_state->watermark_cb = fn_ptr;
2198 CE_state->wm_context = CE_wm_context;
2199 ce_per_engine_handler_adjust(CE_state, 0);
2200 if (fn_ptr) {
2201 CE_state->misc_cbs = 1;
2202 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002203}
2204
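/*
 * Usage sketch (illustrative): a HIF pipe setup path registers its
 * completion handlers once the CE is up. my_send_done, my_recv_done,
 * my_watermark and my_pipe_ctx are hypothetical; their prototypes must
 * match the ce_send_cb, CE_recv_cb and CE_watermark_cb types used in
 * the registration prototypes above.
 *
 *	ce_send_cb_register(ce_hdl, my_send_done, my_pipe_ctx, 0);
 *	ce_recv_cb_register(ce_hdl, my_recv_done, my_pipe_ctx, 0);
 *	ce_watermark_cb_register(ce_hdl, my_watermark, my_pipe_ctx);
 */
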
Komal Seelam644263d2016-02-22 20:45:49 +05302205bool ce_get_rx_pending(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002206{
2207 int CE_id;
2208
2209 for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
2210 struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302211 if (qdf_atomic_read(&CE_state->rx_pending))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002212 return true;
2213 }
2214
2215 return false;
2216}
2217
2218/**
2219 * ce_check_rx_pending() - check whether a copy engine has unprocessed rx
Houston Hoffmaneb2516c2016-04-01 12:53:50 -07002220 * @CE_state: context of the copy engine to check
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002221 *
Houston Hoffmaneb2516c2016-04-01 12:53:50 -07002222 * Return: true if per_engine_service
2223 * didn't process all the rx descriptors.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002224 */
Houston Hoffmaneb2516c2016-04-01 12:53:50 -07002225bool ce_check_rx_pending(struct CE_state *CE_state)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002226{
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302227 if (qdf_atomic_read(&CE_state->rx_pending))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002228 return true;
2229 else
2230 return false;
2231}
Houston Hoffman8ed92e52015-09-02 14:49:48 -07002232
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002233#ifdef IPA_OFFLOAD
Leo Changd85f78d2015-11-13 10:55:34 -08002234/**
2235 * ce_ipa_get_resource() - get uc resource on copyengine
2236 * @ce: copyengine context
2237 * @ce_sr_base_paddr: copyengine source ring base physical address
2238 * @ce_sr_ring_size: copyengine source ring size
2239 * @ce_reg_paddr: copyengine register physical address
2240 *
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002241 * The copy engine should release these resources to the micro controller.
2242 * The micro controller needs:
Leo Changd85f78d2015-11-13 10:55:34 -08002243 * - Copy engine source descriptor base address
2244 * - Copy engine source descriptor size
2245 * - PCI BAR address to access copy engine registers
2246 *
2247 * Return: None
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002248 */
2249void ce_ipa_get_resource(struct CE_handle *ce,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302250 qdf_dma_addr_t *ce_sr_base_paddr,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002251 uint32_t *ce_sr_ring_size,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302252 qdf_dma_addr_t *ce_reg_paddr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002253{
2254 struct CE_state *CE_state = (struct CE_state *)ce;
2255 uint32_t ring_loop;
2256 struct CE_src_desc *ce_desc;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302257 qdf_dma_addr_t phy_mem_base;
Komal Seelam644263d2016-02-22 20:45:49 +05302258 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002259
2260 if (CE_RUNNING != CE_state->state) {
2261 *ce_sr_base_paddr = 0;
2262 *ce_sr_ring_size = 0;
2263 return;
2264 }
2265
2266 /* Update default value for descriptor */
2267 for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
2268 ring_loop++) {
2269 ce_desc = (struct CE_src_desc *)
2270 ((char *)CE_state->src_ring->base_addr_owner_space +
2271 ring_loop * (sizeof(struct CE_src_desc)));
2272 CE_IPA_RING_INIT(ce_desc);
2273 }
2274
2275 /* Get BAR address */
2276 hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
2277
Leo Changd85f78d2015-11-13 10:55:34 -08002278 *ce_sr_base_paddr = CE_state->src_ring->base_addr_CE_space;
2279 *ce_sr_ring_size = (uint32_t) (CE_state->src_ring->nentries *
2280 sizeof(struct CE_src_desc));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002281 *ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
2282 SR_WR_INDEX_ADDRESS;
2283 return;
2284}
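
/*
 * Usage sketch (illustrative, IPA_OFFLOAD builds only): the IPA uC
 * setup path fetches the source ring and register addresses and hands
 * them to the micro controller. Local variable names are hypothetical.
 *
 *	qdf_dma_addr_t sr_base, reg_pa;
 *	uint32_t sr_size;
 *
 *	ce_ipa_get_resource(ce_hdl, &sr_base, &sr_size, &reg_pa);
 */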
2285#endif /* IPA_OFFLOAD */
2286