/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */
#include <osdep.h>
#include "a_types.h"
#include "athdefs.h"
#include "osapi_linux.h"
#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include <bmi_msg.h>
#include "hif_io32.h"
#include <hif.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#ifdef CONFIG_CNSS
#include <net/cnss.h>
#endif
#include "epping_main.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#include "platform_icnss.h"
#include "qwlan_version.h"

#define CE_POLL_TIMEOUT 10      /* ms */

/* Forward references */
static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix EV118783: poll to check whether a BMI response has arrived,
 * rather than waiting for an interrupt that may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC  1000


static int hif_post_recv_buffers(struct hif_softc *scn);
static void hif_config_rri_on_ddr(struct hif_softc *scn);

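/**
 * ce_poll_timeout() - poll timer callback for a copy engine
 * @arg: opaque pointer to the struct CE_state being polled
 *
 * Services the copy engine in place of an interrupt and then, as long
 * as the timer is still marked initialized, re-arms itself for another
 * CE_POLL_TIMEOUT milliseconds.
 */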
static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;

	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}

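/**
 * roundup_pwr2() - round a value up to the next power of 2
 * @n: value to round up
 *
 * For example, roundup_pwr2(40) returns 64, while roundup_pwr2(64)
 * returns 64 unchanged.
 *
 * Return: the smallest power of 2 that is >= @n, or 0 (with an
 * assert) if @n is too large to round up within 32 bits
 */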
static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	QDF_ASSERT(0); /* n too large */
	return 0;
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 *
 * This should be called during the initialization sequence before
 * interrupts are enabled, so we don't have to worry about thread safety.
 */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr)
{
	struct CE_state *CE_state;
	uint32_t ctrl_addr;
	unsigned int nentries;
	qdf_dma_addr_t base_addr;
	bool malloc_CE_state = false;
	bool malloc_src_ring = false;

	QDF_ASSERT(CE_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(CE_id);
	CE_state = scn->ce_id_to_state[CE_id];

	if (!CE_state) {
		CE_state =
		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
		if (!CE_state) {
			HIF_ERROR("%s: CE_state has no mem", __func__);
			return NULL;
		}
		malloc_CE_state = true;
		qdf_mem_zero(CE_state, sizeof(*CE_state));
		scn->ce_id_to_state[CE_id] = CE_state;
		qdf_spinlock_create(&CE_state->ce_index_lock);

		CE_state->id = CE_id;
		CE_state->ctrl_addr = ctrl_addr;
		CE_state->state = CE_RUNNING;
		CE_state->attr_flags = attr->flags;
	}
	CE_state->scn = scn;

	qdf_atomic_init(&CE_state->rx_pending);
	if (attr == NULL) {
		/* Already initialized; caller wants the handle */
		return (struct CE_handle *)CE_state;
	}

#ifdef ADRASTEA_SHADOW_REGISTERS
	HIF_ERROR("%s: Using Shadow Registers instead of CE Registers\n",
		  __func__);
#endif

	if (CE_state->src_sz_max)
		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
	else
		CE_state->src_sz_max = attr->src_sz_max;

	ce_init_ce_desc_event_log(CE_id,
			attr->src_nentries + attr->dest_nentries);

	/* source ring setup */
	nentries = attr->src_nentries;
	if (nentries) {
		struct CE_ring_state *src_ring;
		unsigned CE_nbytes;
		char *ptr;
		uint64_t dma_addr;

		nentries = roundup_pwr2(nentries);
		if (CE_state->src_ring) {
			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
		} else {
			CE_nbytes = sizeof(struct CE_ring_state)
				    + (nentries * sizeof(void *));
			ptr = qdf_mem_malloc(CE_nbytes);
			if (!ptr) {
				/* cannot allocate src ring. If the
				 * CE_state was allocated locally, free
				 * it and return an error.
				 */
				HIF_ERROR("%s: src ring has no mem", __func__);
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					scn->ce_id_to_state[CE_id] = NULL;
					qdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			} else {
				/* src ring was allocated here; mark it
				 * so it can be freed on error paths
				 */
				malloc_src_ring = true;
			}
			qdf_mem_zero(ptr, CE_nbytes);

			src_ring = CE_state->src_ring =
					   (struct CE_ring_state *)ptr;
			ptr += sizeof(struct CE_ring_state);
			src_ring->nentries = nentries;
			src_ring->nentries_mask = nentries - 1;
			if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
				goto error_target_access;
			src_ring->hw_index =
				CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
			src_ring->sw_index = src_ring->hw_index;
			src_ring->write_index =
				CE_SRC_RING_WRITE_IDX_GET(scn, ctrl_addr);
			if (Q_TARGET_ACCESS_END(scn) < 0)
				goto error_target_access;

			src_ring->low_water_mark_nentries = 0;
			src_ring->high_water_mark_nentries = nentries;
			src_ring->per_transfer_context = (void **)ptr;

			/* Legacy platforms that do not support cache
			 * coherent DMA are unsupported
			 */
			src_ring->base_addr_owner_space_unaligned =
				qdf_mem_alloc_consistent(scn->qdf_dev,
						scn->qdf_dev->dev,
						(nentries *
						 sizeof(struct CE_src_desc) +
						 CE_DESC_RING_ALIGN),
						&base_addr);
			if (src_ring->base_addr_owner_space_unaligned
					== NULL) {
				HIF_ERROR("%s: src ring has no DMA mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->base_addr_CE_space_unaligned = base_addr;

			if (src_ring->base_addr_CE_space_unaligned &
			    (CE_DESC_RING_ALIGN - 1)) {
				src_ring->base_addr_CE_space =
					(src_ring->base_addr_CE_space_unaligned
					 + CE_DESC_RING_ALIGN -
					 1) & ~(CE_DESC_RING_ALIGN - 1);

				src_ring->base_addr_owner_space =
					(void *)(((size_t) src_ring->
					    base_addr_owner_space_unaligned +
					    CE_DESC_RING_ALIGN -
					    1) & ~(CE_DESC_RING_ALIGN - 1));
			} else {
				src_ring->base_addr_CE_space =
					src_ring->base_addr_CE_space_unaligned;
				src_ring->base_addr_owner_space =
					src_ring->
					base_addr_owner_space_unaligned;
			}
			/*
			 * Also allocate a shadow src ring in
			 * regular mem to use for faster access.
			 */
			src_ring->shadow_base_unaligned =
				qdf_mem_malloc(nentries *
					       sizeof(struct CE_src_desc) +
					       CE_DESC_RING_ALIGN);
			if (src_ring->shadow_base_unaligned == NULL) {
				HIF_ERROR("%s: src ring no shadow_base mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->shadow_base = (struct CE_src_desc *)
				(((size_t) src_ring->shadow_base_unaligned +
				  CE_DESC_RING_ALIGN - 1) &
				 ~(CE_DESC_RING_ALIGN - 1));

			if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
				goto error_target_access;
			dma_addr = src_ring->base_addr_CE_space;
			CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
				 (uint32_t)(dma_addr & 0xFFFFFFFF));
#ifdef WLAN_ENABLE_QCA6180
			{
				uint32_t tmp;

				tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(
						scn, ctrl_addr);
				tmp &= ~0x1F;
				dma_addr = ((dma_addr >> 32) & 0x1F) | tmp;
				CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
					 ctrl_addr, (uint32_t)dma_addr);
			}
#endif
			CE_SRC_RING_SZ_SET(scn, ctrl_addr, nentries);
			CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
#ifdef BIG_ENDIAN_HOST
			/* Enable source ring byte swap for big endian host */
			CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
			CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
			CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
			if (Q_TARGET_ACCESS_END(scn) < 0)
				goto error_target_access;
		}
	}

	/* destination ring setup */
	nentries = attr->dest_nentries;
	if (nentries) {
		struct CE_ring_state *dest_ring;
		unsigned CE_nbytes;
		char *ptr;
		uint64_t dma_addr;

		nentries = roundup_pwr2(nentries);
		if (CE_state->dest_ring) {
			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
		} else {
			CE_nbytes = sizeof(struct CE_ring_state)
				    + (nentries * sizeof(void *));
			ptr = qdf_mem_malloc(CE_nbytes);
			if (!ptr) {
				/* cannot allocate dst ring. If the CE_state
				 * or src ring was allocated locally, free
				 * it and return an error.
				 */
				HIF_ERROR("%s: dest ring has no mem",
					  __func__);
				if (malloc_src_ring) {
					qdf_mem_free(CE_state->src_ring);
					CE_state->src_ring = NULL;
					malloc_src_ring = false;
				}
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					scn->ce_id_to_state[CE_id] = NULL;
					qdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			}
			qdf_mem_zero(ptr, CE_nbytes);

			dest_ring = CE_state->dest_ring =
					    (struct CE_ring_state *)ptr;
			ptr += sizeof(struct CE_ring_state);
			dest_ring->nentries = nentries;
			dest_ring->nentries_mask = nentries - 1;
			if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
				goto error_target_access;
			dest_ring->sw_index =
				CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
			dest_ring->write_index =
				CE_DEST_RING_WRITE_IDX_GET(scn, ctrl_addr);
			if (Q_TARGET_ACCESS_END(scn) < 0)
				goto error_target_access;

			dest_ring->low_water_mark_nentries = 0;
			dest_ring->high_water_mark_nentries = nentries;
			dest_ring->per_transfer_context = (void **)ptr;

			/* Legacy platforms that do not support cache
			 * coherent DMA are unsupported
			 */
			dest_ring->base_addr_owner_space_unaligned =
				qdf_mem_alloc_consistent(scn->qdf_dev,
						scn->qdf_dev->dev,
						(nentries *
						 sizeof(struct CE_dest_desc) +
						 CE_DESC_RING_ALIGN),
						&base_addr);
			if (dest_ring->base_addr_owner_space_unaligned
					== NULL) {
				HIF_ERROR("%s: dest ring has no DMA mem",
					  __func__);
				goto error_no_dma_mem;
			}
			dest_ring->base_addr_CE_space_unaligned = base_addr;

			/* Correctly initialize memory to 0 to
			 * prevent garbage data from crashing the
			 * system when the firmware is downloaded
			 */
			qdf_mem_zero(dest_ring->base_addr_owner_space_unaligned,
				     nentries * sizeof(struct CE_dest_desc) +
				     CE_DESC_RING_ALIGN);

			if (dest_ring->base_addr_CE_space_unaligned &
			    (CE_DESC_RING_ALIGN - 1)) {

				dest_ring->base_addr_CE_space =
					(dest_ring->
					 base_addr_CE_space_unaligned +
					 CE_DESC_RING_ALIGN -
					 1) & ~(CE_DESC_RING_ALIGN - 1);

				dest_ring->base_addr_owner_space =
					(void *)(((size_t) dest_ring->
					    base_addr_owner_space_unaligned +
					    CE_DESC_RING_ALIGN -
					    1) & ~(CE_DESC_RING_ALIGN - 1));
			} else {
				dest_ring->base_addr_CE_space =
					dest_ring->base_addr_CE_space_unaligned;
				dest_ring->base_addr_owner_space =
					dest_ring->
					base_addr_owner_space_unaligned;
			}

			if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
				goto error_target_access;
			dma_addr = dest_ring->base_addr_CE_space;
			CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
				 (uint32_t)(dma_addr & 0xFFFFFFFF));
#ifdef WLAN_ENABLE_QCA6180
			{
				uint32_t tmp;

				tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn,
						ctrl_addr);
				tmp &= ~0x1F;
				dma_addr = ((dma_addr >> 32) & 0x1F) | tmp;
				CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
					 ctrl_addr, (uint32_t)dma_addr);
			}
#endif
			CE_DEST_RING_SZ_SET(scn, ctrl_addr, nentries);
#ifdef BIG_ENDIAN_HOST
			/* Enable Dest ring byte swap for big endian host */
			CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
			CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
			CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
			if (Q_TARGET_ACCESS_END(scn) < 0)
				goto error_target_access;

			/* epping */
			/* poll timer */
			if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL)) {
				qdf_timer_init(scn->qdf_dev,
					       &CE_state->poll_timer,
					       ce_poll_timeout,
					       CE_state,
					       QDF_TIMER_TYPE_SW);
				CE_state->timer_inited = true;
				qdf_timer_mod(&CE_state->poll_timer,
					      CE_POLL_TIMEOUT);
			}
		}
	}

	/* Enable CE error interrupts */
	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		goto error_target_access;
	CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
	if (Q_TARGET_ACCESS_END(scn) < 0)
		goto error_target_access;

	return (struct CE_handle *)CE_state;

error_target_access:
error_no_dma_mem:
	ce_fini((struct CE_handle *)CE_state);
	return NULL;
}

#ifdef WLAN_FEATURE_FASTPATH
/**
 * ce_h2t_tx_ce_cleanup() - placeholder function for H2T CE cleanup
 * @ce_hdl: copy engine handle
 *
 * No processing is required inside this function. Using an assert,
 * it makes sure that the TX CE has been processed completely.
 *
 * This is called while dismantling CE structures. No other thread
 * should be using these structures while dismantling is occurring,
 * therefore no locking is needed.
 *
 * Return: none
 */
void
ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct hif_softc *sc = ce_state->scn;
	uint32_t sw_index, write_index;

	if (sc->fastpath_mode_on && (ce_state->id == CE_HTT_H2T_MSG)) {
		HIF_INFO("%s %d Fastpath mode ON, Cleaning up HTT Tx CE\n",
			 __func__, __LINE__);
		sw_index = src_ring->sw_index;
		write_index = src_ring->write_index;

		/* At this point Tx CE should be clean */
		qdf_assert_always(sw_index == write_index);
	}
}
#else
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
}
#endif /* WLAN_FEATURE_FASTPATH */

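/**
 * ce_fini() - tear down a copy engine and free its resources
 * @copyeng: copy engine handle returned by ce_init()
 *
 * Marks the CE unused, frees the source ring (its shadow ring and DMA
 * descriptor memory included), frees the destination ring, stops the
 * poll timer if one was started, and finally frees the CE state itself.
 */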
void ce_fini(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int CE_id = CE_state->id;
	struct hif_softc *scn = CE_state->scn;

	CE_state->state = CE_UNUSED;
	scn->ce_id_to_state[CE_id] = NULL;
	if (CE_state->src_ring) {
		/* Cleanup the HTT Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		if (CE_state->src_ring->shadow_base_unaligned)
			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
		if (CE_state->src_ring->base_addr_owner_space_unaligned)
			qdf_mem_free_consistent(scn->qdf_dev,
					    scn->qdf_dev->dev,
					    (CE_state->src_ring->nentries *
					     sizeof(struct CE_src_desc) +
					     CE_DESC_RING_ALIGN),
					    CE_state->src_ring->
					    base_addr_owner_space_unaligned,
					    CE_state->src_ring->
					    base_addr_CE_space, 0);
		qdf_mem_free(CE_state->src_ring);
	}
	if (CE_state->dest_ring) {
		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
			qdf_mem_free_consistent(scn->qdf_dev,
					    scn->qdf_dev->dev,
					    (CE_state->dest_ring->nentries *
					     sizeof(struct CE_dest_desc) +
					     CE_DESC_RING_ALIGN),
					    CE_state->dest_ring->
					    base_addr_owner_space_unaligned,
					    CE_state->dest_ring->
					    base_addr_CE_space, 0);
		qdf_mem_free(CE_state->dest_ring);

		/* epping */
		if (CE_state->timer_inited) {
			CE_state->timer_inited = false;
			qdf_timer_free(&CE_state->poll_timer);
		}
	}
	qdf_mem_free(CE_state);
}

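/**
 * hif_detach_htc() - detach the HTC layer from HIF
 * @hif_ctx: HIF opaque context
 *
 * Clears both the pending and current message callback tables so
 * that no further HTC callbacks are invoked.
 */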
void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	qdf_mem_zero(&hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
	qdf_mem_zero(&hif_state->msg_callbacks_current,
		     sizeof(hif_state->msg_callbacks_current));
}

/* Send the first nbytes bytes of the buffer */
QDF_STATUS
hif_send_head(struct hif_opaque_softc *hif_ctx,
	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
	      qdf_nbuf_t nbuf, unsigned int data_attr)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	int bytes = nbytes, nfrags = 0;
	struct ce_sendlist sendlist;
	int status, i = 0;
	unsigned int mux_id = 0;

	QDF_ASSERT(nbytes <= qdf_nbuf_len(nbuf));

	transfer_id =
		(mux_id & MUX_ID_MASK) |
		(transfer_id & TRANSACTION_ID_MASK);
	data_attr &= DESC_DATA_FLAG_MASK;
	/*
	 * The common case involves sending multiple fragments within a
	 * single download (the tx descriptor and the tx frame header).
	 * So, optimize for the case of multiple fragments by not even
	 * checking whether it's necessary to use a sendlist.
	 * The overhead of using a sendlist for a single buffer download
	 * is not a big deal, since it happens rarely (for WMI messages).
	 */
	ce_sendlist_init(&sendlist);
	do {
		qdf_dma_addr_t frag_paddr;
		int frag_bytes;

		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
		/*
		 * Clear the packet offset for all but the first CE desc.
		 */
		if (i++ > 0)
			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;

		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
					     frag_bytes >
					     bytes ? bytes : frag_bytes,
					     qdf_nbuf_get_frag_is_wordstream
					     (nbuf,
					      nfrags) ? 0 :
					     CE_SEND_FLAG_SWAP_DISABLE,
					     data_attr);
		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("%s: error, frag_num %d larger than limit",
				  __func__, nfrags);
			return status;
		}
		bytes -= frag_bytes;
		nfrags++;
	} while (bytes > 0);

	/* Make sure we have resources to handle this request */
	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	if (pipe_info->num_sends_allowed < nfrags) {
		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
		return QDF_STATUS_E_RESOURCES;
	}
	pipe_info->num_sends_allowed -= nfrags;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);

	if (qdf_unlikely(ce_hdl == NULL)) {
		HIF_ERROR("%s: error CE handle is null", __func__);
		return A_ERROR;
	}

	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
			     (uint8_t *)(qdf_nbuf_data(nbuf)),
			     sizeof(qdf_nbuf_data(nbuf))));
	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
	QDF_ASSERT(status == QDF_STATUS_SUCCESS);

	return status;
}

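/**
 * hif_send_complete_check() - check for and reap send completions
 * @hif_ctx: HIF opaque context
 * @pipe: pipe number to check
 * @force: when zero, the check is skipped while plenty of send
 *	resources remain, since it involves a CE register read
 *
 * Polls the copy engine for completed sends, either reaping them
 * (ATH_11AC_TXCOMPACT) or running the full per-engine service.
 */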
void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
			     int force)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance. If there seem to be plenty of
		 * resources left, then just wait, since checking involves
		 * reading a CE register, which is a relatively expensive
		 * operation.
		 */
		resources = hif_get_free_queue_number(hif_ctx, pipe);
		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config[pipe].src_nentries >> 1)) {
			return;
		}
	}
#ifdef ATH_11AC_TXCOMPACT
	ce_per_engine_servicereap(scn, pipe);
#else
	ce_per_engine_service(scn, pipe);
#endif
}

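/**
 * hif_get_free_queue_number() - get the number of free send slots
 * @hif_ctx: HIF opaque context
 * @pipe: pipe number
 *
 * Return: the number of sends currently allowed on the pipe, read
 * under the completion-queue lock
 */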
uint16_t
hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	uint16_t rv;

	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	rv = pipe_info->num_sends_allowed;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
	return rv;
}

/* Called by lower (CE) layer when a send to Target completes. */
void
hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int sw_index, unsigned int hw_index,
		     unsigned int toeplitz_hash_result)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	unsigned int sw_idx = sw_index, hw_idx = hw_index;
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	do {
		/*
		 * The upper layer callback will be triggered
		 * when the last fragment is completed.
		 */
		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
			if (scn->target_status
			    == OL_TRGET_STATUS_RESET)
				qdf_nbuf_free(transfer_context);
			else
				msg_callbacks->txCompletionHandler(
					msg_callbacks->Context,
					transfer_context, transfer_id,
					toeplitz_hash_result);
		}

		qdf_spin_lock(&pipe_info->completion_freeq_lock);
		pipe_info->num_sends_allowed++;
		qdf_spin_unlock(&pipe_info->completion_freeq_lock);
	} while (ce_completed_send_next(copyeng,
			&ce_context, &transfer_context,
			&CE_data, &nbytes, &transfer_id,
			&sw_idx, &hw_idx,
			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
}

/**
 * hif_ce_do_recv(): send message from copy engine to upper layers
 * @msg_callbacks: structure containing callback and callback context
 * @netbuf: skb containing message
 * @nbytes: number of bytes in the message
 * @pipe_info: used for the pipe_number info
 *
 * Checks the packet length, sets the length in the netbuf,
 * and calls the upper layer callback.
 *
 * Return: None
 */
static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
				  qdf_nbuf_t netbuf, int nbytes,
				  struct HIF_CE_pipe_info *pipe_info) {
	if (nbytes <= pipe_info->buf_sz) {
		qdf_nbuf_set_pktlen(netbuf, nbytes);
		msg_callbacks->
			rxCompletionHandler(msg_callbacks->Context,
					    netbuf, pipe_info->pipe_num);
	} else {
		HIF_ERROR("%s: Invalid Rx msg buf:%p nbytes:%d",
			  __func__, netbuf, nbytes);
		qdf_nbuf_free(netbuf);
	}
}

/* Called by lower (CE) layer when data is received from the Target. */
void
hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int flags)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct CE_state *ce_state = (struct CE_state *) copyeng;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
#ifdef HIF_PCI
	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
#endif
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;
	uint32_t count;

	do {
#ifdef HIF_PCI
		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
#endif
		qdf_nbuf_unmap_single(scn->qdf_dev,
				      (qdf_nbuf_t) transfer_context,
				      QDF_DMA_FROM_DEVICE);

		atomic_inc(&pipe_info->recv_bufs_needed);
		hif_post_recv_buffers_for_pipe(pipe_info);
		if (scn->target_status == OL_TRGET_STATUS_RESET)
			qdf_nbuf_free(transfer_context);
		else
			hif_ce_do_recv(msg_callbacks, transfer_context,
				       nbytes, pipe_info);

		/* Set up force_break flag if num of receives reaches
		 * MAX_NUM_OF_RECEIVES
		 */
		ce_state->receive_count++;
		count = ce_state->receive_count;
		if (qdf_unlikely(hif_max_num_receives_reached(scn, count))) {
			ce_state->force_break = 1;
			break;
		}
	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
					&CE_data, &nbytes, &transfer_id,
					&flags) == QDF_STATUS_SUCCESS);
}

/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */

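/**
 * hif_post_init() - save HTC message callbacks for later installation
 * @hif_ctx: HIF opaque context
 * @unused: unused parameter
 * @callbacks: message callbacks to save
 *
 * The callbacks are only saved as pending here; they become the
 * current callbacks via hif_msg_callbacks_install() when HIF starts.
 */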
void
hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
	      struct hif_msg_callbacks *callbacks)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	spin_lock_init(&pcie_access_log_lock);
#endif
	/* Save callbacks for later installation */
	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
		     sizeof(hif_state->msg_callbacks_pending));
}

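/**
 * hif_completion_thread_startup() - register CE send/recv callbacks
 * @hif_state: HIF CE state
 *
 * Validates that upper-layer completion handlers have been
 * registered, then registers hif_pci_ce_send_done() and
 * hif_pci_ce_recv_data() with each pipe's copy engine (skipping the
 * diagnostic CE) and creates the per-pipe completion locks.
 *
 * Return: 0 on success, -EINVAL or -EFAULT on failure
 */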
int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
{
	struct CE_handle *ce_diag = hif_state->ce_diag;
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	struct hif_msg_callbacks *hif_msg_callbacks =
		&hif_state->msg_callbacks_current;

	/* daemonize("hif_compl_thread"); */

	if (scn->ce_count == 0) {
		HIF_ERROR("%s: Invalid ce_count\n", __func__);
		return -EINVAL;
	}

	if (!hif_msg_callbacks ||
	    !hif_msg_callbacks->rxCompletionHandler ||
	    !hif_msg_callbacks->txCompletionHandler) {
		HIF_ERROR("%s: no completion handler registered", __func__);
		return -EFAULT;
	}

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr attr;
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl == ce_diag) {
			continue;       /* Handle Diagnostic CE specially */
		}
		attr = host_ce_config[pipe_num];
		if (attr.src_nentries) {
			/* pipe used to send to target */
			HIF_INFO_MED("%s: pipe_num:%d pipe_info:0x%p",
				     __func__, pipe_num, pipe_info);
			ce_send_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_send_done, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
			pipe_info->num_sends_allowed = attr.src_nentries - 1;
		}
		if (attr.dest_nentries) {
			/* pipe used to receive from target */
			ce_recv_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_recv_data, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
		}

		if (attr.src_nentries)
			qdf_spinlock_create(&pipe_info->completion_freeq_lock);
	}

	A_TARGET_ACCESS_UNLIKELY(scn);
	return 0;
}

/*
 * Install pending msg callbacks.
 *
 * TBDXXX: This hack is needed because upper layers install msg callbacks
 * for use with HTC before BMI is done; yet this HIF implementation
 * needs to continue to use BMI msg callbacks. Really, upper layers
 * should not register HTC callbacks until AFTER BMI phase.
 */
static void hif_msg_callbacks_install(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	qdf_mem_copy(&hif_state->msg_callbacks_current,
		     &hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
}

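/**
 * hif_get_default_pipe() - get the default upload/download pipes
 * @hif_hdl: HIF opaque handle
 * @ULPipe: filled with the default upload pipe number
 * @DLPipe: filled with the default download pipe number
 *
 * Resolves the pipes used by the HTC_CTRL_RSVD_SVC service.
 */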
void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
			  uint8_t *DLPipe)
{
	int ul_is_polled, dl_is_polled;

	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
}

/**
 * hif_dump_pipe_debug_count() - Log error count
 * @scn: hif_softc pointer.
 *
 * Output the pipe error counts of each pipe to log file
 *
 * Return: N/A
 */
void hif_dump_pipe_debug_count(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num;

	if (hif_state == NULL) {
		HIF_ERROR("%s hif_state is NULL", __func__);
		return;
	}
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];

		if (pipe_info->nbuf_alloc_err_count > 0 ||
		    pipe_info->nbuf_dma_err_count > 0 ||
		    pipe_info->nbuf_ce_enqueue_err_count)
			HIF_ERROR(
				"%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_alloc_err_count,
				pipe_info->nbuf_dma_err_count,
				pipe_info->nbuf_ce_enqueue_err_count);
	}
}

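/**
 * hif_post_recv_buffers_for_pipe() - replenish a pipe's receive buffers
 * @pipe_info: pipe to post receive buffers for
 *
 * Allocates, DMA-maps and enqueues one receive nbuf for each
 * outstanding entry in recv_bufs_needed. On any failure the
 * corresponding per-pipe error counter is incremented and the
 * needed-buffer count is restored.
 *
 * Return: 0 if all needed buffers were posted, 1 on error
 */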
static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	qdf_size_t buf_sz;
	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
	QDF_STATUS ret;
	uint32_t bufs_posted = 0;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return 0;
	}

	ce_hdl = pipe_info->ce_hdl;

	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
		qdf_dma_addr_t CE_data;      /* CE space buffer address */
		qdf_nbuf_t nbuf;
		int status;

		atomic_dec(&pipe_info->recv_bufs_needed);
		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
		if (!nbuf) {
			qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
			pipe_info->nbuf_alloc_err_count++;
			qdf_spin_unlock_bh(
				&pipe_info->recv_bufs_needed_lock);
			HIF_ERROR(
				"%s buf alloc error [%d] needed %d, nbuf_alloc_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_alloc_err_count);
			atomic_inc(&pipe_info->recv_bufs_needed);
			return 1;
		}

		/*
		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
		 * CE_data = dma_map_single(dev, data, buf_sz, );
		 * DMA_FROM_DEVICE);
		 */
		ret =
			qdf_nbuf_map_single(scn->qdf_dev, nbuf,
					    QDF_DMA_FROM_DEVICE);

		if (unlikely(ret != QDF_STATUS_SUCCESS)) {
			qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
			pipe_info->nbuf_dma_err_count++;
			qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
			HIF_ERROR(
				"%s buf map error [%d] needed %d, nbuf_dma_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_dma_err_count);
			qdf_nbuf_free(nbuf);
			atomic_inc(&pipe_info->recv_bufs_needed);
			return 1;
		}

		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);

		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
						   buf_sz, DMA_FROM_DEVICE);
		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		if (status != EOK) {
			qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
			pipe_info->nbuf_ce_enqueue_err_count++;
			qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
			HIF_ERROR(
				"%s buf enqueue error [%d] needed %d, nbuf_ce_enqueue_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_ce_enqueue_err_count);
			atomic_inc(&pipe_info->recv_bufs_needed);
			qdf_nbuf_free(nbuf);
			return 1;
		}

		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
		bufs_posted++;
	}
	pipe_info->nbuf_alloc_err_count =
		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
	pipe_info->nbuf_dma_err_count =
		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
	pipe_info->nbuf_ce_enqueue_err_count =
		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
		pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;

	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

	return 0;
}

/*
 * Try to post all desired receive buffers for all pipes.
 * Returns 0 if all desired buffers are posted,
 * non-zero if we were unable to completely
 * replenish receive buffers.
 */
static int hif_post_recv_buffers(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num, rv = 0;

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (hif_post_recv_buffers_for_pipe(pipe_info)) {
			rv = 1;
			goto done;
		}
	}

done:
	A_TARGET_ACCESS_UNLIKELY(scn);

	return rv;
}

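/**
 * hif_start() - start the HIF layer
 * @hif_ctx: HIF opaque context
 *
 * Installs the pending message callbacks, registers the per-pipe CE
 * completion handlers and posts an initial set of receive buffers.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */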
QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_msg_callbacks_install(scn);

	if (hif_completion_thread_startup(hif_state))
		return QDF_STATUS_E_FAILURE;

	/* Post buffers once to start things off. */
	(void)hif_post_recv_buffers(scn);

	hif_state->started = true;

	return QDF_STATUS_SUCCESS;
}

#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_enable_fastpath() - update that we have enabled fastpath mode
 * @hif_ctx: HIF context
 *
 * For use in data path
 *
 * Return: void
 */
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	HIF_INFO("Enabling fastpath mode\n");
	scn->fastpath_mode_on = 1;
}

/**
 * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
 * @hif_ctx: HIF Context
 *
 * For use in data path to skip HTC
 *
 * Return: bool
 */
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->fastpath_mode_on;
}

/**
 * hif_get_ce_handle - API to get CE handle for FastPath mode
 * @hif_ctx: HIF Context
 * @id: CopyEngine Id
 *
 * API to return CE handle for fastpath mode
 *
 * Return: CE state pointer for the given copy engine id
 */
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->ce_id_to_state[id];
}
#endif /* WLAN_FEATURE_FASTPATH */

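/**
 * hif_recv_buffer_cleanup_on_pipe() - free a pipe's posted recv buffers
 * @pipe_info: pipe to clean up
 *
 * Revokes every receive buffer still enqueued on the pipe's copy
 * engine, unmapping and freeing each nbuf. Does nothing for unused
 * copy engines or before HIF has started.
 */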
void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct hif_softc *scn;
	struct CE_handle *ce_hdl;
	uint32_t buf_sz;
	struct HIF_CE_state *hif_state;
	qdf_nbuf_t netbuf;
	qdf_dma_addr_t CE_data;
	void *per_CE_context;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started) {
		return;
	}

	scn = HIF_GET_SOFTC(hif_state);
	ce_hdl = pipe_info->ce_hdl;

	if (scn->qdf_dev == NULL) {
		return;
	}
	while (ce_revoke_recv_next
		       (ce_hdl, &per_CE_context, (void **)&netbuf,
			&CE_data) == QDF_STATUS_SUCCESS) {
		qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
				      QDF_DMA_FROM_DEVICE);
		qdf_nbuf_free(netbuf);
	}
}

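/**
 * hif_send_buffer_cleanup_on_pipe() - complete a pipe's pending sends
 * @pipe_info: pipe to clean up
 *
 * Cancels every send still queued on the pipe's copy engine and
 * indicates completion to the upper layer so the buffers can be
 * freed, except for packets owned by the HTC endpoint, which are
 * freed elsewhere.
 */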
void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	struct HIF_CE_state *hif_state;
	struct hif_softc *scn;
	qdf_nbuf_t netbuf;
	void *per_CE_context;
	qdf_dma_addr_t CE_data;
	unsigned int nbytes;
	unsigned int id;
	uint32_t buf_sz;
	uint32_t toeplitz_hash_result;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started) {
		return;
	}

	scn = HIF_GET_SOFTC(hif_state);

	ce_hdl = pipe_info->ce_hdl;

	while (ce_cancel_send_next
		       (ce_hdl, &per_CE_context,
		       (void **)&netbuf, &CE_data, &nbytes,
		       &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
			/*
			 * Packets enqueued by htt_h2t_ver_req_msg() and
			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
			 * freed in htt_htc_misc_pkt_pool_free() in
			 * wlantl_close(), so do not free them here again
			 * by checking whether it's the endpoint
			 * which they are queued in.
			 */
			if (id == scn->htc_endpoint)
				return;
			/* Indicate the completion to higher
			 * layer to free the buffer
			 */
			hif_state->msg_callbacks_current.
			txCompletionHandler(hif_state->
					    msg_callbacks_current.Context,
					    netbuf, id, toeplitz_hash_result);
		}
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
{
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		hif_recv_buffer_cleanup_on_pipe(pipe_info);
		hif_send_buffer_cleanup_on_pipe(pipe_info);
	}
}

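/**
 * hif_flush_surprise_remove() - clean up buffers on surprise removal
 * @hif_ctx: HIF opaque context
 *
 * Releases all residual send and receive buffers when the device is
 * unexpectedly removed.
 */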
Komal Seelam5584a7c2016-02-24 19:22:48 +05301247void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001248{
Komal Seelam644263d2016-02-22 20:45:49 +05301249 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05301250 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Komal Seelam644263d2016-02-22 20:45:49 +05301251
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001252 hif_buffer_cleanup(hif_state);
1253}

void hif_stop(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	int pipe_num;

	scn->hif_init_done = false;

	/*
	 * At this point, asynchronous threads are stopped, the Target
	 * should not DMA nor interrupt, and Host code may not initiate
	 * anything more. So we just need to clean up Host-side state.
	 */

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	hif_buffer_cleanup(hif_state);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}

	if (hif_state->sleep_timer_init) {
		qdf_timer_stop(&hif_state->sleep_timer);
		qdf_timer_free(&hif_state->sleep_timer);
		hif_state->sleep_timer_init = false;
	}

	hif_state->started = false;
}

#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40


static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};


/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
 * mapping of HTC services to HIF pipes.
 */
/*
 * This authoritative table defines Copy Engine configuration and the mapping
 * of services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
 ============================================================================
 Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
            |                      |      | ctio | Size     | Frequency
            |                      |      | n    |          |
 ============================================================================
 tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
 descriptor |                      |      |      | O(100B)  | and regular
 download   |                      |      |      |          |
 ----------------------------------------------------------------------------
 rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
 indication |                      |      |      | O(10B)   | regular
 upload     |                      |      |      |          |
 ----------------------------------------------------------------------------
 MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
 upload     |                      |      |      | O(1000B) | (frequent
 e.g. noise |                      |      |      |          | during IP1.0
 packets    |                      |      |      |          | testing)
 ----------------------------------------------------------------------------
 MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
 download   |                      |      |      | O(1000B) | (frequent
 e.g.       |                      |      |      |          | during IP1.0
 misdirecte |                      |      |      |          | testing)
 d EAPOL    |                      |      |      |          |
 packets    |                      |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
            | DATA_VO (uplink)     |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
            | DATA_VO (downlink)   |      |      |          |
 ----------------------------------------------------------------------------
 WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
            |                      |      |      | O(100B)  |
 ----------------------------------------------------------------------------
 WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
 messages   | (downlink)           |      |      | O(100B)  |
            |                      |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
            | HTC_RAW_STREAMS      |      |      |          |
            | (uplink)             |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
            | HTC_RAW_STREAMS      |      |      |          |
            | (downlink)           |      |      |          |
 ----------------------------------------------------------------------------
 diag       | none (raw CE)        | CE 7 | t<>h |    4     | Diag Window
            |                      |      |      |          | infrequent
 ============================================================================
 */

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC,    /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC,    /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		5,
	},
	/* (Additions here) */

	{                       /* Must be last */
		0,
		0,
		0,
	},
};

static struct service_to_pipe *target_service_to_ce_map =
	target_service_to_ce_map_wlan;
static int target_service_to_ce_map_sz = sizeof(target_service_to_ce_map_wlan);

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
	{0, 0, 0,},             /* Must be last */
};

/**
 * hif_get_target_ce_config() - get copy engine configuration
 * @target_ce_config_ret: basic copy engine configuration
 * @target_ce_config_sz_ret: size of the basic configuration in bytes
 * @target_service_to_ce_map_ret: service mapping for the copy engines
 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
 * @target_shadow_reg_cfg_ret: shadow register configuration
 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
 *
 * Provides an accessor to these values outside of this file.
 * Currently they are stored in static pointers selected from several
 * compile-time configurations. Runtime selection would need to consider
 * mode, target type and bus type.
 *
 * Return: return by parameter.
 */
void hif_get_target_ce_config(struct CE_pipe_config **target_ce_config_ret,
		int *target_ce_config_sz_ret,
		struct service_to_pipe **target_service_to_ce_map_ret,
		int *target_service_to_ce_map_sz_ret,
		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
		int *shadow_cfg_sz_ret)
{
	*target_ce_config_ret = target_ce_config;
	*target_ce_config_sz_ret = target_ce_config_sz;
	*target_service_to_ce_map_ret = target_service_to_ce_map;
	*target_service_to_ce_map_sz_ret = target_service_to_ce_map_sz;

	if (target_shadow_reg_cfg_ret)
		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;

	if (shadow_cfg_sz_ret)
		*shadow_cfg_sz_ret = shadow_cfg_sz;
}
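
/*
 * Illustrative sketch, not driver code: pulling the CE configuration
 * through the accessor above, much as hif_wlan_enable() below does when
 * filling struct icnss_wlan_enable_cfg. Sizes come back in bytes.
 */
static void example_query_ce_config(void)
{
	struct CE_pipe_config *ce_cfg;
	struct service_to_pipe *svc_map;
	struct shadow_reg_cfg *shadow_cfg;
	int ce_cfg_sz, svc_map_sz, shadow_cfg_sz;

	hif_get_target_ce_config(&ce_cfg, &ce_cfg_sz,
				 &svc_map, &svc_map_sz,
				 &shadow_cfg, &shadow_cfg_sz);

	/* divide each size by its element size to get an entry count */
}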

/**
 * hif_wlan_enable(): call the platform driver to enable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode and CE configuration to the
 * platform driver to enable wlan.
 *
 * Return: Linux error code
 */
int hif_wlan_enable(struct hif_softc *scn)
{
	struct icnss_wlan_enable_cfg cfg;
	enum icnss_driver_mode mode;
	uint32_t con_mode = hif_get_conparam(scn);

	hif_get_target_ce_config((struct CE_pipe_config **)&cfg.ce_tgt_cfg,
				 &cfg.num_ce_tgt_cfg,
				 (struct service_to_pipe **)&cfg.ce_svc_cfg,
				 &cfg.num_ce_svc_pipe_cfg,
				 (struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
				 &cfg.num_shadow_reg_cfg);

	/* translate from structure size to array size */
	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);

	if (QDF_GLOBAL_FTM_MODE == con_mode)
		mode = ICNSS_FTM;
	else if (WLAN_IS_EPPING_ENABLED(con_mode))
		mode = ICNSS_EPPING;
	else
		mode = ICNSS_MISSION;

	return icnss_wlan_enable(&cfg, mode, QWLAN_VERSIONSTR);
}

/**
 * hif_ce_prepare_config() - load the correct static tables.
 * @scn: hif context
 *
 * Epping uses different static attribute tables than mission mode.
 */
void hif_ce_prepare_config(struct hif_softc *scn)
{
	uint32_t mode = hif_get_conparam(scn);

	/* if epping is enabled we need to use the epping configuration. */
	if (WLAN_IS_EPPING_ENABLED(mode)) {
		if (WLAN_IS_EPPING_IRQ(mode))
			host_ce_config = host_ce_config_wlan_epping_irq;
		else
			host_ce_config = host_ce_config_wlan_epping_poll;
		target_ce_config = target_ce_config_wlan_epping;
		target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
		target_service_to_ce_map =
			target_service_to_ce_map_wlan_epping;
		target_service_to_ce_map_sz =
			sizeof(target_service_to_ce_map_wlan_epping);
	}
}

/**
 * hif_ce_open() - do ce specific allocations
 * @hif_sc: pointer to hif context
 *
 * Return: QDF_STATUS_SUCCESS for success or QDF_STATUS_E_NOMEM
 */
QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	qdf_spinlock_create(&hif_state->keep_awake_lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * hif_ce_close() - do ce specific free
 * @hif_sc: pointer to hif context
 */
void hif_ce_close(struct hif_softc *hif_sc)
{
}

/**
 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
 * @hif_sc: hif context
 *
 * Uses state variables to support cleaning up when hif_config_ce fails.
 */
void hif_unconfig_ce(struct hif_softc *hif_sc)
{
	int pipe_num;
	struct HIF_CE_pipe_info *pipe_info;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ce_unregister_irq(hif_state, (1 << pipe_num));
			hif_sc->request_irq_done = false;
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}
	if (hif_sc->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		hif_sc->athdiag_procfs_inited = false;
	}
}

/**
 * hif_config_ce() - configure copy engines
 * @scn: hif context
 *
 * Prepares firmware, copy engine hardware and host software according
 * to the attributes selected by hif_ce_prepare_config.
 *
 * Also calls athdiag_procfs_init.
 *
 * Return: 0 for success, nonzero for failure.
 */
int hif_config_ce(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct HIF_CE_pipe_info *pipe_info;
	int pipe_num;
#ifdef ADRASTEA_SHADOW_REGISTERS
	int i;
#endif
	QDF_STATUS rv = QDF_STATUS_SUCCESS;

	scn->notice_send = true;

	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;

	hif_config_rri_on_ddr(scn);

	/* During CE initialization */
	scn->ce_count = HOST_CE_COUNT;
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr *attr;

		pipe_info = &hif_state->pipe_info[pipe_num];
		pipe_info->pipe_num = pipe_num;
		pipe_info->HIF_CE_state = hif_state;
		attr = &host_ce_config[pipe_num];
		pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
		QDF_ASSERT(pipe_info->ce_hdl != NULL);
		if (pipe_info->ce_hdl == NULL) {
			rv = QDF_STATUS_E_FAILURE;
			A_TARGET_ACCESS_UNLIKELY(scn);
			goto err;
		}

		if (pipe_num == DIAG_CE_ID) {
			/* Reserve the last CE for
			 * Diagnostic Window support */
			hif_state->ce_diag =
				hif_state->pipe_info[scn->ce_count - 1].ce_hdl;
			continue;
		}

		pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
		qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
		if (attr->dest_nentries > 0) {
			atomic_set(&pipe_info->recv_bufs_needed,
				   init_buffer_count(attr->dest_nentries - 1));
		} else {
			atomic_set(&pipe_info->recv_bufs_needed, 0);
		}
		ce_tasklet_init(hif_state, (1 << pipe_num));
		ce_register_irq(hif_state, (1 << pipe_num));
		scn->request_irq_done = true;
	}

	if (athdiag_procfs_init(scn) != 0) {
		A_TARGET_ACCESS_UNLIKELY(scn);
		goto err;
	}
	scn->athdiag_procfs_inited = true;

	HIF_INFO_MED("%s: ce_init done", __func__);

	init_tasklet_workers(hif_hdl);

	HIF_TRACE("%s: X, ret = %d\n", __func__, rv);

#ifdef ADRASTEA_SHADOW_REGISTERS
	HIF_ERROR("Using Shadow Registers instead of CE Registers\n");
	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
		HIF_ERROR("%s Shadow Register%d is mapped to address %x\n",
			  __func__, i,
			  (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
	}
#endif

	return rv != QDF_STATUS_SUCCESS;

err:
	/* Failure, so clean up */
	hif_unconfig_ce(scn);
	HIF_TRACE("%s: X, ret = %d\n", __func__, rv);
	return QDF_STATUS_E_FAILURE;
}

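/*
 * Illustrative sketch, not driver code: the intended ordering of the CE
 * bring-up helpers defined above, with error handling reduced to the
 * essentials.
 */
static int example_ce_bringup(struct hif_softc *scn)
{
	int ret;

	if (hif_ce_open(scn) != QDF_STATUS_SUCCESS) /* ce-specific allocs */
		return -ENOMEM;

	hif_ce_prepare_config(scn);  /* select mission vs epping tables */

	ret = hif_wlan_enable(scn);  /* hand the tables to the platform */
	if (ret)
		return ret;

	/* init rings, irqs and the diag CE; hif_config_ce() unwinds
	 * itself via hif_unconfig_ce() on failure */
	return hif_config_ce(scn);
}
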
#ifdef IPA_OFFLOAD
/**
 * hif_ipa_get_ce_resource() - get uc resource on hif
 * @hif_ctx: bus context
 * @ce_sr_base_paddr: copy engine source ring base physical address
 * @ce_sr_ring_size: copy engine source ring size
 * @ce_reg_paddr: copy engine register physical address
 *
 * When the IPA micro controller data path offload feature is enabled,
 * HIF releases the copy engine related resource information to the IPA
 * UC, which then accesses the hardware resources using this information.
 *
 * Return: None
 */
void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
			     qdf_dma_addr_t *ce_sr_base_paddr,
			     uint32_t *ce_sr_ring_size,
			     qdf_dma_addr_t *ce_reg_paddr)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;

	ce_ipa_get_resource(ce_hdl, ce_sr_base_paddr, ce_sr_ring_size,
			    ce_reg_paddr);
}
#endif /* IPA_OFFLOAD */
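
#ifdef IPA_OFFLOAD
/*
 * Illustrative sketch, not driver code: an IPA-side caller fetching the
 * CE resources exported above. What the caller then does with the
 * addresses is outside the scope of this file.
 */
static void example_fetch_ipa_ce_resource(struct hif_opaque_softc *hif_ctx)
{
	qdf_dma_addr_t ce_sr_base_paddr, ce_reg_paddr;
	uint32_t ce_sr_ring_size;

	hif_ipa_get_ce_resource(hif_ctx, &ce_sr_base_paddr,
				&ce_sr_ring_size, &ce_reg_paddr);
	/* the addresses would now be handed to the IPA microcontroller */
}
#endif /* IPA_OFFLOAD */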

#ifdef ADRASTEA_SHADOW_REGISTERS

/*
   Current shadow register config

   -----------------------------------------------------------
   Shadow Register      |     CE   |    src/dst write index
   -----------------------------------------------------------
	0            |     0    |           src
	1     No Config - Doesn't point to anything
	2     No Config - Doesn't point to anything
	3            |     3    |           src
	4            |     4    |           src
	5            |     5    |           src
	6     No Config - Doesn't point to anything
	7            |     7    |           src
	8     No Config - Doesn't point to anything
	9     No Config - Doesn't point to anything
	10    No Config - Doesn't point to anything
	11    No Config - Doesn't point to anything
   -----------------------------------------------------------
	12    No Config - Doesn't point to anything
	13           |     1    |           dst
	14           |     2    |           dst
	15    No Config - Doesn't point to anything
	16    No Config - Doesn't point to anything
	17    No Config - Doesn't point to anything
	18    No Config - Doesn't point to anything
	19           |     7    |           dst
	20           |     8    |           dst
	21    No Config - Doesn't point to anything
	22    No Config - Doesn't point to anything
	23    No Config - Doesn't point to anything
   -----------------------------------------------------------


   ToDo - Move shadow register config to following in the future.
   This helps free up a block of shadow registers towards the end,
   which can be used for other purposes.

   -----------------------------------------------------------
   Shadow Register      |     CE   |    src/dst write index
   -----------------------------------------------------------
	0            |     0    |           src
	1            |     3    |           src
	2            |     4    |           src
	3            |     5    |           src
	4            |     7    |           src
   -----------------------------------------------------------
	5            |     1    |           dst
	6            |     2    |           dst
	7            |     7    |           dst
	8            |     8    |           dst
   -----------------------------------------------------------
	9     No Config - Doesn't point to anything
	12    No Config - Doesn't point to anything
	13    No Config - Doesn't point to anything
	14    No Config - Doesn't point to anything
	15    No Config - Doesn't point to anything
	16    No Config - Doesn't point to anything
	17    No Config - Doesn't point to anything
	18    No Config - Doesn't point to anything
	19    No Config - Doesn't point to anything
	20    No Config - Doesn't point to anything
	21    No Config - Doesn't point to anything
	22    No Config - Doesn't point to anything
	23    No Config - Doesn't point to anything
   -----------------------------------------------------------
*/

u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;

	switch (COPY_ENGINE_ID(ctrl_addr)) {
	case 0:
		addr = SHADOW_VALUE0;
		break;
	case 3:
		addr = SHADOW_VALUE3;
		break;
	case 4:
		addr = SHADOW_VALUE4;
		break;
	case 5:
		addr = SHADOW_VALUE5;
		break;
	case 7:
		addr = SHADOW_VALUE7;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr\n");
		QDF_ASSERT(0);
	}
	return addr;
}

u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;

	switch (COPY_ENGINE_ID(ctrl_addr)) {
	case 1:
		addr = SHADOW_VALUE13;
		break;
	case 2:
		addr = SHADOW_VALUE14;
		break;
	case 7:
		addr = SHADOW_VALUE19;
		break;
	case 8:
		addr = SHADOW_VALUE20;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr\n");
		QDF_ASSERT(0);
	}
	return addr;
}
#endif

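/*
 * Illustrative sketch, not driver code: how the helpers above would be
 * used to update a source-ring write index through its shadow register
 * (ADRASTEA_SHADOW_REGISTERS builds only). Assumes A_TARGET_WRITE(), the
 * generic register write accessor used elsewhere in HIF.
 */
#ifdef ADRASTEA_SHADOW_REGISTERS
static inline void example_shadow_src_wr_idx_set(struct hif_softc *scn,
						 uint32_t ctrl_addr,
						 unsigned int write_index)
{
	A_TARGET_WRITE(scn, shadow_sr_wr_ind_addr(scn, ctrl_addr),
		       write_index);
}
#endif
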
#if defined(FEATURE_LRO)
/**
 * ce_lro_flush_cb_register() - register the LRO flush
 * callback
 * @hif_hdl: HIF context
 * @handler: callback function
 * @data: opaque data pointer to be passed back
 *
 * Store the LRO flush callback provided
 *
 * Return: none
 */
void ce_lro_flush_cb_register(struct hif_opaque_softc *hif_hdl,
			      void (handler)(void *), void *data)
{
	uint8_t ul, dl;
	int ul_polled, dl_polled;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	QDF_ASSERT(scn != NULL);

	if (QDF_STATUS_SUCCESS !=
	    hif_map_service_to_pipe(hif_hdl, HTT_DATA_MSG_SVC,
				    &ul, &dl, &ul_polled, &dl_polled)) {
		HIF_ERROR("%s: cannot map service to pipe", __func__);
		return;
	} else {
		struct CE_state *ce_state;
		ce_state = scn->ce_id_to_state[dl];
		ce_state->lro_flush_cb = handler;
		ce_state->lro_data = data;
	}
}

/**
 * ce_lro_flush_cb_deregister() - deregister the LRO flush
 * callback
 * @hif_hdl: HIF context
 *
 * Remove the LRO flush callback
 *
 * Return: none
 */
void ce_lro_flush_cb_deregister(struct hif_opaque_softc *hif_hdl)
{
	uint8_t ul, dl;
	int ul_polled, dl_polled;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	QDF_ASSERT(scn != NULL);

	if (QDF_STATUS_SUCCESS !=
	    hif_map_service_to_pipe(hif_hdl, HTT_DATA_MSG_SVC,
				    &ul, &dl, &ul_polled, &dl_polled)) {
		HIF_ERROR("%s: cannot map service to pipe", __func__);
		return;
	} else {
		struct CE_state *ce_state;
		ce_state = scn->ce_id_to_state[dl];
		ce_state->lro_flush_cb = NULL;
		ce_state->lro_data = NULL;
	}
}
#endif
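
#if defined(FEATURE_LRO)
/*
 * Illustrative sketch, not driver code: wiring an LRO flush callback
 * into the HTT downlink CE with the helpers above. my_lro_flush() and
 * my_lro_ctx are placeholders, not real symbols.
 */
static void my_lro_flush(void *ctx)
{
	/* flush any LRO aggregation state tracked in ctx */
}

static void example_lro_wiring(struct hif_opaque_softc *hif_hdl,
			       void *my_lro_ctx)
{
	ce_lro_flush_cb_register(hif_hdl, my_lro_flush, my_lro_ctx);

	/* ... and on teardown: */
	ce_lro_flush_cb_deregister(hif_hdl);
}
#endif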

/**
 * hif_map_service_to_pipe() - returns the ce ids pertaining to
 * this service
 * @hif_hdl: hif_opaque_softc pointer.
 * @svc_id: Service ID for which the mapping is needed.
 * @ul_pipe: address of the container in which ul pipe is returned.
 * @dl_pipe: address of the container in which dl pipe is returned.
 * @ul_is_polled: address of the container in which a bool
 *			indicating if the UL CE for this service
 *			is polled is returned.
 * @dl_is_polled: address of the container in which a bool
 *			indicating if the DL CE for this service
 *			is polled is returned.
 *
 * Return: Indicates whether this operation was successful.
 */
int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
			    uint8_t *ul_pipe, uint8_t *dl_pipe,
			    int *ul_is_polled, int *dl_is_polled)
{
	int status = QDF_STATUS_SUCCESS;
	unsigned int i;
	struct service_to_pipe element;
	struct service_to_pipe *tgt_svc_map_to_use;
	size_t sz_tgt_svc_map_to_use;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	uint32_t mode = hif_get_conparam(scn);

	if (WLAN_IS_EPPING_ENABLED(mode)) {
		tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
		sz_tgt_svc_map_to_use =
			sizeof(target_service_to_ce_map_wlan_epping);
	} else {
		tgt_svc_map_to_use = target_service_to_ce_map_wlan;
		sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_wlan);
	}

	*dl_is_polled = 0;  /* polling for received messages not supported */

	for (i = 0; i < (sz_tgt_svc_map_to_use / sizeof(element)); i++) {
		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
		if (element.service_id == svc_id) {
			if (element.pipedir == PIPEDIR_OUT)
				*ul_pipe = element.pipenum;
			else if (element.pipedir == PIPEDIR_IN)
				*dl_pipe = element.pipenum;
		}
	}

	*ul_is_polled =
		(host_ce_config[*ul_pipe].flags & CE_ATTR_DISABLE_INTR) != 0;

	return status;
}
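
/*
 * Illustrative sketch, not driver code: resolving the CE pipes for the
 * HTT data service, the same lookup the LRO helpers above perform.
 */
static void example_lookup_htt_pipes(struct hif_opaque_softc *hif_hdl)
{
	uint8_t ul_pipe, dl_pipe;
	int ul_polled, dl_polled;

	if (hif_map_service_to_pipe(hif_hdl, HTT_DATA_MSG_SVC,
				    &ul_pipe, &dl_pipe,
				    &ul_polled, &dl_polled) !=
	    QDF_STATUS_SUCCESS)
		return;

	/* with target_service_to_ce_map_wlan: ul_pipe == 4, dl_pipe == 1 */
}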

#ifdef SHADOW_REG_DEBUG
inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, srri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);

	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != srri_from_ddr) {
		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x\n",
			  srri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return srri_from_ddr;
}


inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, drri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);

	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != drri_from_ddr) {
		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x\n",
			  drri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return drri_from_ddr;
}

#endif

#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_get_src_ring_read_index(): Called to get the SRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the SRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based SRRI.
 *
 * Return: SRRI
 */
inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;

	attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
	if (attr.flags & CE_ATTR_DISABLE_INTR)
		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	else
		return A_TARGET_READ(scn,
				(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
}

/**
 * hif_get_dst_ring_read_index(): Called to get the DRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the DRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based DRRI.
 *
 * Return: DRRI
 */
inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;

	attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];

	if (attr.flags & CE_ATTR_DISABLE_INTR)
		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	else
		return A_TARGET_READ(scn,
				(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
}

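/*
 * Illustrative sketch, not driver code: a polled completion check built
 * on the accessor above. Real ring state (sw_index) lives in
 * struct CE_ring_state; a bare index is used here to keep the example
 * self-contained.
 */
static inline bool example_src_ring_has_completions(struct hif_softc *scn,
						    uint32_t ctrl_addr,
						    unsigned int sw_index)
{
	return hif_get_src_ring_read_index(scn, ctrl_addr) != sw_index;
}
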
/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This function allocates non cached memory on ddr and sends
 * the physical address of this memory to the CE hardware. The
 * hardware updates the RRI on this particular location.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
	unsigned int i;
	qdf_dma_addr_t paddr_rri_on_ddr;
	uint32_t high_paddr, low_paddr;

	scn->vaddr_rri_on_ddr =
		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
			scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
			&paddr_rri_on_ddr);

	low_paddr = BITS0_TO_31(paddr_rri_on_ddr);
	high_paddr = BITS32_TO_35(paddr_rri_on_ddr);

	HIF_ERROR("%s using srri and drri from DDR\n", __func__);

	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);

	for (i = 0; i < CE_COUNT; i++)
		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));

	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));
}
#else

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This is a dummy implementation for platforms that don't
 * support this functionality.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
}
#endif

/**
 * hif_dump_ce_registers() - dump ce registers
 * @scn: hif_softc pointer.
 *
 * Output the copy engine registers
 *
 * Return: 0 for success or error code
 */
int hif_dump_ce_registers(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
	uint32_t ce_reg_values[CE_COUNT_MAX][CE_USEFUL_SIZE >> 2];
	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
	uint16_t i;
	QDF_STATUS status;

	for (i = 0; i < CE_COUNT_MAX; i++, ce_reg_address += CE_OFFSET) {
		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
					   (uint8_t *) &ce_reg_values[i][0],
					   ce_reg_word_size * sizeof(uint32_t));

		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("Dumping CE register failed!");
			return -EACCES;
		}
		HIF_ERROR("CE%d Registers:", i);
		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
				   (uint8_t *) &ce_reg_values[i][0],
				   ce_reg_word_size * sizeof(uint32_t));
	}
	return 0;
}
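
/*
 * Illustrative sketch, not driver code: invoking the dump above from an
 * error path, e.g. after a target crash indication.
 */
static void example_on_target_failure(struct hif_softc *scn)
{
	if (hif_dump_ce_registers(scn))
		HIF_ERROR("%s: CE register dump failed", __func__);
}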