/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */
#include <osdep.h>
#include "a_types.h"
#include "athdefs.h"
#include "osapi_linux.h"
#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include <bmi_msg.h>
#include "hif_io32.h"
#include <hif.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#ifdef HIF_PCI
#include "ce_bmi.h"
#endif
#include "ce_api.h"
#include "qdf_trace.h"
#ifdef CONFIG_CNSS
#include <net/cnss.h>
#endif
#include "epping_main.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#ifdef HIF_PCI
#include "icnss_stub.h"
#else
#include <soc/qcom/icnss.h>
#endif
#include "qwlan_version.h"

#define CE_POLL_TIMEOUT 10      /* ms */

/* Forward references */
static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix for EV118783: poll to check whether a BMI response has arrived,
 * rather than waiting for an interrupt that may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC 1000


static int hif_post_recv_buffers(struct hif_softc *scn);
static void hif_config_rri_on_ddr(struct hif_softc *scn);

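/**
 * ce_poll_timeout() - timer callback that services a copy engine in poll mode
 * @arg: opaque pointer to the struct CE_state being polled
 *
 * Services the copy engine, then re-arms the poll timer with
 * CE_POLL_TIMEOUT as long as the timer is still marked as initialized.
 */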
static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;

	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}

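/**
 * roundup_pwr2() - round a ring size up to the next power of two
 * @n: requested number of entries
 *
 * CE rings must be sized as a power of two so that nentries - 1 can be
 * used as an index mask. For example, a request for 50 entries is
 * rounded up to 64, while 64 is returned unchanged. Values too large
 * to round up trigger QDF_ASSERT and return 0.
 */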
static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	QDF_ASSERT(0); /* n too large */
	return 0;
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings, or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 *
 * This should be called during the initialization sequence before
 * interrupts are enabled, so we don't have to worry about thread safety.
 */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr)
{
	struct CE_state *CE_state;
	uint32_t ctrl_addr;
	unsigned int nentries;
	qdf_dma_addr_t base_addr;
	bool malloc_CE_state = false;
	bool malloc_src_ring = false;

	QDF_ASSERT(CE_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(CE_id);
	CE_state = scn->ce_id_to_state[CE_id];

	if (!CE_state) {
		CE_state =
		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
		if (!CE_state) {
			HIF_ERROR("%s: CE_state has no mem", __func__);
			return NULL;
		}
		malloc_CE_state = true;
		qdf_mem_zero(CE_state, sizeof(*CE_state));
		scn->ce_id_to_state[CE_id] = CE_state;
		qdf_spinlock_create(&CE_state->ce_index_lock);

		CE_state->id = CE_id;
		CE_state->ctrl_addr = ctrl_addr;
		CE_state->state = CE_RUNNING;
		CE_state->attr_flags = attr->flags;
	}
	CE_state->scn = scn;

	qdf_atomic_init(&CE_state->rx_pending);
	if (attr == NULL) {
		/* Already initialized; caller wants the handle */
		return (struct CE_handle *)CE_state;
	}

#ifdef ADRASTEA_SHADOW_REGISTERS
	HIF_ERROR("%s: Using Shadow Registers instead of CE Registers\n",
		  __func__);
#endif

	if (CE_state->src_sz_max)
		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
	else
		CE_state->src_sz_max = attr->src_sz_max;

	ce_init_ce_desc_event_log(CE_id,
			attr->src_nentries + attr->dest_nentries);

	/* source ring setup */
	nentries = attr->src_nentries;
	if (nentries) {
		struct CE_ring_state *src_ring;
		unsigned CE_nbytes;
		char *ptr;
		uint64_t dma_addr;

		nentries = roundup_pwr2(nentries);
		if (CE_state->src_ring) {
			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
		} else {
			CE_nbytes = sizeof(struct CE_ring_state)
				    + (nentries * sizeof(void *));
			ptr = qdf_mem_malloc(CE_nbytes);
			if (!ptr) {
				/* Cannot allocate the src ring. If the
				 * CE_state was allocated locally, free
				 * it and return an error.
				 */
				HIF_ERROR("%s: src ring has no mem", __func__);
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					scn->ce_id_to_state[CE_id] = NULL;
					qdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			} else {
				/* src ring allocated; mark that it was
				 * allocated locally
				 */
				malloc_src_ring = true;
			}
			qdf_mem_zero(ptr, CE_nbytes);

			src_ring = CE_state->src_ring =
				(struct CE_ring_state *)ptr;
			ptr += sizeof(struct CE_ring_state);
			src_ring->nentries = nentries;
			src_ring->nentries_mask = nentries - 1;
			A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
			src_ring->hw_index =
				CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
			src_ring->sw_index = src_ring->hw_index;
			src_ring->write_index =
				CE_SRC_RING_WRITE_IDX_GET(scn, ctrl_addr);
			A_TARGET_ACCESS_END_RET_PTR(scn);
			src_ring->low_water_mark_nentries = 0;
			src_ring->high_water_mark_nentries = nentries;
			src_ring->per_transfer_context = (void **)ptr;

			/* Legacy platforms that do not support cache
			 * coherent DMA are unsupported
			 */
			src_ring->base_addr_owner_space_unaligned =
				qdf_mem_alloc_consistent(scn->qdf_dev,
						scn->qdf_dev->dev,
						(nentries *
						 sizeof(struct CE_src_desc) +
						 CE_DESC_RING_ALIGN),
						&base_addr);
			if (src_ring->base_addr_owner_space_unaligned
			    == NULL) {
				HIF_ERROR("%s: src ring has no DMA mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->base_addr_CE_space_unaligned = base_addr;

			if (src_ring->base_addr_CE_space_unaligned &
			    (CE_DESC_RING_ALIGN - 1)) {
				src_ring->base_addr_CE_space =
					(src_ring->base_addr_CE_space_unaligned
					 + CE_DESC_RING_ALIGN - 1)
					& ~(CE_DESC_RING_ALIGN - 1);

				src_ring->base_addr_owner_space =
					(void *)(((size_t)src_ring->
					 base_addr_owner_space_unaligned +
					 CE_DESC_RING_ALIGN - 1)
					 & ~(CE_DESC_RING_ALIGN - 1));
			} else {
				src_ring->base_addr_CE_space =
					src_ring->base_addr_CE_space_unaligned;
				src_ring->base_addr_owner_space =
					src_ring->
					base_addr_owner_space_unaligned;
			}
			/*
			 * Also allocate a shadow src ring in
			 * regular mem to use for faster access.
			 */
			src_ring->shadow_base_unaligned =
				qdf_mem_malloc(nentries *
					       sizeof(struct CE_src_desc) +
					       CE_DESC_RING_ALIGN);
			if (src_ring->shadow_base_unaligned == NULL) {
				HIF_ERROR("%s: src ring no shadow_base mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->shadow_base = (struct CE_src_desc *)
				(((size_t)src_ring->shadow_base_unaligned +
				  CE_DESC_RING_ALIGN - 1) &
				 ~(CE_DESC_RING_ALIGN - 1));

			A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
			dma_addr = src_ring->base_addr_CE_space;
			CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
				(uint32_t)(dma_addr & 0xFFFFFFFF));
#ifdef WLAN_ENABLE_QCA6180
			{
				uint32_t tmp;

				tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(
						scn, ctrl_addr);
				tmp &= ~0x1F;
				dma_addr = ((dma_addr >> 32) & 0x1F) | tmp;
				CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
					ctrl_addr, (uint32_t)dma_addr);
			}
#endif
			CE_SRC_RING_SZ_SET(scn, ctrl_addr, nentries);
			CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
#ifdef BIG_ENDIAN_HOST
			/* Enable source ring byte swap for big endian host */
			CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
			CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
			CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
			A_TARGET_ACCESS_END_RET_PTR(scn);
		}
	}

	/* destination ring setup */
	nentries = attr->dest_nentries;
	if (nentries) {
		struct CE_ring_state *dest_ring;
		unsigned CE_nbytes;
		char *ptr;
		uint64_t dma_addr;

		nentries = roundup_pwr2(nentries);
		if (CE_state->dest_ring) {
			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
		} else {
			CE_nbytes = sizeof(struct CE_ring_state)
				    + (nentries * sizeof(void *));
			ptr = qdf_mem_malloc(CE_nbytes);
			if (!ptr) {
				/* Cannot allocate the dest ring. If the
				 * CE_state or src ring was allocated
				 * locally, free it and return an error.
				 */
				HIF_ERROR("%s: dest ring has no mem",
					  __func__);
				if (malloc_src_ring) {
					qdf_mem_free(CE_state->src_ring);
					CE_state->src_ring = NULL;
					malloc_src_ring = false;
				}
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					scn->ce_id_to_state[CE_id] = NULL;
					qdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			}
			qdf_mem_zero(ptr, CE_nbytes);

			dest_ring = CE_state->dest_ring =
				(struct CE_ring_state *)ptr;
			ptr += sizeof(struct CE_ring_state);
			dest_ring->nentries = nentries;
			dest_ring->nentries_mask = nentries - 1;
			A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
			dest_ring->sw_index =
				CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
			dest_ring->write_index =
				CE_DEST_RING_WRITE_IDX_GET(scn, ctrl_addr);
			A_TARGET_ACCESS_END_RET_PTR(scn);
			dest_ring->low_water_mark_nentries = 0;
			dest_ring->high_water_mark_nentries = nentries;
			dest_ring->per_transfer_context = (void **)ptr;

			/* Legacy platforms that do not support cache
			 * coherent DMA are unsupported
			 */
			dest_ring->base_addr_owner_space_unaligned =
				qdf_mem_alloc_consistent(scn->qdf_dev,
						scn->qdf_dev->dev,
						(nentries *
						 sizeof(struct CE_dest_desc) +
						 CE_DESC_RING_ALIGN),
						&base_addr);
			if (dest_ring->base_addr_owner_space_unaligned
			    == NULL) {
				HIF_ERROR("%s: dest ring has no DMA mem",
					  __func__);
				goto error_no_dma_mem;
			}
			dest_ring->base_addr_CE_space_unaligned = base_addr;

			/* Initialize the memory to 0 so that no garbage
			 * data crashes the system while firmware is
			 * being downloaded.
			 */
			qdf_mem_zero(dest_ring->base_addr_owner_space_unaligned,
				     nentries * sizeof(struct CE_dest_desc) +
				     CE_DESC_RING_ALIGN);

			if (dest_ring->base_addr_CE_space_unaligned &
			    (CE_DESC_RING_ALIGN - 1)) {

				dest_ring->base_addr_CE_space =
					(dest_ring->
					 base_addr_CE_space_unaligned +
					 CE_DESC_RING_ALIGN - 1)
					& ~(CE_DESC_RING_ALIGN - 1);

				dest_ring->base_addr_owner_space =
					(void *)(((size_t)dest_ring->
					 base_addr_owner_space_unaligned +
					 CE_DESC_RING_ALIGN - 1)
					 & ~(CE_DESC_RING_ALIGN - 1));
			} else {
				dest_ring->base_addr_CE_space =
					dest_ring->base_addr_CE_space_unaligned;
				dest_ring->base_addr_owner_space =
					dest_ring->
					base_addr_owner_space_unaligned;
			}

			A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
			dma_addr = dest_ring->base_addr_CE_space;
			CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
				(uint32_t)(dma_addr & 0xFFFFFFFF));
#ifdef WLAN_ENABLE_QCA6180
			{
				uint32_t tmp;

				tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn,
						ctrl_addr);
				tmp &= ~0x1F;
				dma_addr = ((dma_addr >> 32) & 0x1F) | tmp;
				CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
					ctrl_addr, (uint32_t)dma_addr);
			}
#endif
			CE_DEST_RING_SZ_SET(scn, ctrl_addr, nentries);
#ifdef BIG_ENDIAN_HOST
			/* Enable dest ring byte swap for big endian host */
			CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
			CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
			CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
			A_TARGET_ACCESS_END_RET_PTR(scn);

			/* epping */
			/* poll timer */
			if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL)) {
				qdf_timer_init(scn->qdf_dev,
					       &CE_state->poll_timer,
					       ce_poll_timeout,
					       CE_state,
					       QDF_TIMER_TYPE_SW);
				CE_state->timer_inited = true;
				qdf_timer_mod(&CE_state->poll_timer,
					      CE_POLL_TIMEOUT);
			}
		}
	}

	/* Enable CE error interrupts */
	A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
	CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
	A_TARGET_ACCESS_END_RET_PTR(scn);

	return (struct CE_handle *)CE_state;

error_no_dma_mem:
	ce_fini((struct CE_handle *)CE_state);
	return NULL;
}

#ifdef WLAN_FEATURE_FASTPATH
/**
 * ce_h2t_tx_ce_cleanup() - placeholder function for H2T CE cleanup
 * @ce_hdl: copy engine handle
 *
 * No processing is required inside this function; using an assert, it
 * verifies that the TX CE has been processed completely.
 *
 * This is called while dismantling CE structures. No other thread
 * should be using these structures while dismantling is occurring,
 * therefore no locking is needed.
 *
 * Return: none
 */
void
ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct hif_softc *sc = ce_state->scn;
	uint32_t sw_index, write_index;

	if (sc->fastpath_mode_on && (ce_state->id == CE_HTT_H2T_MSG)) {
		HIF_INFO("%s %d Fastpath mode ON, cleaning up HTT Tx CE\n",
			 __func__, __LINE__);
		sw_index = src_ring->sw_index;
		write_index = src_ring->sw_index;

		/* At this point the Tx CE should be clean */
		qdf_assert_always(sw_index == write_index);
	}
}
#else
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
}
#endif /* WLAN_FEATURE_FASTPATH */
496
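/**
 * ce_fini() - tear down a copy engine and free its resources
 * @copyeng: copy engine handle returned by ce_init()
 *
 * Marks the CE unused, cleans up the HTT Tx ring if a source ring was
 * allocated, releases the shadow ring and the DMA-coherent ring memory,
 * frees the poll timer, and finally frees the CE state itself.
 */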
void ce_fini(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int CE_id = CE_state->id;
	struct hif_softc *scn = CE_state->scn;

	CE_state->state = CE_UNUSED;
	scn->ce_id_to_state[CE_id] = NULL;
	if (CE_state->src_ring) {
		/* Cleanup the HTT Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		if (CE_state->src_ring->shadow_base_unaligned)
			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
		if (CE_state->src_ring->base_addr_owner_space_unaligned)
			qdf_mem_free_consistent(scn->qdf_dev,
					scn->qdf_dev->dev,
					(CE_state->src_ring->nentries *
					 sizeof(struct CE_src_desc) +
					 CE_DESC_RING_ALIGN),
					CE_state->src_ring->
					base_addr_owner_space_unaligned,
					CE_state->src_ring->
					base_addr_CE_space, 0);
		qdf_mem_free(CE_state->src_ring);
	}
	if (CE_state->dest_ring) {
		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
			qdf_mem_free_consistent(scn->qdf_dev,
					scn->qdf_dev->dev,
					(CE_state->dest_ring->nentries *
					 sizeof(struct CE_dest_desc) +
					 CE_DESC_RING_ALIGN),
					CE_state->dest_ring->
					base_addr_owner_space_unaligned,
					CE_state->dest_ring->
					base_addr_CE_space, 0);
		qdf_mem_free(CE_state->dest_ring);

		/* epping */
		if (CE_state->timer_inited) {
			CE_state->timer_inited = false;
			qdf_timer_free(&CE_state->poll_timer);
		}
	}
	qdf_mem_free(CE_state);
}

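/**
 * hif_detach_htc() - detach the HTC layer from HIF
 * @hif_ctx: opaque HIF context
 *
 * Clears both the pending and the currently installed HTC message
 * callback sets so that no further callbacks are delivered.
 */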
void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	qdf_mem_zero(&hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
	qdf_mem_zero(&hif_state->msg_callbacks_current,
		     sizeof(hif_state->msg_callbacks_current));
}

/**
 * hif_send_head() - send the first nbytes bytes of an nbuf via a pipe
 * @hif_ctx: opaque HIF context
 * @pipe: pipe (copy engine) to send on
 * @transfer_id: upper-layer transfer ID
 * @nbytes: number of bytes to send; must not exceed the nbuf length
 * @nbuf: network buffer holding the fragments to download
 * @data_attr: descriptor attribute flags
 *
 * Builds a CE sendlist from the nbuf fragments and enqueues it on the
 * pipe's copy engine, after checking that enough send slots are free.
 *
 * Return: QDF_STATUS_SUCCESS on success, an error status otherwise
 */
QDF_STATUS
hif_send_head(struct hif_opaque_softc *hif_ctx,
	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
	      qdf_nbuf_t nbuf, unsigned int data_attr)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	int bytes = nbytes, nfrags = 0;
	struct ce_sendlist sendlist;
	int status, i = 0;
	unsigned int mux_id = 0;

	QDF_ASSERT(nbytes <= qdf_nbuf_len(nbuf));

	transfer_id =
		(mux_id & MUX_ID_MASK) |
		(transfer_id & TRANSACTION_ID_MASK);
	data_attr &= DESC_DATA_FLAG_MASK;
	/*
	 * The common case involves sending multiple fragments within a
	 * single download (the tx descriptor and the tx frame header).
	 * So, optimize for the case of multiple fragments by not even
	 * checking whether it's necessary to use a sendlist.
	 * The overhead of using a sendlist for a single buffer download
	 * is not a big deal, since it happens rarely (for WMI messages).
	 */
	ce_sendlist_init(&sendlist);
	do {
		qdf_dma_addr_t frag_paddr;
		int frag_bytes;

		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
		/*
		 * Clear the packet offset for all but the first CE desc.
		 */
		if (i++ > 0)
			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;

		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
					     frag_bytes >
					     bytes ? bytes : frag_bytes,
					     qdf_nbuf_get_frag_is_wordstream
					     (nbuf,
					      nfrags) ? 0 :
					     CE_SEND_FLAG_SWAP_DISABLE,
					     data_attr);
		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("%s: error, frag_num %d larger than limit",
				  __func__, nfrags);
			return status;
		}
		bytes -= frag_bytes;
		nfrags++;
	} while (bytes > 0);

	/* Make sure we have resources to handle this request */
	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	if (pipe_info->num_sends_allowed < nfrags) {
		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
		return QDF_STATUS_E_RESOURCES;
	}
	pipe_info->num_sends_allowed -= nfrags;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);

	if (qdf_unlikely(ce_hdl == NULL)) {
		HIF_ERROR("%s: error CE handle is null", __func__);
		return A_ERROR;
	}

	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
			     (uint8_t *)(qdf_nbuf_data(nbuf)),
			     sizeof(qdf_nbuf_data(nbuf))));
	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
	QDF_ASSERT(status == QDF_STATUS_SUCCESS);

	return status;
}

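/**
 * hif_send_complete_check() - reap send completions on a pipe
 * @hif_ctx: opaque HIF context
 * @pipe: pipe (copy engine) to check
 * @force: when zero, skip the check while plenty of send slots remain
 *
 * Polling for completions requires a CE register read, which is
 * relatively expensive, so unless @force is set the check is skipped
 * while more than half of the pipe's send resources are still free.
 */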
void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
			     int force)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance. If there seem to be plenty of
		 * resources left, then just wait, since checking involves
		 * reading a CE register, which is a relatively expensive
		 * operation.
		 */
		resources = hif_get_free_queue_number(hif_ctx, pipe);
		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config[pipe].src_nentries >> 1)) {
			return;
		}
	}
#ifdef ATH_11AC_TXCOMPACT
	ce_per_engine_servicereap(scn, pipe);
#else
	ce_per_engine_service(scn, pipe);
#endif
}

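/**
 * hif_get_free_queue_number() - number of free send slots on a pipe
 * @hif_ctx: opaque HIF context
 * @pipe: pipe (copy engine) to query
 *
 * Return: the current num_sends_allowed count, read under the
 * completion free-queue lock.
 */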
uint16_t
hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	uint16_t rv;

	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	rv = pipe_info->num_sends_allowed;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
	return rv;
}

/* Called by lower (CE) layer when a send to Target completes. */
void
hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int sw_index, unsigned int hw_index,
		     unsigned int toeplitz_hash_result)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	unsigned int sw_idx = sw_index, hw_idx = hw_index;
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	do {
		/*
		 * The upper layer callback will be triggered
		 * when the last fragment is completed.
		 */
		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
			if (scn->target_status
			    == OL_TRGET_STATUS_RESET)
				qdf_nbuf_free(transfer_context);
			else
				msg_callbacks->txCompletionHandler(
					msg_callbacks->Context,
					transfer_context, transfer_id,
					toeplitz_hash_result);
		}

		qdf_spin_lock(&pipe_info->completion_freeq_lock);
		pipe_info->num_sends_allowed++;
		qdf_spin_unlock(&pipe_info->completion_freeq_lock);
	} while (ce_completed_send_next(copyeng,
			&ce_context, &transfer_context,
			&CE_data, &nbytes, &transfer_id,
			&sw_idx, &hw_idx,
			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
}

/**
 * hif_ce_do_recv(): send message from copy engine to upper layers
 * @msg_callbacks: structure containing callback and callback context
 * @netbuf: skb containing message
 * @nbytes: number of bytes in the message
 * @pipe_info: used for the pipe_number info
 *
 * Checks the packet length, configures the length in the netbuf,
 * and calls the upper layer callback.
 *
 * Return: None
 */
static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
				  qdf_nbuf_t netbuf, int nbytes,
				  struct HIF_CE_pipe_info *pipe_info) {
	if (nbytes <= pipe_info->buf_sz) {
		qdf_nbuf_set_pktlen(netbuf, nbytes);
		msg_callbacks->
			rxCompletionHandler(msg_callbacks->Context,
					    netbuf, pipe_info->pipe_num);
	} else {
		HIF_ERROR("%s: Invalid Rx msg buf:%p nbytes:%d",
			  __func__, netbuf, nbytes);
		qdf_nbuf_free(netbuf);
	}
}

/* Called by lower (CE) layer when data is received from the Target. */
void
hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int flags)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct CE_state *ce_state = (struct CE_state *) copyeng;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(hif_state);
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;
	uint32_t count;

	do {
		hif_pm_runtime_mark_last_busy(hif_sc->dev);
		qdf_nbuf_unmap_single(scn->qdf_dev,
				      (qdf_nbuf_t) transfer_context,
				      QDF_DMA_FROM_DEVICE);

		atomic_inc(&pipe_info->recv_bufs_needed);
		hif_post_recv_buffers_for_pipe(pipe_info);
		if (scn->target_status == OL_TRGET_STATUS_RESET)
			qdf_nbuf_free(transfer_context);
		else
			hif_ce_do_recv(msg_callbacks, transfer_context,
				       nbytes, pipe_info);

		/* Set up the force_break flag if the number of receives
		 * reaches MAX_NUM_OF_RECEIVES
		 */
		ce_state->receive_count++;
		count = ce_state->receive_count;
		if (qdf_unlikely(hif_max_num_receives_reached(scn, count))) {
			ce_state->force_break = 1;
			break;
		}
	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
					&CE_data, &nbytes, &transfer_id,
					&flags) == QDF_STATUS_SUCCESS);
}

/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */

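/**
 * hif_post_init() - save HTC message callbacks for later installation
 * @hif_ctx: opaque HIF context
 * @unused: unused argument
 * @callbacks: message callbacks to save
 *
 * The callbacks are stored as pending and only installed into the
 * current set later by hif_msg_callbacks_install().
 */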
void
hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
	      struct hif_msg_callbacks *callbacks)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	spin_lock_init(&pcie_access_log_lock);
#endif
	/* Save callbacks for later installation */
	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
		     sizeof(hif_state->msg_callbacks_pending));
}

int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
{
	struct CE_handle *ce_diag = hif_state->ce_diag;
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	struct hif_msg_callbacks *hif_msg_callbacks =
		&hif_state->msg_callbacks_current;

	/* daemonize("hif_compl_thread"); */

	if (scn->ce_count == 0) {
		HIF_ERROR("%s: Invalid ce_count\n", __func__);
		return -EINVAL;
	}

	if (!hif_msg_callbacks ||
	    !hif_msg_callbacks->rxCompletionHandler ||
	    !hif_msg_callbacks->txCompletionHandler) {
		HIF_ERROR("%s: no completion handler registered", __func__);
		return -EFAULT;
	}

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr attr;
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl == ce_diag) {
			continue;       /* Handle Diagnostic CE specially */
		}
		attr = host_ce_config[pipe_num];
		if (attr.src_nentries) {
			/* pipe used to send to target */
			HIF_INFO_MED("%s: pipe_num:%d pipe_info:0x%p",
				     __func__, pipe_num, pipe_info);
			ce_send_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_send_done, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
			pipe_info->num_sends_allowed = attr.src_nentries - 1;
		}
		if (attr.dest_nentries) {
			/* pipe used to receive from target */
			ce_recv_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_recv_data, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
		}

		if (attr.src_nentries)
			qdf_spinlock_create(&pipe_info->completion_freeq_lock);
	}

	A_TARGET_ACCESS_UNLIKELY(scn);
	return 0;
}

/*
 * Install pending msg callbacks.
 *
 * TBDXXX: This hack is needed because upper layers install msg callbacks
 * for use with HTC before BMI is done; yet this HIF implementation
 * needs to continue to use BMI msg callbacks. Really, upper layers
 * should not register HTC callbacks until AFTER BMI phase.
 */
static void hif_msg_callbacks_install(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	qdf_mem_copy(&hif_state->msg_callbacks_current,
		     &hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
}

void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
			  uint8_t *DLPipe)
{
	int ul_is_polled, dl_is_polled;

	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
}

/**
 * hif_dump_pipe_debug_count() - Log error count
 * @scn: hif_softc pointer.
 *
 * Output the pipe error counts of each pipe to log file
 *
 * Return: N/A
 */
void hif_dump_pipe_debug_count(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num;

	if (hif_state == NULL) {
		HIF_ERROR("%s hif_state is NULL", __func__);
		return;
	}
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];

		if (pipe_info->nbuf_alloc_err_count > 0 ||
		    pipe_info->nbuf_dma_err_count > 0 ||
		    pipe_info->nbuf_ce_enqueue_err_count)
			HIF_ERROR(
				"%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_alloc_err_count,
				pipe_info->nbuf_dma_err_count,
				pipe_info->nbuf_ce_enqueue_err_count);
	}
}

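/**
 * hif_post_recv_buffers_for_pipe() - replenish receive buffers on a pipe
 * @pipe_info: pipe to replenish
 *
 * Allocates, DMA-maps, and enqueues one receive nbuf on the pipe's copy
 * engine for each buffer the pipe currently needs, tracking allocation,
 * mapping, and enqueue failures in the pipe's error counters.
 *
 * Return: 0 when all needed buffers were posted, 1 on failure
 */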
static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	qdf_size_t buf_sz;
	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
	QDF_STATUS ret;
	uint32_t bufs_posted = 0;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return 0;
	}

	ce_hdl = pipe_info->ce_hdl;

	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
		qdf_dma_addr_t CE_data;        /* CE space buffer address */
		qdf_nbuf_t nbuf;
		int status;

		atomic_dec(&pipe_info->recv_bufs_needed);
		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
		if (!nbuf) {
			qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
			pipe_info->nbuf_alloc_err_count++;
			qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
			HIF_ERROR(
				"%s buf alloc error [%d] needed %d, nbuf_alloc_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_alloc_err_count);
			atomic_inc(&pipe_info->recv_bufs_needed);
			return 1;
		}

		/*
		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
		 * CE_data = dma_map_single(dev, data, buf_sz, );
		 * DMA_FROM_DEVICE);
		 */
		ret =
			qdf_nbuf_map_single(scn->qdf_dev, nbuf,
					    QDF_DMA_FROM_DEVICE);

		if (unlikely(ret != QDF_STATUS_SUCCESS)) {
			qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
			pipe_info->nbuf_dma_err_count++;
			qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
			HIF_ERROR(
				"%s buf map error [%d] needed %d, nbuf_dma_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_dma_err_count);
			qdf_nbuf_free(nbuf);
			atomic_inc(&pipe_info->recv_bufs_needed);
			return 1;
		}

		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);

		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
						   buf_sz, DMA_FROM_DEVICE);
		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		if (status != EOK) {
			qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
			pipe_info->nbuf_ce_enqueue_err_count++;
			qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
			HIF_ERROR(
				"%s buf enqueue error [%d] needed %d, nbuf_ce_enqueue_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_ce_enqueue_err_count);
			atomic_inc(&pipe_info->recv_bufs_needed);
			qdf_nbuf_free(nbuf);
			return 1;
		}

		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
		bufs_posted++;
	}
	pipe_info->nbuf_alloc_err_count =
		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
	pipe_info->nbuf_dma_err_count =
		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
	pipe_info->nbuf_ce_enqueue_err_count =
		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
		pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;

	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

	return 0;
}

/*
 * Try to post all desired receive buffers for all pipes.
 * Returns 0 if all desired buffers are posted,
 * non-zero if we were unable to completely
 * replenish receive buffers.
 */
static int hif_post_recv_buffers(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num, rv = 0;

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (hif_post_recv_buffers_for_pipe(pipe_info)) {
			rv = 1;
			goto done;
		}
	}

done:
	A_TARGET_ACCESS_UNLIKELY(scn);

	return rv;
}

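/**
 * hif_start() - start the HIF CE layer
 * @hif_ctx: opaque HIF context
 *
 * Installs the pending message callbacks, registers the per-pipe send
 * and receive completion handlers, and posts the initial set of
 * receive buffers.
 *
 * Return: QDF_STATUS_SUCCESS on success
 */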
QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_msg_callbacks_install(scn);

	if (hif_completion_thread_startup(hif_state))
		return QDF_STATUS_E_FAILURE;

	/* Post buffers once to start things off. */
	(void)hif_post_recv_buffers(scn);

	hif_state->started = true;

	return QDF_STATUS_SUCCESS;
}

#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_enable_fastpath() - update that we have enabled fastpath mode
 * @hif_ctx: HIF context
 *
 * For use in data path
 *
 * Return: void
 */
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	HIF_INFO("Enabling fastpath mode\n");
	scn->fastpath_mode_on = 1;
}

/**
 * hif_is_fastpath_mode_enabled() - API to query if fastpath mode is enabled
 * @hif_ctx: HIF Context
 *
 * For use in data path to skip HTC
 *
 * Return: bool
 */
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->fastpath_mode_on;
}

/**
 * hif_get_ce_handle() - API to get CE handle for fastpath mode
 * @hif_ctx: HIF Context
 * @id: CopyEngine Id
 *
 * API to return CE handle for fastpath mode
 *
 * Return: opaque CE handle
 */
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->ce_id_to_state[id];
}
#endif /* WLAN_FEATURE_FASTPATH */

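/**
 * hif_recv_buffer_cleanup_on_pipe() - reclaim posted receive buffers
 * @pipe_info: pipe being torn down
 *
 * Revokes every receive buffer still enqueued on the pipe's copy
 * engine, unmaps it, and frees the underlying nbuf.
 */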
void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct hif_softc *scn;
	struct CE_handle *ce_hdl;
	uint32_t buf_sz;
	struct HIF_CE_state *hif_state;
	qdf_nbuf_t netbuf;
	qdf_dma_addr_t CE_data;
	void *per_CE_context;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started) {
		return;
	}

	scn = HIF_GET_SOFTC(hif_state);
	ce_hdl = pipe_info->ce_hdl;

	if (scn->qdf_dev == NULL) {
		return;
	}
	while (ce_revoke_recv_next
		       (ce_hdl, &per_CE_context, (void **)&netbuf,
			&CE_data) == QDF_STATUS_SUCCESS) {
		qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
				      QDF_DMA_FROM_DEVICE);
		qdf_nbuf_free(netbuf);
	}
}

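/**
 * hif_send_buffer_cleanup_on_pipe() - reclaim pending send buffers
 * @pipe_info: pipe being torn down
 *
 * Cancels every send still queued on the pipe's copy engine and
 * reports the completion to the upper layer so it can free the buffer,
 * except for packets queued on the HTC endpoint, which have already
 * been freed elsewhere.
 */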
void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	struct HIF_CE_state *hif_state;
	struct hif_softc *scn;
	qdf_nbuf_t netbuf;
	void *per_CE_context;
	qdf_dma_addr_t CE_data;
	unsigned int nbytes;
	unsigned int id;
	uint32_t buf_sz;
	uint32_t toeplitz_hash_result;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started) {
		return;
	}

	scn = HIF_GET_SOFTC(hif_state);

	ce_hdl = pipe_info->ce_hdl;

	while (ce_cancel_send_next
		       (ce_hdl, &per_CE_context,
			(void **)&netbuf, &CE_data, &nbytes,
			&id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
			/*
			 * Packets enqueued by htt_h2t_ver_req_msg() and
			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
			 * freed in htt_htc_misc_pkt_pool_free() in
			 * wlantl_close(), so do not free them here again
			 * by checking whether it's the endpoint
			 * which they are queued in.
			 */
			if (id == scn->htc_endpoint)
				return;
			/* Indicate the completion to higher
			 * layers to free the buffer
			 */
			hif_state->msg_callbacks_current.
				txCompletionHandler(hif_state->
					msg_callbacks_current.Context,
					netbuf, id, toeplitz_hash_result);
		}
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
{
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		hif_recv_buffer_cleanup_on_pipe(pipe_info);
		hif_send_buffer_cleanup_on_pipe(pipe_info);
	}
}

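/**
 * hif_flush_surprise_remove() - flush all buffers on surprise removal
 * @hif_ctx: opaque HIF context
 *
 * Reclaims all posted receive buffers and pending sends when the
 * device is removed unexpectedly.
 */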
void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_buffer_cleanup(hif_state);
}

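/**
 * hif_stop() - stop the HIF CE layer
 * @hif_ctx: opaque HIF context
 *
 * Cleans up all Host-side CE state: residual buffers, the per-pipe
 * copy engines, the sleep timer, and the athdiag procfs entry. By this
 * point asynchronous threads are stopped and the Target no longer
 * DMAs or interrupts.
 */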
void hif_stop(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	int pipe_num;

	scn->hif_init_done = false;

	/*
	 * At this point, asynchronous threads are stopped,
	 * The Target should not DMA nor interrupt, Host code may
	 * not initiate anything more. So we just need to clean
	 * up Host-side state.
	 */

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	hif_buffer_cleanup(hif_state);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}

	if (hif_state->sleep_timer_init) {
		qdf_timer_stop(&hif_state->sleep_timer);
		qdf_timer_free(&hif_state->sleep_timer);
		hif_state->sleep_timer_init = false;
	}

	hif_state->started = false;
}

1287#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
1288#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40
1289
1290
1291static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
1292 { 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
1293 { 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
1294 { 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
1295 { 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
1296 { 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
1297 { 1, ADRASTEA_DST_WR_INDEX_OFFSET},
1298 { 2, ADRASTEA_DST_WR_INDEX_OFFSET},
1299 { 7, ADRASTEA_DST_WR_INDEX_OFFSET},
1300 { 8, ADRASTEA_DST_WR_INDEX_OFFSET},
1301};
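
/*
 * Illustrative sketch: each entry above pairs a CE number with the
 * offset of that CE's shadowed write index (source or destination
 * ring). The table is consumed wholesale; e.g. hif_wlan_enable()
 * below derives the entry count as
 *
 *	num = shadow_cfg_sz / sizeof(struct shadow_reg_cfg);
 *
 * which is nine here: five source and four destination write indices.
 */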
1302
1303
1304
1305/* CE_PCI TABLE */
1306/*
1307 * NOTE: the table below is out of date, though still a useful reference.
1308 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
1309 * mapping of HTC services to HIF pipes.
1310 */
/*
 * This table defines the Copy Engine configuration and the mapping of
 * services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering the BMI phase.
 * See:
 * target_service_to_ce_map - Target-side mapping
 * hif_map_service_to_pipe - Host-side mapping
 * target_ce_config - Target-side configuration
 * host_ce_config - Host-side configuration
1320 ============================================================================
1321 Purpose | Service / Endpoint | CE | Dire | Xfer | Xfer
1322 | | | ctio | Size | Frequency
1323 | | | n | |
1324 ============================================================================
1325 tx | HTT_DATA (downlink) | CE 0 | h->t | medium - | very frequent
1326 descriptor | | | | O(100B) | and regular
1327 download | | | | |
1328 ----------------------------------------------------------------------------
1329 rx | HTT_DATA (uplink) | CE 1 | t->h | small - | frequent and
1330 indication | | | | O(10B) | regular
1331 upload | | | | |
1332 ----------------------------------------------------------------------------
1333 MSDU | DATA_BK (uplink) | CE 2 | t->h | large - | rare
1334 upload | | | | O(1000B) | (frequent
1335 e.g. noise | | | | | during IP1.0
1336 packets | | | | | testing)
1337 ----------------------------------------------------------------------------
1338 MSDU | DATA_BK (downlink) | CE 3 | h->t | large - | very rare
1339 download | | | | O(1000B) | (frequent
1340 e.g. | | | | | during IP1.0
1341 misdirecte | | | | | testing)
1342 d EAPOL | | | | |
1343 packets | | | | |
1344 ----------------------------------------------------------------------------
1345 n/a | DATA_BE, DATA_VI | CE 2 | t->h | | never(?)
1346 | DATA_VO (uplink) | | | |
1347 ----------------------------------------------------------------------------
1348 n/a | DATA_BE, DATA_VI | CE 3 | h->t | | never(?)
1349 | DATA_VO (downlink) | | | |
1350 ----------------------------------------------------------------------------
1351 WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
1352 | | | | O(100B) |
1353 ----------------------------------------------------------------------------
1354 WMI | WMI_CONTROL | CE 5 | h->t | medium - | infrequent
1355 messages | (downlink) | | | O(100B) |
1356 | | | | |
1357 ----------------------------------------------------------------------------
1358 n/a | HTC_CTRL_RSVD, | CE 1 | t->h | | never(?)
1359 | HTC_RAW_STREAMS | | | |
1360 | (uplink) | | | |
1361 ----------------------------------------------------------------------------
1362 n/a | HTC_CTRL_RSVD, | CE 0 | h->t | | never(?)
1363 | HTC_RAW_STREAMS | | | |
1364 | (downlink) | | | |
1365 ----------------------------------------------------------------------------
1366 diag | none (raw CE) | CE 7 | t<>h | 4 | Diag Window
1367 | | | | | infrequent
1368 ============================================================================
1369 */
1370
1371/*
1372 * Map from service/endpoint to Copy Engine.
1373 * This table is derived from the CE_PCI TABLE, above.
1374 * It is passed to the Target at startup for use by firmware.
1375 */
1376static struct service_to_pipe target_service_to_ce_map_wlan[] = {
1377 {
1378 WMI_DATA_VO_SVC,
1379 PIPEDIR_OUT, /* out = UL = host -> target */
1380 3,
1381 },
1382 {
1383 WMI_DATA_VO_SVC,
1384 PIPEDIR_IN, /* in = DL = target -> host */
1385 2,
1386 },
1387 {
1388 WMI_DATA_BK_SVC,
1389 PIPEDIR_OUT, /* out = UL = host -> target */
1390 3,
1391 },
1392 {
1393 WMI_DATA_BK_SVC,
1394 PIPEDIR_IN, /* in = DL = target -> host */
1395 2,
1396 },
1397 {
1398 WMI_DATA_BE_SVC,
1399 PIPEDIR_OUT, /* out = UL = host -> target */
1400 3,
1401 },
1402 {
1403 WMI_DATA_BE_SVC,
1404 PIPEDIR_IN, /* in = DL = target -> host */
1405 2,
1406 },
1407 {
1408 WMI_DATA_VI_SVC,
1409 PIPEDIR_OUT, /* out = UL = host -> target */
1410 3,
1411 },
1412 {
1413 WMI_DATA_VI_SVC,
1414 PIPEDIR_IN, /* in = DL = target -> host */
1415 2,
1416 },
1417 {
1418 WMI_CONTROL_SVC,
1419 PIPEDIR_OUT, /* out = UL = host -> target */
1420 3,
1421 },
1422 {
1423 WMI_CONTROL_SVC,
1424 PIPEDIR_IN, /* in = DL = target -> host */
1425 2,
1426 },
1427 {
1428 HTC_CTRL_RSVD_SVC,
1429 PIPEDIR_OUT, /* out = UL = host -> target */
1430 0, /* could be moved to 3 (share with WMI) */
1431 },
1432 {
1433 HTC_CTRL_RSVD_SVC,
1434 PIPEDIR_IN, /* in = DL = target -> host */
1435 2,
1436 },
1437 {
1438 HTC_RAW_STREAMS_SVC, /* not currently used */
1439 PIPEDIR_OUT, /* out = UL = host -> target */
1440 0,
1441 },
1442 {
1443 HTC_RAW_STREAMS_SVC, /* not currently used */
1444 PIPEDIR_IN, /* in = DL = target -> host */
1445 2,
1446 },
1447 {
1448 HTT_DATA_MSG_SVC,
1449 PIPEDIR_OUT, /* out = UL = host -> target */
1450 4,
1451 },
1452 {
1453 HTT_DATA_MSG_SVC,
1454 PIPEDIR_IN, /* in = DL = target -> host */
1455 1,
1456 },
1457 {
1458 WDI_IPA_TX_SVC,
		PIPEDIR_OUT, /* out = UL = host -> target */
1460 5,
1461 },
1462 /* (Additions here) */
1463
1464 { /* Must be last */
1465 0,
1466 0,
1467 0,
1468 },
1469};
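
/*
 * Illustrative lookup sketch (hif_hdl is a hypothetical, already
 * initialized HIF handle): hif_map_service_to_pipe(), defined later
 * in this file, scans the table above once per direction, so for
 * WMI_CONTROL_SVC it reports pipe 3 uplink and pipe 2 downlink:
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	hif_map_service_to_pipe(hif_hdl, WMI_CONTROL_SVC,
 *				&ul_pipe, &dl_pipe,
 *				&ul_polled, &dl_polled);
 */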
1470
1471static struct service_to_pipe *target_service_to_ce_map =
1472 target_service_to_ce_map_wlan;
1473static int target_service_to_ce_map_sz = sizeof(target_service_to_ce_map_wlan);
1474
1475static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
1476static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);
1477
1478static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
1479 {WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */
1480 {WMI_DATA_VO_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
1481 {WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,}, /* out = UL = host -> target */
1482 {WMI_DATA_BK_SVC, PIPEDIR_IN, 1,}, /* in = DL = target -> host */
1483 {WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */
1484 {WMI_DATA_BE_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
1485 {WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */
1486 {WMI_DATA_VI_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
1487 {WMI_CONTROL_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */
1488 {WMI_CONTROL_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
1489 {HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
1490 {HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
1491 {HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
1492 {HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
1493 {HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,}, /* out = UL = host -> target */
1494 {HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,}, /* in = DL = target -> host */
1495 {0, 0, 0,}, /* Must be last */
1496};
1497
1498#ifdef HIF_PCI
1499/*
1500 * Send an interrupt to the device to wake up the Target CPU
1501 * so it has an opportunity to notice any changed state.
1502 */
Komal Seelam644263d2016-02-22 20:45:49 +05301503void hif_wake_target_cpu(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001504{
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301505 QDF_STATUS rv;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001506 uint32_t core_ctrl;
Komal Seelam5584a7c2016-02-24 19:22:48 +05301507 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001508
Komal Seelam644263d2016-02-22 20:45:49 +05301509 rv = hif_diag_read_access(hif_hdl,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001510 SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1511 &core_ctrl);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301512 QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001513 /* A_INUM_FIRMWARE interrupt to Target CPU */
1514 core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1515
Komal Seelam644263d2016-02-22 20:45:49 +05301516 rv = hif_diag_write_access(hif_hdl,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001517 SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1518 core_ctrl);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301519 QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001520}
1521#endif
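
/*
 * Illustrative sketch: hif_wake_target_cpu() is the canonical
 * read-modify-write over the diag window; any host-side update of a
 * target register follows the same shape (address and SOME_BIT are
 * hypothetical placeholders):
 *
 *	rv = hif_diag_read_access(hif_hdl, address, &val);
 *	val |= SOME_BIT;
 *	rv = hif_diag_write_access(hif_hdl, address, val);
 */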
1522
1523static void hif_sleep_entry(void *arg)
1524{
1525 struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
Komal Seelam644263d2016-02-22 20:45:49 +05301526 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001527 uint32_t idle_ms;
Komal Seelam644263d2016-02-22 20:45:49 +05301528
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001529 if (scn->recovery)
1530 return;
1531
Komal Seelambd7c51d2016-02-24 10:27:30 +05301532 if (hif_is_driver_unloading(scn))
Sanjay Devnani79c99b22015-11-23 11:42:35 -08001533 return;
1534
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301535 qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001536 if (hif_state->verified_awake == false) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301537 idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001538 - hif_state->sleep_ticks);
1539 if (idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301540 if (!qdf_atomic_read(&scn->link_suspended)) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001541 soc_wake_reset(scn);
1542 hif_state->fake_sleep = false;
1543 }
1544 } else {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301545 qdf_timer_stop(&hif_state->sleep_timer);
1546 qdf_timer_start(&hif_state->sleep_timer,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001547 HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1548 }
1549 } else {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301550 qdf_timer_stop(&hif_state->sleep_timer);
1551 qdf_timer_start(&hif_state->sleep_timer,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001552 HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1553 }
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301554 qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001555}
1556#define HIF_HIA_MAX_POLL_LOOP 1000000
1557#define HIF_HIA_POLLING_DELAY_MS 10
1558
1559#ifndef HIF_PCI
Komal Seelam644263d2016-02-22 20:45:49 +05301560int hif_set_hia(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001561{
1562 return 0;
1563}
1564#else
Komal Seelam644263d2016-02-22 20:45:49 +05301565int hif_set_hia(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001566{
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301567 QDF_STATUS rv;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001568 uint32_t interconnect_targ_addr = 0;
1569 uint32_t pcie_state_targ_addr = 0;
1570 uint32_t pipe_cfg_targ_addr = 0;
1571 uint32_t svc_to_pipe_map = 0;
1572 uint32_t pcie_config_flags = 0;
1573 uint32_t flag2_value = 0;
1574 uint32_t flag2_targ_addr = 0;
1575#ifdef QCA_WIFI_3_0
1576 uint32_t host_interest_area = 0;
1577 uint8_t i;
1578#else
1579 uint32_t ealloc_value = 0;
1580 uint32_t ealloc_targ_addr = 0;
1581 uint8_t banks_switched = 1;
1582 uint32_t chip_id;
1583#endif
1584 uint32_t pipe_cfg_addr;
Komal Seelam5584a7c2016-02-24 19:22:48 +05301585 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
Komal Seelam644263d2016-02-22 20:45:49 +05301586 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
Komal Seelam91553ce2016-01-27 18:57:10 +05301587 uint32_t target_type = tgt_info->target_type;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001588
1589 HIF_TRACE("%s: E", __func__);
1590
Houston Hoffman06bc4f52015-12-16 18:43:34 -08001591 if (ADRASTEA_BU)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301592 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001593
1594#ifdef QCA_WIFI_3_0
1595 i = 0;
1596 while (i < HIF_HIA_MAX_POLL_LOOP) {
1597 host_interest_area = hif_read32_mb(scn->mem +
1598 A_SOC_CORE_SCRATCH_0_ADDRESS);
1599 if ((host_interest_area & 0x01) == 0) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301600 qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001601 host_interest_area = 0;
1602 i++;
			if (i % 1000 == 0) {
				HIF_ERROR("%s: host interest poll still pending(%d)",
					  __func__, i);
			}
1606 } else {
1607 host_interest_area &= (~0x01);
1608 hif_write32_mb(scn->mem + 0x113014, 0);
1609 break;
1610 }
1611 }
1612
1613 if (i >= HIF_HIA_MAX_POLL_LOOP) {
1614 HIF_ERROR("%s: hia polling timeout", __func__);
1615 return -EIO;
1616 }
1617
1618 if (host_interest_area == 0) {
1619 HIF_ERROR("%s: host_interest_area = 0", __func__);
1620 return -EIO;
1621 }
1622
1623 interconnect_targ_addr = host_interest_area +
1624 offsetof(struct host_interest_area_t,
1625 hi_interconnect_state);
1626
1627 flag2_targ_addr = host_interest_area +
1628 offsetof(struct host_interest_area_t, hi_option_flag2);
1629
1630#else
Komal Seelam91553ce2016-01-27 18:57:10 +05301631 interconnect_targ_addr = hif_hia_item_address(target_type,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001632 offsetof(struct host_interest_s, hi_interconnect_state));
Komal Seelam91553ce2016-01-27 18:57:10 +05301633 ealloc_targ_addr = hif_hia_item_address(target_type,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001634 offsetof(struct host_interest_s, hi_early_alloc));
Komal Seelam91553ce2016-01-27 18:57:10 +05301635 flag2_targ_addr = hif_hia_item_address(target_type,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001636 offsetof(struct host_interest_s, hi_option_flag2));
1637#endif
1638 /* Supply Target-side CE configuration */
Komal Seelam644263d2016-02-22 20:45:49 +05301639 rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001640 &pcie_state_targ_addr);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301641 if (rv != QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001642 HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
1643 __func__, interconnect_targ_addr, rv);
1644 goto done;
1645 }
1646 if (pcie_state_targ_addr == 0) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301647 rv = QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001648 HIF_ERROR("%s: pcie state addr is 0", __func__);
1649 goto done;
1650 }
1651 pipe_cfg_addr = pcie_state_targ_addr +
1652 offsetof(struct pcie_state_s,
1653 pipe_cfg_addr);
Komal Seelam644263d2016-02-22 20:45:49 +05301654 rv = hif_diag_read_access(hif_hdl,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001655 pipe_cfg_addr,
1656 &pipe_cfg_targ_addr);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301657 if (rv != QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001658 HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
1659 __func__, pipe_cfg_addr, rv);
1660 goto done;
1661 }
1662 if (pipe_cfg_targ_addr == 0) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301663 rv = QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001664 HIF_ERROR("%s: pipe cfg addr is 0", __func__);
1665 goto done;
1666 }
1667
Komal Seelam644263d2016-02-22 20:45:49 +05301668 rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001669 (uint8_t *) target_ce_config,
1670 target_ce_config_sz);
1671
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301672 if (rv != QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001673 HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
1674 goto done;
1675 }
1676
Komal Seelam644263d2016-02-22 20:45:49 +05301677 rv = hif_diag_read_access(hif_hdl,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001678 pcie_state_targ_addr +
1679 offsetof(struct pcie_state_s,
1680 svc_to_pipe_map),
1681 &svc_to_pipe_map);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301682 if (rv != QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001683 HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
1684 goto done;
1685 }
1686 if (svc_to_pipe_map == 0) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301687 rv = QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001688 HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
1689 goto done;
1690 }
1691
Komal Seelam644263d2016-02-22 20:45:49 +05301692 rv = hif_diag_write_mem(hif_hdl,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001693 svc_to_pipe_map,
1694 (uint8_t *) target_service_to_ce_map,
1695 target_service_to_ce_map_sz);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301696 if (rv != QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001697 HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
1698 goto done;
1699 }
1700
Komal Seelam644263d2016-02-22 20:45:49 +05301701 rv = hif_diag_read_access(hif_hdl,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001702 pcie_state_targ_addr +
1703 offsetof(struct pcie_state_s,
1704 config_flags),
1705 &pcie_config_flags);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301706 if (rv != QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001707 HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
1708 goto done;
1709 }
1710#if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
1711 pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
1712#else
1713 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1714#endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
1715 pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
1716#if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
1717 pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
1718#endif
Komal Seelam644263d2016-02-22 20:45:49 +05301719 rv = hif_diag_write_mem(hif_hdl,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001720 pcie_state_targ_addr +
1721 offsetof(struct pcie_state_s,
1722 config_flags),
1723 (uint8_t *) &pcie_config_flags,
1724 sizeof(pcie_config_flags));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301725 if (rv != QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001726 HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
1727 goto done;
1728 }
1729
1730#ifndef QCA_WIFI_3_0
1731 /* configure early allocation */
Komal Seelam91553ce2016-01-27 18:57:10 +05301732 ealloc_targ_addr = hif_hia_item_address(target_type,
1733 offsetof(
1734 struct host_interest_s,
1735 hi_early_alloc));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001736
Komal Seelam644263d2016-02-22 20:45:49 +05301737 rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001738 &ealloc_value);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301739 if (rv != QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001740 HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
1741 goto done;
1742 }
1743
1744 /* 1 bank is switched to IRAM, except ROME 1.0 */
1745 ealloc_value |=
1746 ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1747 HI_EARLY_ALLOC_MAGIC_MASK);
1748
Komal Seelam644263d2016-02-22 20:45:49 +05301749 rv = hif_diag_read_access(hif_hdl,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001750 CHIP_ID_ADDRESS |
1751 RTC_SOC_BASE_ADDRESS, &chip_id);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301752 if (rv != QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001753 HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
1754 goto done;
1755 }
1756 if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
Komal Seelam91553ce2016-01-27 18:57:10 +05301757 tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001758 switch (CHIP_ID_REVISION_GET(chip_id)) {
1759 case 0x2: /* ROME 1.3 */
1760 /* 2 banks are switched to IRAM */
1761 banks_switched = 2;
1762 break;
1763 case 0x4: /* ROME 2.1 */
1764 case 0x5: /* ROME 2.2 */
1765 banks_switched = 6;
1766 break;
1767 case 0x8: /* ROME 3.0 */
1768 case 0x9: /* ROME 3.1 */
1769 case 0xA: /* ROME 3.2 */
1770 banks_switched = 9;
1771 break;
1772 case 0x0: /* ROME 1.0 */
1773 case 0x1: /* ROME 1.1 */
1774 default:
1775 /* 3 banks are switched to IRAM */
1776 banks_switched = 3;
1777 break;
1778 }
1779 }
1780
1781 ealloc_value |=
1782 ((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
1783 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1784
Komal Seelam644263d2016-02-22 20:45:49 +05301785 rv = hif_diag_write_access(hif_hdl,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001786 ealloc_targ_addr,
1787 ealloc_value);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301788 if (rv != QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001789 HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
1790 goto done;
1791 }
1792#endif
1793
1794 /* Tell Target to proceed with initialization */
Komal Seelam91553ce2016-01-27 18:57:10 +05301795 flag2_targ_addr = hif_hia_item_address(target_type,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001796 offsetof(
1797 struct host_interest_s,
1798 hi_option_flag2));
1799
Komal Seelam644263d2016-02-22 20:45:49 +05301800 rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001801 &flag2_value);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301802 if (rv != QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001803 HIF_ERROR("%s: get option val (%d)", __func__, rv);
1804 goto done;
1805 }
1806
1807 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
Komal Seelam644263d2016-02-22 20:45:49 +05301808 rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001809 flag2_value);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301810 if (rv != QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001811 HIF_ERROR("%s: set option val (%d)", __func__, rv);
1812 goto done;
1813 }
1814
1815 hif_wake_target_cpu(scn);
1816
1817done:
1818
1819 return rv;
1820}
1821#endif
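
/*
 * Summary sketch of the handshake hif_set_hia() performs on PCI
 * targets (step 5 applies to pre-QCA_WIFI_3_0 chips only):
 *
 *	1. locate the host interest area and read hi_interconnect_state
 *	   to find the target's pcie_state structure
 *	2. write target_ce_config through the diag window
 *	3. write target_service_to_ce_map to svc_to_pipe_map
 *	4. patch config_flags (L1/AXI clock gating)
 *	5. program the early IRAM bank allocation per chip revision
 *	6. set HI_OPTION_EARLY_CFG_DONE in hi_option_flag2
 *	7. hif_wake_target_cpu() so firmware notices the new state
 */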
1822
1823/**
1824 * hif_wlan_enable(): call the platform driver to enable wlan
Komal Seelambd7c51d2016-02-24 10:27:30 +05301825 * @scn: HIF Context
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001826 *
1827 * This function passes the con_mode and CE configuration to
1828 * platform driver to enable wlan.
1829 *
Houston Hoffman108da402016-03-14 21:11:24 -07001830 * Return: linux error code
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001831 */
Houston Hoffman108da402016-03-14 21:11:24 -07001832int hif_wlan_enable(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001833{
1834 struct icnss_wlan_enable_cfg cfg;
1835 enum icnss_driver_mode mode;
Komal Seelambd7c51d2016-02-24 10:27:30 +05301836 uint32_t con_mode = hif_get_conparam(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001837
1838 cfg.num_ce_tgt_cfg = target_ce_config_sz /
1839 sizeof(struct CE_pipe_config);
1840 cfg.ce_tgt_cfg = (struct ce_tgt_pipe_cfg *)target_ce_config;
1841 cfg.num_ce_svc_pipe_cfg = target_service_to_ce_map_sz /
1842 sizeof(struct service_to_pipe);
1843 cfg.ce_svc_cfg = (struct ce_svc_pipe_cfg *)target_service_to_ce_map;
1844 cfg.num_shadow_reg_cfg = shadow_cfg_sz / sizeof(struct shadow_reg_cfg);
Komal Seelam644263d2016-02-22 20:45:49 +05301845 cfg.shadow_reg_cfg =
1846 (struct icnss_shadow_reg_cfg *) target_shadow_reg_cfg;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001847
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301848 if (QDF_GLOBAL_FTM_MODE == con_mode)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001849 mode = ICNSS_FTM;
Komal Seelambd7c51d2016-02-24 10:27:30 +05301850 else if (WLAN_IS_EPPING_ENABLED(con_mode))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001851 mode = ICNSS_EPPING;
Peng Xu7b962532015-10-02 17:17:03 -07001852 else
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001853 mode = ICNSS_MISSION;
Peng Xu7b962532015-10-02 17:17:03 -07001854
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001855 return icnss_wlan_enable(&cfg, mode, QWLAN_VERSIONSTR);
1856}
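
/*
 * Illustrative call sketch (hypothetical bring-up path): the CE,
 * service, and shadow tables above are packed into the icnss config
 * and the mode is derived from con_mode, so a caller only needs
 *
 *	if (hif_wlan_enable(scn))
 *		goto fail;
 *
 * treating any nonzero return as a platform-driver rejection.
 */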
1857
Houston Hoffman108da402016-03-14 21:11:24 -07001858/**
1859 * hif_ce_prepare_config() - load the correct static tables.
1860 * @scn: hif context
1861 *
1862 * Epping uses different static attribute tables than mission mode.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001863 */
Houston Hoffman108da402016-03-14 21:11:24 -07001864void hif_ce_prepare_config(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001865{
Komal Seelambd7c51d2016-02-24 10:27:30 +05301866 uint32_t mode = hif_get_conparam(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001867 /* if epping is enabled we need to use the epping configuration. */
Komal Seelambd7c51d2016-02-24 10:27:30 +05301868 if (WLAN_IS_EPPING_ENABLED(mode)) {
1869 if (WLAN_IS_EPPING_IRQ(mode))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001870 host_ce_config = host_ce_config_wlan_epping_irq;
1871 else
1872 host_ce_config = host_ce_config_wlan_epping_poll;
1873 target_ce_config = target_ce_config_wlan_epping;
1874 target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
1875 target_service_to_ce_map =
1876 target_service_to_ce_map_wlan_epping;
1877 target_service_to_ce_map_sz =
1878 sizeof(target_service_to_ce_map_wlan_epping);
1879 }
Houston Hoffman108da402016-03-14 21:11:24 -07001880}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001881
Houston Hoffman108da402016-03-14 21:11:24 -07001882/**
1883 * hif_ce_open() - do ce specific allocations
1884 * @hif_sc: pointer to hif context
1885 *
 * Return: QDF_STATUS_SUCCESS for success or QDF_STATUS_E_NOMEM
1887 */
1888QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
1889{
1890 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001891
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301892 qdf_spinlock_create(&hif_state->keep_awake_lock);
Houston Hoffman108da402016-03-14 21:11:24 -07001893 return QDF_STATUS_SUCCESS;
1894}
1895
1896/**
1897 * hif_ce_close() - do ce specific free
1898 * @hif_sc: pointer to hif context
1899 */
1900void hif_ce_close(struct hif_softc *hif_sc)
1901{
1902}
1903
1904/**
1905 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
1906 * @hif_sc: hif context
1907 *
1908 * uses state variables to support cleaning up when hif_config_ce fails.
1909 */
1910void hif_unconfig_ce(struct hif_softc *hif_sc)
1911{
1912 int pipe_num;
1913 struct HIF_CE_pipe_info *pipe_info;
1914 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
1915
1916 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
1917 pipe_info = &hif_state->pipe_info[pipe_num];
1918 if (pipe_info->ce_hdl) {
1919 ce_unregister_irq(hif_state, (1 << pipe_num));
1920 hif_sc->request_irq_done = false;
1921 ce_fini(pipe_info->ce_hdl);
1922 pipe_info->ce_hdl = NULL;
1923 pipe_info->buf_sz = 0;
1924 }
1925 }
1926 if (hif_state->sleep_timer_init) {
1927 qdf_timer_stop(&hif_state->sleep_timer);
1928 qdf_timer_free(&hif_state->sleep_timer);
1929 hif_state->sleep_timer_init = false;
1930 }
1931 if (hif_sc->athdiag_procfs_inited) {
1932 athdiag_procfs_remove();
1933 hif_sc->athdiag_procfs_inited = false;
1934 }
1935}
1936
1937/**
1938 * hif_config_ce() - configure copy engines
1939 * @scn: hif context
1940 *
 * Prepares the firmware, copy engine hardware, and host software
 * according to the attributes selected by hif_ce_prepare_config.
 *
 * Also calls athdiag_procfs_init.
 *
 * Return: 0 for success, nonzero for failure.
1947 */
1948int hif_config_ce(struct hif_softc *scn)
1949{
1950 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1951 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1952 struct HIF_CE_pipe_info *pipe_info;
1953 int pipe_num;
1954#ifdef ADRASTEA_SHADOW_REGISTERS
1955 int i;
1956#endif
1957 QDF_STATUS rv = QDF_STATUS_SUCCESS;
1958
1959 scn->notice_send = true;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001960
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001961 hif_state->keep_awake_count = 0;
1962
1963 hif_state->fake_sleep = false;
1964 hif_state->sleep_ticks = 0;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301965 qdf_timer_init(NULL, &hif_state->sleep_timer,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001966 hif_sleep_entry, (void *)hif_state,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301967 QDF_TIMER_TYPE_WAKE_APPS);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001968 hif_state->sleep_timer_init = true;
1969 hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
Houston Hoffman108da402016-03-14 21:11:24 -07001970
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001971#ifdef HIF_PCI
1972#if CONFIG_ATH_PCIE_MAX_PERF || CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD
1973 /* Force AWAKE forever/till the driver is loaded */
1974 if (hif_target_sleep_state_adjust(scn, false, true) < 0)
1975 return -EACCES;
1976#endif
1977#endif
1978
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08001979 hif_config_rri_on_ddr(scn);
1980
	/* During CE initialization */
1982 scn->ce_count = HOST_CE_COUNT;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001983 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1984 struct CE_attr *attr;
1985
1986 pipe_info = &hif_state->pipe_info[pipe_num];
1987 pipe_info->pipe_num = pipe_num;
1988 pipe_info->HIF_CE_state = hif_state;
1989 attr = &host_ce_config[pipe_num];
1990 pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301991 QDF_ASSERT(pipe_info->ce_hdl != NULL);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001992 if (pipe_info->ce_hdl == NULL) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301993 rv = QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001994 A_TARGET_ACCESS_UNLIKELY(scn);
1995 goto err;
1996 }
1997
1998 if (pipe_num == DIAG_CE_ID) {
			/* Reserve the last CE for
			 * Diagnostic Window support */
2001 hif_state->ce_diag =
2002 hif_state->pipe_info[scn->ce_count - 1].ce_hdl;
2003 continue;
2004 }
2005
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302006 pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
2007 qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002008 if (attr->dest_nentries > 0) {
2009 atomic_set(&pipe_info->recv_bufs_needed,
2010 init_buffer_count(attr->dest_nentries - 1));
2011 } else {
2012 atomic_set(&pipe_info->recv_bufs_needed, 0);
2013 }
2014 ce_tasklet_init(hif_state, (1 << pipe_num));
2015 ce_register_irq(hif_state, (1 << pipe_num));
2016 scn->request_irq_done = true;
2017 }
2018
2019 if (athdiag_procfs_init(scn) != 0) {
2020 A_TARGET_ACCESS_UNLIKELY(scn);
2021 goto err;
2022 }
2023 scn->athdiag_procfs_inited = true;
2024
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002025 HIF_INFO_MED("%s: ce_init done", __func__);
2026
Houston Hoffman108da402016-03-14 21:11:24 -07002027 init_tasklet_workers(hif_hdl);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002028
2029 HIF_TRACE("%s: X, ret = %d\n", __func__, rv);
2030
2031#ifdef ADRASTEA_SHADOW_REGISTERS
2032 HIF_ERROR("Using Shadow Registers instead of CE Registers\n");
2033 for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
2034 HIF_ERROR("%s Shadow Register%d is mapped to address %x\n",
2035 __func__, i,
2036 (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
2037 }
2038#endif
2039
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302040 return rv != QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002041
2042err:
2043 /* Failure, so clean up */
Houston Hoffman108da402016-03-14 21:11:24 -07002044 hif_unconfig_ce(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002045 HIF_TRACE("%s: X, ret = %d\n", __func__, rv);
	return 1; /* nonzero indicates failure */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002047}
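
/*
 * Illustrative lifecycle sketch (the real call sites live in the bus
 * layer; the order below is shown under that assumption):
 *
 *	hif_ce_open(hif_sc);		- allocate CE-specific state
 *	hif_ce_prepare_config(hif_sc);	- pick mission vs epping tables
 *	hif_wlan_enable(hif_sc);	- hand tables to platform driver
 *	hif_config_ce(hif_sc);		- init rings, irqs, diag CE;
 *					  cleans up via hif_unconfig_ce()
 *					  on internal failure
 *	...
 *	hif_unconfig_ce(hif_sc);	- teardown
 *	hif_ce_close(hif_sc);
 */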
2048
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002049#ifdef IPA_OFFLOAD
Leo Changd85f78d2015-11-13 10:55:34 -08002050/**
2051 * hif_ipa_get_ce_resource() - get uc resource on hif
 * @hif_ctx: bus context
 * @ce_sr_base_paddr: copyengine source ring base physical address
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * When the IPA micro controller data path offload feature is enabled,
 * HIF releases the copy engine resource information to the IPA UC,
 * which then accesses the hardware using that information.
2060 *
2061 * Return: None
2062 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05302063void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302064 qdf_dma_addr_t *ce_sr_base_paddr,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002065 uint32_t *ce_sr_ring_size,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302066 qdf_dma_addr_t *ce_reg_paddr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002067{
Komal Seelam644263d2016-02-22 20:45:49 +05302068 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05302069 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002070 struct HIF_CE_pipe_info *pipe_info =
2071 &(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
2072 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
2073
2074 ce_ipa_get_resource(ce_hdl, ce_sr_base_paddr, ce_sr_ring_size,
2075 ce_reg_paddr);
2076 return;
2077}
2078#endif /* IPA_OFFLOAD */
2079
2080
2081#ifdef ADRASTEA_SHADOW_REGISTERS
2082
2083/*
2084 Current shadow register config
2085
2086 -----------------------------------------------------------
2087 Shadow Register | CE | src/dst write index
2088 -----------------------------------------------------------
2089 0 | 0 | src
2090 1 No Config - Doesn't point to anything
2091 2 No Config - Doesn't point to anything
2092 3 | 3 | src
2093 4 | 4 | src
2094 5 | 5 | src
2095 6 No Config - Doesn't point to anything
2096 7 | 7 | src
2097 8 No Config - Doesn't point to anything
2098 9 No Config - Doesn't point to anything
2099 10 No Config - Doesn't point to anything
2100 11 No Config - Doesn't point to anything
2101 -----------------------------------------------------------
2102 12 No Config - Doesn't point to anything
2103 13 | 1 | dst
2104 14 | 2 | dst
2105 15 No Config - Doesn't point to anything
2106 16 No Config - Doesn't point to anything
2107 17 No Config - Doesn't point to anything
2108 18 No Config - Doesn't point to anything
2109 19 | 7 | dst
2110 20 | 8 | dst
2111 21 No Config - Doesn't point to anything
2112 22 No Config - Doesn't point to anything
2113 23 No Config - Doesn't point to anything
2114 -----------------------------------------------------------
2115
2116
 ToDo - Move the shadow register config to the following layout in the
 future. This frees up a block of shadow registers towards the end,
 which can then be used for other purposes.
2120
2121 -----------------------------------------------------------
2122 Shadow Register | CE | src/dst write index
2123 -----------------------------------------------------------
2124 0 | 0 | src
2125 1 | 3 | src
2126 2 | 4 | src
2127 3 | 5 | src
2128 4 | 7 | src
2129 -----------------------------------------------------------
2130 5 | 1 | dst
2131 6 | 2 | dst
2132 7 | 7 | dst
2133 8 | 8 | dst
2134 -----------------------------------------------------------
2135 9 No Config - Doesn't point to anything
2136 12 No Config - Doesn't point to anything
2137 13 No Config - Doesn't point to anything
2138 14 No Config - Doesn't point to anything
2139 15 No Config - Doesn't point to anything
2140 16 No Config - Doesn't point to anything
2141 17 No Config - Doesn't point to anything
2142 18 No Config - Doesn't point to anything
2143 19 No Config - Doesn't point to anything
2144 20 No Config - Doesn't point to anything
2145 21 No Config - Doesn't point to anything
2146 22 No Config - Doesn't point to anything
2147 23 No Config - Doesn't point to anything
2148 -----------------------------------------------------------
2149*/
2150
Komal Seelam644263d2016-02-22 20:45:49 +05302151u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002152{
2153 u32 addr = 0;
2154
2155 switch (COPY_ENGINE_ID(ctrl_addr)) {
2156 case 0:
2157 addr = SHADOW_VALUE0;
2158 break;
2159 case 3:
2160 addr = SHADOW_VALUE3;
2161 break;
2162 case 4:
2163 addr = SHADOW_VALUE4;
2164 break;
2165 case 5:
2166 addr = SHADOW_VALUE5;
2167 break;
2168 case 7:
2169 addr = SHADOW_VALUE7;
2170 break;
2171 default:
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002172 HIF_ERROR("invalid CE ctrl_addr\n");
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302173 QDF_ASSERT(0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002174
2175 }
2176 return addr;
2177
2178}
2179
Komal Seelam644263d2016-02-22 20:45:49 +05302180u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002181{
2182 u32 addr = 0;
2183
2184 switch (COPY_ENGINE_ID(ctrl_addr)) {
2185 case 1:
2186 addr = SHADOW_VALUE13;
2187 break;
2188 case 2:
2189 addr = SHADOW_VALUE14;
2190 break;
2191 case 7:
2192 addr = SHADOW_VALUE19;
2193 break;
2194 case 8:
2195 addr = SHADOW_VALUE20;
2196 break;
2197 default:
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002198 HIF_ERROR("invalid CE ctrl_addr\n");
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302199 QDF_ASSERT(0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002200 }
2201
2202 return addr;
2203
2204}
2205#endif
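
/*
 * Illustrative write-path sketch (assumes the usual A_TARGET_WRITE
 * accessor; new_write_index is hypothetical): with shadow registers,
 * updating CE 4's source-ring write index targets the shadow address
 * rather than the CE register block:
 *
 *	u32 addr = shadow_sr_wr_ind_addr(scn, CE_BASE_ADDRESS(4));
 *	A_TARGET_WRITE(scn, addr, new_write_index);
 */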
2206
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002207#if defined(FEATURE_LRO)
2208/**
2209 * ce_lro_flush_cb_register() - register the LRO flush
2210 * callback
 * @hif_hdl: HIF context
2212 * @handler: callback function
2213 * @data: opaque data pointer to be passed back
2214 *
2215 * Store the LRO flush callback provided
2216 *
2217 * Return: none
2218 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05302219void ce_lro_flush_cb_register(struct hif_opaque_softc *hif_hdl,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002220 void (handler)(void *), void *data)
2221{
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002222 uint8_t ul, dl;
2223 int ul_polled, dl_polled;
Komal Seelam5584a7c2016-02-24 19:22:48 +05302224 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002225
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302226 QDF_ASSERT(scn != NULL);
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002227
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302228 if (QDF_STATUS_SUCCESS !=
Komal Seelam644263d2016-02-22 20:45:49 +05302229 hif_map_service_to_pipe(hif_hdl, HTT_DATA_MSG_SVC,
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002230 &ul, &dl, &ul_polled, &dl_polled)) {
		printk("%s cannot map service to pipe\n", __func__);
2232 return;
2233 } else {
2234 struct CE_state *ce_state;
2235 ce_state = scn->ce_id_to_state[dl];
2236 ce_state->lro_flush_cb = handler;
2237 ce_state->lro_data = data;
2238 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002239}
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002240
2241/**
2242 * ce_lro_flush_cb_deregister() - deregister the LRO flush
2243 * callback
 * @hif_hdl: HIF context
2245 *
2246 * Remove the LRO flush callback
2247 *
2248 * Return: none
2249 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05302250void ce_lro_flush_cb_deregister(struct hif_opaque_softc *hif_hdl)
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002251{
2252 uint8_t ul, dl;
2253 int ul_polled, dl_polled;
Komal Seelam5584a7c2016-02-24 19:22:48 +05302254 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002255
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302256 QDF_ASSERT(scn != NULL);
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002257
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302258 if (QDF_STATUS_SUCCESS !=
Komal Seelam644263d2016-02-22 20:45:49 +05302259 hif_map_service_to_pipe(hif_hdl, HTT_DATA_MSG_SVC,
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002260 &ul, &dl, &ul_polled, &dl_polled)) {
		printk("%s cannot map service to pipe\n", __func__);
2262 return;
2263 } else {
2264 struct CE_state *ce_state;
2265 ce_state = scn->ce_id_to_state[dl];
2266 ce_state->lro_flush_cb = NULL;
2267 ce_state->lro_data = NULL;
2268 }
2269}
2270#endif
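
/*
 * Illustrative registration sketch (my_lro_flush and my_lro_ctx are
 * hypothetical): a receive path that batches work registers its
 * flush handler once the HIF handle is up and removes it before
 * teardown; the handler is invoked with the opaque pointer whenever
 * the HTT rx CE needs pending LRO state flushed:
 *
 *	ce_lro_flush_cb_register(hif_hdl, my_lro_flush, my_lro_ctx);
 *	...
 *	ce_lro_flush_cb_deregister(hif_hdl);
 */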
Sanjay Devnanic319c822015-11-06 16:44:28 -08002271
2272/**
2273 * hif_map_service_to_pipe() - returns the ce ids pertaining to
2274 * this service
 * @hif_hdl: hif_opaque_softc pointer.
Sanjay Devnanic319c822015-11-06 16:44:28 -08002276 * @svc_id: Service ID for which the mapping is needed.
2277 * @ul_pipe: address of the container in which ul pipe is returned.
2278 * @dl_pipe: address of the container in which dl pipe is returned.
2279 * @ul_is_polled: address of the container in which a bool
2280 * indicating if the UL CE for this service
2281 * is polled is returned.
2282 * @dl_is_polled: address of the container in which a bool
2283 * indicating if the DL CE for this service
2284 * is polled is returned.
2285 *
2286 * Return: Indicates whether this operation was successful.
2287 */
2288
Komal Seelam5584a7c2016-02-24 19:22:48 +05302289int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
Sanjay Devnanic319c822015-11-06 16:44:28 -08002290 uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
2291 int *dl_is_polled)
2292{
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302293 int status = QDF_STATUS_SUCCESS;
Sanjay Devnanic319c822015-11-06 16:44:28 -08002294 unsigned int i;
2295 struct service_to_pipe element;
Sanjay Devnanic319c822015-11-06 16:44:28 -08002296 struct service_to_pipe *tgt_svc_map_to_use;
2297 size_t sz_tgt_svc_map_to_use;
Komal Seelambd7c51d2016-02-24 10:27:30 +05302298 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
2299 uint32_t mode = hif_get_conparam(scn);
Sanjay Devnanic319c822015-11-06 16:44:28 -08002300
Komal Seelambd7c51d2016-02-24 10:27:30 +05302301 if (WLAN_IS_EPPING_ENABLED(mode)) {
Sanjay Devnanic319c822015-11-06 16:44:28 -08002302 tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
2303 sz_tgt_svc_map_to_use =
2304 sizeof(target_service_to_ce_map_wlan_epping);
2305 } else {
2306 tgt_svc_map_to_use = target_service_to_ce_map_wlan;
2307 sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_wlan);
2308 }
2309
2310 *dl_is_polled = 0; /* polling for received messages not supported */
2311
2312 for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
2313
2314 memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
2315 if (element.service_id == svc_id) {
2316
2317 if (element.pipedir == PIPEDIR_OUT)
2318 *ul_pipe = element.pipenum;
2319
2320 else if (element.pipedir == PIPEDIR_IN)
2321 *dl_pipe = element.pipenum;
2322 }
2323 }
2324
2325 *ul_is_polled =
2326 (host_ce_config[*ul_pipe].flags & CE_ATTR_DISABLE_INTR) != 0;
2327
2328 return status;
2329}
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002330
2331#ifdef SHADOW_REG_DEBUG
Komal Seelam644263d2016-02-22 20:45:49 +05302332inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002333 uint32_t CE_ctrl_addr)
2334{
2335 uint32_t read_from_hw, srri_from_ddr = 0;
2336
2337 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
2338
2339 srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
2340
2341 if (read_from_hw != srri_from_ddr) {
2342 HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x\n",
2343 srri_from_ddr, read_from_hw,
2344 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302345 QDF_ASSERT(0);
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002346 }
2347 return srri_from_ddr;
2348}
2349
2350
Komal Seelam644263d2016-02-22 20:45:49 +05302351inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002352 uint32_t CE_ctrl_addr)
2353{
2354 uint32_t read_from_hw, drri_from_ddr = 0;
2355
2356 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
2357
2358 drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
2359
2360 if (read_from_hw != drri_from_ddr) {
2361 HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x\n",
2362 drri_from_ddr, read_from_hw,
2363 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302364 QDF_ASSERT(0);
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002365 }
2366 return drri_from_ddr;
2367}
2368
2369#endif
2370
Houston Hoffman3d0cda82015-12-03 13:25:05 -08002371#ifdef ADRASTEA_RRI_ON_DDR
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002372/**
2373 * hif_get_src_ring_read_index(): Called to get the SRRI
2374 *
Komal Seelam644263d2016-02-22 20:45:49 +05302375 * @scn: hif_softc pointer
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002376 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2377 *
2378 * This function returns the SRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR-based SRRI.
2380 *
2381 * Return: SRRI
2382 */
Komal Seelam644263d2016-02-22 20:45:49 +05302383inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002384 uint32_t CE_ctrl_addr)
2385{
2386 struct CE_attr attr;
2387
2388 attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2389 if (attr.flags & CE_ATTR_DISABLE_INTR)
2390 return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2391 else
2392 return A_TARGET_READ(scn,
2393 (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
2394}
2395
2396/**
2397 * hif_get_dst_ring_read_index(): Called to get the DRRI
2398 *
Komal Seelam644263d2016-02-22 20:45:49 +05302399 * @scn: hif_softc pointer
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002400 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2401 *
2402 * This function returns the DRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR-based DRRI.
2404 *
2405 * Return: DRRI
2406 */
Komal Seelam644263d2016-02-22 20:45:49 +05302407inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002408 uint32_t CE_ctrl_addr)
2409{
2410 struct CE_attr attr;
2411
2412 attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2413
2414 if (attr.flags & CE_ATTR_DISABLE_INTR)
2415 return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2416 else
2417 return A_TARGET_READ(scn,
2418 (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
2419}
2420
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002421/**
2422 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2423 *
Komal Seelam644263d2016-02-22 20:45:49 +05302424 * @scn: hif_softc pointer
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002425 *
 * This function allocates non-cached memory on DDR and sends
 * the physical address of this memory to the CE hardware. The
 * hardware updates the RRI at this location.
2429 *
2430 * Return: None
2431 */
Komal Seelam644263d2016-02-22 20:45:49 +05302432static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002433{
2434 unsigned int i;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302435 qdf_dma_addr_t paddr_rri_on_ddr;
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002436 uint32_t high_paddr, low_paddr;
2437 scn->vaddr_rri_on_ddr =
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302438 (uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
2439 scn->qdf_dev->dev, (CE_COUNT*sizeof(uint32_t)),
2440 &paddr_rri_on_ddr);
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002441
2442 low_paddr = BITS0_TO_31(paddr_rri_on_ddr);
2443 high_paddr = BITS32_TO_35(paddr_rri_on_ddr);
2444
2445 HIF_ERROR("%s using srri and drri from DDR\n", __func__);
2446
2447 WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
2448 WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
2449
2450 for (i = 0; i < CE_COUNT; i++)
2451 CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
2452
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302453 qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT*sizeof(uint32_t));
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002454
2455 return;
2456}
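
/*
 * Layout sketch (assumption: one uint32_t slot per CE, matching the
 * CE_COUNT * sizeof(uint32_t) allocation above): the hardware keeps
 * CE i's read indices current in scn->vaddr_rri_on_ddr[i], and the
 * SRRI_FROM_DDR_ADDR()/DRRI_FROM_DDR_ADDR() macros used by the debug
 * helpers earlier in this file decode that word, e.g.
 *
 *	uint32_t word = scn->vaddr_rri_on_ddr[ce_id];
 */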
2457#else
2458
2459/**
2460 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2461 *
Komal Seelam644263d2016-02-22 20:45:49 +05302462 * @scn: hif_softc pointer
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002463 *
2464 * This is a dummy implementation for platforms that don't
2465 * support this functionality.
2466 *
2467 * Return: None
2468 */
Komal Seelam644263d2016-02-22 20:45:49 +05302469static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002470{
2471 return;
2472}
2473#endif
Govind Singh2443fb32016-01-13 17:44:48 +05302474
2475/**
2476 * hif_dump_ce_registers() - dump ce registers
 * @scn: hif_softc pointer.
Govind Singh2443fb32016-01-13 17:44:48 +05302478 *
2479 * Output the copy engine registers
2480 *
2481 * Return: 0 for success or error code
2482 */
Komal Seelam644263d2016-02-22 20:45:49 +05302483int hif_dump_ce_registers(struct hif_softc *scn)
Govind Singh2443fb32016-01-13 17:44:48 +05302484{
Komal Seelam5584a7c2016-02-24 19:22:48 +05302485 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
Govind Singh2443fb32016-01-13 17:44:48 +05302486 uint32_t ce_reg_address = CE0_BASE_ADDRESS;
2487 uint32_t ce_reg_values[CE_COUNT_MAX][CE_USEFUL_SIZE >> 2];
2488 uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
2489 uint16_t i;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302490 QDF_STATUS status;
Govind Singh2443fb32016-01-13 17:44:48 +05302491
2492 for (i = 0; i < CE_COUNT_MAX; i++, ce_reg_address += CE_OFFSET) {
Komal Seelam644263d2016-02-22 20:45:49 +05302493 status = hif_diag_read_mem(hif_hdl, ce_reg_address,
Govind Singh2443fb32016-01-13 17:44:48 +05302494 (uint8_t *) &ce_reg_values[i][0],
2495 ce_reg_word_size * sizeof(uint32_t));
2496
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302497 if (status != QDF_STATUS_SUCCESS) {
Govind Singh2443fb32016-01-13 17:44:48 +05302498 HIF_ERROR("Dumping CE register failed!");
2499 return -EACCES;
2500 }
2501 HIF_ERROR("CE%d Registers:", i);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302502 qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
Govind Singh2443fb32016-01-13 17:44:48 +05302503 (uint8_t *) &ce_reg_values[i][0],
2504 ce_reg_word_size * sizeof(uint32_t));
2505 }
Govind Singh2443fb32016-01-13 17:44:48 +05302506 return 0;
2507}
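
/*
 * Illustrative usage (hypothetical debug path): dumping every CE
 * register block after a target failure is a single call once a
 * hif_softc is in hand:
 *
 *	if (hif_dump_ce_registers(scn))
 *		HIF_ERROR("CE register dump failed");
 */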