/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */
#include <osdep.h>
#include "a_types.h"
#include "athdefs.h"
#include "osapi_linux.h"
#include "targcfg.h"
#include "cdf_lock.h"
#include "cdf_status.h"
#include <cdf_atomic.h>         /* cdf_atomic_read */
#include <targaddrs.h>
#include <bmi_msg.h>
#include "hif_io32.h"
#include <hif.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#ifdef HIF_PCI
#include "ce_bmi.h"
#endif
#include "ce_api.h"
#include "cdf_trace.h"
#ifdef CONFIG_CNSS
#include <net/cnss.h>
#endif
#include "epping_main.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#ifdef HIF_PCI
#include "icnss_stub.h"
#else
#include <soc/qcom/icnss.h>
#endif
#include "qwlan_version.h"

#define CE_POLL_TIMEOUT 10      /* ms */
65
/* Forward references */
static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix for EV118783: poll to check whether a BMI response has arrived,
 * rather than only waiting for the interrupt, which may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC 1000


static int hif_post_recv_buffers(struct hif_softc *scn);
static void hif_config_rri_on_ddr(struct hif_softc *scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080079
static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;

	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		cdf_softirq_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}
88
static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	CDF_ASSERT(0); /* n too large */
	return 0;
}
107
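/*
 * Illustrative sketch (not compiled into the driver): expected results
 * of roundup_pwr2() for a few ring sizes. Inputs with at most one bit
 * set (0, 1, 2, 4, ...) are returned unchanged; everything else rounds
 * up, starting from 4:
 *
 *	CDF_ASSERT(roundup_pwr2(1) == 1);
 *	CDF_ASSERT(roundup_pwr2(2) == 2);
 *	CDF_ASSERT(roundup_pwr2(3) == 4);
 *	CDF_ASSERT(roundup_pwr2(100) == 128);
 *	CDF_ASSERT(roundup_pwr2(512) == 512);
 */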
/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 *
 * This should be called during the initialization sequence before
 * interrupts are enabled, so we don't have to worry about thread safety.
 */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr)
{
	struct CE_state *CE_state;
	uint32_t ctrl_addr;
	unsigned int nentries;
	cdf_dma_addr_t base_addr;
	bool malloc_CE_state = false;
	bool malloc_src_ring = false;

	CDF_ASSERT(CE_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(CE_id);
	CE_state = scn->ce_id_to_state[CE_id];

	if (!CE_state) {
		CE_state =
			(struct CE_state *)cdf_mem_malloc(sizeof(*CE_state));
		if (!CE_state) {
			HIF_ERROR("%s: CE_state has no mem", __func__);
			return NULL;
		}
		malloc_CE_state = true;
		cdf_mem_zero(CE_state, sizeof(*CE_state));
		scn->ce_id_to_state[CE_id] = CE_state;
		cdf_spinlock_init(&CE_state->ce_index_lock);

		CE_state->id = CE_id;
		CE_state->ctrl_addr = ctrl_addr;
		CE_state->state = CE_RUNNING;
		CE_state->attr_flags = attr->flags;
	}
	CE_state->scn = scn;

	cdf_atomic_init(&CE_state->rx_pending);
	if (attr == NULL) {
		/* Already initialized; caller wants the handle */
		return (struct CE_handle *)CE_state;
	}
156
#ifdef ADRASTEA_SHADOW_REGISTERS
	HIF_ERROR("%s: Using Shadow Registers instead of CE Registers\n",
		  __func__);
#endif

	if (CE_state->src_sz_max)
		CDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
	else
		CE_state->src_sz_max = attr->src_sz_max;

	ce_init_ce_desc_event_log(CE_id,
				  attr->src_nentries + attr->dest_nentries);
169
	/* source ring setup */
	nentries = attr->src_nentries;
	if (nentries) {
		struct CE_ring_state *src_ring;
		unsigned CE_nbytes;
		char *ptr;
		uint64_t dma_addr;

		nentries = roundup_pwr2(nentries);
		if (CE_state->src_ring) {
			CDF_ASSERT(CE_state->src_ring->nentries == nentries);
		} else {
			CE_nbytes = sizeof(struct CE_ring_state)
				    + (nentries * sizeof(void *));
			ptr = cdf_mem_malloc(CE_nbytes);
			if (!ptr) {
				/* Cannot allocate the src ring. If the
				 * CE_state was allocated locally, free
				 * it and return an error.
				 */
				HIF_ERROR("%s: src ring has no mem", __func__);
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					scn->ce_id_to_state[CE_id] = NULL;
					cdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			} else {
				/* src ring allocated here; mark it as
				 * locally allocated so the error paths
				 * below can free it.
				 */
				malloc_src_ring = true;
			}
			cdf_mem_zero(ptr, CE_nbytes);

			src_ring = CE_state->src_ring =
					   (struct CE_ring_state *)ptr;
			ptr += sizeof(struct CE_ring_state);
			src_ring->nentries = nentries;
			src_ring->nentries_mask = nentries - 1;
			A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
			src_ring->hw_index =
				CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
			src_ring->sw_index = src_ring->hw_index;
			src_ring->write_index =
				CE_SRC_RING_WRITE_IDX_GET(scn, ctrl_addr);
			A_TARGET_ACCESS_END_RET_PTR(scn);
			src_ring->low_water_mark_nentries = 0;
			src_ring->high_water_mark_nentries = nentries;
			src_ring->per_transfer_context = (void **)ptr;

			/* Legacy platforms that do not support cache-
			 * coherent DMA are unsupported
			 */
			src_ring->base_addr_owner_space_unaligned =
				cdf_os_mem_alloc_consistent(scn->cdf_dev,
						(nentries *
						 sizeof(struct CE_src_desc) +
						 CE_DESC_RING_ALIGN),
						&base_addr, 0);
			if (src_ring->base_addr_owner_space_unaligned
					== NULL) {
				HIF_ERROR("%s: src ring has no DMA mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->base_addr_CE_space_unaligned = base_addr;

			if (src_ring->
			    base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN
							    - 1)) {
				src_ring->base_addr_CE_space =
					(src_ring->base_addr_CE_space_unaligned
					 + CE_DESC_RING_ALIGN -
					 1) & ~(CE_DESC_RING_ALIGN - 1);

				src_ring->base_addr_owner_space =
					(void
					 *)(((size_t) src_ring->
					     base_addr_owner_space_unaligned +
					     CE_DESC_RING_ALIGN -
					     1) & ~(CE_DESC_RING_ALIGN - 1));
			} else {
				src_ring->base_addr_CE_space =
					src_ring->base_addr_CE_space_unaligned;
				src_ring->base_addr_owner_space =
					src_ring->
					base_addr_owner_space_unaligned;
			}
			/*
			 * Also allocate a shadow src ring in
			 * regular mem to use for faster access.
			 */
			src_ring->shadow_base_unaligned =
				cdf_mem_malloc(nentries *
					       sizeof(struct CE_src_desc) +
					       CE_DESC_RING_ALIGN);
			if (src_ring->shadow_base_unaligned == NULL) {
				HIF_ERROR("%s: src ring no shadow_base mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->shadow_base = (struct CE_src_desc *)
				(((size_t) src_ring->shadow_base_unaligned +
				  CE_DESC_RING_ALIGN - 1) &
				 ~(CE_DESC_RING_ALIGN - 1));

			A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
			dma_addr = src_ring->base_addr_CE_space;
			CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
				 (uint32_t)(dma_addr & 0xFFFFFFFF));
#ifdef WLAN_ENABLE_QCA6180
			{
				uint32_t tmp;
				tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(
				   scn, ctrl_addr);
				tmp &= ~0x1F;
				dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
				CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
					 ctrl_addr, (uint32_t)dma_addr);
			}
#endif
			CE_SRC_RING_SZ_SET(scn, ctrl_addr, nentries);
			CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
#ifdef BIG_ENDIAN_HOST
			/* Enable source ring byte swap for big endian host */
			CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
			CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
			CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
			A_TARGET_ACCESS_END_RET_PTR(scn);
		}
	}
304
	/* destination ring setup */
	nentries = attr->dest_nentries;
	if (nentries) {
		struct CE_ring_state *dest_ring;
		unsigned CE_nbytes;
		char *ptr;
		uint64_t dma_addr;

		nentries = roundup_pwr2(nentries);
		if (CE_state->dest_ring) {
			CDF_ASSERT(CE_state->dest_ring->nentries == nentries);
		} else {
			CE_nbytes = sizeof(struct CE_ring_state)
				    + (nentries * sizeof(void *));
			ptr = cdf_mem_malloc(CE_nbytes);
			if (!ptr) {
				/* Cannot allocate the dest ring. If the
				 * CE_state or src ring was allocated
				 * locally, free them and return an error.
				 */
				HIF_ERROR("%s: dest ring has no mem",
					  __func__);
				if (malloc_src_ring) {
					cdf_mem_free(CE_state->src_ring);
					CE_state->src_ring = NULL;
					malloc_src_ring = false;
				}
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					scn->ce_id_to_state[CE_id] = NULL;
					cdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			}
			cdf_mem_zero(ptr, CE_nbytes);

			dest_ring = CE_state->dest_ring =
					    (struct CE_ring_state *)ptr;
			ptr += sizeof(struct CE_ring_state);
			dest_ring->nentries = nentries;
			dest_ring->nentries_mask = nentries - 1;
			A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
			dest_ring->sw_index =
				CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
			dest_ring->write_index =
				CE_DEST_RING_WRITE_IDX_GET(scn, ctrl_addr);
			A_TARGET_ACCESS_END_RET_PTR(scn);
			dest_ring->low_water_mark_nentries = 0;
			dest_ring->high_water_mark_nentries = nentries;
			dest_ring->per_transfer_context = (void **)ptr;

			/* Legacy platforms that do not support cache-
			 * coherent DMA are unsupported */
			dest_ring->base_addr_owner_space_unaligned =
				cdf_os_mem_alloc_consistent(scn->cdf_dev,
						(nentries *
						 sizeof(struct CE_dest_desc) +
						 CE_DESC_RING_ALIGN),
						&base_addr, 0);
			if (dest_ring->base_addr_owner_space_unaligned
					== NULL) {
				HIF_ERROR("%s: dest ring has no DMA mem",
					  __func__);
				goto error_no_dma_mem;
			}
			dest_ring->base_addr_CE_space_unaligned = base_addr;

			/* Zero the descriptor memory so stale data
			 * cannot crash the system during firmware
			 * download.
			 */
			cdf_mem_zero(dest_ring->base_addr_owner_space_unaligned,
				     nentries * sizeof(struct CE_dest_desc) +
				     CE_DESC_RING_ALIGN);

			if (dest_ring->
			    base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN -
							    1)) {

				dest_ring->base_addr_CE_space =
					(dest_ring->
					 base_addr_CE_space_unaligned +
					 CE_DESC_RING_ALIGN -
					 1) & ~(CE_DESC_RING_ALIGN - 1);

				dest_ring->base_addr_owner_space =
					(void
					 *)(((size_t) dest_ring->
					     base_addr_owner_space_unaligned +
					     CE_DESC_RING_ALIGN -
					     1) & ~(CE_DESC_RING_ALIGN - 1));
			} else {
				dest_ring->base_addr_CE_space =
					dest_ring->base_addr_CE_space_unaligned;
				dest_ring->base_addr_owner_space =
					dest_ring->
					base_addr_owner_space_unaligned;
			}

			A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
			dma_addr = dest_ring->base_addr_CE_space;
			CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
				 (uint32_t)(dma_addr & 0xFFFFFFFF));
#ifdef WLAN_ENABLE_QCA6180
			{
				uint32_t tmp;
				tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn,
						ctrl_addr);
				tmp &= ~0x1F;
				dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
				CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
						ctrl_addr, (uint32_t)dma_addr);
			}
#endif
			CE_DEST_RING_SZ_SET(scn, ctrl_addr, nentries);
#ifdef BIG_ENDIAN_HOST
			/* Enable Dest ring byte swap for big endian host */
			CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
			CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
			CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
			A_TARGET_ACCESS_END_RET_PTR(scn);

			/* epping: poll timer for CEs serviced by
			 * polling rather than by interrupts
			 */
			if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL)) {
				cdf_softirq_timer_init(scn->cdf_dev,
						       &CE_state->poll_timer,
						       ce_poll_timeout,
						       CE_state,
						       CDF_TIMER_TYPE_SW);
				CE_state->timer_inited = true;
				cdf_softirq_timer_mod(&CE_state->poll_timer,
						      CE_POLL_TIMEOUT);
			}
		}
	}
443
	/* Enable CE error interrupts */
	A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
	CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
	A_TARGET_ACCESS_END_RET_PTR(scn);

	return (struct CE_handle *)CE_state;

error_no_dma_mem:
	ce_fini((struct CE_handle *)CE_state);
	return NULL;
}
455
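/*
 * Usage sketch (illustrative only; values are hypothetical): callers
 * normally take the attributes from host_ce_config[] and initialize
 * each CE during startup, before interrupts are enabled:
 *
 *	struct CE_attr *attr = &host_ce_config[CE_id];
 *	struct CE_handle *ce_hdl = ce_init(scn, CE_id, attr);
 *
 *	if (!ce_hdl)
 *		goto err;
 *
 * On failure ce_init() returns NULL after freeing anything it
 * allocated itself. Calling it again with attr == NULL simply returns
 * the handle of the already-initialized CE.
 */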
#ifdef WLAN_FEATURE_FASTPATH
/**
 * ce_h2t_tx_ce_cleanup() - Placeholder function for H2T CE cleanup.
 * @ce_hdl: Copy engine handle
 *
 * No processing is required inside this function; using an assert,
 * it only verifies that the TX CE has been processed completely.
 *
 * This is called while dismantling CE structures. No other thread
 * should be using these structures while dismantling is occurring,
 * therefore no locking is needed.
 *
 * Return: none
 */
void
ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct hif_softc *sc = ce_state->scn;
	uint32_t sw_index, write_index;

	if (sc->fastpath_mode_on && (ce_state->id == CE_HTT_H2T_MSG)) {
		HIF_INFO("%s %d Fastpath mode ON, Cleaning up HTT Tx CE\n",
			 __func__, __LINE__);
		sw_index = src_ring->sw_index;
		write_index = src_ring->write_index;

		/* At this point Tx CE should be clean */
		cdf_assert_always(sw_index == write_index);
	}
}
#else
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
}
#endif /* WLAN_FEATURE_FASTPATH */
493
void ce_fini(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int CE_id = CE_state->id;
	struct hif_softc *scn = CE_state->scn;

	CE_state->state = CE_UNUSED;
	scn->ce_id_to_state[CE_id] = NULL;
	if (CE_state->src_ring) {
		/* Cleanup the HTT Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		if (CE_state->src_ring->shadow_base_unaligned)
			cdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
		if (CE_state->src_ring->base_addr_owner_space_unaligned)
			cdf_os_mem_free_consistent(scn->cdf_dev,
					   (CE_state->src_ring->nentries *
					    sizeof(struct CE_src_desc) +
					    CE_DESC_RING_ALIGN),
					   CE_state->src_ring->
					   base_addr_owner_space_unaligned,
					   CE_state->src_ring->
					   base_addr_CE_space, 0);
		cdf_mem_free(CE_state->src_ring);
	}
	if (CE_state->dest_ring) {
		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
			cdf_os_mem_free_consistent(scn->cdf_dev,
					   (CE_state->dest_ring->nentries *
					    sizeof(struct CE_dest_desc) +
					    CE_DESC_RING_ALIGN),
					   CE_state->dest_ring->
					   base_addr_owner_space_unaligned,
					   CE_state->dest_ring->
					   base_addr_CE_space, 0);
		cdf_mem_free(CE_state->dest_ring);

		/* epping */
		if (CE_state->timer_inited) {
			CE_state->timer_inited = false;
			cdf_softirq_timer_free(&CE_state->poll_timer);
		}
	}
	cdf_mem_free(CE_state);
}
539
void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	cdf_mem_zero(&hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
	cdf_mem_zero(&hif_state->msg_callbacks_current,
		     sizeof(hif_state->msg_callbacks_current));
}
549
/* Send the first nbytes bytes of the buffer */
CDF_STATUS
hif_send_head(struct hif_opaque_softc *hif_ctx,
	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
	      cdf_nbuf_t nbuf, unsigned int data_attr)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	int bytes = nbytes, nfrags = 0;
	struct ce_sendlist sendlist;
	int status, i = 0;
	unsigned int mux_id = 0;

	CDF_ASSERT(nbytes <= cdf_nbuf_len(nbuf));

	transfer_id =
		(mux_id & MUX_ID_MASK) |
		(transfer_id & TRANSACTION_ID_MASK);
	data_attr &= DESC_DATA_FLAG_MASK;
	/*
	 * The common case involves sending multiple fragments within a
	 * single download (the tx descriptor and the tx frame header).
	 * So, optimize for the case of multiple fragments by not even
	 * checking whether it's necessary to use a sendlist.
	 * The overhead of using a sendlist for a single buffer download
	 * is not a big deal, since it happens rarely (for WMI messages).
	 */
	ce_sendlist_init(&sendlist);
	do {
		cdf_dma_addr_t frag_paddr;
		int frag_bytes;

		frag_paddr = cdf_nbuf_get_frag_paddr(nbuf, nfrags);
		frag_bytes = cdf_nbuf_get_frag_len(nbuf, nfrags);
		/*
		 * Clear the packet offset for all but the first CE desc.
		 */
		if (i++ > 0)
			data_attr &= ~CDF_CE_TX_PKT_OFFSET_BIT_M;

		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
					     frag_bytes >
					     bytes ? bytes : frag_bytes,
					     cdf_nbuf_get_frag_is_wordstream
					     (nbuf,
					      nfrags) ? 0 :
					     CE_SEND_FLAG_SWAP_DISABLE,
					     data_attr);
		if (status != CDF_STATUS_SUCCESS) {
			HIF_ERROR("%s: error, frag_num %d larger than limit",
				  __func__, nfrags);
			return status;
		}
		bytes -= frag_bytes;
		nfrags++;
	} while (bytes > 0);

	/* Make sure we have resources to handle this request */
	cdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	if (pipe_info->num_sends_allowed < nfrags) {
		cdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
		return CDF_STATUS_E_RESOURCES;
	}
	pipe_info->num_sends_allowed -= nfrags;
	cdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);

	if (cdf_unlikely(ce_hdl == NULL)) {
		HIF_ERROR("%s: error CE handle is null", __func__);
		return A_ERROR;
	}

	NBUF_UPDATE_TX_PKT_COUNT(nbuf, NBUF_TX_PKT_HIF);
	DPTRACE(cdf_dp_trace(nbuf, CDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
			     (uint8_t *)(cdf_nbuf_data(nbuf)),
			     sizeof(cdf_nbuf_data(nbuf))));
	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
	CDF_ASSERT(status == CDF_STATUS_SUCCESS);

	return status;
}
633
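/*
 * Usage sketch (illustrative only): HTC typically downloads a single
 * nbuf whose fragments are the tx descriptor and the frame itself, so
 * a hypothetical caller sending total_len bytes on pipe 4 (HTT) is:
 *
 *	status = hif_send_head(hif_ctx, 4, transfer_id, total_len,
 *			       nbuf, data_attr);
 *	if (status != CDF_STATUS_SUCCESS)
 *		... not enough send slots free; back off and retry ...
 *
 * Each nbuf fragment consumes one sendlist entry and one unit of
 * num_sends_allowed; the nbuf is handed back through the registered
 * txCompletionHandler once the CE reports the send complete.
 */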
void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
			     int force)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance. If there seem to be plenty of
		 * resources left, then just wait, since checking involves
		 * reading a CE register, which is a relatively expensive
		 * operation.
		 */
		resources = hif_get_free_queue_number(hif_ctx, pipe);
		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config[pipe].src_nentries >> 1)) {
			return;
		}
	}
#ifdef ATH_11AC_TXCOMPACT
	ce_per_engine_servicereap(scn, pipe);
#else
	ce_per_engine_service(scn, pipe);
#endif
}
663
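/*
 * Worked example of the 50% heuristic above (hypothetical numbers):
 * with host_ce_config[pipe].src_nentries == 32 the threshold is 16,
 * so a non-forced call with 20 sends still allowed returns without
 * touching the CE, while one with only 10 allowed falls through and
 * reaps completions.
 */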
uint16_t
hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	uint16_t rv;

	cdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	rv = pipe_info->num_sends_allowed;
	cdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
	return rv;
}
676
/* Called by lower (CE) layer when a send to Target completes. */
void
hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, cdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int sw_index, unsigned int hw_index,
		     unsigned int toeplitz_hash_result)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	unsigned int sw_idx = sw_index, hw_idx = hw_index;
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	do {
		/*
		 * The upper layer callback will be triggered
		 * when the last fragment is completed.
		 */
		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
			if (scn->target_status
			    == OL_TRGET_STATUS_RESET)
				cdf_nbuf_free(transfer_context);
			else
				msg_callbacks->txCompletionHandler(
					msg_callbacks->Context,
					transfer_context, transfer_id,
					toeplitz_hash_result);
		}

		cdf_spin_lock(&pipe_info->completion_freeq_lock);
		pipe_info->num_sends_allowed++;
		cdf_spin_unlock(&pipe_info->completion_freeq_lock);
	} while (ce_completed_send_next(copyeng,
			&ce_context, &transfer_context,
			&CE_data, &nbytes, &transfer_id,
			&sw_idx, &hw_idx,
			&toeplitz_hash_result) == CDF_STATUS_SUCCESS);
}
718
/**
 * hif_ce_do_recv(): send message from copy engine to upper layers
 * @msg_callbacks: structure containing callback and callback context
 * @netbuf: skb containing message
 * @nbytes: number of bytes in the message
 * @pipe_info: used for the pipe_number info
 *
 * Checks the packet length, configures the length in the netbuf,
 * and calls the upper layer callback.
 *
 * return: None
 */
static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
		cdf_nbuf_t netbuf, int nbytes,
		struct HIF_CE_pipe_info *pipe_info) {
	if (nbytes <= pipe_info->buf_sz) {
		cdf_nbuf_set_pktlen(netbuf, nbytes);
		msg_callbacks->
			rxCompletionHandler(msg_callbacks->Context,
					    netbuf, pipe_info->pipe_num);
	} else {
		HIF_ERROR("%s: Invalid Rx msg buf:%p nbytes:%d",
			  __func__, netbuf, nbytes);
		cdf_nbuf_free(netbuf);
	}
}
745
/* Called by lower (CE) layer when data is received from the Target. */
void
hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, cdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int flags)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct CE_state *ce_state = (struct CE_state *) copyeng;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(hif_state);
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;
	uint32_t count;

	do {
		hif_pm_runtime_mark_last_busy(hif_sc->dev);
		cdf_nbuf_unmap_single(scn->cdf_dev,
				      (cdf_nbuf_t) transfer_context,
				      CDF_DMA_FROM_DEVICE);

		atomic_inc(&pipe_info->recv_bufs_needed);
		hif_post_recv_buffers_for_pipe(pipe_info);
		if (scn->target_status == OL_TRGET_STATUS_RESET)
			cdf_nbuf_free(transfer_context);
		else
			hif_ce_do_recv(msg_callbacks, transfer_context,
				       nbytes, pipe_info);

		/* Set the force_break flag if the number of receives
		 * reaches MAX_NUM_OF_RECEIVES */
		ce_state->receive_count++;
		count = ce_state->receive_count;
		if (cdf_unlikely(hif_max_num_receives_reached(scn, count))) {
			ce_state->force_break = 1;
			break;
		}
	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
					&CE_data, &nbytes, &transfer_id,
					&flags) == CDF_STATUS_SUCCESS);
}
790
/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */

void
hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
	      struct hif_msg_callbacks *callbacks)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	spin_lock_init(&pcie_access_log_lock);
#endif
	/* Save callbacks for later installation */
	cdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
		     sizeof(hif_state->msg_callbacks_pending));
}
807
int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
{
	struct CE_handle *ce_diag = hif_state->ce_diag;
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	struct hif_msg_callbacks *hif_msg_callbacks =
		&hif_state->msg_callbacks_current;

	/* daemonize("hif_compl_thread"); */

	if (scn->ce_count == 0) {
		HIF_ERROR("%s: Invalid ce_count\n", __func__);
		return -EINVAL;
	}

	if (!hif_msg_callbacks ||
	    !hif_msg_callbacks->rxCompletionHandler ||
	    !hif_msg_callbacks->txCompletionHandler) {
		HIF_ERROR("%s: no completion handler registered", __func__);
		return -EFAULT;
	}

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr attr;
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl == ce_diag) {
			continue; /* Handle Diagnostic CE specially */
		}
		attr = host_ce_config[pipe_num];
		if (attr.src_nentries) {
			/* pipe used to send to target */
			HIF_INFO_MED("%s: pipe_num:%d pipe_info:0x%p",
				     __func__, pipe_num, pipe_info);
			ce_send_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_send_done, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
			pipe_info->num_sends_allowed = attr.src_nentries - 1;
		}
		if (attr.dest_nentries) {
			/* pipe used to receive from target */
			ce_recv_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_recv_data, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
		}

		if (attr.src_nentries)
			cdf_spinlock_init(&pipe_info->completion_freeq_lock);
	}

	A_TARGET_ACCESS_UNLIKELY(scn);
	return 0;
}
863
/*
 * Install pending msg callbacks.
 *
 * TBDXXX: This hack is needed because upper layers install msg callbacks
 * for use with HTC before BMI is done; yet this HIF implementation
 * needs to continue to use BMI msg callbacks. Really, upper layers
 * should not register HTC callbacks until AFTER BMI phase.
 */
static void hif_msg_callbacks_install(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	cdf_mem_copy(&hif_state->msg_callbacks_current,
		     &hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
}
880
void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
			  uint8_t *DLPipe)
{
	int ul_is_polled, dl_is_polled;

	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
}
889
/**
 * hif_dump_pipe_debug_count() - Log error count
 * @scn: hif_softc pointer.
 *
 * Output the pipe error counts of each pipe to log file
 *
 * Return: N/A
 */
void hif_dump_pipe_debug_count(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num;

	if (hif_state == NULL) {
		HIF_ERROR("%s hif_state is NULL", __func__);
		return;
	}
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];

		if (pipe_info->nbuf_alloc_err_count > 0 ||
		    pipe_info->nbuf_dma_err_count > 0 ||
		    pipe_info->nbuf_ce_enqueue_err_count)
			HIF_ERROR(
				"%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_alloc_err_count,
				pipe_info->nbuf_dma_err_count,
				pipe_info->nbuf_ce_enqueue_err_count);
	}
}
924
static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	cdf_size_t buf_sz;
	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
	CDF_STATUS ret;
	uint32_t bufs_posted = 0;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return 0;
	}

	ce_hdl = pipe_info->ce_hdl;

	cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
		cdf_dma_addr_t CE_data;	/* CE space buffer address */
		cdf_nbuf_t nbuf;
		int status;

		atomic_dec(&pipe_info->recv_bufs_needed);
		cdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

		nbuf = cdf_nbuf_alloc(scn->cdf_dev, buf_sz, 0, 4, false);
		if (!nbuf) {
			cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
			pipe_info->nbuf_alloc_err_count++;
			cdf_spin_unlock_bh(
				&pipe_info->recv_bufs_needed_lock);
			HIF_ERROR(
				"%s buf alloc error [%d] needed %d, nbuf_alloc_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_alloc_err_count);
			atomic_inc(&pipe_info->recv_bufs_needed);
			return 1;
		}

		/*
		 * cdf_nbuf_peek_header(nbuf, &data, &unused);
		 * CE_data = dma_map_single(dev, data, buf_sz, );
		 * DMA_FROM_DEVICE);
		 */
		ret =
			cdf_nbuf_map_single(scn->cdf_dev, nbuf,
					    CDF_DMA_FROM_DEVICE);

		if (unlikely(ret != CDF_STATUS_SUCCESS)) {
			cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
			pipe_info->nbuf_dma_err_count++;
			cdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
			HIF_ERROR(
				"%s buf map error [%d] needed %d, nbuf_dma_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_dma_err_count);
			cdf_nbuf_free(nbuf);
			atomic_inc(&pipe_info->recv_bufs_needed);
			return 1;
		}

		CE_data = cdf_nbuf_get_frag_paddr(nbuf, 0);

		cdf_os_mem_dma_sync_single_for_device(scn->cdf_dev, CE_data,
					       buf_sz, DMA_FROM_DEVICE);
		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
		CDF_ASSERT(status == CDF_STATUS_SUCCESS);
		if (status != EOK) {
			cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
			pipe_info->nbuf_ce_enqueue_err_count++;
			cdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
			HIF_ERROR(
				"%s buf enqueue error [%d] needed %d, nbuf_ce_enqueue_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_ce_enqueue_err_count);
			atomic_inc(&pipe_info->recv_bufs_needed);
			cdf_nbuf_free(nbuf);
			return 1;
		}

		cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
		bufs_posted++;
	}
	pipe_info->nbuf_alloc_err_count =
		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
	pipe_info->nbuf_dma_err_count =
		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
	pipe_info->nbuf_ce_enqueue_err_count =
		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
		pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;

	cdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

	return 0;
}
1025
/*
 * Try to post all desired receive buffers for all pipes.
 * Returns 0 if all desired buffers are posted,
 * non-zero if we were unable to completely
 * replenish receive buffers.
 */
static int hif_post_recv_buffers(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num, rv = 0;

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (hif_post_recv_buffers_for_pipe(pipe_info)) {
			rv = 1;
			goto done;
		}
	}

done:
	A_TARGET_ACCESS_UNLIKELY(scn);

	return rv;
}
1053
CDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_msg_callbacks_install(scn);

	if (hif_completion_thread_startup(hif_state))
		return CDF_STATUS_E_FAILURE;

	/* Post buffers once to start things off. */
	(void)hif_post_recv_buffers(scn);

	hif_state->started = true;

	return CDF_STATUS_SUCCESS;
}
1071
#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_enable_fastpath() - Update that we have enabled fastpath mode
 * @hif_ctx: HIF context
 *
 * For use in data path
 *
 * Return: void
 */
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	HIF_INFO("Enabling fastpath mode\n");
	scn->fastpath_mode_on = 1;
}

/**
 * hif_is_fastpath_mode_enabled() - API to query if fastpath mode is enabled
 * @hif_ctx: HIF Context
 *
 * For use in data path to skip HTC
 *
 * Return: bool
 */
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->fastpath_mode_on;
}

/**
 * hif_get_ce_handle() - API to get CE handle for fastpath mode
 * @hif_ctx: HIF Context
 * @id: CopyEngine Id
 *
 * API to return CE handle for fastpath mode
 *
 * Return: void *
 */
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->ce_id_to_state[id];
}
#endif /* WLAN_FEATURE_FASTPATH */
1120
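/*
 * Usage sketch (illustrative only, WLAN_FEATURE_FASTPATH builds): a
 * data-path client that wants to bypass HTC checks the mode once and
 * caches the raw CE handle, e.g. for the HTT H2T engine:
 *
 *	if (hif_is_fastpath_mode_enabled(hif_ctx))
 *		ce_tx_hdl = hif_get_ce_handle(hif_ctx, CE_HTT_H2T_MSG);
 *
 * The pointer returned is the CE_state for that copy engine id and
 * should be treated as opaque by the caller.
 */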
void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct hif_softc *scn;
	struct CE_handle *ce_hdl;
	uint32_t buf_sz;
	struct HIF_CE_state *hif_state;
	cdf_nbuf_t netbuf;
	cdf_dma_addr_t CE_data;
	void *per_CE_context;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started) {
		return;
	}

	scn = HIF_GET_SOFTC(hif_state);
	ce_hdl = pipe_info->ce_hdl;

	if (scn->cdf_dev == NULL) {
		return;
	}
	while (ce_revoke_recv_next
		       (ce_hdl, &per_CE_context, (void **)&netbuf,
			&CE_data) == CDF_STATUS_SUCCESS) {
		cdf_nbuf_unmap_single(scn->cdf_dev, netbuf,
				      CDF_DMA_FROM_DEVICE);
		cdf_nbuf_free(netbuf);
	}
}
1156
void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	struct HIF_CE_state *hif_state;
	struct hif_softc *scn;
	cdf_nbuf_t netbuf;
	void *per_CE_context;
	cdf_dma_addr_t CE_data;
	unsigned int nbytes;
	unsigned int id;
	uint32_t buf_sz;
	uint32_t toeplitz_hash_result;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started) {
		return;
	}

	scn = HIF_GET_SOFTC(hif_state);

	ce_hdl = pipe_info->ce_hdl;

	while (ce_cancel_send_next
		       (ce_hdl, &per_CE_context,
		       (void **)&netbuf, &CE_data, &nbytes,
		       &id, &toeplitz_hash_result) == CDF_STATUS_SUCCESS) {
		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
			/*
			 * Packets enqueued by htt_h2t_ver_req_msg() and
			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
			 * freed in htt_htc_misc_pkt_pool_free() in
			 * wlantl_close(), so do not free them here again;
			 * they are identified by the endpoint on which
			 * they are queued.
			 */
			if (id == scn->htc_endpoint)
				return;
			/* Indicate the completion to higher
			 * layers to free the buffer */
			hif_state->msg_callbacks_current.
				txCompletionHandler(hif_state->
					msg_callbacks_current.Context,
					netbuf, id, toeplitz_hash_result);
		}
	}
}
1209
/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
{
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		hif_recv_buffer_cleanup_on_pipe(pipe_info);
		hif_send_buffer_cleanup_on_pipe(pipe_info);
	}
}
1231
void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_buffer_cleanup(hif_state);
}
1239
void hif_stop(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	int pipe_num;

	scn->hif_init_done = false;

	/*
	 * At this point, asynchronous threads are stopped,
	 * The Target should not DMA nor interrupt, Host code may
	 * not initiate anything more. So we just need to clean
	 * up Host-side state.
	 */

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	hif_buffer_cleanup(hif_state);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}

	if (hif_state->sleep_timer_init) {
		cdf_softirq_timer_cancel(&hif_state->sleep_timer);
		cdf_softirq_timer_free(&hif_state->sleep_timer);
		hif_state->sleep_timer_init = false;
	}

	hif_state->started = false;
}
1281
#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40


static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
1297
1298
1299
/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
 * mapping of HTC services to HIF pipes.
 */
/*
 * This authoritative table defines Copy Engine configuration and the mapping
 * of services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
 ============================================================================
 Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
            |                      |      | ctio | Size     | Frequency
            |                      |      | n    |          |
 ============================================================================
 tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
 descriptor |                      |      |      | O(100B)  | and regular
 download   |                      |      |      |          |
 ----------------------------------------------------------------------------
 rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
 indication |                      |      |      | O(10B)   | regular
 upload     |                      |      |      |          |
 ----------------------------------------------------------------------------
 MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
 upload     |                      |      |      | O(1000B) | (frequent
 e.g. noise |                      |      |      |          | during IP1.0
 packets    |                      |      |      |          | testing)
 ----------------------------------------------------------------------------
 MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
 download   |                      |      |      | O(1000B) | (frequent
 e.g.       |                      |      |      |          | during IP1.0
 misdirecte |                      |      |      |          | testing)
 d EAPOL    |                      |      |      |          |
 packets    |                      |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
            | DATA_VO (uplink)     |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
            | DATA_VO (downlink)   |      |      |          |
 ----------------------------------------------------------------------------
 WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
            |                      |      |      | O(100B)  |
 ----------------------------------------------------------------------------
 WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
 messages   | (downlink)           |      |      | O(100B)  |
            |                      |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
            | HTC_RAW_STREAMS      |      |      |          |
            | (uplink)             |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
            | HTC_RAW_STREAMS      |      |      |          |
            | (downlink)           |      |      |          |
 ----------------------------------------------------------------------------
 diag       | none (raw CE)        | CE 7 | t<>h |    4     | Diag Window
            |                      |      |      |          | infrequent
 ============================================================================
 */
1365
/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		5,
	},
	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};
1465
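/*
 * Illustrative sketch of how such a map is consumed (a simplified
 * walk, not the actual hif_map_service_to_pipe() implementation),
 * assuming the struct service_to_pipe fields are named service_id,
 * pipedir and pipenum as used below. WMI_CONTROL_SVC, for example,
 * resolves to UL (host->target) pipe 3 and DL (target->host) pipe 2:
 *
 *	struct service_to_pipe *entry = target_service_to_ce_map_wlan;
 *
 *	for (; entry->service_id != 0; entry++) {
 *		if (entry->service_id != svc_id)
 *			continue;
 *		if (entry->pipedir == PIPEDIR_OUT)
 *			*ul_pipe = entry->pipenum;
 *		else if (entry->pipedir == PIPEDIR_IN)
 *			*dl_pipe = entry->pipenum;
 *	}
 *
 * The zero-filled terminator row ends the scan.
 */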
static struct service_to_pipe *target_service_to_ce_map =
	target_service_to_ce_map_wlan;
static int target_service_to_ce_map_sz = sizeof(target_service_to_ce_map_wlan);

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);
1472
static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
	{0, 0, 0,},             /* Must be last */
};
1492
#ifdef HIF_PCI
/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
void hif_wake_target_cpu(struct hif_softc *scn)
{
	CDF_STATUS rv;
	uint32_t core_ctrl;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

	rv = hif_diag_read_access(hif_hdl,
				  SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
				  &core_ctrl);
	CDF_ASSERT(rv == CDF_STATUS_SUCCESS);
	/* A_INUM_FIRMWARE interrupt to Target CPU */
	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

	rv = hif_diag_write_access(hif_hdl,
				   SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
				   core_ctrl);
	CDF_ASSERT(rv == CDF_STATUS_SUCCESS);
}
#endif

static void hif_sleep_entry(void *arg)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	uint32_t idle_ms;

	if (scn->recovery)
		return;

	if (hif_is_driver_unloading(scn))
		return;

	cdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
	if (hif_state->verified_awake == false) {
		idle_ms = cdf_system_ticks_to_msecs(cdf_system_ticks()
						    - hif_state->sleep_ticks);
		if (idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
			if (!cdf_atomic_read(&scn->link_suspended)) {
				soc_wake_reset(scn);
				hif_state->fake_sleep = false;
			}
		} else {
			cdf_softirq_timer_cancel(&hif_state->sleep_timer);
			cdf_softirq_timer_start(&hif_state->sleep_timer,
				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
		}
	} else {
		cdf_softirq_timer_cancel(&hif_state->sleep_timer);
		cdf_softirq_timer_start(&hif_state->sleep_timer,
			HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
	}
	cdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
}

#define HIF_HIA_MAX_POLL_LOOP		1000000
#define HIF_HIA_POLLING_DELAY_MS	10

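/**
 * hif_set_hia() - fill out the target's host interest area
 * @scn: hif context
 *
 * Supplies the target-side CE configuration through the host
 * interest area: writes the pipe configuration and the
 * service-to-pipe map into target memory, programs the PCIe config
 * flags and (on pre-3.0 targets) the early-allocation IRAM bank
 * count, then sets HI_OPTION_EARLY_CFG_DONE and wakes the target
 * CPU so firmware initialization can proceed.
 *
 * Return: 0 (CDF_STATUS_SUCCESS) on success, non-zero error code
 * on failure.
 */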
#ifndef HIF_PCI
int hif_set_hia(struct hif_softc *scn)
{
	return 0;
}
#else
int hif_set_hia(struct hif_softc *scn)
{
	CDF_STATUS rv;
	uint32_t interconnect_targ_addr = 0;
	uint32_t pcie_state_targ_addr = 0;
	uint32_t pipe_cfg_targ_addr = 0;
	uint32_t svc_to_pipe_map = 0;
	uint32_t pcie_config_flags = 0;
	uint32_t flag2_value = 0;
	uint32_t flag2_targ_addr = 0;
#ifdef QCA_WIFI_3_0
	uint32_t host_interest_area = 0;
	uint8_t i;
#else
	uint32_t ealloc_value = 0;
	uint32_t ealloc_targ_addr = 0;
	uint8_t banks_switched = 1;
	uint32_t chip_id;
#endif
	uint32_t pipe_cfg_addr;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
	uint32_t target_type = tgt_info->target_type;

	HIF_TRACE("%s: E", __func__);

	if (ADRASTEA_BU)
		return CDF_STATUS_SUCCESS;

#ifdef QCA_WIFI_3_0
	i = 0;
	while (i < HIF_HIA_MAX_POLL_LOOP) {
		host_interest_area = hif_read32_mb(scn->mem +
						A_SOC_CORE_SCRATCH_0_ADDRESS);
		if ((host_interest_area & 0x01) == 0) {
			cdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
			host_interest_area = 0;
			i++;
			/* periodically log that we are still polling */
			if (i % 1000 == 0)
				HIF_ERROR("%s: hia poll pending (%d)",
					  __func__, i);
		} else {
			host_interest_area &= (~0x01);
			hif_write32_mb(scn->mem + 0x113014, 0);
			break;
		}
	}

	if (i >= HIF_HIA_MAX_POLL_LOOP) {
		HIF_ERROR("%s: hia polling timeout", __func__);
		return -EIO;
	}

	if (host_interest_area == 0) {
		HIF_ERROR("%s: host_interest_area = 0", __func__);
		return -EIO;
	}

	interconnect_targ_addr = host_interest_area +
			offsetof(struct host_interest_area_t,
				 hi_interconnect_state);

	flag2_targ_addr = host_interest_area +
			offsetof(struct host_interest_area_t, hi_option_flag2);

#else
	interconnect_targ_addr = hif_hia_item_address(target_type,
		offsetof(struct host_interest_s, hi_interconnect_state));
	ealloc_targ_addr = hif_hia_item_address(target_type,
		offsetof(struct host_interest_s, hi_early_alloc));
	flag2_targ_addr = hif_hia_item_address(target_type,
		offsetof(struct host_interest_s, hi_option_flag2));
#endif
	/* Supply Target-side CE configuration */
	rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
				  &pcie_state_targ_addr);
	if (rv != CDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
			  __func__, interconnect_targ_addr, rv);
		goto done;
	}
	if (pcie_state_targ_addr == 0) {
		rv = CDF_STATUS_E_FAILURE;
		HIF_ERROR("%s: pcie state addr is 0", __func__);
		goto done;
	}
	pipe_cfg_addr = pcie_state_targ_addr +
			offsetof(struct pcie_state_s, pipe_cfg_addr);
	rv = hif_diag_read_access(hif_hdl,
				  pipe_cfg_addr,
				  &pipe_cfg_targ_addr);
	if (rv != CDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
			  __func__, pipe_cfg_addr, rv);
		goto done;
	}
	if (pipe_cfg_targ_addr == 0) {
		rv = CDF_STATUS_E_FAILURE;
		HIF_ERROR("%s: pipe cfg addr is 0", __func__);
		goto done;
	}

	rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
				(uint8_t *) target_ce_config,
				target_ce_config_sz);

	if (rv != CDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
		goto done;
	}

	rv = hif_diag_read_access(hif_hdl,
				  pcie_state_targ_addr +
				  offsetof(struct pcie_state_s,
					   svc_to_pipe_map),
				  &svc_to_pipe_map);
	if (rv != CDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
		goto done;
	}
	if (svc_to_pipe_map == 0) {
		rv = CDF_STATUS_E_FAILURE;
		HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
		goto done;
	}

	rv = hif_diag_write_mem(hif_hdl,
				svc_to_pipe_map,
				(uint8_t *) target_service_to_ce_map,
				target_service_to_ce_map_sz);
	if (rv != CDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
		goto done;
	}

	rv = hif_diag_read_access(hif_hdl,
				  pcie_state_targ_addr +
				  offsetof(struct pcie_state_s,
					   config_flags),
				  &pcie_config_flags);
	if (rv != CDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
		goto done;
	}
#if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
	pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
#else
	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
#endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
	pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
#if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
	pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
#endif
	rv = hif_diag_write_mem(hif_hdl,
				pcie_state_targ_addr +
				offsetof(struct pcie_state_s,
					 config_flags),
				(uint8_t *) &pcie_config_flags,
				sizeof(pcie_config_flags));
	if (rv != CDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
		goto done;
	}

#ifndef QCA_WIFI_3_0
	/* configure early allocation */
	ealloc_targ_addr = hif_hia_item_address(target_type,
		offsetof(struct host_interest_s, hi_early_alloc));

	rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
				  &ealloc_value);
	if (rv != CDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
		goto done;
	}

	/* 1 bank is switched to IRAM, except ROME 1.0 */
	ealloc_value |=
		((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
		 HI_EARLY_ALLOC_MAGIC_MASK);

	rv = hif_diag_read_access(hif_hdl,
				  CHIP_ID_ADDRESS |
				  RTC_SOC_BASE_ADDRESS, &chip_id);
	if (rv != CDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
		goto done;
	}
	if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
		tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
		switch (CHIP_ID_REVISION_GET(chip_id)) {
		case 0x2:	/* ROME 1.3 */
			/* 2 banks are switched to IRAM */
			banks_switched = 2;
			break;
		case 0x4:	/* ROME 2.1 */
		case 0x5:	/* ROME 2.2 */
			banks_switched = 6;
			break;
		case 0x8:	/* ROME 3.0 */
		case 0x9:	/* ROME 3.1 */
		case 0xA:	/* ROME 3.2 */
			banks_switched = 9;
			break;
		case 0x0:	/* ROME 1.0 */
		case 0x1:	/* ROME 1.1 */
		default:
			/* 3 banks are switched to IRAM */
			banks_switched = 3;
			break;
		}
	}

	ealloc_value |=
		((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
		 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	rv = hif_diag_write_access(hif_hdl,
				   ealloc_targ_addr,
				   ealloc_value);
	if (rv != CDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
		goto done;
	}
#endif

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = hif_hia_item_address(target_type,
		offsetof(struct host_interest_s, hi_option_flag2));

	rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
				  &flag2_value);
	if (rv != CDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: get option val (%d)", __func__, rv);
		goto done;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
	rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
				   flag2_value);
	if (rv != CDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: set option val (%d)", __func__, rv);
		goto done;
	}

	hif_wake_target_cpu(scn);

done:

	return rv;
}
#endif

/**
 * hif_wlan_enable(): call the platform driver to enable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode and CE configuration to the
 * platform driver to enable wlan.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
static int hif_wlan_enable(struct hif_softc *scn)
{
	struct icnss_wlan_enable_cfg cfg;
	enum icnss_driver_mode mode;
	uint32_t con_mode = hif_get_conparam(scn);

	cfg.num_ce_tgt_cfg = target_ce_config_sz /
				sizeof(struct CE_pipe_config);
	cfg.ce_tgt_cfg = (struct ce_tgt_pipe_cfg *)target_ce_config;
	cfg.num_ce_svc_pipe_cfg = target_service_to_ce_map_sz /
				sizeof(struct service_to_pipe);
	cfg.ce_svc_cfg = (struct ce_svc_pipe_cfg *)target_service_to_ce_map;
	cfg.num_shadow_reg_cfg = shadow_cfg_sz / sizeof(struct shadow_reg_cfg);
	cfg.shadow_reg_cfg =
		(struct icnss_shadow_reg_cfg *)target_shadow_reg_cfg;

	if (CDF_GLOBAL_FTM_MODE == con_mode)
		mode = ICNSS_FTM;
	else if (WLAN_IS_EPPING_ENABLED(con_mode))
		mode = ICNSS_EPPING;
	else
		mode = ICNSS_MISSION;

	return icnss_wlan_enable(&cfg, mode, QWLAN_VERSIONSTR);
}

/*
 * Called from PCI layer whenever a new PCI device is probed.
 * Initializes per-device HIF state and notifies the main
 * driver that a new HIF device is present.
 */
int hif_config_ce(struct hif_softc *scn)
{
	struct HIF_CE_pipe_info *pipe_info;
	int pipe_num;
	uint32_t mode = hif_get_conparam(scn);
#ifdef ADRASTEA_SHADOW_REGISTERS
	int i;
#endif
	CDF_STATUS rv = CDF_STATUS_SUCCESS;
	int ret;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct icnss_soc_info soc_info;
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);

	/* if epping is enabled we need to use the epping configuration. */
	if (WLAN_IS_EPPING_ENABLED(mode)) {
		if (WLAN_IS_EPPING_IRQ(mode))
			host_ce_config = host_ce_config_wlan_epping_irq;
		else
			host_ce_config = host_ce_config_wlan_epping_poll;
		target_ce_config = target_ce_config_wlan_epping;
		target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
		target_service_to_ce_map =
			target_service_to_ce_map_wlan_epping;
		target_service_to_ce_map_sz =
			sizeof(target_service_to_ce_map_wlan_epping);
	}

	ret = hif_wlan_enable(scn);

	if (ret) {
		HIF_ERROR("%s: hif_wlan_enable error = %d", __func__, ret);
		return CDF_STATUS_NOT_INITIALIZED;
	}

	scn->notice_send = true;

	cdf_mem_zero(&soc_info, sizeof(soc_info));
	ret = icnss_get_soc_info(scn, &soc_info);
	if (ret < 0) {
		HIF_ERROR("%s: icnss_get_soc_info error = %d", __func__, ret);
		return CDF_STATUS_NOT_INITIALIZED;
	}

	scn->mem = soc_info.v_addr;
	scn->mem_pa = soc_info.p_addr;
	tgt_info->soc_version = soc_info.version;

	cdf_spinlock_init(&hif_state->keep_awake_lock);

	hif_state->keep_awake_count = 0;

	hif_state->fake_sleep = false;
	hif_state->sleep_ticks = 0;
	cdf_softirq_timer_init(NULL, &hif_state->sleep_timer,
			       hif_sleep_entry, (void *)hif_state,
			       CDF_TIMER_TYPE_WAKE_APPS);
	hif_state->sleep_timer_init = true;
	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
#ifdef HIF_PCI
#if CONFIG_ATH_PCIE_MAX_PERF || CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD
	/* Force AWAKE forever/till the driver is loaded */
	if (hif_target_sleep_state_adjust(scn, false, true) < 0)
		return -EACCES;
#endif
#endif

	hif_config_rri_on_ddr(scn);

	/* During CE initialization */
	scn->ce_count = HOST_CE_COUNT;
	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr *attr;

		pipe_info = &hif_state->pipe_info[pipe_num];
		pipe_info->pipe_num = pipe_num;
		pipe_info->HIF_CE_state = hif_state;
		attr = &host_ce_config[pipe_num];
		pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
		CDF_ASSERT(pipe_info->ce_hdl != NULL);
		if (pipe_info->ce_hdl == NULL) {
			rv = CDF_STATUS_E_FAILURE;
			A_TARGET_ACCESS_UNLIKELY(scn);
			goto err;
		}

		if (pipe_num == DIAG_CE_ID) {
			/* Reserve the ultimate CE for
			 * Diagnostic Window support */
			hif_state->ce_diag =
				hif_state->pipe_info[scn->ce_count - 1].ce_hdl;
			continue;
		}

		pipe_info->buf_sz = (cdf_size_t) (attr->src_sz_max);
		cdf_spinlock_init(&pipe_info->recv_bufs_needed_lock);
		if (attr->dest_nentries > 0) {
			atomic_set(&pipe_info->recv_bufs_needed,
				   init_buffer_count(attr->dest_nentries - 1));
		} else {
			atomic_set(&pipe_info->recv_bufs_needed, 0);
		}
		ce_tasklet_init(hif_state, (1 << pipe_num));
		ce_register_irq(hif_state, (1 << pipe_num));
		scn->request_irq_done = true;
	}

	if (athdiag_procfs_init(scn) != 0) {
		A_TARGET_ACCESS_UNLIKELY(scn);
		goto err;
	}
	scn->athdiag_procfs_inited = true;

	/*
	 * Initially, establish CE completion handlers for use with BMI.
	 * These are overwritten with generic handlers after we exit BMI phase.
	 */
	pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_TARG];
#ifdef HIF_PCI
	ce_send_cb_register(pipe_info->ce_hdl, hif_bmi_send_done,
			    pipe_info, 0);
#ifndef BMI_RSP_POLLING
	pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_HOST];
	ce_recv_cb_register(pipe_info->ce_hdl, hif_bmi_recv_data,
			    pipe_info, 0);
#endif
#endif
	HIF_INFO_MED("%s: ce_init done", __func__);

	rv = hif_set_hia(scn);

	HIF_INFO_MED("%s: hif_set_hia done", __func__);

	A_TARGET_ACCESS_UNLIKELY(scn);

	if (rv != CDF_STATUS_SUCCESS)
		goto err;
	else
		init_tasklet_workers(hif_hdl);

	HIF_TRACE("%s: X, ret = %d\n", __func__, rv);

#ifdef ADRASTEA_SHADOW_REGISTERS
	HIF_ERROR("Using Shadow Registers instead of CE Registers\n");
	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
		HIF_ERROR("%s Shadow Register%d is mapped to address %x\n",
			  __func__, i,
			  (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
	}
#endif

	/* 0 on success, non-zero on failure */
	return rv != CDF_STATUS_SUCCESS;

err:
	/* Failure, so clean up */
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ce_unregister_irq(hif_state, (1 << pipe_num));
			scn->request_irq_done = false;
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}
	if (hif_state->sleep_timer_init) {
		cdf_softirq_timer_cancel(&hif_state->sleep_timer);
		cdf_softirq_timer_free(&hif_state->sleep_timer);
		hif_state->sleep_timer_init = false;
	}

	athdiag_procfs_remove();
	scn->athdiag_procfs_inited = false;
	HIF_TRACE("%s: X, ret = %d\n", __func__, rv);
	return 1;	/* non-zero indicates failure */
}
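
/*
 * Illustrative sketch (not part of the driver): hif_config_ce() is
 * meant to run once per device from the bus probe path, with a zero
 * return indicating success. A hypothetical caller:
 *
 *	struct hif_softc *scn = ...;	// from bus-layer probe
 *
 *	if (hif_config_ce(scn)) {
 *		// CE setup failed; abort the probe
 *		return -EIO;
 *	}
 */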

#ifdef IPA_OFFLOAD
/**
 * hif_ipa_get_ce_resource() - get uc resource on hif
 * @hif_ctx: bus context
 * @ce_sr_base_paddr: copyengine source ring base physical address
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * When the IPA micro controller data path offload feature is enabled,
 * HIF releases the copy engine resource information to the IPA uC,
 * which then accesses the hardware using the released information.
 *
 * Return: None
 */
void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
			     cdf_dma_addr_t *ce_sr_base_paddr,
			     uint32_t *ce_sr_ring_size,
			     cdf_dma_addr_t *ce_reg_paddr)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;

	ce_ipa_get_resource(ce_hdl, ce_sr_base_paddr, ce_sr_ring_size,
			    ce_reg_paddr);
}
#endif /* IPA_OFFLOAD */
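
/*
 * Illustrative sketch (not part of the driver): an IPA-aware caller
 * might fetch the CE resources like this before handing them to the
 * IPA micro controller. Variable names here are hypothetical.
 *
 *	cdf_dma_addr_t sr_base, reg_base;
 *	uint32_t sr_size;
 *
 *	hif_ipa_get_ce_resource(hif_ctx, &sr_base, &sr_size, &reg_base);
 *	// sr_base/sr_size describe the source ring of the CE assigned
 *	// to the IPA uC (HIF_PCI_IPA_UC_ASSIGNED_CE); reg_base points
 *	// at that CE's registers
 */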

#ifdef ADRASTEA_SHADOW_REGISTERS

/*
 * Current shadow register config
 *
 * -----------------------------------------------------------
 *  Shadow Register  |  CE  | src/dst write index
 * -----------------------------------------------------------
 *         0         |  0   | src
 *         1         |  No Config - Doesn't point to anything
 *         2         |  No Config - Doesn't point to anything
 *         3         |  3   | src
 *         4         |  4   | src
 *         5         |  5   | src
 *         6         |  No Config - Doesn't point to anything
 *         7         |  7   | src
 *         8         |  No Config - Doesn't point to anything
 *         9         |  No Config - Doesn't point to anything
 *        10         |  No Config - Doesn't point to anything
 *        11         |  No Config - Doesn't point to anything
 * -----------------------------------------------------------
 *        12         |  No Config - Doesn't point to anything
 *        13         |  1   | dst
 *        14         |  2   | dst
 *        15         |  No Config - Doesn't point to anything
 *        16         |  No Config - Doesn't point to anything
 *        17         |  No Config - Doesn't point to anything
 *        18         |  No Config - Doesn't point to anything
 *        19         |  7   | dst
 *        20         |  8   | dst
 *        21         |  No Config - Doesn't point to anything
 *        22         |  No Config - Doesn't point to anything
 *        23         |  No Config - Doesn't point to anything
 * -----------------------------------------------------------
 *
 * ToDo - Move the shadow register config to the following layout in
 * the future; it frees up a contiguous block of shadow registers
 * towards the end that can be used for other purposes.
 *
 * -----------------------------------------------------------
 *  Shadow Register  |  CE  | src/dst write index
 * -----------------------------------------------------------
 *         0         |  0   | src
 *         1         |  3   | src
 *         2         |  4   | src
 *         3         |  5   | src
 *         4         |  7   | src
 * -----------------------------------------------------------
 *         5         |  1   | dst
 *         6         |  2   | dst
 *         7         |  7   | dst
 *         8         |  8   | dst
 * -----------------------------------------------------------
 *         9         |  No Config - Doesn't point to anything
 *        12         |  No Config - Doesn't point to anything
 *        13         |  No Config - Doesn't point to anything
 *        14         |  No Config - Doesn't point to anything
 *        15         |  No Config - Doesn't point to anything
 *        16         |  No Config - Doesn't point to anything
 *        17         |  No Config - Doesn't point to anything
 *        18         |  No Config - Doesn't point to anything
 *        19         |  No Config - Doesn't point to anything
 *        20         |  No Config - Doesn't point to anything
 *        21         |  No Config - Doesn't point to anything
 *        22         |  No Config - Doesn't point to anything
 *        23         |  No Config - Doesn't point to anything
 * -----------------------------------------------------------
 */
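
/*
 * For example, per the table above, CE 3's source-ring write index
 * lives in shadow register 3, so shadow_sr_wr_ind_addr() resolves
 * CE 3's control address to SHADOW_VALUE3 (a sketch):
 *
 *	u32 addr = shadow_sr_wr_ind_addr(scn, CE_BASE_ADDRESS(3));
 *	// addr == SHADOW_VALUE3
 *
 * CEs with no shadow mapping (e.g. CE 1 src) trip the CDF_ASSERT in
 * the default case.
 */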

u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;

	switch (COPY_ENGINE_ID(ctrl_addr)) {
	case 0:
		addr = SHADOW_VALUE0;
		break;
	case 3:
		addr = SHADOW_VALUE3;
		break;
	case 4:
		addr = SHADOW_VALUE4;
		break;
	case 5:
		addr = SHADOW_VALUE5;
		break;
	case 7:
		addr = SHADOW_VALUE7;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr\n");
		CDF_ASSERT(0);
	}
	return addr;
}

u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;

	switch (COPY_ENGINE_ID(ctrl_addr)) {
	case 1:
		addr = SHADOW_VALUE13;
		break;
	case 2:
		addr = SHADOW_VALUE14;
		break;
	case 7:
		addr = SHADOW_VALUE19;
		break;
	case 8:
		addr = SHADOW_VALUE20;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr\n");
		CDF_ASSERT(0);
	}

	return addr;
}
#endif

#if defined(FEATURE_LRO)
/**
 * ce_lro_flush_cb_register() - register the LRO flush callback
 * @hif_hdl: HIF context
 * @handler: callback function
 * @data: opaque data pointer to be passed back
 *
 * Store the LRO flush callback provided
 *
 * Return: none
 */
void ce_lro_flush_cb_register(struct hif_opaque_softc *hif_hdl,
			      void (handler)(void *), void *data)
{
	uint8_t ul, dl;
	int ul_polled, dl_polled;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	CDF_ASSERT(scn != NULL);

	if (CDF_STATUS_SUCCESS !=
	    hif_map_service_to_pipe(hif_hdl, HTT_DATA_MSG_SVC,
				    &ul, &dl, &ul_polled, &dl_polled)) {
		printk("%s cannot map service to pipe\n", __func__);
		return;
	} else {
		struct CE_state *ce_state;
		ce_state = scn->ce_id_to_state[dl];
		ce_state->lro_flush_cb = handler;
		ce_state->lro_data = data;
	}
}

/**
 * ce_lro_flush_cb_deregister() - deregister the LRO flush callback
 * @hif_hdl: HIF context
 *
 * Remove the LRO flush callback
 *
 * Return: none
 */
void ce_lro_flush_cb_deregister(struct hif_opaque_softc *hif_hdl)
{
	uint8_t ul, dl;
	int ul_polled, dl_polled;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	CDF_ASSERT(scn != NULL);

	if (CDF_STATUS_SUCCESS !=
	    hif_map_service_to_pipe(hif_hdl, HTT_DATA_MSG_SVC,
				    &ul, &dl, &ul_polled, &dl_polled)) {
		printk("%s cannot map service to pipe\n", __func__);
		return;
	} else {
		struct CE_state *ce_state;
		ce_state = scn->ce_id_to_state[dl];
		ce_state->lro_flush_cb = NULL;
		ce_state->lro_data = NULL;
	}
}
#endif
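
/*
 * Illustrative sketch (not part of the driver): a datapath client
 * would register its LRO flush handler once the HTT pipes are up and
 * deregister it on teardown. Names here are hypothetical.
 *
 *	static void my_lro_flush(void *data)
 *	{
 *		// flush aggregated LRO state for this rx context
 *	}
 *
 *	ce_lro_flush_cb_register(hif_hdl, my_lro_flush, my_rx_ctx);
 *	...
 *	ce_lro_flush_cb_deregister(hif_hdl);
 */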

/**
 * hif_map_service_to_pipe() - returns the ce ids pertaining to
 * this service
 * @hif_hdl: hif_opaque_softc pointer
 * @svc_id: Service ID for which the mapping is needed.
 * @ul_pipe: address of the container in which ul pipe is returned.
 * @dl_pipe: address of the container in which dl pipe is returned.
 * @ul_is_polled: address of the container in which a bool
 *			indicating if the UL CE for this service
 *			is polled is returned.
 * @dl_is_polled: address of the container in which a bool
 *			indicating if the DL CE for this service
 *			is polled is returned.
 *
 * Return: Indicates whether this operation was successful.
 */
int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
			    uint8_t *ul_pipe, uint8_t *dl_pipe,
			    int *ul_is_polled, int *dl_is_polled)
{
	int status = CDF_STATUS_SUCCESS;
	unsigned int i;
	struct service_to_pipe element;
	struct service_to_pipe *tgt_svc_map_to_use;
	size_t sz_tgt_svc_map_to_use;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	uint32_t mode = hif_get_conparam(scn);

	if (WLAN_IS_EPPING_ENABLED(mode)) {
		tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
		sz_tgt_svc_map_to_use =
			sizeof(target_service_to_ce_map_wlan_epping);
	} else {
		tgt_svc_map_to_use = target_service_to_ce_map_wlan;
		sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_wlan);
	}

	*dl_is_polled = 0;	/* polling for received messages not supported */

	for (i = 0; i < (sz_tgt_svc_map_to_use / sizeof(element)); i++) {

		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
		if (element.service_id == svc_id) {

			if (element.pipedir == PIPEDIR_OUT)
				*ul_pipe = element.pipenum;

			else if (element.pipedir == PIPEDIR_IN)
				*dl_pipe = element.pipenum;
		}
	}

	*ul_is_polled =
		(host_ce_config[*ul_pipe].flags & CE_ATTR_DISABLE_INTR) != 0;

	return status;
}
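
/*
 * Illustrative lookup (a sketch, not part of the driver):
 *
 *	uint8_t ul = 0, dl = 0;
 *	int ul_polled, dl_polled;
 *
 *	if (hif_map_service_to_pipe(hif_hdl, HTT_DATA_MSG_SVC, &ul, &dl,
 *				    &ul_polled, &dl_polled) ==
 *	    CDF_STATUS_SUCCESS)
 *		... // with the default map: ul == 4, dl == 1
 *
 * Note that an unknown svc_id leaves the out parameters untouched and
 * still returns CDF_STATUS_SUCCESS, so initialize them before calling.
 */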

#ifdef SHADOW_REG_DEBUG
inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
					       uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, srri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);

	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != srri_from_ddr) {
		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x\n",
			  srri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		CDF_ASSERT(0);
	}
	return srri_from_ddr;
}

inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
						uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, drri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);

	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != drri_from_ddr) {
		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x\n",
			  drri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		CDF_ASSERT(0);
	}
	return drri_from_ddr;
}

#endif

#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_get_src_ring_read_index(): Called to get the SRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the SRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based SRRI.
 *
 * Return: SRRI
 */
inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
						uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;

	attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
	if (attr.flags & CE_ATTR_DISABLE_INTR)
		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	else
		return A_TARGET_READ(scn,
				     (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
}

/**
 * hif_get_dst_ring_read_index(): Called to get the DRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the DRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based DRRI.
 *
 * Return: DRRI
 */
inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
						uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;

	attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];

	if (attr.flags & CE_ATTR_DISABLE_INTR)
		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	else
		return A_TARGET_READ(scn,
				     (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
}

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This function allocates non cached memory on ddr and sends
 * the physical address of this memory to the CE hardware. The
 * hardware updates the RRI on this particular location.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
	unsigned int i;
	cdf_dma_addr_t paddr_rri_on_ddr;
	uint32_t high_paddr, low_paddr;

	scn->vaddr_rri_on_ddr =
		(uint32_t *)cdf_os_mem_alloc_consistent(scn->cdf_dev,
			(CE_COUNT * sizeof(uint32_t)), &paddr_rri_on_ddr, 0);

	low_paddr = BITS0_TO_31(paddr_rri_on_ddr);
	high_paddr = BITS32_TO_35(paddr_rri_on_ddr);

	HIF_ERROR("%s using srri and drri from DDR\n", __func__);

	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);

	for (i = 0; i < CE_COUNT; i++)
		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));

	cdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));
}
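
/*
 * Layout sketch: the allocation above is one uint32_t per CE;
 * VADDR_FOR_CE() indexes into it by CE id, and the
 * SRRI_FROM_DDR_ADDR() / DRRI_FROM_DDR_ADDR() macros unpack the
 * source and destination read indices the hardware mirrors into that
 * word (assuming the usual packing of SRRI and DRRI into the two
 * 16-bit halves). Readers then avoid a register access entirely:
 *
 *	uint32_t srri = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, ctrl_addr));
 *	uint32_t drri = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, ctrl_addr));
 */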
#else

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This is a dummy implementation for platforms that don't
 * support this functionality.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
}
#endif

/**
 * hif_dump_ce_registers() - dump ce registers
 * @scn: hif_softc pointer
 *
 * Output the copy engine registers
 *
 * Return: 0 for success or error code
 */
int hif_dump_ce_registers(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
	uint32_t ce_reg_values[CE_COUNT_MAX][CE_USEFUL_SIZE >> 2];
	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
	uint16_t i;
	CDF_STATUS status;

	for (i = 0; i < CE_COUNT_MAX; i++, ce_reg_address += CE_OFFSET) {
		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
					   (uint8_t *) &ce_reg_values[i][0],
					   ce_reg_word_size * sizeof(uint32_t));

		if (status != CDF_STATUS_SUCCESS) {
			HIF_ERROR("Dumping CE register failed!");
			return -EACCES;
		}
		HIF_ERROR("CE%d Registers:", i);
		cdf_trace_hex_dump(CDF_MODULE_ID_HIF, CDF_TRACE_LEVEL_DEBUG,
				   (uint8_t *) &ce_reg_values[i][0],
				   ce_reg_word_size * sizeof(uint32_t));
	}
	return 0;
}