/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */
#include <osdep.h>
#include "a_types.h"
#include "athdefs.h"
#include "osapi_linux.h"
#include "targcfg.h"
#include "cdf_lock.h"
#include "cdf_status.h"
#include <cdf_atomic.h>         /* cdf_atomic_read */
#include <targaddrs.h>
#include <bmi_msg.h>
#include "hif_io32.h"
#include <hif.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#ifdef HIF_PCI
#include "ce_bmi.h"
#endif
#include "ce_api.h"
#include "cdf_trace.h"
#include "cds_api.h"
#ifdef CONFIG_CNSS
#include <net/cnss.h>
#endif
#include "epping_main.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#ifdef HIF_PCI
#include "icnss_stub.h"
#else
#include <soc/qcom/icnss.h>
#endif
#include "qwlan_version.h"
#include "cds_concurrency.h"

#define CE_POLL_TIMEOUT 10      /* ms */

/* Forward references */
static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix for EV118783: poll for a BMI response instead of relying only on
 * the interrupt, which may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC  1000


static int hif_post_recv_buffers(struct ol_softc *scn);
static void hif_config_rri_on_ddr(struct ol_softc *scn);

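/**
 * ce_poll_timeout() - timer callback for a copy engine running in poll mode
 * @arg: opaque pointer to the struct CE_state being polled
 *
 * Services the copy engine and, as long as the timer is still marked as
 * initialized, re-arms itself so polling repeats every CE_POLL_TIMEOUT ms.
 */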
static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;
	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		cdf_softirq_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}

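/**
 * roundup_pwr2() - round a ring size up to the next power of two
 * @n: requested number of entries
 *
 * Return: @n if it is already a power of two, otherwise the next larger
 * power of two; asserts and returns 0 if @n is too large to round up.
 */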
static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	CDF_ASSERT(0); /* n too large */
	return 0;
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 *
 * This should be called during the initialization sequence before
 * interrupts are enabled, so we don't have to worry about thread safety.
 */
struct CE_handle *ce_init(struct ol_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr)
{
	struct CE_state *CE_state;
	uint32_t ctrl_addr;
	unsigned int nentries;
	cdf_dma_addr_t base_addr;
	bool malloc_CE_state = false;
	bool malloc_src_ring = false;

	CDF_ASSERT(CE_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(CE_id);
	CE_state = scn->ce_id_to_state[CE_id];

	if (!CE_state) {
		CE_state =
		    (struct CE_state *)cdf_mem_malloc(sizeof(*CE_state));
		if (!CE_state) {
			HIF_ERROR("%s: CE_state has no mem", __func__);
			return NULL;
		}
		malloc_CE_state = true;
		cdf_mem_zero(CE_state, sizeof(*CE_state));
		scn->ce_id_to_state[CE_id] = CE_state;
		cdf_spinlock_init(&CE_state->ce_index_lock);

		CE_state->id = CE_id;
		CE_state->ctrl_addr = ctrl_addr;
		CE_state->state = CE_RUNNING;
		CE_state->attr_flags = attr->flags;
	}
	CE_state->scn = scn;

	cdf_atomic_init(&CE_state->rx_pending);
	if (attr == NULL) {
		/* Already initialized; caller wants the handle */
		return (struct CE_handle *)CE_state;
	}

#ifdef ADRASTEA_SHADOW_REGISTERS
	HIF_ERROR("%s: Using Shadow Registers instead of CE Registers\n",
		  __func__);
#endif

	if (CE_state->src_sz_max)
		CDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
	else
		CE_state->src_sz_max = attr->src_sz_max;

	ce_init_ce_desc_event_log(CE_id,
				  attr->src_nentries + attr->dest_nentries);

	/* source ring setup */
	nentries = attr->src_nentries;
	if (nentries) {
		struct CE_ring_state *src_ring;
		unsigned CE_nbytes;
		char *ptr;
		uint64_t dma_addr;
		nentries = roundup_pwr2(nentries);
		if (CE_state->src_ring) {
			CDF_ASSERT(CE_state->src_ring->nentries == nentries);
		} else {
			CE_nbytes = sizeof(struct CE_ring_state)
				    + (nentries * sizeof(void *));
			ptr = cdf_mem_malloc(CE_nbytes);
			if (!ptr) {
				/* cannot allocate src ring. If the
				 * CE_state is allocated locally free
				 * CE_State and return error.
				 */
				HIF_ERROR("%s: src ring has no mem", __func__);
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					scn->ce_id_to_state[CE_id] = NULL;
					cdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			} else {
				/* we can allocate src ring.
				 * Mark that the src ring is
				 * allocated locally
				 */
				malloc_src_ring = true;
			}
			cdf_mem_zero(ptr, CE_nbytes);

			src_ring = CE_state->src_ring =
					   (struct CE_ring_state *)ptr;
			ptr += sizeof(struct CE_ring_state);
			src_ring->nentries = nentries;
			src_ring->nentries_mask = nentries - 1;
			A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
			src_ring->hw_index =
				CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
			src_ring->sw_index = src_ring->hw_index;
			src_ring->write_index =
				CE_SRC_RING_WRITE_IDX_GET(scn, ctrl_addr);
			A_TARGET_ACCESS_END_RET_PTR(scn);
			src_ring->low_water_mark_nentries = 0;
			src_ring->high_water_mark_nentries = nentries;
			src_ring->per_transfer_context = (void **)ptr;

			/* Legacy platforms that do not support cache
			 * coherent DMA are unsupported
			 */
			src_ring->base_addr_owner_space_unaligned =
				cdf_os_mem_alloc_consistent(scn->cdf_dev,
						(nentries *
						 sizeof(struct CE_src_desc) +
						 CE_DESC_RING_ALIGN),
						&base_addr, 0);
			if (src_ring->base_addr_owner_space_unaligned
					== NULL) {
				HIF_ERROR("%s: src ring has no DMA mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->base_addr_CE_space_unaligned = base_addr;

			if (src_ring->
			    base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN
							    - 1)) {
				src_ring->base_addr_CE_space =
					(src_ring->base_addr_CE_space_unaligned
					 + CE_DESC_RING_ALIGN -
					 1) & ~(CE_DESC_RING_ALIGN - 1);

				src_ring->base_addr_owner_space =
					(void
					 *)(((size_t) src_ring->
					     base_addr_owner_space_unaligned +
					     CE_DESC_RING_ALIGN -
					     1) & ~(CE_DESC_RING_ALIGN - 1));
			} else {
				src_ring->base_addr_CE_space =
					src_ring->base_addr_CE_space_unaligned;
				src_ring->base_addr_owner_space =
					src_ring->
					base_addr_owner_space_unaligned;
			}
			/*
			 * Also allocate a shadow src ring in
			 * regular mem to use for faster access.
			 */
			src_ring->shadow_base_unaligned =
				cdf_mem_malloc(nentries *
					       sizeof(struct CE_src_desc) +
					       CE_DESC_RING_ALIGN);
			if (src_ring->shadow_base_unaligned == NULL) {
				HIF_ERROR("%s: src ring no shadow_base mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->shadow_base = (struct CE_src_desc *)
				(((size_t) src_ring->shadow_base_unaligned +
				  CE_DESC_RING_ALIGN - 1) &
				 ~(CE_DESC_RING_ALIGN - 1));

			A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
			dma_addr = src_ring->base_addr_CE_space;
			CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
				(uint32_t)(dma_addr & 0xFFFFFFFF));
#ifdef WLAN_ENABLE_QCA6180
			{
				uint32_t tmp;
				tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(
						scn, ctrl_addr);
				tmp &= ~0x1F;
				dma_addr = ((dma_addr >> 32) & 0x1F) | tmp;
				CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
						ctrl_addr, (uint32_t)dma_addr);
			}
#endif
			CE_SRC_RING_SZ_SET(scn, ctrl_addr, nentries);
			CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
#ifdef BIG_ENDIAN_HOST
			/* Enable source ring byte swap for big endian host */
			CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
			CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
			CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
			A_TARGET_ACCESS_END_RET_PTR(scn);
		}
	}

	/* destination ring setup */
	nentries = attr->dest_nentries;
	if (nentries) {
		struct CE_ring_state *dest_ring;
		unsigned CE_nbytes;
		char *ptr;
		uint64_t dma_addr;

		nentries = roundup_pwr2(nentries);
		if (CE_state->dest_ring) {
			CDF_ASSERT(CE_state->dest_ring->nentries == nentries);
		} else {
			CE_nbytes = sizeof(struct CE_ring_state)
				    + (nentries * sizeof(void *));
			ptr = cdf_mem_malloc(CE_nbytes);
			if (!ptr) {
				/* cannot allocate dst ring. If the CE_state
				 * or src ring is allocated locally free
				 * CE_State and src ring and return error.
				 */
				HIF_ERROR("%s: dest ring has no mem",
					  __func__);
				if (malloc_src_ring) {
					cdf_mem_free(CE_state->src_ring);
					CE_state->src_ring = NULL;
					malloc_src_ring = false;
				}
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					scn->ce_id_to_state[CE_id] = NULL;
					cdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			}
			cdf_mem_zero(ptr, CE_nbytes);

			dest_ring = CE_state->dest_ring =
					    (struct CE_ring_state *)ptr;
			ptr += sizeof(struct CE_ring_state);
			dest_ring->nentries = nentries;
			dest_ring->nentries_mask = nentries - 1;
			A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
			dest_ring->sw_index =
				CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
			dest_ring->write_index =
				CE_DEST_RING_WRITE_IDX_GET(scn, ctrl_addr);
			A_TARGET_ACCESS_END_RET_PTR(scn);
			dest_ring->low_water_mark_nentries = 0;
			dest_ring->high_water_mark_nentries = nentries;
			dest_ring->per_transfer_context = (void **)ptr;

			/* Legacy platforms that do not support cache
			 * coherent DMA are unsupported */
			dest_ring->base_addr_owner_space_unaligned =
				cdf_os_mem_alloc_consistent(scn->cdf_dev,
						(nentries *
						 sizeof(struct CE_dest_desc) +
						 CE_DESC_RING_ALIGN),
						&base_addr, 0);
			if (dest_ring->base_addr_owner_space_unaligned
					== NULL) {
				HIF_ERROR("%s: dest ring has no DMA mem",
					  __func__);
				goto error_no_dma_mem;
			}
			dest_ring->base_addr_CE_space_unaligned = base_addr;

			/* Correctly initialize memory to 0 to
			 * prevent garbage data from crashing the system
			 * when downloading firmware
			 */
			cdf_mem_zero(dest_ring->base_addr_owner_space_unaligned,
				     nentries * sizeof(struct CE_dest_desc) +
				     CE_DESC_RING_ALIGN);

			if (dest_ring->
			    base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN -
							    1)) {

				dest_ring->base_addr_CE_space =
					(dest_ring->
					 base_addr_CE_space_unaligned +
					 CE_DESC_RING_ALIGN -
					 1) & ~(CE_DESC_RING_ALIGN - 1);

				dest_ring->base_addr_owner_space =
					(void
					 *)(((size_t) dest_ring->
					     base_addr_owner_space_unaligned +
					     CE_DESC_RING_ALIGN -
					     1) & ~(CE_DESC_RING_ALIGN - 1));
			} else {
				dest_ring->base_addr_CE_space =
					dest_ring->base_addr_CE_space_unaligned;
				dest_ring->base_addr_owner_space =
					dest_ring->
					base_addr_owner_space_unaligned;
			}

			A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
			dma_addr = dest_ring->base_addr_CE_space;
			CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
				(uint32_t)(dma_addr & 0xFFFFFFFF));
#ifdef WLAN_ENABLE_QCA6180
			{
				uint32_t tmp;
				tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn,
						ctrl_addr);
				tmp &= ~0x1F;
				dma_addr = ((dma_addr >> 32) & 0x1F) | tmp;
				CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
						ctrl_addr, (uint32_t)dma_addr);
			}
#endif
			CE_DEST_RING_SZ_SET(scn, ctrl_addr, nentries);
#ifdef BIG_ENDIAN_HOST
			/* Enable Dest ring byte swap for big endian host */
			CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
			CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
			CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
			A_TARGET_ACCESS_END_RET_PTR(scn);

			/* epping */
			/* poll timer */
			if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL)) {
				cdf_softirq_timer_init(scn->cdf_dev,
						       &CE_state->poll_timer,
						       ce_poll_timeout,
						       CE_state,
						       CDF_TIMER_TYPE_SW);
				CE_state->timer_inited = true;
				cdf_softirq_timer_mod(&CE_state->poll_timer,
						      CE_POLL_TIMEOUT);
			}
		}
	}

	/* Enable CE error interrupts */
	A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
	CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
	A_TARGET_ACCESS_END_RET_PTR(scn);

	return (struct CE_handle *)CE_state;

error_no_dma_mem:
	ce_fini((struct CE_handle *)CE_state);
	return NULL;
}

#ifdef WLAN_FEATURE_FASTPATH
/**
 * ce_h2t_tx_ce_cleanup() - Placeholder function for H2T CE cleanup.
 * No processing is required inside this function.
 * @ce_hdl: Copy engine handle
 * Using an assert, this function makes sure that the TX CE has been
 * processed completely.
 *
 * This is called while dismantling CE structures. No other thread
 * should be using these structures while dismantling is occurring,
 * therefore no locking is needed.
 *
 * Return: none
 */
void
ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct ol_softc *sc = ce_state->scn;
	uint32_t sw_index, write_index;

	if (sc->fastpath_mode_on && (ce_state->id == CE_HTT_H2T_MSG)) {
		HIF_INFO("%s %d Fastpath mode ON, Cleaning up HTT Tx CE\n",
			 __func__, __LINE__);
		sw_index = src_ring->sw_index;
		write_index = src_ring->sw_index;

		/* At this point Tx CE should be clean */
		cdf_assert_always(sw_index == write_index);
	}
}
#else
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
}
#endif /* WLAN_FEATURE_FASTPATH */

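/**
 * ce_fini() - tear down a copy engine set up by ce_init()
 * @copyeng: copy engine handle returned by ce_init()
 *
 * Marks the CE unused, frees the source ring (including its shadow ring)
 * and destination ring descriptor memory, stops the poll timer if it was
 * started, and finally frees the CE state itself.
 */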
void ce_fini(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int CE_id = CE_state->id;
	struct ol_softc *scn = CE_state->scn;

	CE_state->state = CE_UNUSED;
	scn->ce_id_to_state[CE_id] = NULL;
	if (CE_state->src_ring) {
		/* Cleanup the HTT Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		if (CE_state->src_ring->shadow_base_unaligned)
			cdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
		if (CE_state->src_ring->base_addr_owner_space_unaligned)
			cdf_os_mem_free_consistent(scn->cdf_dev,
					   (CE_state->src_ring->nentries *
					    sizeof(struct CE_src_desc) +
					    CE_DESC_RING_ALIGN),
					   CE_state->src_ring->
					   base_addr_owner_space_unaligned,
					   CE_state->src_ring->
					   base_addr_CE_space, 0);
		cdf_mem_free(CE_state->src_ring);
	}
	if (CE_state->dest_ring) {
		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
			cdf_os_mem_free_consistent(scn->cdf_dev,
					   (CE_state->dest_ring->nentries *
					    sizeof(struct CE_dest_desc) +
					    CE_DESC_RING_ALIGN),
					   CE_state->dest_ring->
					   base_addr_owner_space_unaligned,
					   CE_state->dest_ring->
					   base_addr_CE_space, 0);
		cdf_mem_free(CE_state->dest_ring);

		/* epping */
		if (CE_state->timer_inited) {
			CE_state->timer_inited = false;
			cdf_softirq_timer_free(&CE_state->poll_timer);
		}
	}
	cdf_mem_free(CE_state);
}

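/**
 * hif_detach_htc() - clear the message callbacks installed by the upper layer
 * @hif_ctx: HIF context
 *
 * Zeroes both the pending and the currently installed callback sets so no
 * further send/receive completions are delivered upward.
 */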
void hif_detach_htc(struct ol_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	cdf_mem_zero(&hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
	cdf_mem_zero(&hif_state->msg_callbacks_current,
		     sizeof(hif_state->msg_callbacks_current));
}

/* Send the first nbytes bytes of the buffer */
CDF_STATUS
hif_send_head(struct ol_softc *hif_ctx,
	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
	      cdf_nbuf_t nbuf, unsigned int data_attr)
{
	struct ol_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	int bytes = nbytes, nfrags = 0;
	struct ce_sendlist sendlist;
	int status, i = 0;
	unsigned int mux_id = 0;

	CDF_ASSERT(nbytes <= cdf_nbuf_len(nbuf));

	transfer_id =
		(mux_id & MUX_ID_MASK) |
		(transfer_id & TRANSACTION_ID_MASK);
	data_attr &= DESC_DATA_FLAG_MASK;
	/*
	 * The common case involves sending multiple fragments within a
	 * single download (the tx descriptor and the tx frame header).
	 * So, optimize for the case of multiple fragments by not even
	 * checking whether it's necessary to use a sendlist.
	 * The overhead of using a sendlist for a single buffer download
	 * is not a big deal, since it happens rarely (for WMI messages).
	 */
	ce_sendlist_init(&sendlist);
	do {
		uint32_t frag_paddr;
		int frag_bytes;

		frag_paddr = cdf_nbuf_get_frag_paddr_lo(nbuf, nfrags);
		frag_bytes = cdf_nbuf_get_frag_len(nbuf, nfrags);
		/*
		 * Clear the packet offset for all but the first CE desc.
		 */
		if (i++ > 0)
			data_attr &= ~CDF_CE_TX_PKT_OFFSET_BIT_M;

		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
					     frag_bytes >
					     bytes ? bytes : frag_bytes,
					     cdf_nbuf_get_frag_is_wordstream
					     (nbuf,
					      nfrags) ? 0 :
					     CE_SEND_FLAG_SWAP_DISABLE,
					     data_attr);
		if (status != CDF_STATUS_SUCCESS) {
			HIF_ERROR("%s: error, frag_num %d larger than limit",
				  __func__, nfrags);
			return status;
		}
		bytes -= frag_bytes;
		nfrags++;
	} while (bytes > 0);

	/* Make sure we have resources to handle this request */
	cdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	if (pipe_info->num_sends_allowed < nfrags) {
		cdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
		return CDF_STATUS_E_RESOURCES;
	}
	pipe_info->num_sends_allowed -= nfrags;
	cdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);

	if (cdf_unlikely(ce_hdl == NULL)) {
		HIF_ERROR("%s: error CE handle is null", __func__);
		return A_ERROR;
	}

	NBUF_UPDATE_TX_PKT_COUNT(nbuf, NBUF_TX_PKT_HIF);
	DPTRACE(cdf_dp_trace(nbuf, CDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
			     (uint8_t *)(cdf_nbuf_data(nbuf)),
			     sizeof(cdf_nbuf_data(nbuf))));
	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
	CDF_ASSERT(status == CDF_STATUS_SUCCESS);

	return status;
}

void hif_send_complete_check(struct ol_softc *scn, uint8_t pipe, int force)
{
	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance. If there seem to be plenty of
		 * resources left, then just wait, since checking involves
		 * reading a CE register, which is a relatively expensive
		 * operation.
		 */
		resources = hif_get_free_queue_number(scn, pipe);
		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config[pipe].src_nentries >> 1)) {
			return;
		}
	}
#ifdef ATH_11AC_TXCOMPACT
	ce_per_engine_servicereap(scn, pipe);
#else
	ce_per_engine_service(scn, pipe);
#endif
}

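/**
 * hif_get_free_queue_number() - number of sends currently allowed on a pipe
 * @hif_ctx: HIF context
 * @pipe: pipe (copy engine) id
 *
 * Return: pipe_info->num_sends_allowed, read under the completion
 * free-queue lock.
 */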
uint16_t hif_get_free_queue_number(struct ol_softc *hif_ctx, uint8_t pipe)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	uint16_t rv;

	cdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	rv = pipe_info->num_sends_allowed;
	cdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
	return rv;
}

/* Called by lower (CE) layer when a send to Target completes. */
void
hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, cdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int sw_index, unsigned int hw_index,
		     unsigned int toeplitz_hash_result)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct ol_softc *scn = HIF_GET_SOFTC(hif_state);
	unsigned int sw_idx = sw_index, hw_idx = hw_index;
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	do {
		/*
		 * The upper layer callback will be triggered
		 * when the last fragment is completed.
		 */
		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
			if (scn->target_status
			    == OL_TRGET_STATUS_RESET)
				cdf_nbuf_free(transfer_context);
			else
				msg_callbacks->txCompletionHandler(
					msg_callbacks->Context,
					transfer_context, transfer_id,
					toeplitz_hash_result);
		}

		cdf_spin_lock(&pipe_info->completion_freeq_lock);
		pipe_info->num_sends_allowed++;
		cdf_spin_unlock(&pipe_info->completion_freeq_lock);
	} while (ce_completed_send_next(copyeng,
			&ce_context, &transfer_context,
			&CE_data, &nbytes, &transfer_id,
			&sw_idx, &hw_idx,
			&toeplitz_hash_result) == CDF_STATUS_SUCCESS);
}

/**
 * hif_ce_do_recv(): send message from copy engine to upper layers
 * @msg_callbacks: structure containing callback and callback context
 * @netbuf: skb containing message
 * @nbytes: number of bytes in the message
 * @pipe_info: used for the pipe_number info
 *
 * Checks the packet length, configures the length in the netbuf,
 * and calls the upper layer callback.
 *
 * return: None
 */
static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
		cdf_nbuf_t netbuf, int nbytes,
		struct HIF_CE_pipe_info *pipe_info) {
	if (nbytes <= pipe_info->buf_sz) {
		cdf_nbuf_set_pktlen(netbuf, nbytes);
		msg_callbacks->
			rxCompletionHandler(msg_callbacks->Context,
					    netbuf, pipe_info->pipe_num);
	} else {
		HIF_ERROR("%s: Invalid Rx msg buf:%p nbytes:%d",
			  __func__, netbuf, nbytes);
		cdf_nbuf_free(netbuf);
	}
}

/* Called by lower (CE) layer when data is received from the Target. */
void
hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, cdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int flags)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct CE_state *ce_state = (struct CE_state *) copyeng;
	struct ol_softc *scn = HIF_GET_SOFTC(hif_state);
	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(hif_state);
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	do {
		hif_pm_runtime_mark_last_busy(hif_sc->dev);
		cdf_nbuf_unmap_single(scn->cdf_dev,
				      (cdf_nbuf_t) transfer_context,
				      CDF_DMA_FROM_DEVICE);

		atomic_inc(&pipe_info->recv_bufs_needed);
		hif_post_recv_buffers_for_pipe(pipe_info);
		if (scn->target_status == OL_TRGET_STATUS_RESET)
			cdf_nbuf_free(transfer_context);
		else
			hif_ce_do_recv(msg_callbacks, transfer_context,
				       nbytes, pipe_info);

		/* Set up force_break flag if num of receives reaches
		 * MAX_NUM_OF_RECEIVES */
		ce_state->receive_count++;
		if (cdf_unlikely(hif_max_num_receives_reached(
				ce_state->receive_count))) {
			ce_state->force_break = 1;
			break;
		}
	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
					&CE_data, &nbytes, &transfer_id,
					&flags) == CDF_STATUS_SUCCESS);

}

/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */

void
hif_post_init(struct ol_softc *hif_ctx, void *unused,
	      struct hif_msg_callbacks *callbacks)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	spin_lock_init(&pcie_access_log_lock);
#endif
	/* Save callbacks for later installation */
	cdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
		     sizeof(hif_state->msg_callbacks_pending));

}

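/**
 * hif_completion_thread_startup() - register per-pipe CE completion callbacks
 * @hif_state: HIF CE state
 *
 * For every copy engine except the diagnostic CE, registers
 * hif_pci_ce_send_done() and hif_pci_ce_recv_data() with the CE layer and
 * initializes the per-pipe send accounting.
 *
 * Return: 0 on success, -EINVAL or -EFAULT on invalid state
 */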
int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
{
	struct CE_handle *ce_diag = hif_state->ce_diag;
	int pipe_num;
	struct ol_softc *scn = HIF_GET_SOFTC(hif_state);
	struct hif_msg_callbacks *hif_msg_callbacks =
		&hif_state->msg_callbacks_current;

	/* daemonize("hif_compl_thread"); */

	if (scn->ce_count == 0) {
		HIF_ERROR("%s: Invalid ce_count\n", __func__);
		return -EINVAL;
	}

	if (!hif_msg_callbacks ||
	    !hif_msg_callbacks->rxCompletionHandler ||
	    !hif_msg_callbacks->txCompletionHandler) {
		HIF_ERROR("%s: no completion handler registered", __func__);
		return -EFAULT;
	}

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr attr;
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl == ce_diag) {
			continue;       /* Handle Diagnostic CE specially */
		}
		attr = host_ce_config[pipe_num];
		if (attr.src_nentries) {
			/* pipe used to send to target */
			HIF_INFO_MED("%s: pipe_num:%d pipe_info:0x%p",
				     __func__, pipe_num, pipe_info);
			ce_send_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_send_done, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
			pipe_info->num_sends_allowed = attr.src_nentries - 1;
		}
		if (attr.dest_nentries) {
			/* pipe used to receive from target */
			ce_recv_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_recv_data, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
		}

		if (attr.src_nentries)
			cdf_spinlock_init(&pipe_info->completion_freeq_lock);
	}

	A_TARGET_ACCESS_UNLIKELY(scn);
	return 0;
}

/*
 * Install pending msg callbacks.
 *
 * TBDXXX: This hack is needed because upper layers install msg callbacks
 * for use with HTC before BMI is done; yet this HIF implementation
 * needs to continue to use BMI msg callbacks. Really, upper layers
 * should not register HTC callbacks until AFTER BMI phase.
 */
static void hif_msg_callbacks_install(struct ol_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	cdf_mem_copy(&hif_state->msg_callbacks_current,
		     &hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
}

void
hif_get_default_pipe(struct ol_softc *scn, uint8_t *ULPipe, uint8_t *DLPipe)
{
	int ul_is_polled, dl_is_polled;

	(void)hif_map_service_to_pipe(scn, HTC_CTRL_RSVD_SVC,
		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
}

/**
 * hif_dump_pipe_debug_count() - Log error count
 * @scn: ol_softc pointer.
 *
 * Output the pipe error counts of each pipe to log file
 *
 * Return: N/A
 */
void hif_dump_pipe_debug_count(struct ol_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num;

	if (hif_state == NULL) {
		HIF_ERROR("%s hif_state is NULL", __func__);
		return;
	}
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];

		if (pipe_info->nbuf_alloc_err_count > 0 ||
		    pipe_info->nbuf_dma_err_count > 0 ||
		    pipe_info->nbuf_ce_enqueue_err_count)
			HIF_ERROR(
				"%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_alloc_err_count,
				pipe_info->nbuf_dma_err_count,
				pipe_info->nbuf_ce_enqueue_err_count);
	}
}

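/**
 * hif_post_recv_buffers_for_pipe() - replenish receive buffers on one pipe
 * @pipe_info: pipe to replenish
 *
 * Allocates, DMA-maps and enqueues network buffers on the pipe's copy
 * engine until recv_bufs_needed drops to zero, tracking allocation, DMA
 * and enqueue error counts along the way.
 *
 * Return: 0 if all needed buffers were posted, 1 on the first failure
 */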
static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	cdf_size_t buf_sz;
	struct ol_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
	CDF_STATUS ret;
	uint32_t bufs_posted = 0;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return 0;
	}

	ce_hdl = pipe_info->ce_hdl;

	cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
		cdf_dma_addr_t CE_data;	/* CE space buffer address */
		cdf_nbuf_t nbuf;
		int status;

		atomic_dec(&pipe_info->recv_bufs_needed);
		cdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

		nbuf = cdf_nbuf_alloc(scn->cdf_dev, buf_sz, 0, 4, false);
		if (!nbuf) {
			cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
			pipe_info->nbuf_alloc_err_count++;
			cdf_spin_unlock_bh(
					&pipe_info->recv_bufs_needed_lock);
			HIF_ERROR(
				"%s buf alloc error [%d] needed %d, nbuf_alloc_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_alloc_err_count);
			atomic_inc(&pipe_info->recv_bufs_needed);
			return 1;
		}

		/*
		 * cdf_nbuf_peek_header(nbuf, &data, &unused);
		 * CE_data = dma_map_single(dev, data, buf_sz, );
		 * DMA_FROM_DEVICE);
		 */
		ret =
			cdf_nbuf_map_single(scn->cdf_dev, nbuf,
					    CDF_DMA_FROM_DEVICE);

		if (unlikely(ret != CDF_STATUS_SUCCESS)) {
			cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
			pipe_info->nbuf_dma_err_count++;
			cdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
			HIF_ERROR(
				"%s buf alloc error [%d] needed %d, nbuf_dma_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_dma_err_count);
			cdf_nbuf_free(nbuf);
			atomic_inc(&pipe_info->recv_bufs_needed);
			return 1;
		}

		CE_data = cdf_nbuf_get_frag_paddr_lo(nbuf, 0);

		cdf_os_mem_dma_sync_single_for_device(scn->cdf_dev, CE_data,
						      buf_sz, DMA_FROM_DEVICE);
		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
		CDF_ASSERT(status == CDF_STATUS_SUCCESS);
		if (status != EOK) {
			cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
			pipe_info->nbuf_ce_enqueue_err_count++;
			cdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
			HIF_ERROR(
				"%s buf alloc error [%d] needed %d, nbuf_alloc_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_ce_enqueue_err_count);
			atomic_inc(&pipe_info->recv_bufs_needed);
			cdf_nbuf_free(nbuf);
			return 1;
		}

		cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
		bufs_posted++;
	}
	pipe_info->nbuf_alloc_err_count =
		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
	pipe_info->nbuf_dma_err_count =
		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
	pipe_info->nbuf_ce_enqueue_err_count =
		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
		pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;

	cdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

	return 0;
}

/*
 * Try to post all desired receive buffers for all pipes.
 * Returns 0 if all desired buffers are posted,
 * non-zero if we were unable to completely
 * replenish receive buffers.
 */
static int hif_post_recv_buffers(struct ol_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num, rv = 0;

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (hif_post_recv_buffers_for_pipe(pipe_info)) {
			rv = 1;
			goto done;
		}
	}

done:
	A_TARGET_ACCESS_UNLIKELY(scn);

	return rv;
}

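/**
 * hif_start() - start the HIF copy engine layer
 * @hif_ctx: HIF context
 *
 * Installs the pending message callbacks, registers the per-pipe CE
 * completion handlers and posts the initial set of receive buffers.
 *
 * Return: CDF_STATUS_SUCCESS, or CDF_STATUS_E_FAILURE if the completion
 * handlers could not be set up
 */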
CDF_STATUS hif_start(struct ol_softc *hif_ctx)
{
	struct ol_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_msg_callbacks_install(scn);

	if (hif_completion_thread_startup(hif_state))
		return CDF_STATUS_E_FAILURE;

	/* Post buffers once to start things off. */
	(void)hif_post_recv_buffers(scn);

	hif_state->started = true;

	return CDF_STATUS_SUCCESS;
}

#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_enable_fastpath() - update that we have enabled fastpath mode
 * @hif_ctx: HIF context
 *
 * For use in data path
 *
 * Return: void
 */
void hif_enable_fastpath(struct ol_softc *hif_ctx)
{
	HIF_INFO("Enabling fastpath mode\n");
	hif_ctx->fastpath_mode_on = 1;
}

/**
 * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
 * @hif_ctx: HIF Context
 *
 * For use in data path to skip HTC
 *
 * Return: bool
 */
bool hif_is_fastpath_mode_enabled(struct ol_softc *hif_ctx)
{
	return hif_ctx->fastpath_mode_on;
}

/**
 * hif_get_ce_handle - API to get CE handle for FastPath mode
 * @hif_ctx: HIF Context
 * @id: CopyEngine Id
 *
 * API to return CE handle for fastpath mode
 *
 * Return: CE handle as an opaque void pointer
 */
void *hif_get_ce_handle(struct ol_softc *hif_ctx, int id)
{
	return hif_ctx->ce_id_to_state[id];
}
#endif /* WLAN_FEATURE_FASTPATH */

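/**
 * hif_recv_buffer_cleanup_on_pipe() - reclaim posted receive buffers
 * @pipe_info: pipe to clean up
 *
 * Revokes every receive buffer still queued on the pipe's copy engine,
 * unmaps it and frees it. Used while shutting the pipes down.
 */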
void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct ol_softc *scn;
	struct CE_handle *ce_hdl;
	uint32_t buf_sz;
	struct HIF_CE_state *hif_state;
	cdf_nbuf_t netbuf;
	cdf_dma_addr_t CE_data;
	void *per_CE_context;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started) {
		return;
	}

	scn = HIF_GET_SOFTC(hif_state);
	ce_hdl = pipe_info->ce_hdl;

	if (scn->cdf_dev == NULL) {
		return;
	}
	while (ce_revoke_recv_next
		       (ce_hdl, &per_CE_context, (void **)&netbuf,
			&CE_data) == CDF_STATUS_SUCCESS) {
		cdf_nbuf_unmap_single(scn->cdf_dev, netbuf,
				      CDF_DMA_FROM_DEVICE);
		cdf_nbuf_free(netbuf);
	}
}

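/**
 * hif_send_buffer_cleanup_on_pipe() - complete outstanding send buffers
 * @pipe_info: pipe to clean up
 *
 * Cancels every send still queued on the pipe's copy engine and, except
 * for HTC endpoint packets that are freed elsewhere, indicates the
 * completion to the upper layer so the buffer can be released.
 */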
void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	struct HIF_CE_state *hif_state;
	struct ol_softc *scn;
	cdf_nbuf_t netbuf;
	void *per_CE_context;
	cdf_dma_addr_t CE_data;
	unsigned int nbytes;
	unsigned int id;
	uint32_t buf_sz;
	uint32_t toeplitz_hash_result;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started) {
		return;
	}

	scn = HIF_GET_SOFTC(hif_state);

	ce_hdl = pipe_info->ce_hdl;

	while (ce_cancel_send_next
		       (ce_hdl, &per_CE_context,
			(void **)&netbuf, &CE_data, &nbytes,
			&id, &toeplitz_hash_result) == CDF_STATUS_SUCCESS) {
		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
			/*
			 * Packets enqueued by htt_h2t_ver_req_msg() and
			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
			 * freed in htt_htc_misc_pkt_pool_free() in
			 * wlantl_close(), so do not free them here again
			 * by checking whether it's the endpoint
			 * which they are queued in.
			 */
			if (id == scn->htc_endpoint)
				return;
			/* Indicate the completion to higher
			 * layer to free the buffer */
			hif_state->msg_callbacks_current.
				txCompletionHandler(hif_state->
					msg_callbacks_current.Context,
					netbuf, id, toeplitz_hash_result);
		}
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
{
	int pipe_num;
	struct ol_softc *scn = HIF_GET_SOFTC(hif_state);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		hif_recv_buffer_cleanup_on_pipe(pipe_info);
		hif_send_buffer_cleanup_on_pipe(pipe_info);
	}
}

void hif_flush_surprise_remove(struct ol_softc *hif_ctx)
{
	struct ol_softc *scn = hif_ctx;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_buffer_cleanup(hif_state);
}

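/**
 * hif_stop() - stop the HIF copy engine layer
 * @hif_ctx: HIF context
 *
 * Cleans up residual send/receive buffers, shuts down every copy engine,
 * releases the sleep timer and marks the HIF state as no longer started.
 * Assumes the Target no longer generates DMA or interrupts.
 */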
void hif_stop(struct ol_softc *hif_ctx)
{
	struct ol_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	int pipe_num;

	scn->hif_init_done = false;

	/*
	 * At this point, asynchronous threads are stopped,
	 * The Target should not DMA nor interrupt, Host code may
	 * not initiate anything more. So we just need to clean
	 * up Host-side state.
	 */

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	hif_buffer_cleanup(hif_state);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}

	if (hif_state->sleep_timer_init) {
		cdf_softirq_timer_cancel(&hif_state->sleep_timer);
		cdf_softirq_timer_free(&hif_state->sleep_timer);
		hif_state->sleep_timer_init = false;
	}

	hif_state->started = false;
}

#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40


static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};



/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
 * mapping of HTC services to HIF pipes.
 */
/*
 * This authoritative table defines Copy Engine configuration and the mapping
 * of services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
 ============================================================================
 Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
            |                      |      | ctio | Size     | Frequency
            |                      |      | n    |          |
 ============================================================================
 tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
 descriptor |                      |      |      | O(100B)  | and regular
 download   |                      |      |      |          |
 ----------------------------------------------------------------------------
 rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
 indication |                      |      |      | O(10B)   | regular
 upload     |                      |      |      |          |
 ----------------------------------------------------------------------------
 MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
 upload     |                      |      |      | O(1000B) | (frequent
 e.g. noise |                      |      |      |          | during IP1.0
 packets    |                      |      |      |          | testing)
 ----------------------------------------------------------------------------
 MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
 download   |                      |      |      | O(1000B) | (frequent
 e.g.       |                      |      |      |          | during IP1.0
 misdirecte |                      |      |      |          | testing)
 d EAPOL    |                      |      |      |          |
 packets    |                      |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
            | DATA_VO (uplink)     |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
            | DATA_VO (downlink)   |      |      |          |
 ----------------------------------------------------------------------------
 WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
            |                      |      |      | O(100B)  |
 ----------------------------------------------------------------------------
 WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
 messages   | (downlink)           |      |      | O(100B)  |
            |                      |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
            | HTC_RAW_STREAMS      |      |      |          |
            | (uplink)             |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
            | HTC_RAW_STREAMS      |      |      |          |
            | (downlink)           |      |      |          |
 ----------------------------------------------------------------------------
 diag       | none (raw CE)        | CE 7 | t<>h |    4     | Diag Window
            |                      |      |      |          | infrequent
 ============================================================================
 */

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC,    /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC,    /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,    /* in = DL = target -> host */
		5,
	},
	/* (Additions here) */

	{       /* Must be last */
		0,
		0,
		0,
	},
};

static struct service_to_pipe *target_service_to_ce_map =
	target_service_to_ce_map_wlan;
static int target_service_to_ce_map_sz = sizeof(target_service_to_ce_map_wlan);

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
	{0, 0, 0,},             /* Must be last */
};

#ifdef HIF_PCI
/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
void hif_wake_target_cpu(struct ol_softc *scn)
{
	CDF_STATUS rv;
	uint32_t core_ctrl;

	rv = hif_diag_read_access(scn,
				  SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
				  &core_ctrl);
	CDF_ASSERT(rv == CDF_STATUS_SUCCESS);
	/* A_INUM_FIRMWARE interrupt to Target CPU */
	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

	rv = hif_diag_write_access(scn,
				   SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
				   core_ctrl);
	CDF_ASSERT(rv == CDF_STATUS_SUCCESS);
}
#endif

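/**
 * hif_sleep_entry() - sleep timer callback used by the keep-awake logic
 * @arg: opaque pointer to the struct HIF_CE_state that armed the timer
 *
 * If the link has been idle long enough and is not suspended, lets the SoC
 * go back to sleep; otherwise re-arms the sleep timer. Does nothing while
 * recovery or driver unload is in progress.
 */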
1507static void hif_sleep_entry(void *arg)
1508{
1509 struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
Komal Seelam02cf2f82016-02-22 20:44:25 +05301510 struct ol_softc *scn = HIF_GET_SOFTC(hif_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001511 uint32_t idle_ms;
1512 if (scn->recovery)
1513 return;
1514
Prashanth Bhattadfcae6b2015-12-04 11:56:47 -08001515 if (cds_is_driver_unloading())
Sanjay Devnani79c99b22015-11-23 11:42:35 -08001516 return;
1517
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001518 cdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
1519 if (hif_state->verified_awake == false) {
1520 idle_ms = cdf_system_ticks_to_msecs(cdf_system_ticks()
1521 - hif_state->sleep_ticks);
1522 if (idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
1523 if (!cdf_atomic_read(&scn->link_suspended)) {
1524 soc_wake_reset(scn);
1525 hif_state->fake_sleep = false;
1526 }
1527 } else {
1528 cdf_softirq_timer_cancel(&hif_state->sleep_timer);
1529 cdf_softirq_timer_start(&hif_state->sleep_timer,
1530 HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1531 }
1532 } else {
1533 cdf_softirq_timer_cancel(&hif_state->sleep_timer);
1534 cdf_softirq_timer_start(&hif_state->sleep_timer,
1535 HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1536 }
1537 cdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
1538}
1539#define HIF_HIA_MAX_POLL_LOOP 1000000
1540#define HIF_HIA_POLLING_DELAY_MS 10
1541
1542#ifndef HIF_PCI
1543int hif_set_hia(struct ol_softc *scn)
1544{
1545 return 0;
1546}
1547#else
1548int hif_set_hia(struct ol_softc *scn)
1549{
1550 CDF_STATUS rv;
1551 uint32_t interconnect_targ_addr = 0;
1552 uint32_t pcie_state_targ_addr = 0;
1553 uint32_t pipe_cfg_targ_addr = 0;
1554 uint32_t svc_to_pipe_map = 0;
1555 uint32_t pcie_config_flags = 0;
1556 uint32_t flag2_value = 0;
1557 uint32_t flag2_targ_addr = 0;
1558#ifdef QCA_WIFI_3_0
1559 uint32_t host_interest_area = 0;
1560 uint8_t i;
1561#else
1562 uint32_t ealloc_value = 0;
1563 uint32_t ealloc_targ_addr = 0;
1564 uint8_t banks_switched = 1;
1565 uint32_t chip_id;
1566#endif
1567 uint32_t pipe_cfg_addr;
Komal Seelam91553ce2016-01-27 18:57:10 +05301568 struct hif_target_info *tgt_info = hif_get_target_info_handle(scn);
1569 uint32_t target_type = tgt_info->target_type;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001570
1571 HIF_TRACE("%s: E", __func__);
1572
Houston Hoffman06bc4f52015-12-16 18:43:34 -08001573 if (ADRASTEA_BU)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001574 return CDF_STATUS_SUCCESS;
1575
1576#ifdef QCA_WIFI_3_0
1577 i = 0;
1578 while (i < HIF_HIA_MAX_POLL_LOOP) {
1579 host_interest_area = hif_read32_mb(scn->mem +
1580 A_SOC_CORE_SCRATCH_0_ADDRESS);
1581 if ((host_interest_area & 0x01) == 0) {
1582 cdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
1583 host_interest_area = 0;
1584 i++;
1585 if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0)) {
1586 HIF_ERROR("%s: poll timeout(%d)", __func__, i);
1587 }
1588 } else {
1589 host_interest_area &= (~0x01);
1590 hif_write32_mb(scn->mem + 0x113014, 0);
1591 break;
1592 }
1593 }
1594
1595 if (i >= HIF_HIA_MAX_POLL_LOOP) {
1596 HIF_ERROR("%s: hia polling timeout", __func__);
1597 return -EIO;
1598 }
1599
1600 if (host_interest_area == 0) {
1601 HIF_ERROR("%s: host_interest_area = 0", __func__);
1602 return -EIO;
1603 }
1604
1605 interconnect_targ_addr = host_interest_area +
1606 offsetof(struct host_interest_area_t,
1607 hi_interconnect_state);
1608
1609 flag2_targ_addr = host_interest_area +
1610 offsetof(struct host_interest_area_t, hi_option_flag2);
1611
1612#else
1613	interconnect_targ_addr = hif_hia_item_address(target_type,
1614		offsetof(struct host_interest_s, hi_interconnect_state));
1615	ealloc_targ_addr = hif_hia_item_address(target_type,
1616		offsetof(struct host_interest_s, hi_early_alloc));
1617	flag2_targ_addr = hif_hia_item_address(target_type,
1618		offsetof(struct host_interest_s, hi_option_flag2));
1619#endif
1620 /* Supply Target-side CE configuration */
1621 rv = hif_diag_read_access(scn, interconnect_targ_addr,
1622 &pcie_state_targ_addr);
1623 if (rv != CDF_STATUS_SUCCESS) {
1624 HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
1625 __func__, interconnect_targ_addr, rv);
1626 goto done;
1627 }
1628 if (pcie_state_targ_addr == 0) {
1629 rv = CDF_STATUS_E_FAILURE;
1630 HIF_ERROR("%s: pcie state addr is 0", __func__);
1631 goto done;
1632 }
1633 pipe_cfg_addr = pcie_state_targ_addr +
1634 offsetof(struct pcie_state_s,
1635 pipe_cfg_addr);
1636 rv = hif_diag_read_access(scn,
1637 pipe_cfg_addr,
1638 &pipe_cfg_targ_addr);
1639 if (rv != CDF_STATUS_SUCCESS) {
1640 HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
1641 __func__, pipe_cfg_addr, rv);
1642 goto done;
1643 }
1644 if (pipe_cfg_targ_addr == 0) {
1645 rv = CDF_STATUS_E_FAILURE;
1646 HIF_ERROR("%s: pipe cfg addr is 0", __func__);
1647 goto done;
1648 }
1649
1650 rv = hif_diag_write_mem(scn, pipe_cfg_targ_addr,
1651 (uint8_t *) target_ce_config,
1652 target_ce_config_sz);
1653
1654 if (rv != CDF_STATUS_SUCCESS) {
1655 HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
1656 goto done;
1657 }
1658
1659 rv = hif_diag_read_access(scn,
1660 pcie_state_targ_addr +
1661 offsetof(struct pcie_state_s,
1662 svc_to_pipe_map),
1663 &svc_to_pipe_map);
1664 if (rv != CDF_STATUS_SUCCESS) {
1665 HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
1666 goto done;
1667 }
1668 if (svc_to_pipe_map == 0) {
1669 rv = CDF_STATUS_E_FAILURE;
1670 HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
1671 goto done;
1672 }
1673
1674 rv = hif_diag_write_mem(scn,
1675 svc_to_pipe_map,
1676 (uint8_t *) target_service_to_ce_map,
1677 target_service_to_ce_map_sz);
1678 if (rv != CDF_STATUS_SUCCESS) {
1679 HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
1680 goto done;
1681 }
1682
1683 rv = hif_diag_read_access(scn,
1684 pcie_state_targ_addr +
1685 offsetof(struct pcie_state_s,
1686 config_flags),
1687 &pcie_config_flags);
1688 if (rv != CDF_STATUS_SUCCESS) {
1689 HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
1690 goto done;
1691 }
1692#if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
1693 pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
1694#else
1695 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1696#endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
1697 pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
1698#if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
1699 pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
1700#endif
1701 rv = hif_diag_write_mem(scn,
1702 pcie_state_targ_addr +
1703 offsetof(struct pcie_state_s,
1704 config_flags),
1705 (uint8_t *) &pcie_config_flags,
1706 sizeof(pcie_config_flags));
1707 if (rv != CDF_STATUS_SUCCESS) {
1708 HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
1709 goto done;
1710 }
1711
1712#ifndef QCA_WIFI_3_0
1713 /* configure early allocation */
1714	ealloc_targ_addr = hif_hia_item_address(target_type,
1715			offsetof(
1716			struct host_interest_s,
1717			hi_early_alloc));
1718
1719 rv = hif_diag_read_access(scn, ealloc_targ_addr,
1720 &ealloc_value);
1721 if (rv != CDF_STATUS_SUCCESS) {
1722 HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
1723 goto done;
1724 }
1725
1726 /* 1 bank is switched to IRAM, except ROME 1.0 */
1727 ealloc_value |=
1728 ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1729 HI_EARLY_ALLOC_MAGIC_MASK);
1730
1731 rv = hif_diag_read_access(scn,
1732 CHIP_ID_ADDRESS |
1733 RTC_SOC_BASE_ADDRESS, &chip_id);
1734 if (rv != CDF_STATUS_SUCCESS) {
1735 HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
1736 goto done;
1737 }
1738 if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
1739		tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
1740		switch (CHIP_ID_REVISION_GET(chip_id)) {
1741 case 0x2: /* ROME 1.3 */
1742 /* 2 banks are switched to IRAM */
1743 banks_switched = 2;
1744 break;
1745 case 0x4: /* ROME 2.1 */
1746 case 0x5: /* ROME 2.2 */
1747 banks_switched = 6;
1748 break;
1749 case 0x8: /* ROME 3.0 */
1750 case 0x9: /* ROME 3.1 */
1751 case 0xA: /* ROME 3.2 */
1752 banks_switched = 9;
1753 break;
1754 case 0x0: /* ROME 1.0 */
1755 case 0x1: /* ROME 1.1 */
1756 default:
1757 /* 3 banks are switched to IRAM */
1758 banks_switched = 3;
1759 break;
1760 }
1761 }
1762
1763 ealloc_value |=
1764 ((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
1765 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1766
1767 rv = hif_diag_write_access(scn,
1768 ealloc_targ_addr,
1769 ealloc_value);
1770 if (rv != CDF_STATUS_SUCCESS) {
1771 HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
1772 goto done;
1773 }
1774#endif
1775
1776 /* Tell Target to proceed with initialization */
1777	flag2_targ_addr = hif_hia_item_address(target_type,
1778			offsetof(
1779 struct host_interest_s,
1780 hi_option_flag2));
1781
1782 rv = hif_diag_read_access(scn, flag2_targ_addr,
1783 &flag2_value);
1784 if (rv != CDF_STATUS_SUCCESS) {
1785 HIF_ERROR("%s: get option val (%d)", __func__, rv);
1786 goto done;
1787 }
1788
1789 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1790 rv = hif_diag_write_access(scn, flag2_targ_addr,
1791 flag2_value);
1792 if (rv != CDF_STATUS_SUCCESS) {
1793 HIF_ERROR("%s: set option val (%d)", __func__, rv);
1794 goto done;
1795 }
1796
1797 hif_wake_target_cpu(scn);
1798
1799done:
1800
1801 return rv;
1802}
1803#endif
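/*
 * Illustrative sketch (not compiled): any other host-interest item can be
 * located the same way hi_interconnect_state and hi_option_flag2 are
 * resolved above.  hi_board_data is used purely as an example field and is
 * assumed to be a member of struct host_interest_s for the target in use.
 */
#if 0
static uint32_t example_board_data_addr(uint32_t target_type)
{
	return hif_hia_item_address(target_type,
			offsetof(struct host_interest_s, hi_board_data));
}
#endif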
1804
1805/**
1806 * hif_wlan_enable(): call the platform driver to enable wlan
1807 *
1808 * This function passes the con_mode and CE configuration to
1809 * platform driver to enable wlan.
1810 *
1811 * Return: 0 on success, error code otherwise
1812 */
1813static int hif_wlan_enable(void)
1814{
1815 struct icnss_wlan_enable_cfg cfg;
1816 enum icnss_driver_mode mode;
1817 uint32_t con_mode = cds_get_conparam();
1818
1819 cfg.num_ce_tgt_cfg = target_ce_config_sz /
1820 sizeof(struct CE_pipe_config);
1821 cfg.ce_tgt_cfg = (struct ce_tgt_pipe_cfg *)target_ce_config;
1822 cfg.num_ce_svc_pipe_cfg = target_service_to_ce_map_sz /
1823 sizeof(struct service_to_pipe);
1824 cfg.ce_svc_cfg = (struct ce_svc_pipe_cfg *)target_service_to_ce_map;
1825 cfg.num_shadow_reg_cfg = shadow_cfg_sz / sizeof(struct shadow_reg_cfg);
1826 cfg.shadow_reg_cfg = (struct icnss_shadow_reg_cfg *) target_shadow_reg_cfg;
1827
1828	if (CDF_GLOBAL_FTM_MODE == con_mode)
1829		mode = ICNSS_FTM;
1830	else if (WLAN_IS_EPPING_ENABLED(cds_get_conparam()))
1831		mode = ICNSS_EPPING;
1832	else
1833		mode = ICNSS_MISSION;
1834
1835	return icnss_wlan_enable(&cfg, mode, QWLAN_VERSIONSTR);
1836}
1837
1838/*
1839 * Called from PCI layer whenever a new PCI device is probed.
1840 * Initializes per-device HIF state and notifies the main
1841 * driver that a new HIF device is present.
1842 */
1843int hif_config_ce(hif_handle_t hif_hdl)
1844{
1845	struct HIF_CE_pipe_info *pipe_info;
1846 int pipe_num;
1847#ifdef ADRASTEA_SHADOW_REGISTERS
1848 int i;
1849#endif
1850 CDF_STATUS rv = CDF_STATUS_SUCCESS;
1851 int ret;
1852 struct ol_softc *scn = hif_hdl;
1853	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn;
1854	struct icnss_soc_info soc_info;
1855	struct hif_target_info *tgt_info = hif_get_target_info_handle(scn);
1856
1857 /* if epping is enabled we need to use the epping configuration. */
1858 if (WLAN_IS_EPPING_ENABLED(cds_get_conparam())) {
1859 if (WLAN_IS_EPPING_IRQ(cds_get_conparam()))
1860 host_ce_config = host_ce_config_wlan_epping_irq;
1861 else
1862 host_ce_config = host_ce_config_wlan_epping_poll;
1863 target_ce_config = target_ce_config_wlan_epping;
1864 target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
1865 target_service_to_ce_map =
1866 target_service_to_ce_map_wlan_epping;
1867 target_service_to_ce_map_sz =
1868 sizeof(target_service_to_ce_map_wlan_epping);
1869 }
1870
1871 ret = hif_wlan_enable();
1872
1873 if (ret) {
1874 HIF_ERROR("%s: hif_wlan_enable error = %d", __func__, ret);
1875 return CDF_STATUS_NOT_INITIALIZED;
1876 }
1877
1878 scn->notice_send = true;
1879
1880 cdf_mem_zero(&soc_info, sizeof(soc_info));
1881	ret = icnss_get_soc_info(scn, &soc_info);
1882	if (ret < 0) {
1883 HIF_ERROR("%s: icnss_get_soc_info error = %d", __func__, ret);
1884 return CDF_STATUS_NOT_INITIALIZED;
1885 }
1886
1887	scn->mem = soc_info.v_addr;
1888	scn->mem_pa = soc_info.p_addr;
1889	tgt_info->soc_version = soc_info.version;
1890
1891 cdf_spinlock_init(&hif_state->keep_awake_lock);
1892
1893	hif_state->keep_awake_count = 0;
1894
1895 hif_state->fake_sleep = false;
1896 hif_state->sleep_ticks = 0;
1897 cdf_softirq_timer_init(NULL, &hif_state->sleep_timer,
1898 hif_sleep_entry, (void *)hif_state,
1899 CDF_TIMER_TYPE_WAKE_APPS);
1900 hif_state->sleep_timer_init = true;
1901 hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
1902#ifdef HIF_PCI
1903#if CONFIG_ATH_PCIE_MAX_PERF || CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD
1904 /* Force AWAKE forever/till the driver is loaded */
1905 if (hif_target_sleep_state_adjust(scn, false, true) < 0)
1906 return -EACCES;
1907#endif
1908#endif
1909
1910	hif_config_rri_on_ddr(scn);
1911
1912	/* During CE initialization */
1913 scn->ce_count = HOST_CE_COUNT;
1914 A_TARGET_ACCESS_LIKELY(scn);
1915 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1916 struct CE_attr *attr;
1917
1918 pipe_info = &hif_state->pipe_info[pipe_num];
1919 pipe_info->pipe_num = pipe_num;
1920 pipe_info->HIF_CE_state = hif_state;
1921 attr = &host_ce_config[pipe_num];
1922 pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
1923 CDF_ASSERT(pipe_info->ce_hdl != NULL);
1924 if (pipe_info->ce_hdl == NULL) {
1925 rv = CDF_STATUS_E_FAILURE;
1926 A_TARGET_ACCESS_UNLIKELY(scn);
1927 goto err;
1928 }
1929
1930 if (pipe_num == DIAG_CE_ID) {
1931			/* Reserve the last CE for
1932			 * Diagnostic Window support */
1933 hif_state->ce_diag =
1934 hif_state->pipe_info[scn->ce_count - 1].ce_hdl;
1935 continue;
1936 }
1937
1938 pipe_info->buf_sz = (cdf_size_t) (attr->src_sz_max);
1939 cdf_spinlock_init(&pipe_info->recv_bufs_needed_lock);
1940 if (attr->dest_nentries > 0) {
1941 atomic_set(&pipe_info->recv_bufs_needed,
1942 init_buffer_count(attr->dest_nentries - 1));
1943 } else {
1944 atomic_set(&pipe_info->recv_bufs_needed, 0);
1945 }
1946 ce_tasklet_init(hif_state, (1 << pipe_num));
1947 ce_register_irq(hif_state, (1 << pipe_num));
1948 scn->request_irq_done = true;
1949 }
1950
1951 if (athdiag_procfs_init(scn) != 0) {
1952 A_TARGET_ACCESS_UNLIKELY(scn);
1953 goto err;
1954 }
1955 scn->athdiag_procfs_inited = true;
1956
1957 /*
1958 * Initially, establish CE completion handlers for use with BMI.
1959 * These are overwritten with generic handlers after we exit BMI phase.
1960 */
1961 pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_TARG];
1962#ifdef HIF_PCI
1963 ce_send_cb_register(
1964 pipe_info->ce_hdl, hif_bmi_send_done, pipe_info, 0);
1965#ifndef BMI_RSP_POLLING
1966 pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_HOST];
1967 ce_recv_cb_register(
1968 pipe_info->ce_hdl, hif_bmi_recv_data, pipe_info, 0);
1969#endif
1970#endif
1971 HIF_INFO_MED("%s: ce_init done", __func__);
1972
1973 rv = hif_set_hia(scn);
1974
1975 HIF_INFO_MED("%s: hif_set_hia done", __func__);
1976
1977 A_TARGET_ACCESS_UNLIKELY(scn);
1978
1979 if (rv != CDF_STATUS_SUCCESS)
1980 goto err;
1981 else
1982		init_tasklet_workers(scn);
1983
1984 HIF_TRACE("%s: X, ret = %d\n", __func__, rv);
1985
1986#ifdef ADRASTEA_SHADOW_REGISTERS
1987 HIF_ERROR("Using Shadow Registers instead of CE Registers\n");
1988 for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
1989 HIF_ERROR("%s Shadow Register%d is mapped to address %x\n",
1990 __func__, i,
1991 (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
1992 }
1993#endif
1994
1995
1996	return rv;
1997
1998err:
1999 /* Failure, so clean up */
2000 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2001 pipe_info = &hif_state->pipe_info[pipe_num];
2002 if (pipe_info->ce_hdl) {
2003 ce_unregister_irq(hif_state, (1 << pipe_num));
2004 scn->request_irq_done = false;
2005 ce_fini(pipe_info->ce_hdl);
2006 pipe_info->ce_hdl = NULL;
2007 pipe_info->buf_sz = 0;
2008 }
2009 }
2010 if (hif_state->sleep_timer_init) {
2011 cdf_softirq_timer_cancel(&hif_state->sleep_timer);
2012 cdf_softirq_timer_free(&hif_state->sleep_timer);
2013 hif_state->sleep_timer_init = false;
2014 }
2015
2016	athdiag_procfs_remove();
2017 scn->athdiag_procfs_inited = false;
2018 HIF_TRACE("%s: X, ret = %d\n", __func__, rv);
2019	return CDF_STATUS_E_FAILURE;
2020}
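/*
 * Illustrative sketch (not compiled): a caller bringing the copy engines
 * up would typically configure them and then hand the initial receive
 * buffers to the hardware, roughly as below.  The wrapper name and error
 * handling are assumptions; hif_config_ce() and hif_post_recv_buffers()
 * are the functions defined/declared in this file.
 */
#if 0
static int example_probe_ce_setup(hif_handle_t hif_hdl)
{
	struct ol_softc *scn = hif_hdl;
	int ret;

	ret = hif_config_ce(hif_hdl);
	if (ret)
		return ret;

	/* post empty receive buffers so the target can start sending */
	return hif_post_recv_buffers(scn);
}
#endif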
2021
2027#ifdef IPA_OFFLOAD
2028/**
2029 * hif_ipa_get_ce_resource() - get uc resource on hif
2030 * @hif_ctx: HIF context
2031 * @ce_sr_base_paddr: copyengine source ring base physical address
2032 * @ce_sr_ring_size: copyengine source ring size
2033 * @ce_reg_paddr: copyengine register physical address
2034 *
2035 * When the IPA micro controller data path offload feature is enabled,
2036 * HIF releases the copy engine resource information to the IPA uC,
2037 * which then accesses the hardware resources using that information.
2038 *
2039 * Return: None
2040 */
2041void hif_ipa_get_ce_resource(struct ol_softc *hif_ctx,
2042			     cdf_dma_addr_t *ce_sr_base_paddr,
2043			     uint32_t *ce_sr_ring_size,
2044			     cdf_dma_addr_t *ce_reg_paddr)
2045{
2046	struct ol_softc *scn = HIF_GET_SOFTC(hif_ctx);
2047	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2048	struct HIF_CE_pipe_info *pipe_info =
2049 &(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
2050 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
2051
2052 ce_ipa_get_resource(ce_hdl, ce_sr_base_paddr, ce_sr_ring_size,
2053 ce_reg_paddr);
2054 return;
2055}
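/*
 * Illustrative sketch (not compiled): an IPA uC setup path might pull the
 * copy engine resources out of HIF as below before programming the uC.
 * The function name and local variables are assumptions.
 */
#if 0
static void example_ipa_fetch_ce_resource(struct ol_softc *scn)
{
	cdf_dma_addr_t ce_sr_base_paddr, ce_reg_paddr;
	uint32_t ce_sr_ring_size;

	hif_ipa_get_ce_resource(scn, &ce_sr_base_paddr,
				&ce_sr_ring_size, &ce_reg_paddr);
	/* ce_sr_base_paddr/ce_sr_ring_size/ce_reg_paddr would now be
	 * handed over to the IPA micro controller */
}
#endif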
2056#endif /* IPA_OFFLOAD */
2057
2058
2059#ifdef ADRASTEA_SHADOW_REGISTERS
2060
2061/*
2062 Current shadow register config
2063
2064 -----------------------------------------------------------
2065 Shadow Register | CE | src/dst write index
2066 -----------------------------------------------------------
2067 0 | 0 | src
2068 1 No Config - Doesn't point to anything
2069 2 No Config - Doesn't point to anything
2070 3 | 3 | src
2071 4 | 4 | src
2072 5 | 5 | src
2073 6 No Config - Doesn't point to anything
2074 7 | 7 | src
2075 8 No Config - Doesn't point to anything
2076 9 No Config - Doesn't point to anything
2077 10 No Config - Doesn't point to anything
2078 11 No Config - Doesn't point to anything
2079 -----------------------------------------------------------
2080 12 No Config - Doesn't point to anything
2081 13 | 1 | dst
2082 14 | 2 | dst
2083 15 No Config - Doesn't point to anything
2084 16 No Config - Doesn't point to anything
2085 17 No Config - Doesn't point to anything
2086 18 No Config - Doesn't point to anything
2087 19 | 7 | dst
2088 20 | 8 | dst
2089 21 No Config - Doesn't point to anything
2090 22 No Config - Doesn't point to anything
2091 23 No Config - Doesn't point to anything
2092 -----------------------------------------------------------
2093
2094
2095 ToDo - Move shadow register config to following in the future
2096 This helps free up a block of shadow registers towards the end.
2097 Can be used for other purposes
2098
2099 -----------------------------------------------------------
2100 Shadow Register | CE | src/dst write index
2101 -----------------------------------------------------------
2102 0 | 0 | src
2103 1 | 3 | src
2104 2 | 4 | src
2105 3 | 5 | src
2106 4 | 7 | src
2107 -----------------------------------------------------------
2108 5 | 1 | dst
2109 6 | 2 | dst
2110 7 | 7 | dst
2111 8 | 8 | dst
2112 -----------------------------------------------------------
2113 9 No Config - Doesn't point to anything
2114 12 No Config - Doesn't point to anything
2115 13 No Config - Doesn't point to anything
2116 14 No Config - Doesn't point to anything
2117 15 No Config - Doesn't point to anything
2118 16 No Config - Doesn't point to anything
2119 17 No Config - Doesn't point to anything
2120 18 No Config - Doesn't point to anything
2121 19 No Config - Doesn't point to anything
2122 20 No Config - Doesn't point to anything
2123 21 No Config - Doesn't point to anything
2124 22 No Config - Doesn't point to anything
2125 23 No Config - Doesn't point to anything
2126 -----------------------------------------------------------
2127*/
2128
2129u32 shadow_sr_wr_ind_addr(struct ol_softc *scn, u32 ctrl_addr)
2130{
2131 u32 addr = 0;
2132
2133 switch (COPY_ENGINE_ID(ctrl_addr)) {
2134 case 0:
2135 addr = SHADOW_VALUE0;
2136 break;
2137 case 3:
2138 addr = SHADOW_VALUE3;
2139 break;
2140 case 4:
2141 addr = SHADOW_VALUE4;
2142 break;
2143 case 5:
2144 addr = SHADOW_VALUE5;
2145 break;
2146 case 7:
2147 addr = SHADOW_VALUE7;
2148 break;
2149 default:
2150		HIF_ERROR("invalid CE ctrl_addr\n");
2151		CDF_ASSERT(0);
2152
2153 }
2154 return addr;
2155
2156}
2157
2158u32 shadow_dst_wr_ind_addr(struct ol_softc *scn, u32 ctrl_addr)
2159{
2160 u32 addr = 0;
2161
2162 switch (COPY_ENGINE_ID(ctrl_addr)) {
2163 case 1:
2164 addr = SHADOW_VALUE13;
2165 break;
2166 case 2:
2167 addr = SHADOW_VALUE14;
2168 break;
2169 case 7:
2170 addr = SHADOW_VALUE19;
2171 break;
2172 case 8:
2173 addr = SHADOW_VALUE20;
2174 break;
2175 default:
2176		HIF_ERROR("invalid CE ctrl_addr\n");
2177		CDF_ASSERT(0);
2178 }
2179
2180 return addr;
2181
2182}
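/*
 * Illustrative sketch (not compiled): the CE-to-shadow-register mapping
 * encoded in the switch statements above could equally be expressed as a
 * lookup table; the array below mirrors the current source-ring
 * assignment (CE 0/3/4/5/7) purely to visualise that mapping.  Unlisted
 * entries correspond to the "No Config" slots in the comment table above.
 */
#if 0
static const u32 example_sr_shadow_map[] = {
	[0] = SHADOW_VALUE0,
	[3] = SHADOW_VALUE3,
	[4] = SHADOW_VALUE4,
	[5] = SHADOW_VALUE5,
	[7] = SHADOW_VALUE7,
};
#endif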
2183#endif
2184
2185#if defined(FEATURE_LRO)
2186/**
2187 * ce_lro_flush_cb_register() - register the LRO flush
2188 * callback
2189 * @scn: HIF context
2190 * @handler: callback function
2191 * @data: opaque data pointer to be passed back
2192 *
2193 * Store the LRO flush callback provided
2194 *
2195 * Return: none
2196 */
2197void ce_lro_flush_cb_register(struct ol_softc *scn,
2198			      void (handler)(void *), void *data)
2199{
2200	uint8_t ul, dl;
2201 int ul_polled, dl_polled;
2202
2203 CDF_ASSERT(scn != NULL);
2204
2205 if (CDF_STATUS_SUCCESS !=
2206 hif_map_service_to_pipe(scn, HTT_DATA_MSG_SVC,
2207 &ul, &dl, &ul_polled, &dl_polled)) {
2208		HIF_ERROR("%s: cannot map service to pipe", __func__);
2209 return;
2210 } else {
2211 struct CE_state *ce_state;
2212 ce_state = scn->ce_id_to_state[dl];
2213 ce_state->lro_flush_cb = handler;
2214 ce_state->lro_data = data;
2215 }
2216}
2217
2218/**
2219 * ce_lro_flush_cb_deregister() - deregister the LRO flush
2220 * callback
2221 * @scn: HIF context
2222 *
2223 * Remove the LRO flush callback
2224 *
2225 * Return: none
2226 */
2227void ce_lro_flush_cb_deregister(struct ol_softc *scn)
2228{
2229 uint8_t ul, dl;
2230 int ul_polled, dl_polled;
2231
2232 CDF_ASSERT(scn != NULL);
2233
2234 if (CDF_STATUS_SUCCESS !=
2235 hif_map_service_to_pipe(scn, HTT_DATA_MSG_SVC,
2236 &ul, &dl, &ul_polled, &dl_polled)) {
2237		HIF_ERROR("%s: cannot map service to pipe", __func__);
2238 return;
2239 } else {
2240 struct CE_state *ce_state;
2241 ce_state = scn->ce_id_to_state[dl];
2242 ce_state->lro_flush_cb = NULL;
2243 ce_state->lro_data = NULL;
2244 }
2245}
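/*
 * Illustrative sketch (not compiled): an LRO consumer is expected to
 * register its flush handler against the HTT data copy engine and remove
 * it on teardown, roughly as below.  The handler and wrapper names are
 * assumptions.
 */
#if 0
static void example_lro_flush_handler(void *data)
{
	/* flush any LRO aggregation state held in 'data' */
}

static void example_lro_init(struct ol_softc *scn, void *lro_ctx)
{
	ce_lro_flush_cb_register(scn, example_lro_flush_handler, lro_ctx);
}

static void example_lro_deinit(struct ol_softc *scn)
{
	ce_lro_flush_cb_deregister(scn);
}
#endif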
2246#endif
2247
2248/**
2249 * hif_map_service_to_pipe() - returns the ce ids pertaining to
2250 * this service
2251 * @scn: ol_softc pointer.
2252 * @svc_id: Service ID for which the mapping is needed.
2253 * @ul_pipe: address of the container in which ul pipe is returned.
2254 * @dl_pipe: address of the container in which dl pipe is returned.
2255 * @ul_is_polled: address of the container in which a bool
2256 * indicating if the UL CE for this service
2257 * is polled is returned.
2258 * @dl_is_polled: address of the container in which a bool
2259 * indicating if the DL CE for this service
2260 * is polled is returned.
2261 *
2262 * Return: Indicates whether this operation was successful.
2263 */
2264
2265int hif_map_service_to_pipe(struct ol_softc *scn, uint16_t svc_id,
2266 uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
2267 int *dl_is_polled)
2268{
2269 int status = CDF_STATUS_SUCCESS;
2270
2271 unsigned int i;
2272 struct service_to_pipe element;
2273
2274 struct service_to_pipe *tgt_svc_map_to_use;
2275 size_t sz_tgt_svc_map_to_use;
2276
2277 if (WLAN_IS_EPPING_ENABLED(cds_get_conparam())) {
2278 tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
2279 sz_tgt_svc_map_to_use =
2280 sizeof(target_service_to_ce_map_wlan_epping);
2281 } else {
2282 tgt_svc_map_to_use = target_service_to_ce_map_wlan;
2283 sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_wlan);
2284 }
2285
2286 *dl_is_polled = 0; /* polling for received messages not supported */
2287
2288 for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
2289
2290 memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
2291 if (element.service_id == svc_id) {
2292
2293 if (element.pipedir == PIPEDIR_OUT)
2294 *ul_pipe = element.pipenum;
2295
2296 else if (element.pipedir == PIPEDIR_IN)
2297 *dl_pipe = element.pipenum;
2298 }
2299 }
2300
2301 *ul_is_polled =
2302 (host_ce_config[*ul_pipe].flags & CE_ATTR_DISABLE_INTR) != 0;
2303
2304 return status;
2305}
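/*
 * Illustrative sketch (not compiled): a typical lookup for the HTT data
 * service, mirroring the pattern already used by
 * ce_lro_flush_cb_register() above.  The caller name is an assumption.
 */
#if 0
static int example_lookup_htt_pipes(struct ol_softc *scn)
{
	uint8_t ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;

	if (hif_map_service_to_pipe(scn, HTT_DATA_MSG_SVC,
				    &ul_pipe, &dl_pipe,
				    &ul_is_polled, &dl_is_polled) !=
	    CDF_STATUS_SUCCESS)
		return -EINVAL;

	HIF_INFO_MED("%s: HTT ul pipe %d, dl pipe %d",
		     __func__, ul_pipe, dl_pipe);
	return 0;
}
#endif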
2306
2307#ifdef SHADOW_REG_DEBUG
2308inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct ol_softc *scn,
2309 uint32_t CE_ctrl_addr)
2310{
2311 uint32_t read_from_hw, srri_from_ddr = 0;
2312
2313 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
2314
2315 srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
2316
2317 if (read_from_hw != srri_from_ddr) {
2318 HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x\n",
2319 srri_from_ddr, read_from_hw,
2320 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
2321 CDF_ASSERT(0);
2322 }
2323 return srri_from_ddr;
2324}
2325
2326
2327inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct ol_softc *scn,
2328 uint32_t CE_ctrl_addr)
2329{
2330 uint32_t read_from_hw, drri_from_ddr = 0;
2331
2332 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
2333
2334 drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
2335
2336 if (read_from_hw != drri_from_ddr) {
2337 HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x\n",
2338 drri_from_ddr, read_from_hw,
2339 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
2340 CDF_ASSERT(0);
2341 }
2342 return drri_from_ddr;
2343}
2344
2345#endif
2346
2347#ifdef ADRASTEA_RRI_ON_DDR
2348/**
2349 * hif_get_src_ring_read_index(): Called to get the SRRI
2350 *
2351 * @scn: ol_softc pointer
2352 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2353 *
2354 * This function returns the SRRI to the caller. For CEs that
2355 * dont have interrupts enabled, we look at the DDR based SRRI
2356 * don't have interrupts enabled, we look at the DDR based SRRI
2357 * Return: SRRI
2358 */
2359inline unsigned int hif_get_src_ring_read_index(struct ol_softc *scn,
2360 uint32_t CE_ctrl_addr)
2361{
2362 struct CE_attr attr;
2363
2364 attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2365 if (attr.flags & CE_ATTR_DISABLE_INTR)
2366 return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2367 else
2368 return A_TARGET_READ(scn,
2369 (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
2370}
2371
2372/**
2373 * hif_get_dst_ring_read_index(): Called to get the DRRI
2374 *
2375 * @scn: ol_softc pointer
2376 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2377 *
2378 * This function returns the DRRI to the caller. For CEs that
2379 * don't have interrupts enabled, we look at the DDR based DRRI
2380 *
2381 * Return: DRRI
2382 */
2383inline unsigned int hif_get_dst_ring_read_index(struct ol_softc *scn,
2384 uint32_t CE_ctrl_addr)
2385{
2386 struct CE_attr attr;
2387
2388 attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2389
2390 if (attr.flags & CE_ATTR_DISABLE_INTR)
2391 return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2392 else
2393 return A_TARGET_READ(scn,
2394 (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
2395}
2396
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002397/**
2398 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2399 *
2400 * @scn: ol_softc pointer
2401 *
2402 * This function allocates non-cached memory on DDR and sends
2403 * the physical address of this memory to the CE hardware. The
2404 * hardware updates the RRI on this particular location.
2405 *
2406 * Return: None
2407 */
2408static inline void hif_config_rri_on_ddr(struct ol_softc *scn)
2409{
2410 unsigned int i;
2411 cdf_dma_addr_t paddr_rri_on_ddr;
2412 uint32_t high_paddr, low_paddr;
2413	scn->vaddr_rri_on_ddr =
2414		(uint32_t *)cdf_os_mem_alloc_consistent(scn->cdf_dev,
2415		(CE_COUNT*sizeof(uint32_t)), &paddr_rri_on_ddr, 0);
	if (!scn->vaddr_rri_on_ddr) {
		HIF_ERROR("%s: RRI on DDR alloc failed", __func__);
		return;
	}
2416
2417 low_paddr = BITS0_TO_31(paddr_rri_on_ddr);
2418 high_paddr = BITS32_TO_35(paddr_rri_on_ddr);
2419
2420 HIF_ERROR("%s using srri and drri from DDR\n", __func__);
2421
2422 WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
2423 WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
2424
2425 for (i = 0; i < CE_COUNT; i++)
2426 CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
2427
2428 cdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT*sizeof(uint32_t));
2429
2430 return;
2431}
2432#else
2433
2434/**
2435 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2436 *
2437 * @scn: ol_softc pointer
2438 *
2439 * This is a dummy implementation for platforms that don't
2440 * support this functionality.
2441 *
2442 * Return: None
2443 */
2444static inline void hif_config_rri_on_ddr(struct ol_softc *scn)
2445{
2446 return;
2447}
2448#endif
2449
2450/**
2451 * hif_dump_ce_registers() - dump ce registers
2452 * @scn: ol_softc pointer.
2453 *
2454 * Output the copy engine registers
2455 *
2456 * Return: 0 for success or error code
2457 */
2458int hif_dump_ce_registers(struct ol_softc *scn)
2459{
2460 uint32_t ce_reg_address = CE0_BASE_ADDRESS;
2461 uint32_t ce_reg_values[CE_COUNT_MAX][CE_USEFUL_SIZE >> 2];
2462 uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
2463 uint16_t i;
2464 CDF_STATUS status;
2465
2466 for (i = 0; i < CE_COUNT_MAX; i++, ce_reg_address += CE_OFFSET) {
2467 status = hif_diag_read_mem(scn, ce_reg_address,
2468 (uint8_t *) &ce_reg_values[i][0],
2469 ce_reg_word_size * sizeof(uint32_t));
2470
2471 if (status != CDF_STATUS_SUCCESS) {
2472 HIF_ERROR("Dumping CE register failed!");
2473 return -EACCES;
2474 }
2475 HIF_ERROR("CE%d Registers:", i);
2476 cdf_trace_hex_dump(CDF_MODULE_ID_HIF, CDF_TRACE_LEVEL_DEBUG,
2477 (uint8_t *) &ce_reg_values[i][0],
2478 ce_reg_word_size * sizeof(uint32_t));
2479 }
2480
2481 return 0;
2482}
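/*
 * Illustrative sketch (not compiled): a fatal-error or recovery path might
 * dump the CE registers before tearing the target down.  The surrounding
 * function is an assumption.
 */
#if 0
static void example_on_target_failure(struct ol_softc *scn)
{
	if (hif_dump_ce_registers(scn))
		HIF_ERROR("%s: CE register dump failed", __func__);
}
#endif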