/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */
#include <osdep.h>
#include "a_types.h"
#include "athdefs.h"
#include "osapi_linux.h"
#include "targcfg.h"
#include "cdf_lock.h"
#include "cdf_status.h"
#include <cdf_atomic.h>         /* cdf_atomic_read */
#include <targaddrs.h>
#include <bmi_msg.h>
#include "hif_io32.h"
#include <hif.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#ifdef HIF_PCI
#include "ce_bmi.h"
#endif
#include "ce_api.h"
#include "cdf_trace.h"
#include "cds_api.h"
#ifdef CONFIG_CNSS
#include <net/cnss.h>
#endif
#include "epping_main.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#ifdef HIF_PCI
#include "icnss_stub.h"
#else
#include <soc/qcom/icnss.h>
#endif
#include "qwlan_version.h"
#include "cds_concurrency.h"

#define CE_POLL_TIMEOUT 10      /* ms */

/* Forward references */
static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix EV118783: poll to check whether a BMI response has arrived,
 * rather than only waiting for the interrupt, which may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC  1000


static int hif_post_recv_buffers(struct ol_softc *scn);
static void hif_config_rri_on_ddr(struct ol_softc *scn);

static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;
	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		cdf_softirq_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}

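/**
 * roundup_pwr2() - round a ring size up to the next power of 2
 * @n: requested number of entries
 *
 * Returns @n unchanged if it is already a power of 2; otherwise returns
 * the smallest power of 2 (never below 4) that is greater than @n.
 * Asserts and returns 0 if @n is too large to round up.
 */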
static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	CDF_ASSERT(0); /* n too large */
	return 0;
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 *
 * This should be called during the initialization sequence before
 * interrupts are enabled, so we don't have to worry about thread safety.
 */
struct CE_handle *ce_init(struct ol_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr)
{
	struct CE_state *CE_state;
	uint32_t ctrl_addr;
	unsigned int nentries;
	cdf_dma_addr_t base_addr;
	bool malloc_CE_state = false;
	bool malloc_src_ring = false;

	CDF_ASSERT(CE_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(CE_id);
	CE_state = scn->ce_id_to_state[CE_id];

	if (!CE_state) {
		CE_state =
		    (struct CE_state *)cdf_mem_malloc(sizeof(*CE_state));
		if (!CE_state) {
			HIF_ERROR("%s: CE_state has no mem", __func__);
			return NULL;
		}
		malloc_CE_state = true;
		cdf_mem_zero(CE_state, sizeof(*CE_state));
		scn->ce_id_to_state[CE_id] = CE_state;
		cdf_spinlock_init(&CE_state->ce_index_lock);

		CE_state->id = CE_id;
		CE_state->ctrl_addr = ctrl_addr;
		CE_state->state = CE_RUNNING;
		CE_state->attr_flags = attr->flags;
	}
	CE_state->scn = scn;

	cdf_atomic_init(&CE_state->rx_pending);
	if (attr == NULL) {
		/* Already initialized; caller wants the handle */
		return (struct CE_handle *)CE_state;
	}

#ifdef ADRASTEA_SHADOW_REGISTERS
	HIF_ERROR("%s: Using Shadow Registers instead of CE Registers\n",
		  __func__);
#endif

	if (CE_state->src_sz_max)
		CDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
	else
		CE_state->src_sz_max = attr->src_sz_max;

	ce_init_ce_desc_event_log(CE_id,
			attr->src_nentries + attr->dest_nentries);

	/* source ring setup */
	nentries = attr->src_nentries;
	if (nentries) {
		struct CE_ring_state *src_ring;
		unsigned CE_nbytes;
		char *ptr;
		uint64_t dma_addr;
		nentries = roundup_pwr2(nentries);
		if (CE_state->src_ring) {
			CDF_ASSERT(CE_state->src_ring->nentries == nentries);
		} else {
			CE_nbytes = sizeof(struct CE_ring_state)
				    + (nentries * sizeof(void *));
			ptr = cdf_mem_malloc(CE_nbytes);
			if (!ptr) {
				/* cannot allocate src ring. If the
				 * CE_state is allocated locally free
				 * CE_State and return error.
				 */
				HIF_ERROR("%s: src ring has no mem", __func__);
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					scn->ce_id_to_state[CE_id] = NULL;
					cdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			} else {
				/* we can allocate src ring.
				 * Mark that the src ring is
				 * allocated locally
				 */
				malloc_src_ring = true;
			}
			cdf_mem_zero(ptr, CE_nbytes);

			src_ring = CE_state->src_ring =
				(struct CE_ring_state *)ptr;
			ptr += sizeof(struct CE_ring_state);
			src_ring->nentries = nentries;
			src_ring->nentries_mask = nentries - 1;
			A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
			src_ring->hw_index =
				CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
			src_ring->sw_index = src_ring->hw_index;
			src_ring->write_index =
				CE_SRC_RING_WRITE_IDX_GET(scn, ctrl_addr);
			A_TARGET_ACCESS_END_RET_PTR(scn);
			src_ring->low_water_mark_nentries = 0;
			src_ring->high_water_mark_nentries = nentries;
			src_ring->per_transfer_context = (void **)ptr;

			/* Legacy platforms that do not support cache
			 * coherent DMA are unsupported
			 */
			src_ring->base_addr_owner_space_unaligned =
				cdf_os_mem_alloc_consistent(scn->cdf_dev,
						(nentries *
						 sizeof(struct CE_src_desc) +
						 CE_DESC_RING_ALIGN),
						&base_addr, 0);
			if (src_ring->base_addr_owner_space_unaligned
					== NULL) {
				HIF_ERROR("%s: src ring has no DMA mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->base_addr_CE_space_unaligned = base_addr;

			if (src_ring->
			    base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN
							    - 1)) {
				src_ring->base_addr_CE_space =
					(src_ring->base_addr_CE_space_unaligned
					 + CE_DESC_RING_ALIGN -
					 1) & ~(CE_DESC_RING_ALIGN - 1);

				src_ring->base_addr_owner_space =
					(void
					 *)(((size_t) src_ring->
					     base_addr_owner_space_unaligned +
					     CE_DESC_RING_ALIGN -
					     1) & ~(CE_DESC_RING_ALIGN - 1));
			} else {
				src_ring->base_addr_CE_space =
					src_ring->base_addr_CE_space_unaligned;
				src_ring->base_addr_owner_space =
					src_ring->
					base_addr_owner_space_unaligned;
			}
			/*
			 * Also allocate a shadow src ring in
			 * regular mem to use for faster access.
			 */
			src_ring->shadow_base_unaligned =
				cdf_mem_malloc(nentries *
					       sizeof(struct CE_src_desc) +
					       CE_DESC_RING_ALIGN);
			if (src_ring->shadow_base_unaligned == NULL) {
				HIF_ERROR("%s: src ring no shadow_base mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->shadow_base = (struct CE_src_desc *)
				(((size_t) src_ring->shadow_base_unaligned +
				  CE_DESC_RING_ALIGN - 1) &
				 ~(CE_DESC_RING_ALIGN - 1));

			A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
			dma_addr = src_ring->base_addr_CE_space;
			CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
				 (uint32_t)(dma_addr & 0xFFFFFFFF));
#ifdef WLAN_ENABLE_QCA6180
			{
				uint32_t tmp;
				tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(
				   scn, ctrl_addr);
				tmp &= ~0x1F;
				dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
				CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
					 ctrl_addr, (uint32_t)dma_addr);
			}
#endif
			CE_SRC_RING_SZ_SET(scn, ctrl_addr, nentries);
			CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
#ifdef BIG_ENDIAN_HOST
			/* Enable source ring byte swap for big endian host */
			CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
			CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
			CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
			A_TARGET_ACCESS_END_RET_PTR(scn);
		}
	}

	/* destination ring setup */
	nentries = attr->dest_nentries;
	if (nentries) {
		struct CE_ring_state *dest_ring;
		unsigned CE_nbytes;
		char *ptr;
		uint64_t dma_addr;

		nentries = roundup_pwr2(nentries);
		if (CE_state->dest_ring) {
			CDF_ASSERT(CE_state->dest_ring->nentries == nentries);
		} else {
			CE_nbytes = sizeof(struct CE_ring_state)
				    + (nentries * sizeof(void *));
			ptr = cdf_mem_malloc(CE_nbytes);
			if (!ptr) {
				/* cannot allocate dst ring. If the CE_state
				 * or src ring is allocated locally free
				 * CE_State and src ring and return error.
				 */
				HIF_ERROR("%s: dest ring has no mem",
					  __func__);
				if (malloc_src_ring) {
					cdf_mem_free(CE_state->src_ring);
					CE_state->src_ring = NULL;
					malloc_src_ring = false;
				}
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					scn->ce_id_to_state[CE_id] = NULL;
					cdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			}
			cdf_mem_zero(ptr, CE_nbytes);

			dest_ring = CE_state->dest_ring =
				(struct CE_ring_state *)ptr;
			ptr += sizeof(struct CE_ring_state);
			dest_ring->nentries = nentries;
			dest_ring->nentries_mask = nentries - 1;
			A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
			dest_ring->sw_index =
				CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
			dest_ring->write_index =
				CE_DEST_RING_WRITE_IDX_GET(scn, ctrl_addr);
			A_TARGET_ACCESS_END_RET_PTR(scn);
			dest_ring->low_water_mark_nentries = 0;
			dest_ring->high_water_mark_nentries = nentries;
			dest_ring->per_transfer_context = (void **)ptr;

			/* Legacy platforms that do not support cache
			 * coherent DMA are unsupported */
			dest_ring->base_addr_owner_space_unaligned =
				cdf_os_mem_alloc_consistent(scn->cdf_dev,
						(nentries *
						 sizeof(struct CE_dest_desc) +
						 CE_DESC_RING_ALIGN),
						&base_addr, 0);
			if (dest_ring->base_addr_owner_space_unaligned
				== NULL) {
				HIF_ERROR("%s: dest ring has no DMA mem",
					  __func__);
				goto error_no_dma_mem;
			}
			dest_ring->base_addr_CE_space_unaligned = base_addr;

			/* Correctly initialize memory to 0 to
			 * prevent garbage data crashing system
			 * when download firmware
			 */
			cdf_mem_zero(dest_ring->base_addr_owner_space_unaligned,
				     nentries * sizeof(struct CE_dest_desc) +
				     CE_DESC_RING_ALIGN);

			if (dest_ring->
			    base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN -
							    1)) {

				dest_ring->base_addr_CE_space =
					(dest_ring->
					 base_addr_CE_space_unaligned +
					 CE_DESC_RING_ALIGN -
					 1) & ~(CE_DESC_RING_ALIGN - 1);

				dest_ring->base_addr_owner_space =
					(void
					 *)(((size_t) dest_ring->
					     base_addr_owner_space_unaligned +
					     CE_DESC_RING_ALIGN -
					     1) & ~(CE_DESC_RING_ALIGN - 1));
			} else {
				dest_ring->base_addr_CE_space =
					dest_ring->base_addr_CE_space_unaligned;
				dest_ring->base_addr_owner_space =
					dest_ring->
					base_addr_owner_space_unaligned;
			}

			A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
			dma_addr = dest_ring->base_addr_CE_space;
			CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
				 (uint32_t)(dma_addr & 0xFFFFFFFF));
#ifdef WLAN_ENABLE_QCA6180
			{
				uint32_t tmp;
				tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn,
						ctrl_addr);
				tmp &= ~0x1F;
				dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
				CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
						ctrl_addr, (uint32_t)dma_addr);
			}
#endif
			CE_DEST_RING_SZ_SET(scn, ctrl_addr, nentries);
#ifdef BIG_ENDIAN_HOST
			/* Enable Dest ring byte swap for big endian host */
			CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
			CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
			CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
			A_TARGET_ACCESS_END_RET_PTR(scn);

			/* epping */
			/* poll timer */
			if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL)) {
				cdf_softirq_timer_init(scn->cdf_dev,
						       &CE_state->poll_timer,
						       ce_poll_timeout,
						       CE_state,
						       CDF_TIMER_TYPE_SW);
				CE_state->timer_inited = true;
				cdf_softirq_timer_mod(&CE_state->poll_timer,
						      CE_POLL_TIMEOUT);
			}
		}
	}

	/* Enable CE error interrupts */
	A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
	CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
	A_TARGET_ACCESS_END_RET_PTR(scn);

	return (struct CE_handle *)CE_state;

error_no_dma_mem:
	ce_fini((struct CE_handle *)CE_state);
	return NULL;
}
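
/*
 * Minimal usage sketch for ce_init() (illustrative only; the real call site
 * is the HIF pipe setup path, which is assumed to take per-pipe attributes
 * from host_ce_config[] and keep the returned handle in its pipe_info):
 *
 *	struct CE_attr *attr = &host_ce_config[pipe_num];
 *	struct CE_handle *ce_hdl = ce_init(scn, pipe_num, attr);
 *
 * A NULL return means CE_state or ring allocation failed; any state that
 * ce_init() allocated locally has already been released via ce_fini().
 */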

#ifdef WLAN_FEATURE_FASTPATH
/**
 * ce_h2t_tx_ce_cleanup() - placeholder function for H2T CE cleanup.
 * No processing is required inside this function.
 * @ce_hdl: Copy engine handle
 * Using an assert, this function makes sure that the TX CE has been
 * processed completely.
 *
 * This is called while dismantling CE structures. No other thread
 * should be using these structures while dismantling is occurring,
 * therefore no locking is needed.
 *
 * Return: none
 */
void
ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct ol_softc *sc = ce_state->scn;
	uint32_t sw_index, write_index;

	if (sc->fastpath_mode_on && (ce_state->id == CE_HTT_H2T_MSG)) {
		HIF_INFO("%s %d Fastpath mode ON, Cleaning up HTT Tx CE\n",
			 __func__, __LINE__);
		sw_index = src_ring->sw_index;
		write_index = src_ring->sw_index;

		/* At this point Tx CE should be clean */
		cdf_assert_always(sw_index == write_index);
	}
}
#else
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
}
#endif /* WLAN_FEATURE_FASTPATH */

void ce_fini(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int CE_id = CE_state->id;
	struct ol_softc *scn = CE_state->scn;

	CE_state->state = CE_UNUSED;
	scn->ce_id_to_state[CE_id] = NULL;
	if (CE_state->src_ring) {
		/* Cleanup the HTT Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		if (CE_state->src_ring->shadow_base_unaligned)
			cdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
		if (CE_state->src_ring->base_addr_owner_space_unaligned)
			cdf_os_mem_free_consistent(scn->cdf_dev,
					   (CE_state->src_ring->nentries *
					    sizeof(struct CE_src_desc) +
					    CE_DESC_RING_ALIGN),
					   CE_state->src_ring->
					   base_addr_owner_space_unaligned,
					   CE_state->src_ring->
					   base_addr_CE_space, 0);
		cdf_mem_free(CE_state->src_ring);
	}
	if (CE_state->dest_ring) {
		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
			cdf_os_mem_free_consistent(scn->cdf_dev,
					   (CE_state->dest_ring->nentries *
					    sizeof(struct CE_dest_desc) +
					    CE_DESC_RING_ALIGN),
					   CE_state->dest_ring->
					   base_addr_owner_space_unaligned,
					   CE_state->dest_ring->
					   base_addr_CE_space, 0);
		cdf_mem_free(CE_state->dest_ring);

		/* epping */
		if (CE_state->timer_inited) {
			CE_state->timer_inited = false;
			cdf_softirq_timer_free(&CE_state->poll_timer);
		}
	}
	cdf_mem_free(CE_state);
}

void hif_detach_htc(struct ol_softc *scn)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;

	cdf_mem_zero(&hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
	cdf_mem_zero(&hif_state->msg_callbacks_current,
		     sizeof(hif_state->msg_callbacks_current));
}

/* Send the first nbytes bytes of the buffer */
CDF_STATUS
hif_send_head(struct ol_softc *scn,
	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
	      cdf_nbuf_t nbuf, unsigned int data_attr)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	int bytes = nbytes, nfrags = 0;
	struct ce_sendlist sendlist;
	int status, i = 0;
	unsigned int mux_id = 0;

	CDF_ASSERT(nbytes <= cdf_nbuf_len(nbuf));

	transfer_id =
		(mux_id & MUX_ID_MASK) |
		(transfer_id & TRANSACTION_ID_MASK);
	data_attr &= DESC_DATA_FLAG_MASK;
	/*
	 * The common case involves sending multiple fragments within a
	 * single download (the tx descriptor and the tx frame header).
	 * So, optimize for the case of multiple fragments by not even
	 * checking whether it's necessary to use a sendlist.
	 * The overhead of using a sendlist for a single buffer download
	 * is not a big deal, since it happens rarely (for WMI messages).
	 */
	ce_sendlist_init(&sendlist);
	do {
		uint32_t frag_paddr;
		int frag_bytes;

		frag_paddr = cdf_nbuf_get_frag_paddr_lo(nbuf, nfrags);
		frag_bytes = cdf_nbuf_get_frag_len(nbuf, nfrags);
		/*
		 * Clear the packet offset for all but the first CE desc.
		 */
		if (i++ > 0)
			data_attr &= ~CDF_CE_TX_PKT_OFFSET_BIT_M;

		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
					     frag_bytes >
					     bytes ? bytes : frag_bytes,
					     cdf_nbuf_get_frag_is_wordstream
					     (nbuf,
					      nfrags) ? 0 :
					     CE_SEND_FLAG_SWAP_DISABLE,
					     data_attr);
		if (status != CDF_STATUS_SUCCESS) {
			HIF_ERROR("%s: error, frag_num %d larger than limit",
				  __func__, nfrags);
			return status;
		}
		bytes -= frag_bytes;
		nfrags++;
	} while (bytes > 0);

	/* Make sure we have resources to handle this request */
	cdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	if (pipe_info->num_sends_allowed < nfrags) {
		cdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
		return CDF_STATUS_E_RESOURCES;
	}
	pipe_info->num_sends_allowed -= nfrags;
	cdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);

	if (cdf_unlikely(ce_hdl == NULL)) {
		HIF_ERROR("%s: error CE handle is null", __func__);
		return A_ERROR;
	}

	NBUF_UPDATE_TX_PKT_COUNT(nbuf, NBUF_TX_PKT_HIF);
	DPTRACE(cdf_dp_trace(nbuf, CDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
				(uint8_t *)(cdf_nbuf_data(nbuf)),
				sizeof(cdf_nbuf_data(nbuf))));
	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
	CDF_ASSERT(status == CDF_STATUS_SUCCESS);

	return status;
}
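
/*
 * Illustrative (hypothetical) caller pairing the free-queue query with a
 * send; the actual callers are outside this file, in the HTC send path:
 *
 *	if (hif_get_free_queue_number(scn, pipe) == 0)
 *		return CDF_STATUS_E_RESOURCES;
 *	status = hif_send_head(scn, pipe, transfer_id,
 *			       cdf_nbuf_len(nbuf), nbuf, data_attr);
 *
 * hif_send_head() re-checks num_sends_allowed under the pipe lock, so the
 * query above is an optimization rather than a correctness requirement.
 */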

void hif_send_complete_check(struct ol_softc *scn, uint8_t pipe, int force)
{
	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance. If there seem to be plenty of
		 * resources left, then just wait, since checking involves
		 * reading a CE register, which is a relatively expensive
		 * operation.
		 */
		resources = hif_get_free_queue_number(scn, pipe);
		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config[pipe].src_nentries >> 1)) {
			return;
		}
	}
#ifdef ATH_11AC_TXCOMPACT
	ce_per_engine_servicereap(scn, pipe);
#else
	ce_per_engine_service(scn, pipe);
#endif
}

uint16_t hif_get_free_queue_number(struct ol_softc *scn, uint8_t pipe)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	uint16_t rv;

	cdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	rv = pipe_info->num_sends_allowed;
	cdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
	return rv;
}

/* Called by lower (CE) layer when a send to Target completes. */
void
hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, cdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int sw_index, unsigned int hw_index,
		     unsigned int toeplitz_hash_result)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	unsigned int sw_idx = sw_index, hw_idx = hw_index;
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	do {
		/*
		 * The upper layer callback will be triggered
		 * when the last fragment is completed.
		 */
		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
			if (hif_state->scn->target_status
			    == OL_TRGET_STATUS_RESET)
				cdf_nbuf_free(transfer_context);
			else
				msg_callbacks->txCompletionHandler(
					msg_callbacks->Context,
					transfer_context, transfer_id,
					toeplitz_hash_result);
		}

		cdf_spin_lock(&pipe_info->completion_freeq_lock);
		pipe_info->num_sends_allowed++;
		cdf_spin_unlock(&pipe_info->completion_freeq_lock);
	} while (ce_completed_send_next(copyeng,
			&ce_context, &transfer_context,
			&CE_data, &nbytes, &transfer_id,
			&sw_idx, &hw_idx,
			&toeplitz_hash_result) == CDF_STATUS_SUCCESS);
}

/**
 * hif_ce_do_recv(): send message from copy engine to upper layers
 * @msg_callbacks: structure containing callback and callback context
 * @netbuff: skb containing message
 * @nbytes: number of bytes in the message
 * @pipe_info: used for the pipe_number info
 *
 * Checks the packet length, configures the length in the netbuff,
 * and calls the upper layer callback.
 *
 * return: None
 */
static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
		cdf_nbuf_t netbuf, int nbytes,
		struct HIF_CE_pipe_info *pipe_info) {
	if (nbytes <= pipe_info->buf_sz) {
		cdf_nbuf_set_pktlen(netbuf, nbytes);
		msg_callbacks->
			rxCompletionHandler(msg_callbacks->Context,
					netbuf, pipe_info->pipe_num);
	} else {
		HIF_ERROR("%s: Invalid Rx msg buf:%p nbytes:%d",
				__func__, netbuf, nbytes);
		cdf_nbuf_free(netbuf);
	}
}

/* Called by lower (CE) layer when data is received from the Target. */
void
hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, cdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int flags)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct CE_state *ce_state = (struct CE_state *) copyeng;
	struct ol_softc *scn = hif_state->scn;
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	do {
		hif_pm_runtime_mark_last_busy(scn->hif_sc->dev);
		cdf_nbuf_unmap_single(scn->cdf_dev,
				      (cdf_nbuf_t) transfer_context,
				      CDF_DMA_FROM_DEVICE);

		atomic_inc(&pipe_info->recv_bufs_needed);
		hif_post_recv_buffers_for_pipe(pipe_info);
		if (hif_state->scn->target_status == OL_TRGET_STATUS_RESET)
			cdf_nbuf_free(transfer_context);
		else
			hif_ce_do_recv(msg_callbacks, transfer_context,
				       nbytes, pipe_info);

		/* Set up force_break flag if num of receives reaches
		 * MAX_NUM_OF_RECEIVES */
		ce_state->receive_count++;
		if (cdf_unlikely(hif_max_num_receives_reached(
				ce_state->receive_count))) {
			ce_state->force_break = 1;
			break;
		}
	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
					&CE_data, &nbytes, &transfer_id,
					&flags) == CDF_STATUS_SUCCESS);

}

/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */

void
hif_post_init(struct ol_softc *scn, void *unused,
	      struct hif_msg_callbacks *callbacks)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	spin_lock_init(&pcie_access_log_lock);
#endif
	/* Save callbacks for later installation */
	cdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
		     sizeof(hif_state->msg_callbacks_pending));

}

int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
{
	struct CE_handle *ce_diag = hif_state->ce_diag;
	int pipe_num;
	struct ol_softc *scn = hif_state->scn;
	struct hif_msg_callbacks *hif_msg_callbacks =
		&hif_state->msg_callbacks_current;

	/* daemonize("hif_compl_thread"); */

	if (scn->ce_count == 0) {
		HIF_ERROR("%s: Invalid ce_count\n", __func__);
		return -EINVAL;
	}

	if (!hif_msg_callbacks ||
	    !hif_msg_callbacks->rxCompletionHandler ||
	    !hif_msg_callbacks->txCompletionHandler) {
		HIF_ERROR("%s: no completion handler registered", __func__);
		return -EFAULT;
	}

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr attr;
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl == ce_diag) {
			continue;       /* Handle Diagnostic CE specially */
		}
		attr = host_ce_config[pipe_num];
		if (attr.src_nentries) {
			/* pipe used to send to target */
			HIF_INFO_MED("%s: pipe_num:%d pipe_info:0x%p",
				     __func__, pipe_num, pipe_info);
			ce_send_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_send_done, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
			pipe_info->num_sends_allowed = attr.src_nentries - 1;
		}
		if (attr.dest_nentries) {
			/* pipe used to receive from target */
			ce_recv_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_recv_data, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
		}

		if (attr.src_nentries)
			cdf_spinlock_init(&pipe_info->completion_freeq_lock);
	}

	A_TARGET_ACCESS_UNLIKELY(scn);
	return 0;
}

/*
 * Install pending msg callbacks.
 *
 * TBDXXX: This hack is needed because upper layers install msg callbacks
 * for use with HTC before BMI is done; yet this HIF implementation
 * needs to continue to use BMI msg callbacks. Really, upper layers
 * should not register HTC callbacks until AFTER BMI phase.
 */
static void hif_msg_callbacks_install(struct ol_softc *scn)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;

	cdf_mem_copy(&hif_state->msg_callbacks_current,
		     &hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
}

void
hif_get_default_pipe(struct ol_softc *scn, uint8_t *ULPipe, uint8_t *DLPipe)
{
	int ul_is_polled, dl_is_polled;

	(void)hif_map_service_to_pipe(scn, HTC_CTRL_RSVD_SVC,
		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
}

/**
 * hif_dump_pipe_debug_count() - Log error count
 * @scn: ol_softc pointer.
 *
 * Output the pipe error counts of each pipe to log file
 *
 * Return: N/A
 */
void hif_dump_pipe_debug_count(struct ol_softc *scn)
{
	struct HIF_CE_state *hif_state;
	int pipe_num;

	if (scn == NULL) {
		HIF_ERROR("%s scn is NULL", __func__);
		return;
	}
	hif_state = (struct HIF_CE_state *)scn->hif_hdl;
	if (hif_state == NULL) {
		HIF_ERROR("%s hif_state is NULL", __func__);
		return;
	}
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];

		if (pipe_info->nbuf_alloc_err_count > 0 ||
		    pipe_info->nbuf_dma_err_count > 0 ||
		    pipe_info->nbuf_ce_enqueue_err_count)
			HIF_ERROR(
				"%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_alloc_err_count,
				pipe_info->nbuf_dma_err_count,
				pipe_info->nbuf_ce_enqueue_err_count);
	}
}

static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	cdf_size_t buf_sz;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct ol_softc *scn = hif_state->scn;
	CDF_STATUS ret;
	uint32_t bufs_posted = 0;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return 0;
	}

	ce_hdl = pipe_info->ce_hdl;

	cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
		cdf_dma_addr_t CE_data;      /* CE space buffer address */
		cdf_nbuf_t nbuf;
		int status;

		atomic_dec(&pipe_info->recv_bufs_needed);
		cdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

		nbuf = cdf_nbuf_alloc(scn->cdf_dev, buf_sz, 0, 4, false);
		if (!nbuf) {
			cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
			pipe_info->nbuf_alloc_err_count++;
			cdf_spin_unlock_bh(
					&pipe_info->recv_bufs_needed_lock);
			HIF_ERROR(
				"%s buf alloc error [%d] needed %d, nbuf_alloc_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_alloc_err_count);
			atomic_inc(&pipe_info->recv_bufs_needed);
			return 1;
		}

		/*
		 * cdf_nbuf_peek_header(nbuf, &data, &unused);
		 * CE_data = dma_map_single(dev, data, buf_sz, );
		 * DMA_FROM_DEVICE);
		 */
		ret =
			cdf_nbuf_map_single(scn->cdf_dev, nbuf,
					    CDF_DMA_FROM_DEVICE);

		if (unlikely(ret != CDF_STATUS_SUCCESS)) {
			cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
			pipe_info->nbuf_dma_err_count++;
			cdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
			HIF_ERROR(
				"%s buf alloc error [%d] needed %d, nbuf_dma_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_dma_err_count);
			cdf_nbuf_free(nbuf);
			atomic_inc(&pipe_info->recv_bufs_needed);
			return 1;
		}

		CE_data = cdf_nbuf_get_frag_paddr_lo(nbuf, 0);

		cdf_os_mem_dma_sync_single_for_device(scn->cdf_dev, CE_data,
						      buf_sz, DMA_FROM_DEVICE);
		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
		CDF_ASSERT(status == CDF_STATUS_SUCCESS);
		if (status != EOK) {
			cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
			pipe_info->nbuf_ce_enqueue_err_count++;
			cdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
			HIF_ERROR(
				"%s buf alloc error [%d] needed %d, nbuf_alloc_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_ce_enqueue_err_count);
			atomic_inc(&pipe_info->recv_bufs_needed);
			cdf_nbuf_free(nbuf);
			return 1;
		}

		cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
		bufs_posted++;
	}
	pipe_info->nbuf_alloc_err_count =
		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
	pipe_info->nbuf_dma_err_count =
		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
	pipe_info->nbuf_ce_enqueue_err_count =
		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
		pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;

	cdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

	return 0;
}

/*
 * Try to post all desired receive buffers for all pipes.
 * Returns 0 if all desired buffers are posted,
 * non-zero if we were unable to completely
 * replenish receive buffers.
 */
static int hif_post_recv_buffers(struct ol_softc *scn)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
	int pipe_num, rv = 0;

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (hif_post_recv_buffers_for_pipe(pipe_info)) {
			rv = 1;
			goto done;
		}
	}

done:
	A_TARGET_ACCESS_UNLIKELY(scn);

	return rv;
}

CDF_STATUS hif_start(struct ol_softc *scn)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;

	hif_msg_callbacks_install(scn);

	if (hif_completion_thread_startup(hif_state))
		return CDF_STATUS_E_FAILURE;

	/* Post buffers once to start things off. */
	(void)hif_post_recv_buffers(scn);

	hif_state->started = true;

	return CDF_STATUS_SUCCESS;
}

#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_enable_fastpath() - update that we have enabled fastpath mode
 * @hif_device: HIF context
 *
 * For use in data path
 *
 * Return: void
 */
void
hif_enable_fastpath(struct ol_softc *hif_device)
{
	HIF_INFO("Enabling fastpath mode\n");
	hif_device->fastpath_mode_on = 1;
}
#endif /* WLAN_FEATURE_FASTPATH */

void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct ol_softc *scn;
	struct CE_handle *ce_hdl;
	uint32_t buf_sz;
	struct HIF_CE_state *hif_state;
	cdf_nbuf_t netbuf;
	cdf_dma_addr_t CE_data;
	void *per_CE_context;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started) {
		return;
	}

	scn = hif_state->scn;
	ce_hdl = pipe_info->ce_hdl;

	if (scn->cdf_dev == NULL) {
		return;
	}
	while (ce_revoke_recv_next
		       (ce_hdl, &per_CE_context, (void **)&netbuf,
			&CE_data) == CDF_STATUS_SUCCESS) {
		cdf_nbuf_unmap_single(scn->cdf_dev, netbuf,
				      CDF_DMA_FROM_DEVICE);
		cdf_nbuf_free(netbuf);
	}
}

void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	struct HIF_CE_state *hif_state;
	cdf_nbuf_t netbuf;
	void *per_CE_context;
	cdf_dma_addr_t CE_data;
	unsigned int nbytes;
	unsigned int id;
	uint32_t buf_sz;
	uint32_t toeplitz_hash_result;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started) {
		return;
	}

	ce_hdl = pipe_info->ce_hdl;

	while (ce_cancel_send_next
		       (ce_hdl, &per_CE_context,
			(void **)&netbuf, &CE_data, &nbytes,
			&id, &toeplitz_hash_result) == CDF_STATUS_SUCCESS) {
		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
			/*
			 * Packets enqueued by htt_h2t_ver_req_msg() and
			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
			 * freed in htt_htc_misc_pkt_pool_free() in
			 * wlantl_close(), so do not free them here again
			 * by checking whether it's the endpoint
			 * which they are queued in.
			 */
			if (id == hif_state->scn->htc_endpoint)
				return;
			/* Indicate the completion to higher
			 * layer to free the buffer */
			hif_state->msg_callbacks_current.
				txCompletionHandler(hif_state->
					msg_callbacks_current.Context,
					netbuf, id, toeplitz_hash_result);
		}
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
{
	int pipe_num;

	for (pipe_num = 0; pipe_num < hif_state->scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		hif_recv_buffer_cleanup_on_pipe(pipe_info);
		hif_send_buffer_cleanup_on_pipe(pipe_info);
	}
}

void hif_flush_surprise_remove(struct ol_softc *scn)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
	hif_buffer_cleanup(hif_state);
}

void hif_stop(struct ol_softc *scn)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
	int pipe_num;

	scn->hif_init_done = false;

	/*
	 * At this point, asynchronous threads are stopped,
	 * The Target should not DMA nor interrupt, Host code may
	 * not initiate anything more. So we just need to clean
	 * up Host-side state.
	 */

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	hif_buffer_cleanup(hif_state);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}

	if (hif_state->sleep_timer_init) {
		cdf_softirq_timer_cancel(&hif_state->sleep_timer);
		cdf_softirq_timer_free(&hif_state->sleep_timer);
		hif_state->sleep_timer_init = false;
	}

	hif_state->started = false;
}

#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40


static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};



/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
 * mapping of HTC services to HIF pipes.
 */
/*
 * This authoritative table defines Copy Engine configuration and the mapping
 * of services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
   ============================================================================
   Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
              |                      |      | ctio | Size     | Frequency
              |                      |      | n    |          |
   ============================================================================
   tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
   descriptor |                      |      |      | O(100B)  | and regular
   download   |                      |      |      |          |
   ----------------------------------------------------------------------------
   rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
   indication |                      |      |      | O(10B)   | regular
   upload     |                      |      |      |          |
   ----------------------------------------------------------------------------
   MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
   upload     |                      |      |      | O(1000B) | (frequent
   e.g. noise |                      |      |      |          | during IP1.0
   packets    |                      |      |      |          | testing)
   ----------------------------------------------------------------------------
   MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
   download   |                      |      |      | O(1000B) | (frequent
   e.g.       |                      |      |      |          | during IP1.0
   misdirecte |                      |      |      |          | testing)
   d EAPOL    |                      |      |      |          |
   packets    |                      |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
              | DATA_VO (uplink)     |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
              | DATA_VO (downlink)   |      |      |          |
   ----------------------------------------------------------------------------
   WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
              |                      |      |      | O(100B)  |
   ----------------------------------------------------------------------------
   WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
   messages   | (downlink)           |      |      | O(100B)  |
              |                      |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
              | HTC_RAW_STREAMS      |      |      |          |
              | (uplink)             |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
              | HTC_RAW_STREAMS      |      |      |          |
              | (downlink)           |      |      |          |
   ----------------------------------------------------------------------------
   diag       | none (raw CE)        | CE 7 | t<>h |    4     | Diag Window
              |                      |      |      |          | infrequent
   ============================================================================
 */

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC,    /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC,    /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		5,
	},
	/* (Additions here) */

	{       /* Must be last */
		0,
		0,
		0,
	},
};

static struct service_to_pipe *target_service_to_ce_map =
	target_service_to_ce_map_wlan;
static int target_service_to_ce_map_sz = sizeof(target_service_to_ce_map_wlan);

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
	{0, 0, 0,},             /* Must be last */
};

#ifdef HIF_PCI
/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
void hif_wake_target_cpu(struct ol_softc *scn)
{
	CDF_STATUS rv;
	uint32_t core_ctrl;

	rv = hif_diag_read_access(scn,
				  SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
				  &core_ctrl);
	CDF_ASSERT(rv == CDF_STATUS_SUCCESS);
	/* A_INUM_FIRMWARE interrupt to Target CPU */
	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

	rv = hif_diag_write_access(scn,
				   SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
				   core_ctrl);
	CDF_ASSERT(rv == CDF_STATUS_SUCCESS);
}
#endif

static void hif_sleep_entry(void *arg)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
	struct ol_softc *scn = hif_state->scn;
	uint32_t idle_ms;
	if (scn->recovery)
		return;

	if (cds_is_driver_unloading())
		return;

	cdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
	if (hif_state->verified_awake == false) {
		idle_ms = cdf_system_ticks_to_msecs(cdf_system_ticks()
						    - hif_state->sleep_ticks);
		if (idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
			if (!cdf_atomic_read(&scn->link_suspended)) {
				soc_wake_reset(scn);
				hif_state->fake_sleep = false;
			}
		} else {
			cdf_softirq_timer_cancel(&hif_state->sleep_timer);
			cdf_softirq_timer_start(&hif_state->sleep_timer,
				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
		}
	} else {
		cdf_softirq_timer_cancel(&hif_state->sleep_timer);
		cdf_softirq_timer_start(&hif_state->sleep_timer,
					HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
	}
	cdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
}
#define HIF_HIA_MAX_POLL_LOOP    1000000
#define HIF_HIA_POLLING_DELAY_MS 10

1512#ifndef HIF_PCI
1513int hif_set_hia(struct ol_softc *scn)
1514{
1515 return 0;
1516}
1517#else
1518int hif_set_hia(struct ol_softc *scn)
1519{
1520 CDF_STATUS rv;
1521 uint32_t interconnect_targ_addr = 0;
1522 uint32_t pcie_state_targ_addr = 0;
1523 uint32_t pipe_cfg_targ_addr = 0;
1524 uint32_t svc_to_pipe_map = 0;
1525 uint32_t pcie_config_flags = 0;
1526 uint32_t flag2_value = 0;
1527 uint32_t flag2_targ_addr = 0;
1528#ifdef QCA_WIFI_3_0
1529 uint32_t host_interest_area = 0;
1530 uint8_t i;
1531#else
1532 uint32_t ealloc_value = 0;
1533 uint32_t ealloc_targ_addr = 0;
1534 uint8_t banks_switched = 1;
1535 uint32_t chip_id;
1536#endif
1537 uint32_t pipe_cfg_addr;
Komal Seelam91553ce2016-01-27 18:57:10 +05301538 struct hif_target_info *tgt_info = hif_get_target_info_handle(scn);
1539 uint32_t target_type = tgt_info->target_type;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001540
1541 HIF_TRACE("%s: E", __func__);
1542
Houston Hoffman06bc4f52015-12-16 18:43:34 -08001543 if (ADRASTEA_BU)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001544 return CDF_STATUS_SUCCESS;
1545
1546#ifdef QCA_WIFI_3_0
1547 i = 0;
1548 while (i < HIF_HIA_MAX_POLL_LOOP) {
1549 host_interest_area = hif_read32_mb(scn->mem +
1550 A_SOC_CORE_SCRATCH_0_ADDRESS);
1551 if ((host_interest_area & 0x01) == 0) {
1552 cdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
1553 host_interest_area = 0;
1554 i++;
1555 if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0)) {
1556 HIF_ERROR("%s: poll timeout(%d)", __func__, i);
1557 }
1558 } else {
1559 host_interest_area &= (~0x01);
1560 hif_write32_mb(scn->mem + 0x113014, 0);
1561 break;
1562 }
1563 }
1564
1565 if (i >= HIF_HIA_MAX_POLL_LOOP) {
1566 HIF_ERROR("%s: hia polling timeout", __func__);
1567 return -EIO;
1568 }
1569
1570 if (host_interest_area == 0) {
1571 HIF_ERROR("%s: host_interest_area = 0", __func__);
1572 return -EIO;
1573 }
1574
1575 interconnect_targ_addr = host_interest_area +
1576 offsetof(struct host_interest_area_t,
1577 hi_interconnect_state);
1578
1579 flag2_targ_addr = host_interest_area +
1580 offsetof(struct host_interest_area_t, hi_option_flag2);
1581
1582#else
Komal Seelam91553ce2016-01-27 18:57:10 +05301583 interconnect_targ_addr = hif_hia_item_address(target_type,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001584 offsetof(struct host_interest_s, hi_interconnect_state));
Komal Seelam91553ce2016-01-27 18:57:10 +05301585 ealloc_targ_addr = hif_hia_item_address(target_type,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001586 offsetof(struct host_interest_s, hi_early_alloc));
Komal Seelam91553ce2016-01-27 18:57:10 +05301587 flag2_targ_addr = hif_hia_item_address(target_type,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001588 offsetof(struct host_interest_s, hi_option_flag2));
1589#endif
1590 /* Supply Target-side CE configuration */
1591 rv = hif_diag_read_access(scn, interconnect_targ_addr,
1592 &pcie_state_targ_addr);
1593 if (rv != CDF_STATUS_SUCCESS) {
1594 HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
1595 __func__, interconnect_targ_addr, rv);
1596 goto done;
1597 }
1598 if (pcie_state_targ_addr == 0) {
1599 rv = CDF_STATUS_E_FAILURE;
1600 HIF_ERROR("%s: pcie state addr is 0", __func__);
1601 goto done;
1602 }
1603 pipe_cfg_addr = pcie_state_targ_addr +
1604 offsetof(struct pcie_state_s,
1605 pipe_cfg_addr);
1606 rv = hif_diag_read_access(scn,
1607 pipe_cfg_addr,
1608 &pipe_cfg_targ_addr);
1609 if (rv != CDF_STATUS_SUCCESS) {
1610 HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
1611 __func__, pipe_cfg_addr, rv);
1612 goto done;
1613 }
1614 if (pipe_cfg_targ_addr == 0) {
1615 rv = CDF_STATUS_E_FAILURE;
1616 HIF_ERROR("%s: pipe cfg addr is 0", __func__);
1617 goto done;
1618 }
1619
1620 rv = hif_diag_write_mem(scn, pipe_cfg_targ_addr,
1621 (uint8_t *) target_ce_config,
1622 target_ce_config_sz);
1623
1624 if (rv != CDF_STATUS_SUCCESS) {
1625 HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
1626 goto done;
1627 }
1628
1629 rv = hif_diag_read_access(scn,
1630 pcie_state_targ_addr +
1631 offsetof(struct pcie_state_s,
1632 svc_to_pipe_map),
1633 &svc_to_pipe_map);
1634 if (rv != CDF_STATUS_SUCCESS) {
1635 HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
1636 goto done;
1637 }
1638 if (svc_to_pipe_map == 0) {
1639 rv = CDF_STATUS_E_FAILURE;
1640 HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
1641 goto done;
1642 }
1643
1644 rv = hif_diag_write_mem(scn,
1645 svc_to_pipe_map,
1646 (uint8_t *) target_service_to_ce_map,
1647 target_service_to_ce_map_sz);
1648 if (rv != CDF_STATUS_SUCCESS) {
1649 HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
1650 goto done;
1651 }
1652
1653 rv = hif_diag_read_access(scn,
1654 pcie_state_targ_addr +
1655 offsetof(struct pcie_state_s,
1656 config_flags),
1657 &pcie_config_flags);
1658 if (rv != CDF_STATUS_SUCCESS) {
1659 HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
1660 goto done;
1661 }
1662#if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
1663 pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
1664#else
1665 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1666#endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
1667 pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
1668#if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
1669 pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
1670#endif
1671 rv = hif_diag_write_mem(scn,
1672 pcie_state_targ_addr +
1673 offsetof(struct pcie_state_s,
1674 config_flags),
1675 (uint8_t *) &pcie_config_flags,
1676 sizeof(pcie_config_flags));
1677 if (rv != CDF_STATUS_SUCCESS) {
1678 HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
1679 goto done;
1680 }
1681
1682#ifndef QCA_WIFI_3_0
1683 /* configure early allocation */
Komal Seelam91553ce2016-01-27 18:57:10 +05301684 ealloc_targ_addr = hif_hia_item_address(target_type,
1685 offsetof(
1686 struct host_interest_s,
1687 hi_early_alloc));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001688
1689 rv = hif_diag_read_access(scn, ealloc_targ_addr,
1690 &ealloc_value);
1691 if (rv != CDF_STATUS_SUCCESS) {
1692 HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
1693 goto done;
1694 }
1695
1696 /* 1 bank is switched to IRAM, except ROME 1.0 */
1697 ealloc_value |=
1698 ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1699 HI_EARLY_ALLOC_MAGIC_MASK);
1700
1701 rv = hif_diag_read_access(scn,
1702 CHIP_ID_ADDRESS |
1703 RTC_SOC_BASE_ADDRESS, &chip_id);
1704 if (rv != CDF_STATUS_SUCCESS) {
1705 HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
1706 goto done;
1707 }
1708 if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
Komal Seelam91553ce2016-01-27 18:57:10 +05301709 tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001710 switch (CHIP_ID_REVISION_GET(chip_id)) {
1711 case 0x2: /* ROME 1.3 */
1712 /* 2 banks are switched to IRAM */
1713 banks_switched = 2;
1714 break;
1715 case 0x4: /* ROME 2.1 */
1716 case 0x5: /* ROME 2.2 */
1717 banks_switched = 6;
1718 break;
1719 case 0x8: /* ROME 3.0 */
1720 case 0x9: /* ROME 3.1 */
1721 case 0xA: /* ROME 3.2 */
1722 banks_switched = 9;
1723 break;
1724 case 0x0: /* ROME 1.0 */
1725 case 0x1: /* ROME 1.1 */
1726 default:
1727 /* 3 banks are switched to IRAM */
1728 banks_switched = 3;
1729 break;
1730 }
1731 }
1732
1733 ealloc_value |=
1734 ((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
1735 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1736
1737 rv = hif_diag_write_access(scn,
1738 ealloc_targ_addr,
1739 ealloc_value);
1740 if (rv != CDF_STATUS_SUCCESS) {
1741 HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
1742 goto done;
1743 }
1744#endif
1745
1746 /* Tell Target to proceed with initialization */
Komal Seelam91553ce2016-01-27 18:57:10 +05301747 flag2_targ_addr = hif_hia_item_address(target_type,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001748 offsetof(
1749 struct host_interest_s,
1750 hi_option_flag2));
1751
1752 rv = hif_diag_read_access(scn, flag2_targ_addr,
1753 &flag2_value);
1754 if (rv != CDF_STATUS_SUCCESS) {
1755 HIF_ERROR("%s: get option val (%d)", __func__, rv);
1756 goto done;
1757 }
1758
1759 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1760 rv = hif_diag_write_access(scn, flag2_targ_addr,
1761 flag2_value);
1762 if (rv != CDF_STATUS_SUCCESS) {
1763 HIF_ERROR("%s: set option val (%d)", __func__, rv);
1764 goto done;
1765 }
1766
1767 hif_wake_target_cpu(scn);
1768
1769done:
1770
1771 return rv;
1772}
1773#endif
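
/*
 * Summary of the host-interest setup sequence above (PCIe targets), for
 * reference when reading the diag read/write calls:
 *   1. write the target-side CE pipe configuration (pipe_cfg_addr)
 *   2. write the service-to-pipe map (svc_to_pipe_map)
 *   3. program the PCIe config_flags (L1 clock gating, clock switch wait,
 *      optional AXI clock gating)
 *   4. program the early IRAM bank allocation based on the chip revision
 *      (pre-QCA_WIFI_3_0 targets only)
 *   5. set HI_OPTION_EARLY_CFG_DONE and wake the target CPU
 */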
1774
1775/**
1776 * hif_wlan_enable(): call the platform driver to enable wlan
1777 *
 1778 * This function passes the con_mode and CE configuration to the
 1779 * platform driver so that wlan can be enabled.
 1780 *
 1781 * Return: 0 on success, a non-zero error code on failure
1782 */
1783static int hif_wlan_enable(void)
1784{
1785 struct icnss_wlan_enable_cfg cfg;
1786 enum icnss_driver_mode mode;
1787 uint32_t con_mode = cds_get_conparam();
1788
1789 cfg.num_ce_tgt_cfg = target_ce_config_sz /
1790 sizeof(struct CE_pipe_config);
1791 cfg.ce_tgt_cfg = (struct ce_tgt_pipe_cfg *)target_ce_config;
1792 cfg.num_ce_svc_pipe_cfg = target_service_to_ce_map_sz /
1793 sizeof(struct service_to_pipe);
1794 cfg.ce_svc_cfg = (struct ce_svc_pipe_cfg *)target_service_to_ce_map;
1795 cfg.num_shadow_reg_cfg = shadow_cfg_sz / sizeof(struct shadow_reg_cfg);
1796 cfg.shadow_reg_cfg = (struct icnss_shadow_reg_cfg *) target_shadow_reg_cfg;
1797
Peng Xu7b962532015-10-02 17:17:03 -07001798 if (CDF_GLOBAL_FTM_MODE == con_mode)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001799 mode = ICNSS_FTM;
Peng Xu7b962532015-10-02 17:17:03 -07001800 else if (WLAN_IS_EPPING_ENABLED(cds_get_conparam()))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001801 mode = ICNSS_EPPING;
Peng Xu7b962532015-10-02 17:17:03 -07001802 else
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001803 mode = ICNSS_MISSION;
Peng Xu7b962532015-10-02 17:17:03 -07001804
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001805 return icnss_wlan_enable(&cfg, mode, QWLAN_VERSIONSTR);
1806}
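
/*
 * Note on the configuration handed over above: every count is derived from
 * the size of the corresponding static table, e.g.
 *
 *	cfg.num_ce_tgt_cfg = target_ce_config_sz /
 *				sizeof(struct CE_pipe_config);
 *
 * so switching the tables (as the epping path in hif_config_ce() does)
 * automatically keeps the counts passed to icnss_wlan_enable() consistent.
 */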
1807
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001808/*
1809 * Called from PCI layer whenever a new PCI device is probed.
1810 * Initializes per-device HIF state and notifies the main
1811 * driver that a new HIF device is present.
1812 */
1813int hif_config_ce(hif_handle_t hif_hdl)
1814{
1815 struct HIF_CE_state *hif_state;
1816 struct HIF_CE_pipe_info *pipe_info;
1817 int pipe_num;
1818#ifdef ADRASTEA_SHADOW_REGISTERS
1819 int i;
1820#endif
1821 CDF_STATUS rv = CDF_STATUS_SUCCESS;
1822 int ret;
1823 struct ol_softc *scn = hif_hdl;
1824 struct icnss_soc_info soc_info;
Komal Seelam91553ce2016-01-27 18:57:10 +05301825 struct hif_target_info *tgt_info = hif_get_target_info_handle(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001826
1827 /* if epping is enabled we need to use the epping configuration. */
1828 if (WLAN_IS_EPPING_ENABLED(cds_get_conparam())) {
1829 if (WLAN_IS_EPPING_IRQ(cds_get_conparam()))
1830 host_ce_config = host_ce_config_wlan_epping_irq;
1831 else
1832 host_ce_config = host_ce_config_wlan_epping_poll;
1833 target_ce_config = target_ce_config_wlan_epping;
1834 target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
1835 target_service_to_ce_map =
1836 target_service_to_ce_map_wlan_epping;
1837 target_service_to_ce_map_sz =
1838 sizeof(target_service_to_ce_map_wlan_epping);
1839 }
1840
1841 ret = hif_wlan_enable();
1842
1843 if (ret) {
1844 HIF_ERROR("%s: hif_wlan_enable error = %d", __func__, ret);
1845 return CDF_STATUS_NOT_INITIALIZED;
1846 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001847
1848 scn->notice_send = true;
1849
1850 cdf_mem_zero(&soc_info, sizeof(soc_info));
Komal Seelamf8600682016-02-02 18:17:13 +05301851 ret = icnss_get_soc_info(scn, &soc_info);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001852 if (ret < 0) {
1853 HIF_ERROR("%s: icnss_get_soc_info error = %d", __func__, ret);
1854 return CDF_STATUS_NOT_INITIALIZED;
1855 }
1856
1857 hif_state = (struct HIF_CE_state *)cdf_mem_malloc(sizeof(*hif_state));
1858 if (!hif_state) {
1859 return -ENOMEM;
1860 }
1861 cdf_mem_zero(hif_state, sizeof(*hif_state));
1862
1863 hif_state->scn = scn;
1864 scn->hif_hdl = hif_state;
1865 scn->mem = soc_info.v_addr;
1866 scn->mem_pa = soc_info.p_addr;
Komal Seelam91553ce2016-01-27 18:57:10 +05301867 tgt_info->soc_version = soc_info.version;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001868
1869 cdf_spinlock_init(&hif_state->keep_awake_lock);
1870
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001871 hif_state->keep_awake_count = 0;
1872
1873 hif_state->fake_sleep = false;
1874 hif_state->sleep_ticks = 0;
1875 cdf_softirq_timer_init(NULL, &hif_state->sleep_timer,
1876 hif_sleep_entry, (void *)hif_state,
1877 CDF_TIMER_TYPE_WAKE_APPS);
1878 hif_state->sleep_timer_init = true;
1879 hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
1880#ifdef HIF_PCI
1881#if CONFIG_ATH_PCIE_MAX_PERF || CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD
1882 /* Force AWAKE forever/till the driver is loaded */
1883 if (hif_target_sleep_state_adjust(scn, false, true) < 0)
1884 return -EACCES;
1885#endif
1886#endif
1887
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08001888 hif_config_rri_on_ddr(scn);
1889
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001890	/* During CE initialization */
1891 scn->ce_count = HOST_CE_COUNT;
1892 A_TARGET_ACCESS_LIKELY(scn);
1893 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1894 struct CE_attr *attr;
1895
1896 pipe_info = &hif_state->pipe_info[pipe_num];
1897 pipe_info->pipe_num = pipe_num;
1898 pipe_info->HIF_CE_state = hif_state;
1899 attr = &host_ce_config[pipe_num];
1900 pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
1901 CDF_ASSERT(pipe_info->ce_hdl != NULL);
1902 if (pipe_info->ce_hdl == NULL) {
1903 rv = CDF_STATUS_E_FAILURE;
1904 A_TARGET_ACCESS_UNLIKELY(scn);
1905 goto err;
1906 }
1907
1908 if (pipe_num == DIAG_CE_ID) {
1909			/* Reserve the last CE for
1910			 * Diagnostic Window support */
1911 hif_state->ce_diag =
1912 hif_state->pipe_info[scn->ce_count - 1].ce_hdl;
1913 continue;
1914 }
1915
1916 pipe_info->buf_sz = (cdf_size_t) (attr->src_sz_max);
1917 cdf_spinlock_init(&pipe_info->recv_bufs_needed_lock);
1918 if (attr->dest_nentries > 0) {
1919 atomic_set(&pipe_info->recv_bufs_needed,
1920 init_buffer_count(attr->dest_nentries - 1));
1921 } else {
1922 atomic_set(&pipe_info->recv_bufs_needed, 0);
1923 }
1924 ce_tasklet_init(hif_state, (1 << pipe_num));
1925 ce_register_irq(hif_state, (1 << pipe_num));
1926 scn->request_irq_done = true;
1927 }
1928
1929 if (athdiag_procfs_init(scn) != 0) {
1930 A_TARGET_ACCESS_UNLIKELY(scn);
1931 goto err;
1932 }
1933 scn->athdiag_procfs_inited = true;
1934
1935 /*
1936 * Initially, establish CE completion handlers for use with BMI.
1937	 * These are overwritten with generic handlers after we exit the BMI phase.
1938 */
1939 pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_TARG];
1940#ifdef HIF_PCI
1941 ce_send_cb_register(
1942 pipe_info->ce_hdl, hif_bmi_send_done, pipe_info, 0);
1943#ifndef BMI_RSP_POLLING
1944 pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_HOST];
1945 ce_recv_cb_register(
1946 pipe_info->ce_hdl, hif_bmi_recv_data, pipe_info, 0);
1947#endif
1948#endif
1949 HIF_INFO_MED("%s: ce_init done", __func__);
1950
1951 rv = hif_set_hia(scn);
1952
1953 HIF_INFO_MED("%s: hif_set_hia done", __func__);
1954
1955 A_TARGET_ACCESS_UNLIKELY(scn);
1956
1957 if (rv != CDF_STATUS_SUCCESS)
1958 goto err;
1959 else
Komal Seelamf8600682016-02-02 18:17:13 +05301960 init_tasklet_workers(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001961
1962 HIF_TRACE("%s: X, ret = %d\n", __func__, rv);
1963
1964#ifdef ADRASTEA_SHADOW_REGISTERS
1965 HIF_ERROR("Using Shadow Registers instead of CE Registers\n");
1966 for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
1967 HIF_ERROR("%s Shadow Register%d is mapped to address %x\n",
1968 __func__, i,
1969 (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
1970 }
1971#endif
1972
1973
1974 return rv != CDF_STATUS_SUCCESS;
1975
1976err:
1977 /* Failure, so clean up */
1978 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1979 pipe_info = &hif_state->pipe_info[pipe_num];
1980 if (pipe_info->ce_hdl) {
1981 ce_unregister_irq(hif_state, (1 << pipe_num));
1982 scn->request_irq_done = false;
1983 ce_fini(pipe_info->ce_hdl);
1984 pipe_info->ce_hdl = NULL;
1985 pipe_info->buf_sz = 0;
1986 }
1987 }
1988 if (hif_state->sleep_timer_init) {
1989 cdf_softirq_timer_cancel(&hif_state->sleep_timer);
1990 cdf_softirq_timer_free(&hif_state->sleep_timer);
1991 hif_state->sleep_timer_init = false;
1992 }
1993 if (scn->hif_hdl) {
1994 scn->hif_hdl = NULL;
1995 cdf_mem_free(hif_state);
1996 }
1997 athdiag_procfs_remove();
1998 scn->athdiag_procfs_inited = false;
1999 HIF_TRACE("%s: X, ret = %d\n", __func__, rv);
2000 return CDF_STATUS_SUCCESS != CDF_STATUS_E_FAILURE;
2001}
2002
2003
2004
2005
2006
2007
2008#ifdef IPA_OFFLOAD
Leo Changd85f78d2015-11-13 10:55:34 -08002009/**
2010 * hif_ipa_get_ce_resource() - get IPA UC resources from HIF
2011 * @scn: bus context
2012 * @ce_sr_base_paddr: copy engine source ring base physical address
2013 * @ce_sr_ring_size: copy engine source ring size
2014 * @ce_reg_paddr: copy engine register physical address
2015 *
2016 * When the IPA micro controller data path offload feature is enabled,
2017 * HIF releases the copy engine resource information to the IPA UC,
2018 * which then accesses the hardware using that information.
2019 *
2020 * Return: None
2021 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002022void hif_ipa_get_ce_resource(struct ol_softc *scn,
Leo Changd85f78d2015-11-13 10:55:34 -08002023 cdf_dma_addr_t *ce_sr_base_paddr,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002024 uint32_t *ce_sr_ring_size,
2025 cdf_dma_addr_t *ce_reg_paddr)
2026{
2027 struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
2028 struct HIF_CE_pipe_info *pipe_info =
2029 &(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
2030 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
2031
2032 ce_ipa_get_resource(ce_hdl, ce_sr_base_paddr, ce_sr_ring_size,
2033 ce_reg_paddr);
2034 return;
2035}
2036#endif /* IPA_OFFLOAD */
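
/*
 * Illustrative usage sketch (hypothetical caller, not taken from this
 * driver): an IPA UC setup path could fetch the CE resources like this and
 * then pass them on through its own interface:
 *
 *	cdf_dma_addr_t ce_sr_base, ce_reg_base;
 *	uint32_t ce_sr_size;
 *
 *	hif_ipa_get_ce_resource(scn, &ce_sr_base, &ce_sr_size, &ce_reg_base);
 *	// ce_sr_base/ce_sr_size/ce_reg_base now describe the CE assigned
 *	// to the IPA UC (HIF_PCI_IPA_UC_ASSIGNED_CE)
 */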
2037
2038
2039#ifdef ADRASTEA_SHADOW_REGISTERS
2040
2041/*
2042 Current shadow register config
2043
2044 -----------------------------------------------------------
2045 Shadow Register | CE | src/dst write index
2046 -----------------------------------------------------------
2047 0 | 0 | src
2048 1 No Config - Doesn't point to anything
2049 2 No Config - Doesn't point to anything
2050 3 | 3 | src
2051 4 | 4 | src
2052 5 | 5 | src
2053 6 No Config - Doesn't point to anything
2054 7 | 7 | src
2055 8 No Config - Doesn't point to anything
2056 9 No Config - Doesn't point to anything
2057 10 No Config - Doesn't point to anything
2058 11 No Config - Doesn't point to anything
2059 -----------------------------------------------------------
2060 12 No Config - Doesn't point to anything
2061 13 | 1 | dst
2062 14 | 2 | dst
2063 15 No Config - Doesn't point to anything
2064 16 No Config - Doesn't point to anything
2065 17 No Config - Doesn't point to anything
2066 18 No Config - Doesn't point to anything
2067 19 | 7 | dst
2068 20 | 8 | dst
2069 21 No Config - Doesn't point to anything
2070 22 No Config - Doesn't point to anything
2071 23 No Config - Doesn't point to anything
2072 -----------------------------------------------------------
2073
2074
 2075 ToDo - Move the shadow register config to the following layout in the future.
 2076 This frees up a contiguous block of shadow registers towards the end,
 2077 which can then be used for other purposes.
2078
2079 -----------------------------------------------------------
2080 Shadow Register | CE | src/dst write index
2081 -----------------------------------------------------------
2082 0 | 0 | src
2083 1 | 3 | src
2084 2 | 4 | src
2085 3 | 5 | src
2086 4 | 7 | src
2087 -----------------------------------------------------------
2088 5 | 1 | dst
2089 6 | 2 | dst
2090 7 | 7 | dst
2091 8 | 8 | dst
2092 -----------------------------------------------------------
2093 9 No Config - Doesn't point to anything
2094 12 No Config - Doesn't point to anything
2095 13 No Config - Doesn't point to anything
2096 14 No Config - Doesn't point to anything
2097 15 No Config - Doesn't point to anything
2098 16 No Config - Doesn't point to anything
2099 17 No Config - Doesn't point to anything
2100 18 No Config - Doesn't point to anything
2101 19 No Config - Doesn't point to anything
2102 20 No Config - Doesn't point to anything
2103 21 No Config - Doesn't point to anything
2104 22 No Config - Doesn't point to anything
2105 23 No Config - Doesn't point to anything
2106 -----------------------------------------------------------
2107*/
2108
2109u32 shadow_sr_wr_ind_addr(struct ol_softc *scn, u32 ctrl_addr)
2110{
2111 u32 addr = 0;
2112
2113 switch (COPY_ENGINE_ID(ctrl_addr)) {
2114 case 0:
2115 addr = SHADOW_VALUE0;
2116 break;
2117 case 3:
2118 addr = SHADOW_VALUE3;
2119 break;
2120 case 4:
2121 addr = SHADOW_VALUE4;
2122 break;
2123 case 5:
2124 addr = SHADOW_VALUE5;
2125 break;
2126 case 7:
2127 addr = SHADOW_VALUE7;
2128 break;
2129 default:
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002130 HIF_ERROR("invalid CE ctrl_addr\n");
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002131 CDF_ASSERT(0);
2132
2133 }
2134 return addr;
2135
2136}
2137
2138u32 shadow_dst_wr_ind_addr(struct ol_softc *scn, u32 ctrl_addr)
2139{
2140 u32 addr = 0;
2141
2142 switch (COPY_ENGINE_ID(ctrl_addr)) {
2143 case 1:
2144 addr = SHADOW_VALUE13;
2145 break;
2146 case 2:
2147 addr = SHADOW_VALUE14;
2148 break;
2149 case 7:
2150 addr = SHADOW_VALUE19;
2151 break;
2152 case 8:
2153 addr = SHADOW_VALUE20;
2154 break;
2155 default:
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002156 HIF_ERROR("invalid CE ctrl_addr\n");
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002157 CDF_ASSERT(0);
2158 }
2159
2160 return addr;
2161
2162}
2163#endif
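
/*
 * Worked example (assuming COPY_ENGINE_ID() maps CE_BASE_ADDRESS(n) back to
 * n, as the helpers in this file imply): with the table above, a source-ring
 * write-index update for copy engine 3 goes through shadow register 3,
 *
 *	u32 addr = shadow_sr_wr_ind_addr(scn, CE_BASE_ADDRESS(3));
 *	// addr == SHADOW_VALUE3
 *
 * while a destination-ring update for copy engine 1 resolves to
 * SHADOW_VALUE13 via shadow_dst_wr_ind_addr().
 */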
2164
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002165#if defined(FEATURE_LRO)
2166/**
2167 * ce_lro_flush_cb_register() - register the LRO flush
2168 * callback
2169 * @scn: HIF context
2170 * @handler: callback function
2171 * @data: opaque data pointer to be passed back
2172 *
2173 * Store the LRO flush callback provided
2174 *
2175 * Return: none
2176 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002177void ce_lro_flush_cb_register(struct ol_softc *scn,
2178 void (handler)(void *), void *data)
2179{
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002180 uint8_t ul, dl;
2181 int ul_polled, dl_polled;
2182
2183 CDF_ASSERT(scn != NULL);
2184
2185 if (CDF_STATUS_SUCCESS !=
2186 hif_map_service_to_pipe(scn, HTT_DATA_MSG_SVC,
2187 &ul, &dl, &ul_polled, &dl_polled)) {
2188		printk("%s cannot map service to pipe\n", __func__);
2189 return;
2190 } else {
2191 struct CE_state *ce_state;
2192 ce_state = scn->ce_id_to_state[dl];
2193 ce_state->lro_flush_cb = handler;
2194 ce_state->lro_data = data;
2195 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002196}
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002197
2198/**
2199 * ce_lro_flush_cb_deregister() - deregister the LRO flush
2200 * callback
2201 * @scn: HIF context
2202 *
2203 * Remove the LRO flush callback
2204 *
2205 * Return: none
2206 */
2207void ce_lro_flush_cb_deregister(struct ol_softc *scn)
2208{
2209 uint8_t ul, dl;
2210 int ul_polled, dl_polled;
2211
2212 CDF_ASSERT(scn != NULL);
2213
2214 if (CDF_STATUS_SUCCESS !=
2215 hif_map_service_to_pipe(scn, HTT_DATA_MSG_SVC,
2216 &ul, &dl, &ul_polled, &dl_polled)) {
2217		printk("%s cannot map service to pipe\n", __func__);
2218 return;
2219 } else {
2220 struct CE_state *ce_state;
2221 ce_state = scn->ce_id_to_state[dl];
2222 ce_state->lro_flush_cb = NULL;
2223 ce_state->lro_data = NULL;
2224 }
2225}
2226#endif
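
/*
 * Illustrative usage sketch (hypothetical names, not taken from this
 * driver): a receive path that owns an LRO manager could hook the flush
 * callback as follows:
 *
 *	static void lro_flush_handler(void *data)
 *	{
 *		struct my_lro_mgr *mgr = data;
 *		// flush any frames currently aggregated in mgr
 *	}
 *
 *	ce_lro_flush_cb_register(scn, lro_flush_handler, mgr);
 *	...
 *	ce_lro_flush_cb_deregister(scn);	// on datapath teardown
 */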
Sanjay Devnanic319c822015-11-06 16:44:28 -08002227
2228/**
2229 * hif_map_service_to_pipe() - returns the ce ids pertaining to
2230 * this service
2231 * @scn: ol_softc pointer.
2232 * @svc_id: Service ID for which the mapping is needed.
2233 * @ul_pipe: address of the container in which ul pipe is returned.
2234 * @dl_pipe: address of the container in which dl pipe is returned.
2235 * @ul_is_polled: address of the container in which a bool
2236 * indicating if the UL CE for this service
2237 * is polled is returned.
2238 * @dl_is_polled: address of the container in which a bool
2239 * indicating if the DL CE for this service
2240 * is polled is returned.
2241 *
2242 * Return: Indicates whether this operation was successful.
2243 */
2244
2245int hif_map_service_to_pipe(struct ol_softc *scn, uint16_t svc_id,
2246 uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
2247 int *dl_is_polled)
2248{
2249 int status = CDF_STATUS_SUCCESS;
2250
2251 unsigned int i;
2252 struct service_to_pipe element;
2253
2254 struct service_to_pipe *tgt_svc_map_to_use;
2255 size_t sz_tgt_svc_map_to_use;
2256
2257 if (WLAN_IS_EPPING_ENABLED(cds_get_conparam())) {
2258 tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
2259 sz_tgt_svc_map_to_use =
2260 sizeof(target_service_to_ce_map_wlan_epping);
2261 } else {
2262 tgt_svc_map_to_use = target_service_to_ce_map_wlan;
2263 sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_wlan);
2264 }
2265
2266 *dl_is_polled = 0; /* polling for received messages not supported */
2267
2268 for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
2269
2270 memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
2271 if (element.service_id == svc_id) {
2272
2273 if (element.pipedir == PIPEDIR_OUT)
2274 *ul_pipe = element.pipenum;
2275
2276 else if (element.pipedir == PIPEDIR_IN)
2277 *dl_pipe = element.pipenum;
2278 }
2279 }
2280
2281 *ul_is_polled =
2282 (host_ce_config[*ul_pipe].flags & CE_ATTR_DISABLE_INTR) != 0;
2283
2284 return status;
2285}
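
/*
 * Illustrative usage sketch (mirrors the call made by
 * ce_lro_flush_cb_register() above): a caller resolves the copy engines for
 * a service as follows:
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	if (hif_map_service_to_pipe(scn, HTT_DATA_MSG_SVC, &ul_pipe, &dl_pipe,
 *				    &ul_polled, &dl_polled) !=
 *	    CDF_STATUS_SUCCESS)
 *		return;		// service not present in the map
 *	// dl_pipe now identifies the CE carrying downlink HTT data
 */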
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002286
2287#ifdef SHADOW_REG_DEBUG
2288inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct ol_softc *scn,
2289 uint32_t CE_ctrl_addr)
2290{
2291 uint32_t read_from_hw, srri_from_ddr = 0;
2292
2293 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
2294
2295 srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
2296
2297 if (read_from_hw != srri_from_ddr) {
2298 HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x\n",
2299 srri_from_ddr, read_from_hw,
2300 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
2301 CDF_ASSERT(0);
2302 }
2303 return srri_from_ddr;
2304}
2305
2306
2307inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct ol_softc *scn,
2308 uint32_t CE_ctrl_addr)
2309{
2310 uint32_t read_from_hw, drri_from_ddr = 0;
2311
2312 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
2313
2314 drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
2315
2316 if (read_from_hw != drri_from_ddr) {
2317 HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x\n",
2318 drri_from_ddr, read_from_hw,
2319 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
2320 CDF_ASSERT(0);
2321 }
2322 return drri_from_ddr;
2323}
2324
2325#endif
2326
Houston Hoffman3d0cda82015-12-03 13:25:05 -08002327#ifdef ADRASTEA_RRI_ON_DDR
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002328/**
2329 * hif_get_src_ring_read_index(): Called to get the SRRI
2330 *
2331 * @scn: ol_softc pointer
2332 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2333 *
2334 * This function returns the SRRI to the caller. For CEs that
2335 * don't have interrupts enabled, we look at the DDR-based SRRI
2336 *
2337 * Return: SRRI
2338 */
2339inline unsigned int hif_get_src_ring_read_index(struct ol_softc *scn,
2340 uint32_t CE_ctrl_addr)
2341{
2342 struct CE_attr attr;
2343
2344 attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2345 if (attr.flags & CE_ATTR_DISABLE_INTR)
2346 return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2347 else
2348 return A_TARGET_READ(scn,
2349 (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
2350}
2351
2352/**
2353 * hif_get_dst_ring_read_index(): Called to get the DRRI
2354 *
2355 * @scn: ol_softc pointer
2356 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2357 *
2358 * This function returns the DRRI to the caller. For CEs that
2359 * don't have interrupts enabled, we look at the DDR-based DRRI
2360 *
2361 * Return: DRRI
2362 */
2363inline unsigned int hif_get_dst_ring_read_index(struct ol_softc *scn,
2364 uint32_t CE_ctrl_addr)
2365{
2366 struct CE_attr attr;
2367
2368 attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2369
2370 if (attr.flags & CE_ATTR_DISABLE_INTR)
2371 return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2372 else
2373 return A_TARGET_READ(scn,
2374 (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
2375}
2376
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002377/**
2378 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2379 *
2380 * @scn: ol_softc pointer
2381 *
2382 * This function allocates non-cached memory on DDR and sends
2383 * the physical address of this memory to the CE hardware. The
2384 * hardware updates the RRI at this location.
2385 *
2386 * Return: None
2387 */
2388static inline void hif_config_rri_on_ddr(struct ol_softc *scn)
2389{
2390 unsigned int i;
2391 cdf_dma_addr_t paddr_rri_on_ddr;
2392 uint32_t high_paddr, low_paddr;
2393 scn->vaddr_rri_on_ddr =
2394 (uint32_t *)cdf_os_mem_alloc_consistent(scn->cdf_dev,
2395 (CE_COUNT*sizeof(uint32_t)), &paddr_rri_on_ddr, 0);
2396
2397 low_paddr = BITS0_TO_31(paddr_rri_on_ddr);
2398 high_paddr = BITS32_TO_35(paddr_rri_on_ddr);
2399
2400 HIF_ERROR("%s using srri and drri from DDR\n", __func__);
2401
2402 WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
2403 WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
2404
2405 for (i = 0; i < CE_COUNT; i++)
2406 CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
2407
2408 cdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT*sizeof(uint32_t));
2409
2410 return;
2411}
2412#else
2413
2414/**
2415 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2416 *
2417 * @scn: ol_softc pointer
2418 *
2419 * This is a dummy implementation for platforms that don't
2420 * support this functionality.
2421 *
2422 * Return: None
2423 */
2424static inline void hif_config_rri_on_ddr(struct ol_softc *scn)
2425{
2426 return;
2427}
2428#endif
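
/*
 * Note on the DDR-based read indices (assumption: SRRI_FROM_DDR_ADDR() and
 * DRRI_FROM_DDR_ADDR() unpack both indices from the per-CE word written by
 * the hardware): hif_config_rri_on_ddr() allocates one 32-bit word per copy
 * engine, and a polled CE then reads its source ring read index without a
 * register access, roughly as the debug helpers above do:
 *
 *	srri = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
 */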
Govind Singh2443fb32016-01-13 17:44:48 +05302429
2430/**
2431 * hif_dump_ce_registers() - dump ce registers
2432 * @scn: ol_softc pointer.
2433 *
2434 * Output the copy engine registers
2435 *
2436 * Return: 0 for success or error code
2437 */
2438int hif_dump_ce_registers(struct ol_softc *scn)
2439{
2440 uint32_t ce_reg_address = CE0_BASE_ADDRESS;
2441 uint32_t ce_reg_values[CE_COUNT_MAX][CE_USEFUL_SIZE >> 2];
2442 uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
2443 uint16_t i;
2444 CDF_STATUS status;
2445
2446 for (i = 0; i < CE_COUNT_MAX; i++, ce_reg_address += CE_OFFSET) {
2447 status = hif_diag_read_mem(scn, ce_reg_address,
2448 (uint8_t *) &ce_reg_values[i][0],
2449 ce_reg_word_size * sizeof(uint32_t));
2450
2451 if (status != CDF_STATUS_SUCCESS) {
2452 HIF_ERROR("Dumping CE register failed!");
2453 return -EACCES;
2454 }
2455 HIF_ERROR("CE%d Registers:", i);
2456 cdf_trace_hex_dump(CDF_MODULE_ID_HIF, CDF_TRACE_LEVEL_DEBUG,
2457 (uint8_t *) &ce_reg_values[i][0],
2458 ce_reg_word_size * sizeof(uint32_t));
2459 }
2460
2461 return 0;
2462}