1/*
2 * Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
3 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27#include <osdep.h>
28#include "a_types.h"
29#include "athdefs.h"
30#include "osapi_linux.h"
31#include "targcfg.h"
32#include "cdf_lock.h"
33#include "cdf_status.h"
34#include <cdf_atomic.h> /* cdf_atomic_read */
35#include <targaddrs.h>
36#include <bmi_msg.h>
37#include "hif_io32.h"
38#include <hif.h>
39#include "regtable.h"
40#define ATH_MODULE_NAME hif
41#include <a_debug.h>
42#include "hif_main.h"
43#ifdef HIF_PCI
44#include "ce_bmi.h"
45#endif
46#include "ce_api.h"
47#include "cdf_trace.h"
48#include "cds_api.h"
49#ifdef CONFIG_CNSS
50#include <net/cnss.h>
51#endif
52#include "epping_main.h"
53#include "hif_debug.h"
54#include "ce_internal.h"
55#include "ce_reg.h"
56#include "ce_assignment.h"
57#include "ce_tasklet.h"
58#ifdef HIF_PCI
59#include "icnss_stub.h"
60#else
61#include <soc/qcom/icnss.h>
62#endif
63#include "qwlan_version.h"
64#include "cds_concurrency.h"
65
66#define CE_POLL_TIMEOUT 10 /* ms */
67
68/* Forward references */
69static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);
70
71/*
72 * Fix EV118783: poll to check whether a BMI response has arrived,
73 * rather than waiting for an interrupt that may be lost.
74 */
75/* #define BMI_RSP_POLLING */
76#define BMI_RSP_TO_MILLISEC 1000
77
78
79static int hif_post_recv_buffers(struct ol_softc *scn);
80
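/**
 * ce_poll_timeout() - poll timer callback for a copy engine
 * @arg: opaque pointer to the struct CE_state being polled
 *
 * Services the copy engine and re-arms the poll timer for another
 * CE_POLL_TIMEOUT interval as long as the timer is still marked
 * as initialized.
 */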
81static void ce_poll_timeout(void *arg)
82{
83 struct CE_state *CE_state = (struct CE_state *)arg;
84 if (CE_state->timer_inited) {
85 ce_per_engine_service(CE_state->scn, CE_state->id);
86 cdf_softirq_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
87 }
88}
89
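/**
 * roundup_pwr2() - round a value up to the next power of two
 * @n: value to round up
 *
 * For example, roundup_pwr2(100) returns 128, while a value that is
 * already a power of two is returned unchanged.
 *
 * Return: the rounded-up value, or 0 (after an assert) if @n is too
 * large to round up.
 */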
90static unsigned int roundup_pwr2(unsigned int n)
91{
92 int i;
93 unsigned int test_pwr2;
94
95 if (!(n & (n - 1)))
96 return n; /* already a power of 2 */
97
98 test_pwr2 = 4;
99 for (i = 0; i < 29; i++) {
100 if (test_pwr2 > n)
101 return test_pwr2;
102 test_pwr2 = test_pwr2 << 1;
103 }
104
105 CDF_ASSERT(0); /* n too large */
106 return 0;
107}
108
109/*
110 * Initialize a Copy Engine based on caller-supplied attributes.
111 * This may be called once to initialize both source and destination
112 * rings or it may be called twice for separate source and destination
113 * initialization. It may be that only one side or the other is
114 * initialized by software/firmware.
115 */
116struct CE_handle *ce_init(struct ol_softc *scn,
117 unsigned int CE_id, struct CE_attr *attr)
118{
119 struct CE_state *CE_state;
120 uint32_t ctrl_addr;
121 unsigned int nentries;
122 cdf_dma_addr_t base_addr;
123 bool malloc_CE_state = false;
124 bool malloc_src_ring = false;
125
126 CDF_ASSERT(CE_id < scn->ce_count);
127 ctrl_addr = CE_BASE_ADDRESS(CE_id);
128 cdf_spin_lock(&scn->target_lock);
129 CE_state = scn->ce_id_to_state[CE_id];
130
131 if (!CE_state) {
132 cdf_spin_unlock(&scn->target_lock);
133 CE_state =
134 (struct CE_state *)cdf_mem_malloc(sizeof(*CE_state));
135 if (!CE_state) {
136 HIF_ERROR("%s: CE_state has no mem", __func__);
137 return NULL;
138 } else
139 malloc_CE_state = true;
140 cdf_mem_zero(CE_state, sizeof(*CE_state));
141 cdf_spin_lock(&scn->target_lock);
142 if (!scn->ce_id_to_state[CE_id]) { /* re-check under lock */
143 scn->ce_id_to_state[CE_id] = CE_state;
144
145 CE_state->id = CE_id;
146 CE_state->ctrl_addr = ctrl_addr;
147 CE_state->state = CE_RUNNING;
148 CE_state->attr_flags = attr->flags;
149 } else {
150 /*
151 * We released target_lock in order to allocate
152 * CE state, but someone else beat us to it.
153 * Continue, using that CE_state
154 * (and free the one we allocated).
155 */
156 cdf_mem_free(CE_state);
157 malloc_CE_state = false;
158 CE_state = scn->ce_id_to_state[CE_id];
159 }
160 }
161 CE_state->scn = scn;
162 cdf_spin_unlock(&scn->target_lock);
163
164 cdf_atomic_init(&CE_state->rx_pending);
165 if (attr == NULL) {
166 /* Already initialized; caller wants the handle */
167 return (struct CE_handle *)CE_state;
168 }
169
170#ifdef ADRASTEA_SHADOW_REGISTERS
171 HIF_ERROR("%s: Using Shadow Registers instead of CE Registers\n",
172 __func__);
173#endif
174
175 if (CE_state->src_sz_max)
176 CDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
177 else
178 CE_state->src_sz_max = attr->src_sz_max;
179
180 /* source ring setup */
181 nentries = attr->src_nentries;
182 if (nentries) {
183 struct CE_ring_state *src_ring;
184 unsigned CE_nbytes;
185 char *ptr;
186 uint64_t dma_addr;
187 nentries = roundup_pwr2(nentries);
188 if (CE_state->src_ring) {
189 CDF_ASSERT(CE_state->src_ring->nentries == nentries);
190 } else {
191 CE_nbytes = sizeof(struct CE_ring_state)
192 + (nentries * sizeof(void *));
193 ptr = cdf_mem_malloc(CE_nbytes);
194 if (!ptr) {
195 /* cannot allocate src ring. If the
196 * CE_state was allocated locally, free
197 * CE_state and return an error.
198 */
199 HIF_ERROR("%s: src ring has no mem", __func__);
200 if (malloc_CE_state) {
201 /* allocated CE_state locally */
202 cdf_spin_lock(&scn->target_lock);
203 scn->ce_id_to_state[CE_id] = NULL;
204 cdf_spin_unlock(&scn->target_lock);
205 cdf_mem_free(CE_state);
206 malloc_CE_state = false;
207 }
208 return NULL;
209 } else {
210 /* we can allocate src ring.
211 * Mark that the src ring is
212 * allocated locally
213 */
214 malloc_src_ring = true;
215 }
216 cdf_mem_zero(ptr, CE_nbytes);
217
218 src_ring = CE_state->src_ring =
219 (struct CE_ring_state *)ptr;
220 ptr += sizeof(struct CE_ring_state);
221 src_ring->nentries = nentries;
222 src_ring->nentries_mask = nentries - 1;
223 A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
224 src_ring->hw_index =
225 CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
226 src_ring->sw_index = src_ring->hw_index;
227 src_ring->write_index =
228 CE_SRC_RING_WRITE_IDX_GET(scn, ctrl_addr);
229 A_TARGET_ACCESS_END_RET_PTR(scn);
230 src_ring->low_water_mark_nentries = 0;
231 src_ring->high_water_mark_nentries = nentries;
232 src_ring->per_transfer_context = (void **)ptr;
233
234 /* Legacy platforms that do not support cache
235 * coherent DMA are unsupported
236 */
237 src_ring->base_addr_owner_space_unaligned =
238 cdf_os_mem_alloc_consistent(scn->cdf_dev,
239 (nentries *
240 sizeof(struct CE_src_desc) +
241 CE_DESC_RING_ALIGN),
242 &base_addr, 0);
243 if (src_ring->base_addr_owner_space_unaligned
244 == NULL) {
245 HIF_ERROR("%s: src ring has no DMA mem",
246 __func__);
247 goto error_no_dma_mem;
248 }
249 src_ring->base_addr_CE_space_unaligned = base_addr;
250
251 if (src_ring->
252 base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN
253 - 1)) {
254 src_ring->base_addr_CE_space =
255 (src_ring->base_addr_CE_space_unaligned
256 + CE_DESC_RING_ALIGN -
257 1) & ~(CE_DESC_RING_ALIGN - 1);
258
259 src_ring->base_addr_owner_space =
260 (void
261 *)(((size_t) src_ring->
262 base_addr_owner_space_unaligned +
263 CE_DESC_RING_ALIGN -
264 1) & ~(CE_DESC_RING_ALIGN - 1));
265 } else {
266 src_ring->base_addr_CE_space =
267 src_ring->base_addr_CE_space_unaligned;
268 src_ring->base_addr_owner_space =
269 src_ring->
270 base_addr_owner_space_unaligned;
271 }
272 /*
273 * Also allocate a shadow src ring in
274 * regular mem to use for faster access.
275 */
276 src_ring->shadow_base_unaligned =
277 cdf_mem_malloc(nentries *
278 sizeof(struct CE_src_desc) +
279 CE_DESC_RING_ALIGN);
280 if (src_ring->shadow_base_unaligned == NULL) {
281 HIF_ERROR("%s: src ring no shadow_base mem",
282 __func__);
283 goto error_no_dma_mem;
284 }
285 src_ring->shadow_base = (struct CE_src_desc *)
286 (((size_t) src_ring->shadow_base_unaligned +
287 CE_DESC_RING_ALIGN - 1) &
288 ~(CE_DESC_RING_ALIGN - 1));
289
290 A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
291 dma_addr = src_ring->base_addr_CE_space;
292 CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
293 (uint32_t)(dma_addr & 0xFFFFFFFF));
294#ifdef WLAN_ENABLE_QCA6180
295 {
296 uint32_t tmp;
297 tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(
298 scn, ctrl_addr);
299 tmp &= ~0x1F;
300 dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
301 CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
302 ctrl_addr, (uint32_t)dma_addr);
303 }
304#endif
305 CE_SRC_RING_SZ_SET(scn, ctrl_addr, nentries);
306 CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
307#ifdef BIG_ENDIAN_HOST
308 /* Enable source ring byte swap for big endian host */
309 CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
310#endif
311 CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
312 CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
313 A_TARGET_ACCESS_END_RET_PTR(scn);
314 }
315 }
316
317 /* destination ring setup */
318 nentries = attr->dest_nentries;
319 if (nentries) {
320 struct CE_ring_state *dest_ring;
321 unsigned CE_nbytes;
322 char *ptr;
323 uint64_t dma_addr;
324
325 nentries = roundup_pwr2(nentries);
326 if (CE_state->dest_ring) {
327 CDF_ASSERT(CE_state->dest_ring->nentries == nentries);
328 } else {
329 CE_nbytes = sizeof(struct CE_ring_state)
330 + (nentries * sizeof(void *));
331 ptr = cdf_mem_malloc(CE_nbytes);
332 if (!ptr) {
333 /* cannot allocate dst ring. If the CE_state
334 * or src ring was allocated locally, free the
335 * CE_state and src ring and return an error.
336 */
337 HIF_ERROR("%s: dest ring has no mem",
338 __func__);
339 if (malloc_src_ring) {
340 cdf_mem_free(CE_state->src_ring);
341 CE_state->src_ring = NULL;
342 malloc_src_ring = false;
343 }
344 if (malloc_CE_state) {
345 /* allocated CE_state locally */
346 cdf_spin_lock(&scn->target_lock);
347 scn->ce_id_to_state[CE_id] = NULL;
348 cdf_spin_unlock(&scn->target_lock);
349 cdf_mem_free(CE_state);
350 malloc_CE_state = false;
351 }
352 return NULL;
353 }
354 cdf_mem_zero(ptr, CE_nbytes);
355
356 dest_ring = CE_state->dest_ring =
357 (struct CE_ring_state *)ptr;
358 ptr += sizeof(struct CE_ring_state);
359 dest_ring->nentries = nentries;
360 dest_ring->nentries_mask = nentries - 1;
361 A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
362 dest_ring->sw_index =
363 CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
364 dest_ring->write_index =
365 CE_DEST_RING_WRITE_IDX_GET(scn, ctrl_addr);
366 A_TARGET_ACCESS_END_RET_PTR(scn);
367 dest_ring->low_water_mark_nentries = 0;
368 dest_ring->high_water_mark_nentries = nentries;
369 dest_ring->per_transfer_context = (void **)ptr;
370
371 /* Legacy platforms that do not support cache
372 * coherent DMA are unsupported */
373 dest_ring->base_addr_owner_space_unaligned =
374 cdf_os_mem_alloc_consistent(scn->cdf_dev,
375 (nentries *
376 sizeof(struct CE_dest_desc) +
377 CE_DESC_RING_ALIGN),
378 &base_addr, 0);
379 if (dest_ring->base_addr_owner_space_unaligned
380 == NULL) {
381 HIF_ERROR("%s: dest ring has no DMA mem",
382 __func__);
383 goto error_no_dma_mem;
384 }
385 dest_ring->base_addr_CE_space_unaligned = base_addr;
386
387 /* Correctly initialize memory to 0 to
388 * prevent garbage data from crashing the
389 * system during firmware download
390 */
391 cdf_mem_zero(dest_ring->base_addr_owner_space_unaligned,
392 nentries * sizeof(struct CE_dest_desc) +
393 CE_DESC_RING_ALIGN);
394
395 if (dest_ring->
396 base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN -
397 1)) {
398
399 dest_ring->base_addr_CE_space =
400 (dest_ring->
401 base_addr_CE_space_unaligned +
402 CE_DESC_RING_ALIGN -
403 1) & ~(CE_DESC_RING_ALIGN - 1);
404
405 dest_ring->base_addr_owner_space =
406 (void
407 *)(((size_t) dest_ring->
408 base_addr_owner_space_unaligned +
409 CE_DESC_RING_ALIGN -
410 1) & ~(CE_DESC_RING_ALIGN - 1));
411 } else {
412 dest_ring->base_addr_CE_space =
413 dest_ring->base_addr_CE_space_unaligned;
414 dest_ring->base_addr_owner_space =
415 dest_ring->
416 base_addr_owner_space_unaligned;
417 }
418
419 A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
420 dma_addr = dest_ring->base_addr_CE_space;
421 CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
422 (uint32_t)(dma_addr & 0xFFFFFFFF));
423#ifdef WLAN_ENABLE_QCA6180
424 {
425 uint32_t tmp;
426 tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn,
427 ctrl_addr);
428 tmp &= ~0x1F;
429 dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
430 CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
431 ctrl_addr, (uint32_t)dma_addr);
432 }
433#endif
434 CE_DEST_RING_SZ_SET(scn, ctrl_addr, nentries);
435#ifdef BIG_ENDIAN_HOST
436 /* Enable Dest ring byte swap for big endian host */
437 CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
438#endif
439 CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
440 CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
441 A_TARGET_ACCESS_END_RET_PTR(scn);
442
443 /* epping */
444 /* poll timer */
445 if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL)) {
446 cdf_softirq_timer_init(scn->cdf_dev,
447 &CE_state->poll_timer,
448 ce_poll_timeout,
449 CE_state,
450 CDF_TIMER_TYPE_SW);
451 CE_state->timer_inited = true;
452 cdf_softirq_timer_mod(&CE_state->poll_timer,
453 CE_POLL_TIMEOUT);
454 }
455 }
456 }
457
458 /* Enable CE error interrupts */
459 A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
460 CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
461 A_TARGET_ACCESS_END_RET_PTR(scn);
462
463 return (struct CE_handle *)CE_state;
464
465error_no_dma_mem:
466 ce_fini((struct CE_handle *)CE_state);
467 return NULL;
468}
469
470#ifdef WLAN_FEATURE_FASTPATH
471/**
472 * ce_h2t_tx_ce_cleanup() - placeholder for H2T CE cleanup
473 * @ce_hdl: Copy engine handle
474 *
475 * No processing is required inside this function. Using an assert,
476 * it makes sure that the TX CE has been processed completely.
477 * Return: none
478 */
479void
480ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
481{
482 struct CE_state *ce_state = (struct CE_state *)ce_hdl;
483 struct CE_ring_state *src_ring = ce_state->src_ring;
484 struct ol_softc *sc = ce_state->scn;
485 uint32_t sw_index, write_index;
486
487 if (sc->fastpath_mode_on && (ce_state->id == CE_HTT_H2T_MSG)) {
488 HIF_INFO("%s %d Fastpath mode ON, Cleaning up HTT Tx CE\n",
489 __func__, __LINE__);
490 cdf_spin_lock_bh(&sc->target_lock);
491 sw_index = src_ring->sw_index;
492 write_index = src_ring->write_index;
493 cdf_spin_unlock_bh(&sc->target_lock);
494
495 /* At this point Tx CE should be clean */
496 cdf_assert_always(sw_index == write_index);
497 }
498}
499#else
500void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
501{
502}
503#endif /* WLAN_FEATURE_FASTPATH */
504
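/**
 * ce_fini() - tear down a copy engine
 * @copyeng: copy engine handle returned by ce_init()
 *
 * Marks the CE unused, then releases the source ring (including its
 * shadow ring), the destination ring, the poll timer and finally the
 * CE state itself.
 */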
505void ce_fini(struct CE_handle *copyeng)
506{
507 struct CE_state *CE_state = (struct CE_state *)copyeng;
508 unsigned int CE_id = CE_state->id;
509 struct ol_softc *scn = CE_state->scn;
510
511 CE_state->state = CE_UNUSED;
512 scn->ce_id_to_state[CE_id] = NULL;
513 if (CE_state->src_ring) {
514 /* Cleanup the HTT Tx ring */
515 ce_h2t_tx_ce_cleanup(copyeng);
516
517 if (CE_state->src_ring->shadow_base_unaligned)
518 cdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
519 if (CE_state->src_ring->base_addr_owner_space_unaligned)
520 cdf_os_mem_free_consistent(scn->cdf_dev,
521 (CE_state->src_ring->nentries *
522 sizeof(struct CE_src_desc) +
523 CE_DESC_RING_ALIGN),
524 CE_state->src_ring->
525 base_addr_owner_space_unaligned,
526 CE_state->src_ring->
527 base_addr_CE_space, 0);
528 cdf_mem_free(CE_state->src_ring);
529 }
530 if (CE_state->dest_ring) {
531 if (CE_state->dest_ring->base_addr_owner_space_unaligned)
532 cdf_os_mem_free_consistent(scn->cdf_dev,
533 (CE_state->dest_ring->nentries *
534 sizeof(struct CE_dest_desc) +
535 CE_DESC_RING_ALIGN),
536 CE_state->dest_ring->
537 base_addr_owner_space_unaligned,
538 CE_state->dest_ring->
539 base_addr_CE_space, 0);
540 cdf_mem_free(CE_state->dest_ring);
541
542 /* epping */
543 if (CE_state->timer_inited) {
544 CE_state->timer_inited = false;
545 cdf_softirq_timer_free(&CE_state->poll_timer);
546 }
547 }
548 cdf_mem_free(CE_state);
549}
550
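/**
 * hif_detach_htc() - detach the HTC layer from HIF
 * @scn: hif context
 *
 * Clears both the pending and the currently installed message
 * callback tables so no further completions are delivered upstack.
 */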
551void hif_detach_htc(struct ol_softc *scn)
552{
553 struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
554
555 cdf_mem_zero(&hif_state->msg_callbacks_pending,
556 sizeof(hif_state->msg_callbacks_pending));
557 cdf_mem_zero(&hif_state->msg_callbacks_current,
558 sizeof(hif_state->msg_callbacks_current));
559}
560
561/* Send the first nbytes bytes of the buffer */
562CDF_STATUS
563hif_send_head(struct ol_softc *scn,
564 uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
565 cdf_nbuf_t nbuf, unsigned int data_attr)
566{
567 struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
568 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
569 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
570 int bytes = nbytes, nfrags = 0;
571 struct ce_sendlist sendlist;
572 int status, i = 0;
573 unsigned int mux_id = 0;
574
575 CDF_ASSERT(nbytes <= cdf_nbuf_len(nbuf));
576
577 transfer_id =
578 (mux_id & MUX_ID_MASK) |
579 (transfer_id & TRANSACTION_ID_MASK);
580 data_attr &= DESC_DATA_FLAG_MASK;
581 /*
582 * The common case involves sending multiple fragments within a
583 * single download (the tx descriptor and the tx frame header).
584 * So, optimize for the case of multiple fragments by not even
585 * checking whether it's necessary to use a sendlist.
586 * The overhead of using a sendlist for a single buffer download
587 * is not a big deal, since it happens rarely (for WMI messages).
588 */
589 ce_sendlist_init(&sendlist);
590 do {
591 uint32_t frag_paddr;
592 int frag_bytes;
593
594 frag_paddr = cdf_nbuf_get_frag_paddr_lo(nbuf, nfrags);
595 frag_bytes = cdf_nbuf_get_frag_len(nbuf, nfrags);
596 /*
597 * Clear the packet offset for all but the first CE desc.
598 */
599 if (i++ > 0)
600 data_attr &= ~CDF_CE_TX_PKT_OFFSET_BIT_M;
601
602 status = ce_sendlist_buf_add(&sendlist, frag_paddr,
603 frag_bytes >
604 bytes ? bytes : frag_bytes,
605 cdf_nbuf_get_frag_is_wordstream
606 (nbuf,
607 nfrags) ? 0 :
608 CE_SEND_FLAG_SWAP_DISABLE,
609 data_attr);
610 if (status != CDF_STATUS_SUCCESS) {
611 HIF_ERROR("%s: error, frag_num %d larger than limit",
612 __func__, nfrags);
613 return status;
614 }
615 bytes -= frag_bytes;
616 nfrags++;
617 } while (bytes > 0);
618
619 /* Make sure we have resources to handle this request */
620 cdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
621 if (pipe_info->num_sends_allowed < nfrags) {
622 cdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
623 ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
624 return CDF_STATUS_E_RESOURCES;
625 }
626 pipe_info->num_sends_allowed -= nfrags;
627 cdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
628
629 if (cdf_unlikely(ce_hdl == NULL)) {
630 HIF_ERROR("%s: error CE handle is null", __func__);
631 return A_ERROR;
632 }
633
634 NBUF_UPDATE_TX_PKT_COUNT(nbuf, NBUF_TX_PKT_HIF);
635 DPTRACE(cdf_dp_trace(nbuf, CDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
636 (uint8_t *)(cdf_nbuf_data(nbuf)),
637 sizeof(cdf_nbuf_data(nbuf))));
638 status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
639 CDF_ASSERT(status == CDF_STATUS_SUCCESS);
640
641 return status;
642}
643
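/**
 * hif_send_complete_check() - reap send completions on a pipe
 * @scn: hif context
 * @pipe: pipe to check
 * @force: when 0, the CE is only serviced if fewer than half of the
 *	pipe's send resources remain free
 */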
644void hif_send_complete_check(struct ol_softc *scn, uint8_t pipe, int force)
645{
646 if (!force) {
647 int resources;
648 /*
649 * Decide whether to actually poll for completions, or just
650 * wait for a later chance. If there seem to be plenty of
651 * resources left, then just wait, since checking involves
652 * reading a CE register, which is a relatively expensive
653 * operation.
654 */
655 resources = hif_get_free_queue_number(scn, pipe);
656 /*
657 * If at least 50% of the total resources are still available,
658 * don't bother checking again yet.
659 */
660 if (resources > (host_ce_config[pipe].src_nentries >> 1)) {
661 return;
662 }
663 }
664#ifdef ATH_11AC_TXCOMPACT
665 ce_per_engine_servicereap(scn, pipe);
666#else
667 ce_per_engine_service(scn, pipe);
668#endif
669}
670
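/**
 * hif_get_free_queue_number() - number of sends currently allowed
 * @scn: hif context
 * @pipe: pipe to query
 *
 * Return: the pipe's num_sends_allowed counter, read under the
 * completion free-queue lock.
 */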
671uint16_t hif_get_free_queue_number(struct ol_softc *scn, uint8_t pipe)
672{
673 struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
674 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
675 uint16_t rv;
676
677 cdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
678 rv = pipe_info->num_sends_allowed;
679 cdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
680 return rv;
681}
682
683/* Called by lower (CE) layer when a send to Target completes. */
684void
685hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
686 void *transfer_context, cdf_dma_addr_t CE_data,
687 unsigned int nbytes, unsigned int transfer_id,
688 unsigned int sw_index, unsigned int hw_index,
689 unsigned int toeplitz_hash_result)
690{
691 struct HIF_CE_pipe_info *pipe_info =
692 (struct HIF_CE_pipe_info *)ce_context;
693 struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
694 struct HIF_CE_completion_state *compl_state;
695 struct HIF_CE_completion_state *compl_queue_head, *compl_queue_tail;
696 unsigned int sw_idx = sw_index, hw_idx = hw_index;
697
698 compl_queue_head = compl_queue_tail = NULL;
699 do {
700 /*
701 * For the send completion of an item in sendlist, just increment
702 * num_sends_allowed. The upper layer callback will be triggered
703 * when the last fragment has been sent.
704 */
705 if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
706 cdf_spin_lock(&pipe_info->completion_freeq_lock);
707 pipe_info->num_sends_allowed++;
708 cdf_spin_unlock(&pipe_info->completion_freeq_lock);
709 continue;
710 }
711
712 cdf_spin_lock(&pipe_info->completion_freeq_lock);
713 compl_state = pipe_info->completion_freeq_head;
714 if (!compl_state) {
715 cdf_spin_unlock(&pipe_info->completion_freeq_lock);
716 HIF_ERROR("%s: ce_id:%d num_allowed:%d pipe_info:%p",
717 __func__, pipe_info->pipe_num,
718 pipe_info->num_sends_allowed,
719 pipe_info);
720 ASSERT(0);
721 break;
722 }
723 pipe_info->completion_freeq_head = compl_state->next;
724 cdf_spin_unlock(&pipe_info->completion_freeq_lock);
725
726 compl_state->next = NULL;
727 compl_state->send_or_recv = HIF_CE_COMPLETE_SEND;
728 compl_state->copyeng = copyeng;
729 compl_state->ce_context = ce_context;
730 compl_state->transfer_context = transfer_context;
731 compl_state->data = CE_data;
732 compl_state->nbytes = nbytes;
733 compl_state->transfer_id = transfer_id;
734 compl_state->flags = 0;
735 compl_state->toeplitz_hash_result = toeplitz_hash_result;
736
737 /* Enqueue at end of local queue */
738 if (compl_queue_tail) {
739 compl_queue_tail->next = compl_state;
740 } else {
741 compl_queue_head = compl_state;
742 }
743 compl_queue_tail = compl_state;
744 } while (ce_completed_send_next(copyeng,
745 &ce_context, &transfer_context,
746 &CE_data, &nbytes, &transfer_id,
747 &sw_idx, &hw_idx,
748 &toeplitz_hash_result) == CDF_STATUS_SUCCESS);
749
750 if (compl_queue_head == NULL) {
751 /*
752 * If only some of the items within a sendlist have completed,
753 * don't invoke completion processing until the entire sendlist
754 * has been sent.
755 */
756 return;
757 }
758
759 cdf_spin_lock(&hif_state->completion_pendingq_lock);
760
761 /* Enqueue the local completion queue on the
762 * per-device completion queue */
763 if (hif_state->completion_pendingq_head) {
764 hif_state->completion_pendingq_tail->next = compl_queue_head;
765 hif_state->completion_pendingq_tail = compl_queue_tail;
766 cdf_spin_unlock(&hif_state->completion_pendingq_lock);
767 } else {
768 hif_state->completion_pendingq_head = compl_queue_head;
769 hif_state->completion_pendingq_tail = compl_queue_tail;
770 cdf_spin_unlock(&hif_state->completion_pendingq_lock);
771
772 /* Alert the send completion service thread */
773 hif_completion_thread(hif_state);
774 }
775}
776
777/* Called by lower (CE) layer when data is received from the Target. */
778void
779hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
780 void *transfer_context, cdf_dma_addr_t CE_data,
781 unsigned int nbytes, unsigned int transfer_id,
782 unsigned int flags)
783{
784 struct HIF_CE_pipe_info *pipe_info =
785 (struct HIF_CE_pipe_info *)ce_context;
786 struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
787 struct ol_softc *scn = hif_state->scn;
788 struct HIF_CE_completion_state *compl_state;
789 struct HIF_CE_completion_state *compl_queue_head, *compl_queue_tail;
790
791 compl_queue_head = compl_queue_tail = NULL;
792 do {
793 cdf_spin_lock(&pipe_info->completion_freeq_lock);
794 compl_state = pipe_info->completion_freeq_head;
795 ASSERT(compl_state != NULL);
796 pipe_info->completion_freeq_head = compl_state->next;
797 cdf_spin_unlock(&pipe_info->completion_freeq_lock);
798
799 compl_state->next = NULL;
800 compl_state->send_or_recv = HIF_CE_COMPLETE_RECV;
801 compl_state->copyeng = copyeng;
802 compl_state->ce_context = ce_context;
803 compl_state->transfer_context = transfer_context;
804 compl_state->data = CE_data;
805 compl_state->nbytes = nbytes;
806 compl_state->transfer_id = transfer_id;
807 compl_state->flags = flags;
808
809 /* Enqueue at end of local queue */
810 if (compl_queue_tail) {
811 compl_queue_tail->next = compl_state;
812 } else {
813 compl_queue_head = compl_state;
814 }
815 compl_queue_tail = compl_state;
816
817 cdf_nbuf_unmap_single(scn->cdf_dev,
818 (cdf_nbuf_t) transfer_context,
819 CDF_DMA_FROM_DEVICE);
820
821 /*
822 * EV #112693 - [Peregrine][ES1][WB342][Win8x86][Performance]
823 * BSoD_0x133 occurred in VHT80 UDP_DL.
824 * Forcibly break out of the DPC if the number of loops in
825 * hif_pci_ce_recv_data reaches MAX_NUM_OF_RECEIVES, to avoid
826 * spending too long in the DPC for a single interrupt.
827 * If the force-break action was taken, schedule another DPC to
828 * avoid data loss. This currently applies to Windows only;
829 * Linux/Mac OS can extend it to their platforms
830 * if necessary.
831 */
832
833 /* Set up force_break flag if num of receives reaches
834 * MAX_NUM_OF_RECEIVES */
835 scn->receive_count++;
836 if (cdf_unlikely(hif_max_num_receives_reached(
837 scn->receive_count))) {
838 scn->force_break = 1;
839 break;
840 }
841 } while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
842 &CE_data, &nbytes, &transfer_id,
843 &flags) == CDF_STATUS_SUCCESS);
844
845 cdf_spin_lock(&hif_state->completion_pendingq_lock);
846
847 /* Enqueue the local completion queue on the
848 * per-device completion queue */
849 if (hif_state->completion_pendingq_head) {
850 hif_state->completion_pendingq_tail->next = compl_queue_head;
851 hif_state->completion_pendingq_tail = compl_queue_tail;
852 cdf_spin_unlock(&hif_state->completion_pendingq_lock);
853 } else {
854 hif_state->completion_pendingq_head = compl_queue_head;
855 hif_state->completion_pendingq_tail = compl_queue_tail;
856 cdf_spin_unlock(&hif_state->completion_pendingq_lock);
857
858 /* Alert the recv completion service thread */
859 hif_completion_thread(hif_state);
860 }
861}
862
863/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
864
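/**
 * hif_post_init() - save the upper-layer message callbacks
 * @scn: hif context
 * @unused: unused
 * @callbacks: callback table supplied by the upper layer
 *
 * The callbacks are only saved as pending here; they are installed
 * later by hif_msg_callbacks_install().
 */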
865void
866hif_post_init(struct ol_softc *scn, void *unused,
867 struct hif_msg_callbacks *callbacks)
868{
869 struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
870
871#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
872 spin_lock_init(&pcie_access_log_lock);
873#endif
874 /* Save callbacks for later installation */
875 cdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
876 sizeof(hif_state->msg_callbacks_pending));
877
878}
879
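/**
 * hif_pci_free_complete_state() - free a pipe's completion state storage
 * @pipe_info: pipe whose completion_space_list batches are released
 */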
880static void hif_pci_free_complete_state(struct HIF_CE_pipe_info *pipe_info)
881{
882 struct HIF_CE_completion_state_list *tmp_list;
883
884 while (pipe_info->completion_space_list) {
885 tmp_list = pipe_info->completion_space_list;
886 pipe_info->completion_space_list = tmp_list->next;
887 cdf_mem_free(tmp_list);
888 }
889}
890
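/**
 * hif_alloc_complete_state_list() - preallocate completion state nodes
 * @pipe_info: pipe to populate
 * @completions_needed: number of completion state nodes required
 *
 * Allocates the nodes in batches of HIF_CE_COMPLETE_STATE_NUM, links
 * them onto the pipe's completion free queue and records each batch
 * on completion_space_list so it can be freed later.
 *
 * Return: 0 on success, -1 on allocation failure.
 */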
891static int hif_alloc_complete_state_list(
892 struct HIF_CE_pipe_info *pipe_info,
893 int completions_needed)
894{
895 struct HIF_CE_completion_state *compl_state;
896 struct HIF_CE_completion_state_list *tmp_list;
897 int i;
898 int idx;
899 int num_list;
900 int allocated_node;
901 int num_in_batch;
902 size_t len;
903
904 allocated_node = 0;
905 num_list = (completions_needed + HIF_CE_COMPLETE_STATE_NUM - 1);
906 num_list /= HIF_CE_COMPLETE_STATE_NUM;
907 for (idx = 0; idx < num_list; idx++) {
908 if (completions_needed - allocated_node >=
909 HIF_CE_COMPLETE_STATE_NUM)
910 num_in_batch = HIF_CE_COMPLETE_STATE_NUM;
911 else
912 num_in_batch = completions_needed - allocated_node;
913 if (num_in_batch <= 0)
914 break;
915 len = num_in_batch * sizeof(struct HIF_CE_completion_state) +
916 sizeof(struct HIF_CE_completion_state_list);
917 /* Allocate structures to track pending send/recv completions */
918 tmp_list =
919 (struct HIF_CE_completion_state_list *)
920 cdf_mem_malloc(len);
921 if (!tmp_list) {
922 HIF_ERROR("%s: compl_state has no mem", __func__);
923 hif_pci_free_complete_state(pipe_info);
924 return -1;
925 }
926 cdf_mem_zero(tmp_list, len);
927 compl_state = (struct HIF_CE_completion_state *)
928 ((uint8_t *)tmp_list +
929 sizeof(struct HIF_CE_completion_state_list));
930 for (i = 0; i < num_in_batch; i++) {
931 compl_state->send_or_recv = HIF_CE_COMPLETE_FREE;
932 compl_state->next = NULL;
933 if (pipe_info->completion_freeq_head)
934 pipe_info->completion_freeq_tail->next =
935 compl_state;
936 else
937 pipe_info->completion_freeq_head =
938 compl_state;
939 pipe_info->completion_freeq_tail = compl_state;
940 compl_state++;
941 allocated_node++;
942 }
943 if (pipe_info->completion_space_list == NULL) {
944 pipe_info->completion_space_list = tmp_list;
945 tmp_list->next = NULL;
946 } else {
947 tmp_list->next =
948 pipe_info->completion_space_list;
949 pipe_info->completion_space_list = tmp_list;
950 }
951 }
952 cdf_spinlock_init(&pipe_info->completion_freeq_lock);
953 return 0;
954}
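
/**
 * hif_completion_thread_startup() - set up completion handling
 * @hif_state: HIF CE state
 *
 * Registers send/receive callbacks with each copy engine (except the
 * diagnostic CE) and preallocates the completion state each pipe needs.
 *
 * Return: 0 on success, a non-zero error value on failure.
 */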
955int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
956{
957 struct CE_handle *ce_diag = hif_state->ce_diag;
958 int pipe_num;
959 struct ol_softc *scn = hif_state->scn;
960
961 /* daemonize("hif_compl_thread"); */
962
963 cdf_spinlock_init(&hif_state->completion_pendingq_lock);
964 hif_state->completion_pendingq_head =
965 hif_state->completion_pendingq_tail = NULL;
966
967 if (scn->ce_count == 0) {
968 HIF_ERROR("%s: Invalid ce_count\n", __func__);
969 return -EINVAL;
970 }
971 A_TARGET_ACCESS_LIKELY(scn);
972 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
973 struct CE_attr attr;
974 struct HIF_CE_pipe_info *pipe_info;
975 int completions_needed;
976
977 pipe_info = &hif_state->pipe_info[pipe_num];
978 if (pipe_info->ce_hdl == ce_diag) {
979 continue; /* Handle Diagnostic CE specially */
980 }
981 attr = host_ce_config[pipe_num];
982 completions_needed = 0;
983 if (attr.src_nentries) {
984 /* pipe used to send to target */
985 HIF_INFO_MED("%s: pipe_num:%d pipe_info:0x%p",
986 __func__, pipe_num, pipe_info);
987 ce_send_cb_register(pipe_info->ce_hdl,
988 hif_pci_ce_send_done, pipe_info,
989 attr.flags & CE_ATTR_DISABLE_INTR);
990 completions_needed += attr.src_nentries;
991 pipe_info->num_sends_allowed = attr.src_nentries - 1;
992 }
993 if (attr.dest_nentries) {
994 /* pipe used to receive from target */
995 ce_recv_cb_register(pipe_info->ce_hdl,
996 hif_pci_ce_recv_data, pipe_info,
997 attr.flags & CE_ATTR_DISABLE_INTR);
998 completions_needed += attr.dest_nentries;
999 }
1000
1001 pipe_info->completion_freeq_head =
1002 pipe_info->completion_freeq_tail = NULL;
1003 if (completions_needed > 0) {
1004 int ret;
1005
1006 ret = hif_alloc_complete_state_list(pipe_info,
1007 completions_needed);
1008 if (ret != 0) {
1009 HIF_ERROR("%s: ce_id = %d, no mem",
1010 __func__, pipe_info->pipe_num);
1011 return ret;
1012 }
1013 }
1014 }
1015 A_TARGET_ACCESS_UNLIKELY(scn);
1016 return 0;
1017}
1018
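/**
 * hif_completion_thread_shutdown() - stop completion handling
 * @hif_state: HIF CE state
 *
 * Drops any completions still pending on the per-device queue and
 * frees the per-pipe completion state storage.
 */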
1019void hif_completion_thread_shutdown(struct HIF_CE_state *hif_state)
1020{
1021 struct HIF_CE_completion_state *compl_state;
1022 struct HIF_CE_pipe_info *pipe_info;
1023 struct ol_softc *scn = hif_state->scn;
1024 int pipe_num;
1025
1026 /*
1027 * Drop pending completions. These have already been
1028 * reported by the CE layer to us but we have not yet
1029 * passed them upstack.
1030 */
1031 while ((compl_state = hif_state->completion_pendingq_head) != NULL) {
1032 cdf_nbuf_t netbuf;
1033
1034 netbuf = (cdf_nbuf_t) compl_state->transfer_context;
1035 cdf_nbuf_free(netbuf);
1036
1037 hif_state->completion_pendingq_head = compl_state->next;
1038
1039 /*
1040 * NB: Don't bother to place compl_state on pipe's free queue,
1041 * because we'll free underlying memory for the free queues
1042 * in a moment anyway.
1043 */
1044 }
1045
1046 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1047 pipe_info = &hif_state->pipe_info[pipe_num];
1048 hif_pci_free_complete_state(pipe_info);
1049 cdf_spinlock_destroy(&pipe_info->completion_freeq_lock);
1050 }
1051
1052 /* hif_state->compl_thread = NULL; */
1053 /* complete_and_exit(&hif_state->compl_thread_done, 0); */
1054}
1055
1056/*
1057 * This thread provides a context in which send/recv completions
1058 * are handled.
1059 *
1060 * Note: HIF installs callback functions with the CE layer.
1061 * Those functions are called directly (e.g. in interrupt context).
1062 * Upper layers (e.g. HTC) have installed callbacks with HIF which
1063 * expect to be called in a thread context. This is where that
1064 * conversion occurs.
1065 *
1066 * TBDXXX: Currently we use just one thread for all pipes.
1067 * This might be sufficient or we might need multiple threads.
1068 */
1069int
1070/* hif_completion_thread(void *hif_dev) */
1071hif_completion_thread(struct HIF_CE_state *hif_state)
1072{
1073 struct hif_msg_callbacks *msg_callbacks =
1074 &hif_state->msg_callbacks_current;
1075 struct HIF_CE_completion_state *compl_state;
1076
1077 /* Allow only one instance of the thread to execute at a time to
1078 * prevent out of order processing of messages - this is bad for higher
1079 * layer code
1080 */
1081 if (!cdf_atomic_dec_and_test(&hif_state->hif_thread_idle)) {
1082 /* We were not the lucky one */
1083 cdf_atomic_inc(&hif_state->hif_thread_idle);
1084 return 0;
1085 }
1086
1087 if (!msg_callbacks->fwEventHandler
1088 || !msg_callbacks->txCompletionHandler
1089 || !msg_callbacks->rxCompletionHandler) {
1090 return 0;
1091 }
1092 while (atomic_read(&hif_state->fw_event_pending) > 0) {
1093 /*
1094 * Clear pending state before handling, in case there's
1095 * another while we process the first.
1096 */
1097 atomic_set(&hif_state->fw_event_pending, 0);
1098 msg_callbacks->fwEventHandler(msg_callbacks->Context,
1099 CDF_STATUS_E_FAILURE);
1100 }
1101
1102 if (hif_state->scn->target_status == OL_TRGET_STATUS_RESET)
1103 return 0;
1104
1105 for (;; ) {
1106 struct HIF_CE_pipe_info *pipe_info;
1107 int send_done = 0;
1108
1109 cdf_spin_lock(&hif_state->completion_pendingq_lock);
1110
1111 if (!hif_state->completion_pendingq_head) {
1112 /* We are atomically sure that
1113 * there is no pending work */
1114 cdf_atomic_inc(&hif_state->hif_thread_idle);
1115 cdf_spin_unlock(&hif_state->completion_pendingq_lock);
1116 break; /* All pending completions are handled */
1117 }
1118
1119 /* Dequeue the first unprocessed but completed transfer */
1120 compl_state = hif_state->completion_pendingq_head;
1121 hif_state->completion_pendingq_head = compl_state->next;
1122 cdf_spin_unlock(&hif_state->completion_pendingq_lock);
1123
1124 pipe_info = (struct HIF_CE_pipe_info *)compl_state->ce_context;
1125 if (compl_state->send_or_recv == HIF_CE_COMPLETE_SEND) {
1126 msg_callbacks->txCompletionHandler(msg_callbacks->
1127 Context,
1128 compl_state->
1129 transfer_context,
1130 compl_state->
1131 transfer_id,
1132 compl_state->toeplitz_hash_result);
1133 send_done = 1;
1134 } else {
1135 /* compl_state->send_or_recv == HIF_CE_COMPLETE_RECV */
1136 cdf_nbuf_t netbuf;
1137 unsigned int nbytes;
1138
1139 atomic_inc(&pipe_info->recv_bufs_needed);
1140 hif_post_recv_buffers(hif_state->scn);
1141
1142 netbuf = (cdf_nbuf_t) compl_state->transfer_context;
1143 nbytes = compl_state->nbytes;
1144 /*
1145 * To see the following debug output,
1146 * enable the HIF_PCI_DEBUG flag in
1147 * the debug module declaration in this source file
1148 */
1149 HIF_DBG("%s: netbuf=%p, nbytes=%d",
1150 __func__, netbuf, nbytes);
1151 if (nbytes <= pipe_info->buf_sz) {
1152 cdf_nbuf_set_pktlen(netbuf, nbytes);
1153 msg_callbacks->
1154 rxCompletionHandler(msg_callbacks->Context,
1155 netbuf,
1156 pipe_info->pipe_num);
1157 } else {
1158 HIF_ERROR(
1159 "%s: Invalid Rx msg buf:%p nbytes:%d",
1160 __func__, netbuf, nbytes);
1161 cdf_nbuf_free(netbuf);
1162 }
1163 }
1164
1165 /* Recycle completion state back to the pipe it came from. */
1166 compl_state->next = NULL;
1167 compl_state->send_or_recv = HIF_CE_COMPLETE_FREE;
1168 cdf_spin_lock(&pipe_info->completion_freeq_lock);
1169 if (pipe_info->completion_freeq_head) {
1170 pipe_info->completion_freeq_tail->next = compl_state;
1171 } else {
1172 pipe_info->completion_freeq_head = compl_state;
1173 }
1174 pipe_info->completion_freeq_tail = compl_state;
1175 pipe_info->num_sends_allowed += send_done;
1176 cdf_spin_unlock(&pipe_info->completion_freeq_lock);
1177 }
1178
1179 return 0;
1180}
1181
1182/*
1183 * Install pending msg callbacks.
1184 *
1185 * TBDXXX: This hack is needed because upper layers install msg callbacks
1186 * for use with HTC before BMI is done; yet this HIF implementation
1187 * needs to continue to use BMI msg callbacks. Really, upper layers
1188 * should not register HTC callbacks until AFTER BMI phase.
1189 */
1190static void hif_msg_callbacks_install(struct ol_softc *scn)
1191{
1192 struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
1193
1194 cdf_mem_copy(&hif_state->msg_callbacks_current,
1195 &hif_state->msg_callbacks_pending,
1196 sizeof(hif_state->msg_callbacks_pending));
1197}
1198
1199void hif_claim_device(struct ol_softc *scn, void *claimedContext)
1200{
1201 struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
1202
1203 hif_state->claimedContext = claimedContext;
1204}
1205
1206void hif_release_device(struct ol_softc *scn)
1207{
1208 struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
1209
1210 hif_state->claimedContext = NULL;
1211}
1212
1213void
1214hif_get_default_pipe(struct ol_softc *scn, uint8_t *ULPipe, uint8_t *DLPipe)
1215{
1216 int ul_is_polled, dl_is_polled;
1217
1218 (void)hif_map_service_to_pipe(scn, HTC_CTRL_RSVD_SVC,
1219 ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
1220}
1221
1222/**
1223 * hif_dump_pipe_debug_count() - Log error count
1224 * @scn: ol_softc pointer.
1225 *
1226 * Output the pipe error counts of each pipe to log file
1227 *
1228 * Return: N/A
1229 */
1230void hif_dump_pipe_debug_count(struct ol_softc *scn)
1231{
1232 struct HIF_CE_state *hif_state;
1233 int pipe_num;
1234
1235 if (scn == NULL) {
1236 HIF_ERROR("%s scn is NULL", __func__);
1237 return;
1238 }
1239 hif_state = (struct HIF_CE_state *)scn->hif_hdl;
1240 if (hif_state == NULL) {
1241 HIF_ERROR("%s hif_state is NULL", __func__);
1242 return;
1243 }
1244 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1245 struct HIF_CE_pipe_info *pipe_info;
1246
1247 pipe_info = &hif_state->pipe_info[pipe_num];
1248
1249 if (pipe_info->nbuf_alloc_err_count > 0 ||
1250 pipe_info->nbuf_dma_err_count > 0 ||
1251 pipe_info->nbuf_ce_enqueue_err_count)
1252 HIF_ERROR(
1253 "%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
1254 __func__, pipe_info->pipe_num,
1255 atomic_read(&pipe_info->recv_bufs_needed),
1256 pipe_info->nbuf_alloc_err_count,
1257 pipe_info->nbuf_dma_err_count,
1258 pipe_info->nbuf_ce_enqueue_err_count);
1259 }
1260}
1261
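/**
 * hif_post_recv_buffers_for_pipe() - replenish receive buffers on a pipe
 * @pipe_info: pipe to replenish
 *
 * Allocates, DMA-maps and enqueues network buffers until the pipe's
 * recv_bufs_needed count drops to zero, tracking allocation, mapping
 * and enqueue failures in the per-pipe error counters.
 *
 * Return: 0 when fully replenished, 1 on any failure.
 */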
1262static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
1263{
1264 struct CE_handle *ce_hdl;
1265 cdf_size_t buf_sz;
1266 struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
1267 struct ol_softc *scn = hif_state->scn;
1268 CDF_STATUS ret;
1269 uint32_t bufs_posted = 0;
1270
1271 buf_sz = pipe_info->buf_sz;
1272 if (buf_sz == 0) {
1273 /* Unused Copy Engine */
1274 return 0;
1275 }
1276
1277 ce_hdl = pipe_info->ce_hdl;
1278
1279 cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
1280 while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
1281 cdf_dma_addr_t CE_data; /* CE space buffer address */
1282 cdf_nbuf_t nbuf;
1283 int status;
1284
1285 atomic_dec(&pipe_info->recv_bufs_needed);
1286 cdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
1287
1288 nbuf = cdf_nbuf_alloc(scn->cdf_dev, buf_sz, 0, 4, false);
1289 if (!nbuf) {
1290 cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
1291 pipe_info->nbuf_alloc_err_count++;
1292 cdf_spin_unlock_bh(
1293 &pipe_info->recv_bufs_needed_lock);
1294 HIF_ERROR(
1295 "%s buf alloc error [%d] needed %d, nbuf_alloc_err_count = %u",
1296 __func__, pipe_info->pipe_num,
1297 atomic_read(&pipe_info->recv_bufs_needed),
1298 pipe_info->nbuf_alloc_err_count);
1299 atomic_inc(&pipe_info->recv_bufs_needed);
1300 return 1;
1301 }
1302
1303 /*
1304 * cdf_nbuf_peek_header(nbuf, &data, &unused);
1305 * CE_data = dma_map_single(dev, data, buf_sz,
1306 * DMA_FROM_DEVICE);
1307 */
1308 ret =
1309 cdf_nbuf_map_single(scn->cdf_dev, nbuf,
1310 CDF_DMA_FROM_DEVICE);
1311
1312 if (unlikely(ret != CDF_STATUS_SUCCESS)) {
1313 cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
1314 pipe_info->nbuf_dma_err_count++;
1315 cdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
1316 HIF_ERROR(
1317 "%s buf map error [%d] needed %d, nbuf_dma_err_count = %u",
1318 __func__, pipe_info->pipe_num,
1319 atomic_read(&pipe_info->recv_bufs_needed),
1320 pipe_info->nbuf_dma_err_count);
1321 cdf_nbuf_free(nbuf);
1322 atomic_inc(&pipe_info->recv_bufs_needed);
1323 return 1;
1324 }
1325
1326 CE_data = cdf_nbuf_get_frag_paddr_lo(nbuf, 0);
1327
1328 cdf_os_mem_dma_sync_single_for_device(scn->cdf_dev, CE_data,
1329 buf_sz, DMA_FROM_DEVICE);
1330 status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
1331 CDF_ASSERT(status == CDF_STATUS_SUCCESS);
1332 if (status != EOK) {
1333 cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
1334 pipe_info->nbuf_ce_enqueue_err_count++;
1335 cdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
1336 HIF_ERROR(
1337 "%s buf enqueue error [%d] needed %d, nbuf_ce_enqueue_err_count = %u",
1338 __func__, pipe_info->pipe_num,
1339 atomic_read(&pipe_info->recv_bufs_needed),
1340 pipe_info->nbuf_ce_enqueue_err_count);
1341 atomic_inc(&pipe_info->recv_bufs_needed);
1342 cdf_nbuf_free(nbuf);
1343 return 1;
1344 }
1345
1346 cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
1347 bufs_posted++;
1348 }
1349 pipe_info->nbuf_alloc_err_count =
1350 (pipe_info->nbuf_alloc_err_count > bufs_posted)?
1351 pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
1352 pipe_info->nbuf_dma_err_count =
1353 (pipe_info->nbuf_dma_err_count > bufs_posted)?
1354 pipe_info->nbuf_dma_err_count - bufs_posted : 0;
1355 pipe_info->nbuf_ce_enqueue_err_count =
1356 (pipe_info->nbuf_ce_enqueue_err_count > bufs_posted)?
1357 pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
1358
1359 cdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
1360
1361 return 0;
1362}
1363
1364/*
1365 * Try to post all desired receive buffers for all pipes.
1366 * Returns 0 if all desired buffers are posted,
1367 * non-zero if we were unable to completely
1368 * replenish receive buffers.
1369 */
1370static int hif_post_recv_buffers(struct ol_softc *scn)
1371{
1372 struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
1373 int pipe_num, rv = 0;
1374
1375 A_TARGET_ACCESS_LIKELY(scn);
1376 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1377 struct HIF_CE_pipe_info *pipe_info;
1378
1379 pipe_info = &hif_state->pipe_info[pipe_num];
1380 if (hif_post_recv_buffers_for_pipe(pipe_info)) {
1381 rv = 1;
1382 goto done;
1383 }
1384 }
1385
1386done:
1387 A_TARGET_ACCESS_UNLIKELY(scn);
1388
1389 return rv;
1390}
1391
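/**
 * hif_start() - start message processing on the copy engine pipes
 * @scn: hif context
 *
 * Starts the completion handling infrastructure, installs the pending
 * message callbacks and posts the initial set of receive buffers.
 *
 * Return: CDF_STATUS_SUCCESS on success, CDF_STATUS_E_FAILURE otherwise.
 */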
1392CDF_STATUS hif_start(struct ol_softc *scn)
1393{
1394 struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
1395
1396 if (hif_completion_thread_startup(hif_state))
1397 return CDF_STATUS_E_FAILURE;
1398
1399 hif_msg_callbacks_install(scn);
1400
1401 /* Post buffers once to start things off. */
1402 (void)hif_post_recv_buffers(scn);
1403
1404 hif_state->started = true;
1405
1406 return CDF_STATUS_SUCCESS;
1407}
1408
1409#ifdef WLAN_FEATURE_FASTPATH
1410/**
1411 * hif_enable_fastpath() - update that we have enabled fastpath mode
1412 * @hif_device: HIF context
1413 *
1414 * For use in data path
1415 *
1416 * Return: void
1417 */
1418void
1419hif_enable_fastpath(struct ol_softc *hif_device)
1420{
1421 HIF_INFO("Enabling fastpath mode\n");
1422 hif_device->fastpath_mode_on = 1;
1423}
1424#endif /* WLAN_FEATURE_FASTPATH */
1425
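/**
 * hif_recv_buffer_cleanup_on_pipe() - reclaim posted receive buffers
 * @pipe_info: pipe being cleaned up
 *
 * Revokes every receive buffer still held by the copy engine, unmaps
 * it and frees it.
 */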
1426void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
1427{
1428 struct ol_softc *scn;
1429 struct CE_handle *ce_hdl;
1430 uint32_t buf_sz;
1431 struct HIF_CE_state *hif_state;
1432 cdf_nbuf_t netbuf;
1433 cdf_dma_addr_t CE_data;
1434 void *per_CE_context;
1435
1436 buf_sz = pipe_info->buf_sz;
1437 if (buf_sz == 0) {
1438 /* Unused Copy Engine */
1439 return;
1440 }
1441
1442 hif_state = pipe_info->HIF_CE_state;
1443 if (!hif_state->started) {
1444 return;
1445 }
1446
1447 scn = hif_state->scn;
1448 ce_hdl = pipe_info->ce_hdl;
1449
1450 if (scn->cdf_dev == NULL) {
1451 return;
1452 }
1453 while (ce_revoke_recv_next
1454 (ce_hdl, &per_CE_context, (void **)&netbuf,
1455 &CE_data) == CDF_STATUS_SUCCESS) {
1456 cdf_nbuf_unmap_single(scn->cdf_dev, netbuf,
1457 CDF_DMA_FROM_DEVICE);
1458 cdf_nbuf_free(netbuf);
1459 }
1460}
1461
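/**
 * hif_send_buffer_cleanup_on_pipe() - reclaim pending send buffers
 * @pipe_info: pipe being cleaned up
 *
 * Cancels every send still queued on the copy engine and, except for
 * packets belonging to the HTC endpoint (already freed elsewhere),
 * reports the completion to the upper layer so the buffer is freed.
 */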
1462void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
1463{
1464 struct CE_handle *ce_hdl;
1465 struct HIF_CE_state *hif_state;
1466 cdf_nbuf_t netbuf;
1467 void *per_CE_context;
1468 cdf_dma_addr_t CE_data;
1469 unsigned int nbytes;
1470 unsigned int id;
1471 uint32_t buf_sz;
1472 uint32_t toeplitz_hash_result;
1473
1474 buf_sz = pipe_info->buf_sz;
1475 if (buf_sz == 0) {
1476 /* Unused Copy Engine */
1477 return;
1478 }
1479
1480 hif_state = pipe_info->HIF_CE_state;
1481 if (!hif_state->started) {
1482 return;
1483 }
1484
1485 ce_hdl = pipe_info->ce_hdl;
1486
1487 while (ce_cancel_send_next
1488 (ce_hdl, &per_CE_context,
1489 (void **)&netbuf, &CE_data, &nbytes,
1490 &id, &toeplitz_hash_result) == CDF_STATUS_SUCCESS) {
1491 if (netbuf != CE_SENDLIST_ITEM_CTXT) {
1492 /*
1493 * Packets enqueued by htt_h2t_ver_req_msg() and
1494 * htt_h2t_rx_ring_cfg_msg_ll() have already been
1495 * freed in htt_htc_misc_pkt_pool_free() in
1496 * wlantl_close(). Check the endpoint they are
1497 * queued on so that they are not freed here
1498 * a second time.
1499 */
1500 if (id == hif_state->scn->htc_endpoint)
1501 return;
1502 /* Indicate the completion to higher
1503 * layer to free the buffer */
1504 hif_state->msg_callbacks_current.
1505 txCompletionHandler(hif_state->
1506 msg_callbacks_current.Context,
1507 netbuf, id, toeplitz_hash_result);
1508 }
1509 }
1510}
1511
1512/*
1513 * Cleanup residual buffers for device shutdown:
1514 * - buffers that were enqueued for receive
1515 * - buffers that were to be sent
1516 * Note: Buffers that had completed but which were
1517 * not yet processed are on a completion queue. They
1518 * are handled when the completion thread shuts down.
1519 */
1520void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
1521{
1522 int pipe_num;
1523
1524 for (pipe_num = 0; pipe_num < hif_state->scn->ce_count; pipe_num++) {
1525 struct HIF_CE_pipe_info *pipe_info;
1526
1527 pipe_info = &hif_state->pipe_info[pipe_num];
1528 hif_recv_buffer_cleanup_on_pipe(pipe_info);
1529 hif_send_buffer_cleanup_on_pipe(pipe_info);
1530 }
1531}
1532
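/**
 * hif_flush_surprise_remove() - clean up buffers after surprise removal
 * @scn: hif context
 */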
1533void hif_flush_surprise_remove(struct ol_softc *scn)
1534{
1535 struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
1536 hif_buffer_cleanup(hif_state);
1537}
1538
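/**
 * hif_stop() - stop HIF activity and release copy engine resources
 * @scn: hif context
 *
 * Shuts down completion handling, cleans up residual send/receive
 * buffers, tears down every copy engine and cancels the sleep timer.
 */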
1539void hif_stop(struct ol_softc *scn)
1540{
1541 struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
1542 int pipe_num;
1543
1544 scn->hif_init_done = false;
1545 if (hif_state->started) {
1546 /* sync shutdown */
1547 hif_completion_thread_shutdown(hif_state);
1548 hif_completion_thread(hif_state);
1549 } else {
1550 hif_completion_thread_shutdown(hif_state);
1551 }
1552
1553 /*
1554 * At this point, asynchronous threads are stopped,
1555 * The Target should not DMA nor interrupt, Host code may
1556 * not initiate anything more. So we just need to clean
1557 * up Host-side state.
1558 */
1559
1560 if (scn->athdiag_procfs_inited) {
1561 athdiag_procfs_remove();
1562 scn->athdiag_procfs_inited = false;
1563 }
1564
1565 hif_buffer_cleanup(hif_state);
1566
1567 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1568 struct HIF_CE_pipe_info *pipe_info;
1569
1570 pipe_info = &hif_state->pipe_info[pipe_num];
1571 if (pipe_info->ce_hdl) {
1572 ce_fini(pipe_info->ce_hdl);
1573 pipe_info->ce_hdl = NULL;
1574 pipe_info->buf_sz = 0;
1575 }
1576 }
1577
1578 if (hif_state->sleep_timer_init) {
1579 cdf_softirq_timer_cancel(&hif_state->sleep_timer);
1580 cdf_softirq_timer_free(&hif_state->sleep_timer);
1581 hif_state->sleep_timer_init = false;
1582 }
1583
1584 hif_state->started = false;
1585}
1586
1587#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
1588#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40
1589
1590
1591static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
1592 { 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
1593 { 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
1594 { 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
1595 { 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
1596 { 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
1597 { 1, ADRASTEA_DST_WR_INDEX_OFFSET},
1598 { 2, ADRASTEA_DST_WR_INDEX_OFFSET},
1599 { 7, ADRASTEA_DST_WR_INDEX_OFFSET},
1600 { 8, ADRASTEA_DST_WR_INDEX_OFFSET},
1601};
1602
1603
1604
1605/* CE_PCI TABLE */
1606/*
1607 * NOTE: the table below is out of date, though still a useful reference.
1608 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
1609 * mapping of HTC services to HIF pipes.
1610 */
1611/*
1612 * This authoritative table defines Copy Engine configuration and the mapping
1613 * of services/endpoints to CEs. A subset of this information is passed to
1614 * the Target during startup as a prerequisite to entering BMI phase.
1615 * See:
1616 * target_service_to_ce_map - Target-side mapping
1617 * hif_map_service_to_pipe - Host-side mapping
1618 * target_ce_config - Target-side configuration
1619 * host_ce_config - Host-side configuration
1620 ============================================================================
1621 Purpose | Service / Endpoint | CE | Dire | Xfer | Xfer
1622 | | | ctio | Size | Frequency
1623 | | | n | |
1624 ============================================================================
1625 tx | HTT_DATA (downlink) | CE 0 | h->t | medium - | very frequent
1626 descriptor | | | | O(100B) | and regular
1627 download | | | | |
1628 ----------------------------------------------------------------------------
1629 rx | HTT_DATA (uplink) | CE 1 | t->h | small - | frequent and
1630 indication | | | | O(10B) | regular
1631 upload | | | | |
1632 ----------------------------------------------------------------------------
1633 MSDU | DATA_BK (uplink) | CE 2 | t->h | large - | rare
1634 upload | | | | O(1000B) | (frequent
1635 e.g. noise | | | | | during IP1.0
1636 packets | | | | | testing)
1637 ----------------------------------------------------------------------------
1638 MSDU | DATA_BK (downlink) | CE 3 | h->t | large - | very rare
1639 download | | | | O(1000B) | (frequent
1640 e.g. | | | | | during IP1.0
1641 misdirecte | | | | | testing)
1642 d EAPOL | | | | |
1643 packets | | | | |
1644 ----------------------------------------------------------------------------
1645 n/a | DATA_BE, DATA_VI | CE 2 | t->h | | never(?)
1646 | DATA_VO (uplink) | | | |
1647 ----------------------------------------------------------------------------
1648 n/a | DATA_BE, DATA_VI | CE 3 | h->t | | never(?)
1649 | DATA_VO (downlink) | | | |
1650 ----------------------------------------------------------------------------
1651 WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
1652 | | | | O(100B) |
1653 ----------------------------------------------------------------------------
1654 WMI | WMI_CONTROL | CE 5 | h->t | medium - | infrequent
1655 messages | (downlink) | | | O(100B) |
1656 | | | | |
1657 ----------------------------------------------------------------------------
1658 n/a | HTC_CTRL_RSVD, | CE 1 | t->h | | never(?)
1659 | HTC_RAW_STREAMS | | | |
1660 | (uplink) | | | |
1661 ----------------------------------------------------------------------------
1662 n/a | HTC_CTRL_RSVD, | CE 0 | h->t | | never(?)
1663 | HTC_RAW_STREAMS | | | |
1664 | (downlink) | | | |
1665 ----------------------------------------------------------------------------
1666 diag | none (raw CE) | CE 7 | t<>h | 4 | Diag Window
1667 | | | | | infrequent
1668 ============================================================================
1669 */
1670
1671/*
1672 * Map from service/endpoint to Copy Engine.
1673 * This table is derived from the CE_PCI TABLE, above.
1674 * It is passed to the Target at startup for use by firmware.
1675 */
1676static struct service_to_pipe target_service_to_ce_map_wlan[] = {
1677 {
1678 WMI_DATA_VO_SVC,
1679 PIPEDIR_OUT, /* out = UL = host -> target */
1680 3,
1681 },
1682 {
1683 WMI_DATA_VO_SVC,
1684 PIPEDIR_IN, /* in = DL = target -> host */
1685 2,
1686 },
1687 {
1688 WMI_DATA_BK_SVC,
1689 PIPEDIR_OUT, /* out = UL = host -> target */
1690 3,
1691 },
1692 {
1693 WMI_DATA_BK_SVC,
1694 PIPEDIR_IN, /* in = DL = target -> host */
1695 2,
1696 },
1697 {
1698 WMI_DATA_BE_SVC,
1699 PIPEDIR_OUT, /* out = UL = host -> target */
1700 3,
1701 },
1702 {
1703 WMI_DATA_BE_SVC,
1704 PIPEDIR_IN, /* in = DL = target -> host */
1705 2,
1706 },
1707 {
1708 WMI_DATA_VI_SVC,
1709 PIPEDIR_OUT, /* out = UL = host -> target */
1710 3,
1711 },
1712 {
1713 WMI_DATA_VI_SVC,
1714 PIPEDIR_IN, /* in = DL = target -> host */
1715 2,
1716 },
1717 {
1718 WMI_CONTROL_SVC,
1719 PIPEDIR_OUT, /* out = UL = host -> target */
1720 3,
1721 },
1722 {
1723 WMI_CONTROL_SVC,
1724 PIPEDIR_IN, /* in = DL = target -> host */
1725 2,
1726 },
1727 {
1728 HTC_CTRL_RSVD_SVC,
1729 PIPEDIR_OUT, /* out = UL = host -> target */
1730 0, /* could be moved to 3 (share with WMI) */
1731 },
1732 {
1733 HTC_CTRL_RSVD_SVC,
1734 PIPEDIR_IN, /* in = DL = target -> host */
1735 2,
1736 },
1737 {
1738 HTC_RAW_STREAMS_SVC, /* not currently used */
1739 PIPEDIR_OUT, /* out = UL = host -> target */
1740 0,
1741 },
1742 {
1743 HTC_RAW_STREAMS_SVC, /* not currently used */
1744 PIPEDIR_IN, /* in = DL = target -> host */
1745 2,
1746 },
1747 {
1748 HTT_DATA_MSG_SVC,
1749 PIPEDIR_OUT, /* out = UL = host -> target */
1750 4,
1751 },
1752 {
1753 HTT_DATA_MSG_SVC,
1754 PIPEDIR_IN, /* in = DL = target -> host */
1755 1,
1756 },
1757 {
1758 WDI_IPA_TX_SVC,
1759 PIPEDIR_OUT, /* out = UL = host -> target */
1760 5,
1761 },
1762 /* (Additions here) */
1763
1764 { /* Must be last */
1765 0,
1766 0,
1767 0,
1768 },
1769};
1770
1771static struct service_to_pipe *target_service_to_ce_map =
1772 target_service_to_ce_map_wlan;
1773static int target_service_to_ce_map_sz = sizeof(target_service_to_ce_map_wlan);
1774
1775static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
1776static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);
1777
1778static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
1779 {WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */
1780 {WMI_DATA_VO_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
1781 {WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,}, /* out = UL = host -> target */
1782 {WMI_DATA_BK_SVC, PIPEDIR_IN, 1,}, /* in = DL = target -> host */
1783 {WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */
1784 {WMI_DATA_BE_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
1785 {WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */
1786 {WMI_DATA_VI_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
1787 {WMI_CONTROL_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */
1788 {WMI_CONTROL_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
1789 {HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
1790 {HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
1791 {HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
1792 {HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
1793 {HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,}, /* out = UL = host -> target */
1794 {HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,}, /* in = DL = target -> host */
1795 {0, 0, 0,}, /* Must be last */
1796};
1797
1798#ifdef HIF_PCI
1799/*
1800 * Send an interrupt to the device to wake up the Target CPU
1801 * so it has an opportunity to notice any changed state.
1802 */
1803void hif_wake_target_cpu(struct ol_softc *scn)
1804{
1805 CDF_STATUS rv;
1806 uint32_t core_ctrl;
1807
1808 rv = hif_diag_read_access(scn,
1809 SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1810 &core_ctrl);
1811 CDF_ASSERT(rv == CDF_STATUS_SUCCESS);
1812 /* A_INUM_FIRMWARE interrupt to Target CPU */
1813 core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1814
1815 rv = hif_diag_write_access(scn,
1816 SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1817 core_ctrl);
1818 CDF_ASSERT(rv == CDF_STATUS_SUCCESS);
1819}
1820#endif
1821
1822static void hif_sleep_entry(void *arg)
1823{
1824 struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
1825 struct ol_softc *scn = hif_state->scn;
1826 uint32_t idle_ms;
1827 if (scn->recovery)
1828 return;
1829
1830 cdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
1831 if (hif_state->verified_awake == false) {
1832 idle_ms = cdf_system_ticks_to_msecs(cdf_system_ticks()
1833 - hif_state->sleep_ticks);
1834 if (idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
1835 if (!cdf_atomic_read(&scn->link_suspended)) {
1836 soc_wake_reset(scn);
1837 hif_state->fake_sleep = false;
1838 }
1839 } else {
1840 cdf_softirq_timer_cancel(&hif_state->sleep_timer);
1841 cdf_softirq_timer_start(&hif_state->sleep_timer,
1842 HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1843 }
1844 } else {
1845 cdf_softirq_timer_cancel(&hif_state->sleep_timer);
1846 cdf_softirq_timer_start(&hif_state->sleep_timer,
1847 HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1848 }
1849 cdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
1850}
1851#define HIF_HIA_MAX_POLL_LOOP 1000000
1852#define HIF_HIA_POLLING_DELAY_MS 10
1853
1854#ifndef HIF_PCI
1855int hif_set_hia(struct ol_softc *scn)
1856{
1857 return 0;
1858}
1859#else
1860int hif_set_hia(struct ol_softc *scn)
1861{
1862 CDF_STATUS rv;
1863 uint32_t interconnect_targ_addr = 0;
1864 uint32_t pcie_state_targ_addr = 0;
1865 uint32_t pipe_cfg_targ_addr = 0;
1866 uint32_t svc_to_pipe_map = 0;
1867 uint32_t pcie_config_flags = 0;
1868 uint32_t flag2_value = 0;
1869 uint32_t flag2_targ_addr = 0;
1870#ifdef QCA_WIFI_3_0
1871 uint32_t host_interest_area = 0;
1872 uint8_t i;
1873#else
1874 uint32_t ealloc_value = 0;
1875 uint32_t ealloc_targ_addr = 0;
1876 uint8_t banks_switched = 1;
1877 uint32_t chip_id;
1878#endif
1879 uint32_t pipe_cfg_addr;
1880
1881 HIF_TRACE("%s: E", __func__);
1882
1883 if (IHELIUM_BU || ADRASTEA_BU)
1884 return CDF_STATUS_SUCCESS;
1885
1886#ifdef QCA_WIFI_3_0
1887 i = 0;
1888 while (i < HIF_HIA_MAX_POLL_LOOP) {
1889 host_interest_area = hif_read32_mb(scn->mem +
1890 A_SOC_CORE_SCRATCH_0_ADDRESS);
1891 if ((host_interest_area & 0x01) == 0) {
1892 cdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
1893 host_interest_area = 0;
1894 i++;
1895 if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0)) {
1896 HIF_ERROR("%s: poll timeout(%d)", __func__, i);
1897 }
1898 } else {
1899 host_interest_area &= (~0x01);
1900 hif_write32_mb(scn->mem + 0x113014, 0);
1901 break;
1902 }
1903 }
1904
1905 if (i >= HIF_HIA_MAX_POLL_LOOP) {
1906 HIF_ERROR("%s: hia polling timeout", __func__);
1907 return -EIO;
1908 }
1909
1910 if (host_interest_area == 0) {
1911 HIF_ERROR("%s: host_interest_area = 0", __func__);
1912 return -EIO;
1913 }
1914
1915 interconnect_targ_addr = host_interest_area +
1916 offsetof(struct host_interest_area_t,
1917 hi_interconnect_state);
1918
1919 flag2_targ_addr = host_interest_area +
1920 offsetof(struct host_interest_area_t, hi_option_flag2);
1921
1922#else
1923 interconnect_targ_addr = hif_hia_item_address(scn->target_type,
1924 offsetof(struct host_interest_s, hi_interconnect_state));
1925 ealloc_targ_addr = hif_hia_item_address(scn->target_type,
1926 offsetof(struct host_interest_s, hi_early_alloc));
1927 flag2_targ_addr = hif_hia_item_address(scn->target_type,
1928 offsetof(struct host_interest_s, hi_option_flag2));
1929#endif
1930 /* Supply Target-side CE configuration */
1931 rv = hif_diag_read_access(scn, interconnect_targ_addr,
1932 &pcie_state_targ_addr);
1933 if (rv != CDF_STATUS_SUCCESS) {
1934 HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
1935 __func__, interconnect_targ_addr, rv);
1936 goto done;
1937 }
1938 if (pcie_state_targ_addr == 0) {
1939 rv = CDF_STATUS_E_FAILURE;
1940 HIF_ERROR("%s: pcie state addr is 0", __func__);
1941 goto done;
1942 }
1943 pipe_cfg_addr = pcie_state_targ_addr +
1944 offsetof(struct pcie_state_s,
1945 pipe_cfg_addr);
1946 rv = hif_diag_read_access(scn,
1947 pipe_cfg_addr,
1948 &pipe_cfg_targ_addr);
1949 if (rv != CDF_STATUS_SUCCESS) {
1950 HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
1951 __func__, pipe_cfg_addr, rv);
1952 goto done;
1953 }
1954 if (pipe_cfg_targ_addr == 0) {
1955 rv = CDF_STATUS_E_FAILURE;
1956 HIF_ERROR("%s: pipe cfg addr is 0", __func__);
1957 goto done;
1958 }
1959
1960 rv = hif_diag_write_mem(scn, pipe_cfg_targ_addr,
1961 (uint8_t *) target_ce_config,
1962 target_ce_config_sz);
1963
1964 if (rv != CDF_STATUS_SUCCESS) {
1965 HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
1966 goto done;
1967 }
1968
1969 rv = hif_diag_read_access(scn,
1970 pcie_state_targ_addr +
1971 offsetof(struct pcie_state_s,
1972 svc_to_pipe_map),
1973 &svc_to_pipe_map);
1974 if (rv != CDF_STATUS_SUCCESS) {
1975 HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
1976 goto done;
1977 }
1978 if (svc_to_pipe_map == 0) {
1979 rv = CDF_STATUS_E_FAILURE;
1980 HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
1981 goto done;
1982 }
1983
1984 rv = hif_diag_write_mem(scn,
1985 svc_to_pipe_map,
1986 (uint8_t *) target_service_to_ce_map,
1987 target_service_to_ce_map_sz);
1988 if (rv != CDF_STATUS_SUCCESS) {
1989 HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
1990 goto done;
1991 }
1992
1993 rv = hif_diag_read_access(scn,
1994 pcie_state_targ_addr +
1995 offsetof(struct pcie_state_s,
1996 config_flags),
1997 &pcie_config_flags);
1998 if (rv != CDF_STATUS_SUCCESS) {
1999 HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
2000 goto done;
2001 }
2002#if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
2003 pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
2004#else
2005 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
2006#endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
2007 pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
2008#if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
2009 pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
2010#endif
2011 rv = hif_diag_write_mem(scn,
2012 pcie_state_targ_addr +
2013 offsetof(struct pcie_state_s,
2014 config_flags),
2015 (uint8_t *) &pcie_config_flags,
2016 sizeof(pcie_config_flags));
2017 if (rv != CDF_STATUS_SUCCESS) {
2018 HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
2019 goto done;
2020 }
2021
2022#ifndef QCA_WIFI_3_0
2023 /* configure early allocation */
2024 ealloc_targ_addr = hif_hia_item_address(scn->target_type,
2025 offsetof(
2026 struct host_interest_s,
2027 hi_early_alloc));
2028
2029 rv = hif_diag_read_access(scn, ealloc_targ_addr,
2030 &ealloc_value);
2031 if (rv != CDF_STATUS_SUCCESS) {
2032 HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
2033 goto done;
2034 }
2035
2036 /* 1 bank is switched to IRAM, except ROME 1.0 */
2037 ealloc_value |=
2038 ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
2039 HI_EARLY_ALLOC_MAGIC_MASK);
2040
2041 rv = hif_diag_read_access(scn,
2042 CHIP_ID_ADDRESS |
2043 RTC_SOC_BASE_ADDRESS, &chip_id);
2044 if (rv != CDF_STATUS_SUCCESS) {
2045 HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
2046 goto done;
2047 }
2048 if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
2049 scn->target_revision =
2050 CHIP_ID_REVISION_GET(chip_id);
2051 switch (CHIP_ID_REVISION_GET(chip_id)) {
2052 case 0x2: /* ROME 1.3 */
2053 /* 2 banks are switched to IRAM */
2054 banks_switched = 2;
2055 break;
2056 case 0x4: /* ROME 2.1 */
2057 case 0x5: /* ROME 2.2 */
2058 banks_switched = 6;
2059 break;
2060 case 0x8: /* ROME 3.0 */
2061 case 0x9: /* ROME 3.1 */
2062 case 0xA: /* ROME 3.2 */
2063 banks_switched = 9;
2064 break;
2065 case 0x0: /* ROME 1.0 */
2066 case 0x1: /* ROME 1.1 */
2067 default:
2068 /* 3 banks are switched to IRAM */
2069 banks_switched = 3;
2070 break;
2071 }
2072 }
2073
2074 ealloc_value |=
2075 ((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
2076 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
2077
2078 rv = hif_diag_write_access(scn,
2079 ealloc_targ_addr,
2080 ealloc_value);
2081 if (rv != CDF_STATUS_SUCCESS) {
2082 HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
2083 goto done;
2084 }
2085#endif
2086
2087 /* Tell Target to proceed with initialization */
2088 flag2_targ_addr = hif_hia_item_address(scn->target_type,
2089 offsetof(
2090 struct host_interest_s,
2091 hi_option_flag2));
2092
2093 rv = hif_diag_read_access(scn, flag2_targ_addr,
2094 &flag2_value);
2095 if (rv != CDF_STATUS_SUCCESS) {
2096 HIF_ERROR("%s: get option val (%d)", __func__, rv);
2097 goto done;
2098 }
2099
2100 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
2101 rv = hif_diag_write_access(scn, flag2_targ_addr,
2102 flag2_value);
2103 if (rv != CDF_STATUS_SUCCESS) {
2104 HIF_ERROR("%s: set option val (%d)", __func__, rv);
2105 goto done;
2106 }
2107
2108 hif_wake_target_cpu(scn);
2109
2110done:
2111
2112 return rv;
2113}
2114#endif
2115
2116/**
2117 * hif_wlan_enable(): call the platform driver to enable wlan
2118 *
2119 * This function passes the con_mode and CE configuration to
2120 * platform driver to enable wlan.
2121 *
2122 * Return: void
2123 */
2124static int hif_wlan_enable(void)
2125{
2126 struct icnss_wlan_enable_cfg cfg;
2127 enum icnss_driver_mode mode;
2128 uint32_t con_mode = cds_get_conparam();
2129
2130 cfg.num_ce_tgt_cfg = target_ce_config_sz /
2131 sizeof(struct CE_pipe_config);
2132 cfg.ce_tgt_cfg = (struct ce_tgt_pipe_cfg *)target_ce_config;
2133 cfg.num_ce_svc_pipe_cfg = target_service_to_ce_map_sz /
2134 sizeof(struct service_to_pipe);
2135 cfg.ce_svc_cfg = (struct ce_svc_pipe_cfg *)target_service_to_ce_map;
2136 cfg.num_shadow_reg_cfg = shadow_cfg_sz / sizeof(struct shadow_reg_cfg);
2137 cfg.shadow_reg_cfg = (struct icnss_shadow_reg_cfg *) target_shadow_reg_cfg;
2138
2139 switch (con_mode) {
2140 case CDF_FTM_MODE:
2141 mode = ICNSS_FTM;
2142 break;
2143 case CDF_EPPING_MODE:
2144 mode = ICNSS_EPPING;
2145 break;
2146 default:
2147 mode = ICNSS_MISSION;
2148 break;
2149 }
2150 return icnss_wlan_enable(&cfg, mode, QWLAN_VERSIONSTR);
2151}
2152
2153#if ((!defined(QCA_WIFI_3_0_IHELIUM) && !defined(QCA_WIFI_3_0_ADRASTEA)) || defined(CONFIG_ICNSS))
2154static inline void cnss_pcie_notify_q6(void)
2155{
2156 return;
2157}
2158#endif
2159
2160/*
2161 * Called from PCI layer whenever a new PCI device is probed.
2162 * Initializes per-device HIF state and notifies the main
2163 * driver that a new HIF device is present.
2164 */
2165int hif_config_ce(hif_handle_t hif_hdl)
2166{
2167 struct HIF_CE_state *hif_state;
2168 struct HIF_CE_pipe_info *pipe_info;
2169 int pipe_num;
2170#ifdef ADRASTEA_SHADOW_REGISTERS
2171 int i;
2172#endif
2173 CDF_STATUS rv = CDF_STATUS_SUCCESS;
2174 int ret;
2175 struct ol_softc *scn = hif_hdl;
2176 struct icnss_soc_info soc_info;
2177
2178 /* if epping is enabled we need to use the epping configuration. */
2179 if (WLAN_IS_EPPING_ENABLED(cds_get_conparam())) {
2180 if (WLAN_IS_EPPING_IRQ(cds_get_conparam()))
2181 host_ce_config = host_ce_config_wlan_epping_irq;
2182 else
2183 host_ce_config = host_ce_config_wlan_epping_poll;
2184 target_ce_config = target_ce_config_wlan_epping;
2185 target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
2186 target_service_to_ce_map =
2187 target_service_to_ce_map_wlan_epping;
2188 target_service_to_ce_map_sz =
2189 sizeof(target_service_to_ce_map_wlan_epping);
2190 }
2191
2192 ret = hif_wlan_enable();
2193
2194 if (ret) {
2195 HIF_ERROR("%s: hif_wlan_enable error = %d", __func__, ret);
2196 return CDF_STATUS_NOT_INITIALIZED;
2197 }
2198 if (IHELIUM_BU) {
2199 cnss_pcie_notify_q6();
2200 HIF_TRACE("%s: cnss_pcie_notify_q6 done, notice_send= %d",
2201 __func__, scn->notice_send);
2202 }
2203
2204 scn->notice_send = true;
2205
2206 cdf_mem_zero(&soc_info, sizeof(soc_info));
2207 ret = icnss_get_soc_info(&soc_info);
2208 if (ret < 0) {
2209 HIF_ERROR("%s: icnss_get_soc_info error = %d", __func__, ret);
2210 return CDF_STATUS_NOT_INITIALIZED;
2211 }
2212
2213 hif_state = (struct HIF_CE_state *)cdf_mem_malloc(sizeof(*hif_state));
2214 if (!hif_state) {
2215 return -ENOMEM;
2216 }
2217 cdf_mem_zero(hif_state, sizeof(*hif_state));
2218
2219 hif_state->scn = scn;
2220 scn->hif_hdl = hif_state;
2221 scn->mem = soc_info.v_addr;
2222 scn->mem_pa = soc_info.p_addr;
2223 scn->soc_version = soc_info.version;
2224
2225 cdf_spinlock_init(&hif_state->keep_awake_lock);
2226
2227 cdf_atomic_init(&hif_state->hif_thread_idle);
2228 cdf_atomic_inc(&hif_state->hif_thread_idle);
2229
2230 hif_state->keep_awake_count = 0;
2231
2232 hif_state->fake_sleep = false;
2233 hif_state->sleep_ticks = 0;
2234 cdf_softirq_timer_init(NULL, &hif_state->sleep_timer,
2235 hif_sleep_entry, (void *)hif_state,
2236 CDF_TIMER_TYPE_WAKE_APPS);
2237 hif_state->sleep_timer_init = true;
2238 hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
2239#ifdef HIF_PCI
2240#if CONFIG_ATH_PCIE_MAX_PERF || CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD
2241 /* Force AWAKE forever/till the driver is loaded */
2242 if (hif_target_sleep_state_adjust(scn, false, true) < 0)
2243 return -EACCES;
2244#endif
2245#endif
2246
 2247	/* During CE initialization */
2248 scn->ce_count = HOST_CE_COUNT;
2249 A_TARGET_ACCESS_LIKELY(scn);
2250 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2251 struct CE_attr *attr;
2252
2253 pipe_info = &hif_state->pipe_info[pipe_num];
2254 pipe_info->pipe_num = pipe_num;
2255 pipe_info->HIF_CE_state = hif_state;
2256 attr = &host_ce_config[pipe_num];
2257 pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
2258 CDF_ASSERT(pipe_info->ce_hdl != NULL);
2259 if (pipe_info->ce_hdl == NULL) {
2260 rv = CDF_STATUS_E_FAILURE;
2261 A_TARGET_ACCESS_UNLIKELY(scn);
2262 goto err;
2263 }
2264
2265 if (pipe_num == DIAG_CE_ID) {
2266 /* Reserve the ultimate CE for
2267 * Diagnostic Window support */
2268 hif_state->ce_diag =
2269 hif_state->pipe_info[scn->ce_count - 1].ce_hdl;
2270 continue;
2271 }
2272
2273 pipe_info->buf_sz = (cdf_size_t) (attr->src_sz_max);
2274 cdf_spinlock_init(&pipe_info->recv_bufs_needed_lock);
2275 if (attr->dest_nentries > 0) {
2276 atomic_set(&pipe_info->recv_bufs_needed,
2277 init_buffer_count(attr->dest_nentries - 1));
2278 } else {
2279 atomic_set(&pipe_info->recv_bufs_needed, 0);
2280 }
2281 ce_tasklet_init(hif_state, (1 << pipe_num));
2282 ce_register_irq(hif_state, (1 << pipe_num));
2283 scn->request_irq_done = true;
2284 }
2285
2286 if (athdiag_procfs_init(scn) != 0) {
2287 A_TARGET_ACCESS_UNLIKELY(scn);
2288 goto err;
2289 }
2290 scn->athdiag_procfs_inited = true;
2291
2292 /*
2293 * Initially, establish CE completion handlers for use with BMI.
2294 * These are overwritten with generic handlers after we exit BMI phase.
2295 */
2296 pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_TARG];
2297#ifdef HIF_PCI
2298 ce_send_cb_register(
2299 pipe_info->ce_hdl, hif_bmi_send_done, pipe_info, 0);
2300#ifndef BMI_RSP_POLLING
2301 pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_HOST];
2302 ce_recv_cb_register(
2303 pipe_info->ce_hdl, hif_bmi_recv_data, pipe_info, 0);
2304#endif
2305#endif
2306 HIF_INFO_MED("%s: ce_init done", __func__);
2307
2308 rv = hif_set_hia(scn);
2309
2310 HIF_INFO_MED("%s: hif_set_hia done", __func__);
2311
2312 A_TARGET_ACCESS_UNLIKELY(scn);
2313
2314 if (rv != CDF_STATUS_SUCCESS)
2315 goto err;
2316 else
2317 init_tasklet_workers();
2318
2319 HIF_TRACE("%s: X, ret = %d\n", __func__, rv);
2320
2321#ifdef ADRASTEA_SHADOW_REGISTERS
2322 HIF_ERROR("Using Shadow Registers instead of CE Registers\n");
2323 for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
2324 HIF_ERROR("%s Shadow Register%d is mapped to address %x\n",
2325 __func__, i,
2326 (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
2327 }
2328#endif
2329
2330
2331 return rv != CDF_STATUS_SUCCESS;
2332
2333err:
2334 /* Failure, so clean up */
2335 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2336 pipe_info = &hif_state->pipe_info[pipe_num];
2337 if (pipe_info->ce_hdl) {
2338 ce_unregister_irq(hif_state, (1 << pipe_num));
2339 scn->request_irq_done = false;
2340 ce_fini(pipe_info->ce_hdl);
2341 pipe_info->ce_hdl = NULL;
2342 pipe_info->buf_sz = 0;
2343 }
2344 }
2345 if (hif_state->sleep_timer_init) {
2346 cdf_softirq_timer_cancel(&hif_state->sleep_timer);
2347 cdf_softirq_timer_free(&hif_state->sleep_timer);
2348 hif_state->sleep_timer_init = false;
2349 }
2350 if (scn->hif_hdl) {
2351 scn->hif_hdl = NULL;
2352 cdf_mem_free(hif_state);
2353 }
2354 athdiag_procfs_remove();
2355 scn->athdiag_procfs_inited = false;
2356 HIF_TRACE("%s: X, ret = %d\n", __func__, rv);
2357 return CDF_STATUS_SUCCESS != CDF_STATUS_E_FAILURE;
2358}
2359
2360
2361
2362
2363
2364
2365#ifdef IPA_OFFLOAD
2366void hif_ipa_get_ce_resource(struct ol_softc *scn,
2367 uint32_t *ce_sr_base_paddr,
2368 uint32_t *ce_sr_ring_size,
2369 cdf_dma_addr_t *ce_reg_paddr)
2370{
2371 struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
2372 struct HIF_CE_pipe_info *pipe_info =
2373 &(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
2374 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
2375
2376 ce_ipa_get_resource(ce_hdl, ce_sr_base_paddr, ce_sr_ring_size,
2377 ce_reg_paddr);
2378 return;
2379}
2380#endif /* IPA_OFFLOAD */
2381
2382
2383#ifdef ADRASTEA_SHADOW_REGISTERS
2384
2385/*
2386 Current shadow register config
2387
2388 -----------------------------------------------------------
2389 Shadow Register | CE | src/dst write index
2390 -----------------------------------------------------------
2391 0 | 0 | src
2392 1 No Config - Doesn't point to anything
2393 2 No Config - Doesn't point to anything
2394 3 | 3 | src
2395 4 | 4 | src
2396 5 | 5 | src
2397 6 No Config - Doesn't point to anything
2398 7 | 7 | src
2399 8 No Config - Doesn't point to anything
2400 9 No Config - Doesn't point to anything
2401 10 No Config - Doesn't point to anything
2402 11 No Config - Doesn't point to anything
2403 -----------------------------------------------------------
2404 12 No Config - Doesn't point to anything
2405 13 | 1 | dst
2406 14 | 2 | dst
2407 15 No Config - Doesn't point to anything
2408 16 No Config - Doesn't point to anything
2409 17 No Config - Doesn't point to anything
2410 18 No Config - Doesn't point to anything
2411 19 | 7 | dst
2412 20 | 8 | dst
2413 21 No Config - Doesn't point to anything
2414 22 No Config - Doesn't point to anything
2415 23 No Config - Doesn't point to anything
2416 -----------------------------------------------------------
2417
2418
2419 ToDo - Move shadow register config to following in the future
2420 This helps free up a block of shadow registers towards the end.
2421 Can be used for other purposes
2422
2423 -----------------------------------------------------------
2424 Shadow Register | CE | src/dst write index
2425 -----------------------------------------------------------
2426 0 | 0 | src
2427 1 | 3 | src
2428 2 | 4 | src
2429 3 | 5 | src
2430 4 | 7 | src
2431 -----------------------------------------------------------
2432 5 | 1 | dst
2433 6 | 2 | dst
2434 7 | 7 | dst
2435 8 | 8 | dst
2436 -----------------------------------------------------------
2437 9 No Config - Doesn't point to anything
2438 12 No Config - Doesn't point to anything
2439 13 No Config - Doesn't point to anything
2440 14 No Config - Doesn't point to anything
2441 15 No Config - Doesn't point to anything
2442 16 No Config - Doesn't point to anything
2443 17 No Config - Doesn't point to anything
2444 18 No Config - Doesn't point to anything
2445 19 No Config - Doesn't point to anything
2446 20 No Config - Doesn't point to anything
2447 21 No Config - Doesn't point to anything
2448 22 No Config - Doesn't point to anything
2449 23 No Config - Doesn't point to anything
2450 -----------------------------------------------------------
2451*/
2452
2453u32 shadow_sr_wr_ind_addr(struct ol_softc *scn, u32 ctrl_addr)
2454{
2455 u32 addr = 0;
2456
2457 switch (COPY_ENGINE_ID(ctrl_addr)) {
2458 case 0:
2459 addr = SHADOW_VALUE0;
2460 break;
2461 case 3:
2462 addr = SHADOW_VALUE3;
2463 break;
2464 case 4:
2465 addr = SHADOW_VALUE4;
2466 break;
2467 case 5:
2468 addr = SHADOW_VALUE5;
2469 break;
2470 case 7:
2471 addr = SHADOW_VALUE7;
2472 break;
2473 default:
2474 printk("invalid CE ctrl_addr\n");
2475 CDF_ASSERT(0);
2476
2477 }
2478 return addr;
2479
2480}
2481
2482u32 shadow_dst_wr_ind_addr(struct ol_softc *scn, u32 ctrl_addr)
2483{
2484 u32 addr = 0;
2485
2486 switch (COPY_ENGINE_ID(ctrl_addr)) {
2487 case 1:
2488 addr = SHADOW_VALUE13;
2489 break;
2490 case 2:
2491 addr = SHADOW_VALUE14;
2492 break;
2493 case 7:
2494 addr = SHADOW_VALUE19;
2495 break;
2496 case 8:
2497 addr = SHADOW_VALUE20;
2498 break;
2499 default:
2500 printk("invalid CE ctrl_addr\n");
2501 CDF_ASSERT(0);
2502 }
2503
2504 return addr;
2505
2506}
2507#endif
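/*
 * Illustrative sketch only (not driver code): with ADRASTEA_SHADOW_REGISTERS
 * enabled, a CE write-index update is expected to land on the shadow slot
 * returned by the helpers above instead of the CE register itself.  A
 * minimal sketch, assuming hif_write32_mb() (used elsewhere in this file)
 * as the memory-mapped write helper; the function name is hypothetical and
 * the real update path lives in the CE register-access layer.
 *
 *	static void example_set_src_wr_index(struct ol_softc *scn,
 *					     u32 ctrl_addr, u32 write_index)
 *	{
 *		u32 shadow_addr = shadow_sr_wr_ind_addr(scn, ctrl_addr);
 *
 *		hif_write32_mb(scn->mem + shadow_addr, write_index);
 *	}
 */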
2508
 2509#if defined(FEATURE_LRO)
2510/**
2511 * ce_lro_flush_cb_register() - register the LRO flush
2512 * callback
2513 * @scn: HIF context
2514 * @handler: callback function
2515 * @data: opaque data pointer to be passed back
2516 *
2517 * Store the LRO flush callback provided
2518 *
2519 * Return: none
2520 */
 2521void ce_lro_flush_cb_register(struct ol_softc *scn,
2522 void (handler)(void *), void *data)
2523{
 2524	uint8_t ul, dl;
2525 int ul_polled, dl_polled;
2526
2527 CDF_ASSERT(scn != NULL);
2528
2529 if (CDF_STATUS_SUCCESS !=
2530 hif_map_service_to_pipe(scn, HTT_DATA_MSG_SVC,
2531 &ul, &dl, &ul_polled, &dl_polled)) {
 2532		printk("%s cannot map service to pipe\n", __func__);
2533 return;
2534 } else {
2535 struct CE_state *ce_state;
2536 ce_state = scn->ce_id_to_state[dl];
2537 ce_state->lro_flush_cb = handler;
2538 ce_state->lro_data = data;
2539 }
 2540}
 2541
2542/**
2543 * ce_lro_flush_cb_deregister() - deregister the LRO flush
2544 * callback
2545 * @scn: HIF context
2546 *
2547 * Remove the LRO flush callback
2548 *
2549 * Return: none
2550 */
2551void ce_lro_flush_cb_deregister(struct ol_softc *scn)
2552{
2553 uint8_t ul, dl;
2554 int ul_polled, dl_polled;
2555
2556 CDF_ASSERT(scn != NULL);
2557
2558 if (CDF_STATUS_SUCCESS !=
2559 hif_map_service_to_pipe(scn, HTT_DATA_MSG_SVC,
2560 &ul, &dl, &ul_polled, &dl_polled)) {
 2561		printk("%s cannot map service to pipe\n", __func__);
2562 return;
2563 } else {
2564 struct CE_state *ce_state;
2565 ce_state = scn->ce_id_to_state[dl];
2566 ce_state->lro_flush_cb = NULL;
2567 ce_state->lro_data = NULL;
2568 }
2569}
2570#endif
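/*
 * Illustrative sketch only (not driver code): how a receive-path component
 * could install and later remove an LRO flush handler with the two
 * functions above.  The handler, context, and wrapper names (including
 * example_flush_aggregations) are hypothetical.
 *
 *	static void example_lro_flush(void *ctx)
 *	{
 *		example_flush_aggregations(ctx);
 *	}
 *
 *	static void example_lro_attach(struct ol_softc *scn, void *rx_ctx)
 *	{
 *		ce_lro_flush_cb_register(scn, example_lro_flush, rx_ctx);
 *	}
 *
 *	static void example_lro_detach(struct ol_softc *scn)
 *	{
 *		ce_lro_flush_cb_deregister(scn);
 *	}
 */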
 2571
2572/**
2573 * hif_map_service_to_pipe() - returns the ce ids pertaining to
2574 * this service
2575 * @scn: ol_softc pointer.
2576 * @svc_id: Service ID for which the mapping is needed.
2577 * @ul_pipe: address of the container in which ul pipe is returned.
2578 * @dl_pipe: address of the container in which dl pipe is returned.
2579 * @ul_is_polled: address of the container in which a bool
2580 * indicating if the UL CE for this service
2581 * is polled is returned.
2582 * @dl_is_polled: address of the container in which a bool
2583 * indicating if the DL CE for this service
2584 * is polled is returned.
2585 *
2586 * Return: Indicates whether this operation was successful.
2587 */
2588
2589int hif_map_service_to_pipe(struct ol_softc *scn, uint16_t svc_id,
2590 uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
2591 int *dl_is_polled)
2592{
2593 int status = CDF_STATUS_SUCCESS;
2594
2595 unsigned int i;
2596 struct service_to_pipe element;
2597
2598 struct service_to_pipe *tgt_svc_map_to_use;
2599 size_t sz_tgt_svc_map_to_use;
2600
2601 if (WLAN_IS_EPPING_ENABLED(cds_get_conparam())) {
2602 tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
2603 sz_tgt_svc_map_to_use =
2604 sizeof(target_service_to_ce_map_wlan_epping);
2605 } else {
2606 tgt_svc_map_to_use = target_service_to_ce_map_wlan;
2607 sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_wlan);
2608 }
2609
2610 *dl_is_polled = 0; /* polling for received messages not supported */
2611
2612 for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
2613
2614 memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
2615 if (element.service_id == svc_id) {
2616
2617 if (element.pipedir == PIPEDIR_OUT)
2618 *ul_pipe = element.pipenum;
2619
2620 else if (element.pipedir == PIPEDIR_IN)
2621 *dl_pipe = element.pipenum;
2622 }
2623 }
2624
2625 *ul_is_polled =
2626 (host_ce_config[*ul_pipe].flags & CE_ATTR_DISABLE_INTR) != 0;
2627
2628 return status;
2629}
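/*
 * Illustrative sketch only (not driver code): typical use of
 * hif_map_service_to_pipe() by a caller that needs the CE pipes backing a
 * service (ce_lro_flush_cb_register() above does exactly this for
 * HTT_DATA_MSG_SVC).  Per the service-to-CE map above, WMI_CONTROL_SVC
 * resolves to CE 3 (uplink) and CE 2 (downlink).
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	if (hif_map_service_to_pipe(scn, WMI_CONTROL_SVC,
 *				    &ul_pipe, &dl_pipe,
 *				    &ul_polled, &dl_polled) !=
 *	    CDF_STATUS_SUCCESS) {
 *		HIF_ERROR("%s: no pipe mapping for WMI_CONTROL_SVC",
 *			  __func__);
 *		return;
 *	}
 */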