/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */
#include <osdep.h>
#include "a_types.h"
#include "athdefs.h"
#include "osapi_linux.h"
#include "targcfg.h"
#include "cdf_lock.h"
#include "cdf_status.h"
#include <cdf_atomic.h>         /* cdf_atomic_read */
#include <targaddrs.h>
#include <bmi_msg.h>
#include "hif_io32.h"
#include <hif.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#ifdef HIF_PCI
#include "ce_bmi.h"
#endif
#include "ce_api.h"
#include "cdf_trace.h"
#include "cds_api.h"
#ifdef CONFIG_CNSS
#include <net/cnss.h>
#endif
#include "epping_main.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#ifdef HIF_PCI
#include "icnss_stub.h"
#else
#include <soc/qcom/icnss.h>
#endif
#include "qwlan_version.h"
#include "cds_concurrency.h"

#define CE_POLL_TIMEOUT 10      /* ms */

/* Forward references */
static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix EV118783: poll to check whether a BMI response has arrived,
 * rather than waiting for an interrupt that may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC 1000


static int hif_post_recv_buffers(struct ol_softc *scn);
static void hif_config_rri_on_ddr(struct ol_softc *scn);

static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;
	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		cdf_softirq_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}

static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	CDF_ASSERT(0); /* n too large */
	return 0;
}

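/*
 * Behaviour sketch for roundup_pwr2() (illustrative values, not used by the
 * driver itself): inputs that are already powers of two come back unchanged,
 * everything else is rounded up to the next power of two, e.g.
 *
 *	roundup_pwr2(8)   == 8
 *	roundup_pwr2(9)   == 16
 *	roundup_pwr2(100) == 128
 *
 * ce_init() below relies on this so that nentries_mask = nentries - 1 can be
 * used as a simple ring-index mask.
 */
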
/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 *
 * This should be called during the initialization sequence, before
 * interrupts are enabled, so we don't have to worry about thread safety.
 */
struct CE_handle *ce_init(struct ol_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr)
{
	struct CE_state *CE_state;
	uint32_t ctrl_addr;
	unsigned int nentries;
	cdf_dma_addr_t base_addr;
	bool malloc_CE_state = false;
	bool malloc_src_ring = false;

	CDF_ASSERT(CE_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(CE_id);
	CE_state = scn->ce_id_to_state[CE_id];

	if (!CE_state) {
		CE_state =
			(struct CE_state *)cdf_mem_malloc(sizeof(*CE_state));
		if (!CE_state) {
			HIF_ERROR("%s: CE_state has no mem", __func__);
			return NULL;
		}
		malloc_CE_state = true;
		cdf_mem_zero(CE_state, sizeof(*CE_state));
		scn->ce_id_to_state[CE_id] = CE_state;
		cdf_spinlock_init(&CE_state->ce_index_lock);

		CE_state->id = CE_id;
		CE_state->ctrl_addr = ctrl_addr;
		CE_state->state = CE_RUNNING;
		CE_state->attr_flags = attr->flags;
	}
	CE_state->scn = scn;

	cdf_atomic_init(&CE_state->rx_pending);
	if (attr == NULL) {
		/* Already initialized; caller wants the handle */
		return (struct CE_handle *)CE_state;
	}

#ifdef ADRASTEA_SHADOW_REGISTERS
	HIF_ERROR("%s: Using Shadow Registers instead of CE Registers\n",
		  __func__);
#endif

	if (CE_state->src_sz_max)
		CDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
	else
		CE_state->src_sz_max = attr->src_sz_max;

	ce_init_ce_desc_event_log(CE_id,
			attr->src_nentries + attr->dest_nentries);

	/* source ring setup */
	nentries = attr->src_nentries;
	if (nentries) {
		struct CE_ring_state *src_ring;
		unsigned CE_nbytes;
		char *ptr;
		uint64_t dma_addr;
		nentries = roundup_pwr2(nentries);
		if (CE_state->src_ring) {
			CDF_ASSERT(CE_state->src_ring->nentries == nentries);
		} else {
			CE_nbytes = sizeof(struct CE_ring_state)
				    + (nentries * sizeof(void *));
			ptr = cdf_mem_malloc(CE_nbytes);
			if (!ptr) {
				/* cannot allocate src ring. If the
				 * CE_state is allocated locally free
				 * CE_State and return error.
				 */
				HIF_ERROR("%s: src ring has no mem", __func__);
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					scn->ce_id_to_state[CE_id] = NULL;
					cdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			} else {
				/* we can allocate src ring.
				 * Mark that the src ring is
				 * allocated locally
				 */
				malloc_src_ring = true;
			}
			cdf_mem_zero(ptr, CE_nbytes);

			src_ring = CE_state->src_ring =
				(struct CE_ring_state *)ptr;
			ptr += sizeof(struct CE_ring_state);
			src_ring->nentries = nentries;
			src_ring->nentries_mask = nentries - 1;
			A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
			src_ring->hw_index =
				CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
			src_ring->sw_index = src_ring->hw_index;
			src_ring->write_index =
				CE_SRC_RING_WRITE_IDX_GET(scn, ctrl_addr);
			A_TARGET_ACCESS_END_RET_PTR(scn);
			src_ring->low_water_mark_nentries = 0;
			src_ring->high_water_mark_nentries = nentries;
			src_ring->per_transfer_context = (void **)ptr;

			/* Legacy platforms that do not support cache
			 * coherent DMA are unsupported
			 */
			src_ring->base_addr_owner_space_unaligned =
				cdf_os_mem_alloc_consistent(scn->cdf_dev,
						(nentries *
						 sizeof(struct CE_src_desc) +
						 CE_DESC_RING_ALIGN),
						&base_addr, 0);
			if (src_ring->base_addr_owner_space_unaligned
					== NULL) {
				HIF_ERROR("%s: src ring has no DMA mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->base_addr_CE_space_unaligned = base_addr;

			if (src_ring->
			    base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN
							    - 1)) {
				src_ring->base_addr_CE_space =
					(src_ring->base_addr_CE_space_unaligned
					 + CE_DESC_RING_ALIGN -
					 1) & ~(CE_DESC_RING_ALIGN - 1);

				src_ring->base_addr_owner_space =
					(void
					 *)(((size_t) src_ring->
					     base_addr_owner_space_unaligned +
					     CE_DESC_RING_ALIGN -
					     1) & ~(CE_DESC_RING_ALIGN - 1));
			} else {
				src_ring->base_addr_CE_space =
					src_ring->base_addr_CE_space_unaligned;
				src_ring->base_addr_owner_space =
					src_ring->
					base_addr_owner_space_unaligned;
			}
			/*
			 * Also allocate a shadow src ring in
			 * regular mem to use for faster access.
			 */
			src_ring->shadow_base_unaligned =
				cdf_mem_malloc(nentries *
					       sizeof(struct CE_src_desc) +
					       CE_DESC_RING_ALIGN);
			if (src_ring->shadow_base_unaligned == NULL) {
				HIF_ERROR("%s: src ring no shadow_base mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->shadow_base = (struct CE_src_desc *)
				(((size_t) src_ring->shadow_base_unaligned +
				  CE_DESC_RING_ALIGN - 1) &
				 ~(CE_DESC_RING_ALIGN - 1));

			A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
			dma_addr = src_ring->base_addr_CE_space;
			CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
				 (uint32_t)(dma_addr & 0xFFFFFFFF));
#ifdef WLAN_ENABLE_QCA6180
			{
				uint32_t tmp;
				tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(
						scn, ctrl_addr);
				tmp &= ~0x1F;
				dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
				CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
					 ctrl_addr, (uint32_t)dma_addr);
			}
#endif
			CE_SRC_RING_SZ_SET(scn, ctrl_addr, nentries);
			CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
#ifdef BIG_ENDIAN_HOST
			/* Enable source ring byte swap for big endian host */
			CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
			CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
			CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
			A_TARGET_ACCESS_END_RET_PTR(scn);
		}
	}

	/* destination ring setup */
	nentries = attr->dest_nentries;
	if (nentries) {
		struct CE_ring_state *dest_ring;
		unsigned CE_nbytes;
		char *ptr;
		uint64_t dma_addr;

		nentries = roundup_pwr2(nentries);
		if (CE_state->dest_ring) {
			CDF_ASSERT(CE_state->dest_ring->nentries == nentries);
		} else {
			CE_nbytes = sizeof(struct CE_ring_state)
				    + (nentries * sizeof(void *));
			ptr = cdf_mem_malloc(CE_nbytes);
			if (!ptr) {
				/* cannot allocate dst ring. If the CE_state
				 * or src ring is allocated locally free
				 * CE_State and src ring and return error.
				 */
				HIF_ERROR("%s: dest ring has no mem",
					  __func__);
				if (malloc_src_ring) {
					cdf_mem_free(CE_state->src_ring);
					CE_state->src_ring = NULL;
					malloc_src_ring = false;
				}
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					scn->ce_id_to_state[CE_id] = NULL;
					cdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			}
			cdf_mem_zero(ptr, CE_nbytes);

			dest_ring = CE_state->dest_ring =
				(struct CE_ring_state *)ptr;
			ptr += sizeof(struct CE_ring_state);
			dest_ring->nentries = nentries;
			dest_ring->nentries_mask = nentries - 1;
			A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
			dest_ring->sw_index =
				CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
			dest_ring->write_index =
				CE_DEST_RING_WRITE_IDX_GET(scn, ctrl_addr);
			A_TARGET_ACCESS_END_RET_PTR(scn);
			dest_ring->low_water_mark_nentries = 0;
			dest_ring->high_water_mark_nentries = nentries;
			dest_ring->per_transfer_context = (void **)ptr;

			/* Legacy platforms that do not support cache
			 * coherent DMA are unsupported */
			dest_ring->base_addr_owner_space_unaligned =
				cdf_os_mem_alloc_consistent(scn->cdf_dev,
						(nentries *
						 sizeof(struct CE_dest_desc) +
						 CE_DESC_RING_ALIGN),
						&base_addr, 0);
			if (dest_ring->base_addr_owner_space_unaligned
					== NULL) {
				HIF_ERROR("%s: dest ring has no DMA mem",
					  __func__);
				goto error_no_dma_mem;
			}
			dest_ring->base_addr_CE_space_unaligned = base_addr;

			/* Correctly initialize memory to 0 to
			 * prevent garbage data crashing system
			 * when download firmware
			 */
			cdf_mem_zero(dest_ring->base_addr_owner_space_unaligned,
				     nentries * sizeof(struct CE_dest_desc) +
				     CE_DESC_RING_ALIGN);

			if (dest_ring->
			    base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN -
							    1)) {

				dest_ring->base_addr_CE_space =
					(dest_ring->
					 base_addr_CE_space_unaligned +
					 CE_DESC_RING_ALIGN -
					 1) & ~(CE_DESC_RING_ALIGN - 1);

				dest_ring->base_addr_owner_space =
					(void
					 *)(((size_t) dest_ring->
					     base_addr_owner_space_unaligned +
					     CE_DESC_RING_ALIGN -
					     1) & ~(CE_DESC_RING_ALIGN - 1));
			} else {
				dest_ring->base_addr_CE_space =
					dest_ring->base_addr_CE_space_unaligned;
				dest_ring->base_addr_owner_space =
					dest_ring->
					base_addr_owner_space_unaligned;
			}

			A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
			dma_addr = dest_ring->base_addr_CE_space;
			CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
				 (uint32_t)(dma_addr & 0xFFFFFFFF));
#ifdef WLAN_ENABLE_QCA6180
			{
				uint32_t tmp;
				tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn,
						ctrl_addr);
				tmp &= ~0x1F;
				dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
				CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
						ctrl_addr, (uint32_t)dma_addr);
			}
#endif
			CE_DEST_RING_SZ_SET(scn, ctrl_addr, nentries);
#ifdef BIG_ENDIAN_HOST
			/* Enable Dest ring byte swap for big endian host */
			CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
			CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
			CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
			A_TARGET_ACCESS_END_RET_PTR(scn);

			/* epping */
			/* poll timer */
			if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL)) {
				cdf_softirq_timer_init(scn->cdf_dev,
						       &CE_state->poll_timer,
						       ce_poll_timeout,
						       CE_state,
						       CDF_TIMER_TYPE_SW);
				CE_state->timer_inited = true;
				cdf_softirq_timer_mod(&CE_state->poll_timer,
						      CE_POLL_TIMEOUT);
			}
		}
	}

	/* Enable CE error interrupts */
	A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
	CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
	A_TARGET_ACCESS_END_RET_PTR(scn);

	return (struct CE_handle *)CE_state;

error_no_dma_mem:
	ce_fini((struct CE_handle *)CE_state);
	return NULL;
}

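/*
 * Illustrative only (not part of any call path in this file): a host-side
 * caller such as the pipe setup code would typically fill in a CE_attr block
 * and keep the returned opaque handle, roughly as sketched below. The
 * attribute values here are made up for the example; the real per-pipe values
 * come from host_ce_config (see ce_assignment.h).
 *
 *	struct CE_attr attr = {
 *		.flags         = 0,
 *		.src_nentries  = 32,	// ring sizes are rounded up to 2^n
 *		.src_sz_max    = 2048,
 *		.dest_nentries = 32,
 *	};
 *	struct CE_handle *ce_hdl = ce_init(scn, CE_id, &attr);
 *	if (ce_hdl == NULL)
 *		// allocation failed; ce_init() has already released whatever
 *		// it allocated, so the caller only reports the error.
 *		return CDF_STATUS_E_NOMEM;
 */
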
#ifdef WLAN_FEATURE_FASTPATH
/**
 * ce_h2t_tx_ce_cleanup() - placeholder for H2T CE cleanup
 * @ce_hdl: Copy engine handle
 *
 * No processing is required inside this function. Using an assert, it only
 * verifies that the TX CE has been processed completely.
 *
 * This is called while dismantling CE structures. No other thread
 * should be using these structures while dismantling is occurring,
 * therefore no locking is needed.
 *
 * Return: none
 */
void
ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct ol_softc *sc = ce_state->scn;
	uint32_t sw_index, write_index;

	if (sc->fastpath_mode_on && (ce_state->id == CE_HTT_H2T_MSG)) {
		HIF_INFO("%s %d Fastpath mode ON, Cleaning up HTT Tx CE\n",
			 __func__, __LINE__);
		sw_index = src_ring->sw_index;
		write_index = src_ring->sw_index;

		/* At this point Tx CE should be clean */
		cdf_assert_always(sw_index == write_index);
	}
}
#else
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
}
#endif /* WLAN_FEATURE_FASTPATH */

void ce_fini(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int CE_id = CE_state->id;
	struct ol_softc *scn = CE_state->scn;

	CE_state->state = CE_UNUSED;
	scn->ce_id_to_state[CE_id] = NULL;
	if (CE_state->src_ring) {
		/* Cleanup the HTT Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		if (CE_state->src_ring->shadow_base_unaligned)
			cdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
		if (CE_state->src_ring->base_addr_owner_space_unaligned)
			cdf_os_mem_free_consistent(scn->cdf_dev,
					   (CE_state->src_ring->nentries *
					    sizeof(struct CE_src_desc) +
					    CE_DESC_RING_ALIGN),
					   CE_state->src_ring->
					   base_addr_owner_space_unaligned,
					   CE_state->src_ring->
					   base_addr_CE_space, 0);
		cdf_mem_free(CE_state->src_ring);
	}
	if (CE_state->dest_ring) {
		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
			cdf_os_mem_free_consistent(scn->cdf_dev,
					   (CE_state->dest_ring->nentries *
					    sizeof(struct CE_dest_desc) +
					    CE_DESC_RING_ALIGN),
					   CE_state->dest_ring->
					   base_addr_owner_space_unaligned,
					   CE_state->dest_ring->
					   base_addr_CE_space, 0);
		cdf_mem_free(CE_state->dest_ring);

		/* epping */
		if (CE_state->timer_inited) {
			CE_state->timer_inited = false;
			cdf_softirq_timer_free(&CE_state->poll_timer);
		}
	}
	cdf_mem_free(CE_state);
}

void hif_detach_htc(struct ol_softc *scn)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;

	cdf_mem_zero(&hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
	cdf_mem_zero(&hif_state->msg_callbacks_current,
		     sizeof(hif_state->msg_callbacks_current));
}

/* Send the first nbytes bytes of the buffer */
CDF_STATUS
hif_send_head(struct ol_softc *scn,
	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
	      cdf_nbuf_t nbuf, unsigned int data_attr)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	int bytes = nbytes, nfrags = 0;
	struct ce_sendlist sendlist;
	int status, i = 0;
	unsigned int mux_id = 0;

	CDF_ASSERT(nbytes <= cdf_nbuf_len(nbuf));

	transfer_id =
		(mux_id & MUX_ID_MASK) |
		(transfer_id & TRANSACTION_ID_MASK);
	data_attr &= DESC_DATA_FLAG_MASK;
	/*
	 * The common case involves sending multiple fragments within a
	 * single download (the tx descriptor and the tx frame header).
	 * So, optimize for the case of multiple fragments by not even
	 * checking whether it's necessary to use a sendlist.
	 * The overhead of using a sendlist for a single buffer download
	 * is not a big deal, since it happens rarely (for WMI messages).
	 */
	ce_sendlist_init(&sendlist);
	do {
		uint32_t frag_paddr;
		int frag_bytes;

		frag_paddr = cdf_nbuf_get_frag_paddr_lo(nbuf, nfrags);
		frag_bytes = cdf_nbuf_get_frag_len(nbuf, nfrags);
		/*
		 * Clear the packet offset for all but the first CE desc.
		 */
		if (i++ > 0)
			data_attr &= ~CDF_CE_TX_PKT_OFFSET_BIT_M;

		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
					     frag_bytes >
					     bytes ? bytes : frag_bytes,
					     cdf_nbuf_get_frag_is_wordstream
					     (nbuf,
					      nfrags) ? 0 :
					     CE_SEND_FLAG_SWAP_DISABLE,
					     data_attr);
		if (status != CDF_STATUS_SUCCESS) {
			HIF_ERROR("%s: error, frag_num %d larger than limit",
				  __func__, nfrags);
			return status;
		}
		bytes -= frag_bytes;
		nfrags++;
	} while (bytes > 0);

	/* Make sure we have resources to handle this request */
	cdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	if (pipe_info->num_sends_allowed < nfrags) {
		cdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
		return CDF_STATUS_E_RESOURCES;
	}
	pipe_info->num_sends_allowed -= nfrags;
	cdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);

	if (cdf_unlikely(ce_hdl == NULL)) {
		HIF_ERROR("%s: error CE handle is null", __func__);
		return A_ERROR;
	}

	NBUF_UPDATE_TX_PKT_COUNT(nbuf, NBUF_TX_PKT_HIF);
	DPTRACE(cdf_dp_trace(nbuf, CDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
			     (uint8_t *)(cdf_nbuf_data(nbuf)),
			     sizeof(cdf_nbuf_data(nbuf))));
	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
	CDF_ASSERT(status == CDF_STATUS_SUCCESS);

	return status;
}

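/*
 * Usage sketch (illustrative only, not part of the driver): an upper layer
 * hands hif_send_head() a network buffer whose fragment physical addresses
 * are already set up; one CE source descriptor is queued per fragment:
 *
 *	CDF_STATUS status;
 *
 *	status = hif_send_head(scn, pipe, transfer_id,
 *			       cdf_nbuf_len(nbuf), nbuf, data_attr);
 *	if (status == CDF_STATUS_E_RESOURCES) {
 *		// fewer than nfrags send slots were free on this pipe;
 *		// treat it as a transient out-of-credits condition.
 *	}
 */
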
void hif_send_complete_check(struct ol_softc *scn, uint8_t pipe, int force)
{
	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance. If there seem to be plenty of
		 * resources left, then just wait, since checking involves
		 * reading a CE register, which is a relatively expensive
		 * operation.
		 */
		resources = hif_get_free_queue_number(scn, pipe);
		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config[pipe].src_nentries >> 1)) {
			return;
		}
	}
#ifdef ATH_11AC_TXCOMPACT
	ce_per_engine_servicereap(scn, pipe);
#else
	ce_per_engine_service(scn, pipe);
#endif
}

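/*
 * Worked example of the 50% threshold above (numbers are illustrative): with
 * host_ce_config[pipe].src_nentries == 32, the check
 * "resources > (32 >> 1)" skips the completion poll while more than 16 send
 * slots are still free, and only touches the CE register once the pipe is at
 * least half full of outstanding sends.
 */
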
uint16_t hif_get_free_queue_number(struct ol_softc *scn, uint8_t pipe)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	uint16_t rv;

	cdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	rv = pipe_info->num_sends_allowed;
	cdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
	return rv;
}

/* Called by lower (CE) layer when a send to Target completes. */
void
hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, cdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int sw_index, unsigned int hw_index,
		     unsigned int toeplitz_hash_result)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	unsigned int sw_idx = sw_index, hw_idx = hw_index;
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	do {
		/*
		 * The upper layer callback will be triggered
		 * when the last fragment is completed.
		 */
		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
			if (hif_state->scn->target_status
			    == OL_TRGET_STATUS_RESET)
				cdf_nbuf_free(transfer_context);
			else
				msg_callbacks->txCompletionHandler(
					msg_callbacks->Context,
					transfer_context, transfer_id,
					toeplitz_hash_result);
		}

		cdf_spin_lock(&pipe_info->completion_freeq_lock);
		pipe_info->num_sends_allowed++;
		cdf_spin_unlock(&pipe_info->completion_freeq_lock);
	} while (ce_completed_send_next(copyeng,
			&ce_context, &transfer_context,
			&CE_data, &nbytes, &transfer_id,
			&sw_idx, &hw_idx,
			&toeplitz_hash_result) == CDF_STATUS_SUCCESS);
}

/**
 * hif_ce_do_recv(): send message from copy engine to upper layers
 * @msg_callbacks: structure containing callback and callback context
 * @netbuf: skb containing message
 * @nbytes: number of bytes in the message
 * @pipe_info: used for the pipe_number info
 *
 * Checks the packet length, configures the length in the netbuf,
 * and calls the upper layer callback.
 *
 * Return: None
 */
static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
		cdf_nbuf_t netbuf, int nbytes,
		struct HIF_CE_pipe_info *pipe_info) {
	if (nbytes <= pipe_info->buf_sz) {
		cdf_nbuf_set_pktlen(netbuf, nbytes);
		msg_callbacks->
			rxCompletionHandler(msg_callbacks->Context,
					    netbuf, pipe_info->pipe_num);
	} else {
		HIF_ERROR("%s: Invalid Rx msg buf:%p nbytes:%d",
			  __func__, netbuf, nbytes);
		cdf_nbuf_free(netbuf);
	}
}

/* Called by lower (CE) layer when data is received from the Target. */
void
hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, cdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int flags)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct CE_state *ce_state = (struct CE_state *) copyeng;
	struct ol_softc *scn = hif_state->scn;
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	do {
		cdf_nbuf_unmap_single(scn->cdf_dev,
				      (cdf_nbuf_t) transfer_context,
				      CDF_DMA_FROM_DEVICE);

		atomic_inc(&pipe_info->recv_bufs_needed);
		hif_post_recv_buffers_for_pipe(pipe_info);
		if (hif_state->scn->target_status == OL_TRGET_STATUS_RESET)
			cdf_nbuf_free(transfer_context);
		else
			hif_ce_do_recv(msg_callbacks, transfer_context,
				       nbytes, pipe_info);

		/* Set the force_break flag if the number of receives
		 * reaches MAX_NUM_OF_RECEIVES */
		ce_state->receive_count++;
		if (cdf_unlikely(hif_max_num_receives_reached(
				ce_state->receive_count))) {
			ce_state->force_break = 1;
			break;
		}
	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
					&CE_data, &nbytes, &transfer_id,
					&flags) == CDF_STATUS_SUCCESS);
}


/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */

void
hif_post_init(struct ol_softc *scn, void *unused,
	      struct hif_msg_callbacks *callbacks)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	spin_lock_init(&pcie_access_log_lock);
#endif
	/* Save callbacks for later installation */
	cdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
		     sizeof(hif_state->msg_callbacks_pending));

}

int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
{
	struct CE_handle *ce_diag = hif_state->ce_diag;
	int pipe_num;
	struct ol_softc *scn = hif_state->scn;
	struct hif_msg_callbacks *hif_msg_callbacks =
		&hif_state->msg_callbacks_current;

	/* daemonize("hif_compl_thread"); */

	if (scn->ce_count == 0) {
		HIF_ERROR("%s: Invalid ce_count\n", __func__);
		return -EINVAL;
	}

	if (!hif_msg_callbacks ||
	    !hif_msg_callbacks->rxCompletionHandler ||
	    !hif_msg_callbacks->txCompletionHandler) {
		HIF_ERROR("%s: no completion handler registered", __func__);
		return -EFAULT;
	}

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr attr;
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl == ce_diag) {
			continue;       /* Handle Diagnostic CE specially */
		}
		attr = host_ce_config[pipe_num];
		if (attr.src_nentries) {
			/* pipe used to send to target */
			HIF_INFO_MED("%s: pipe_num:%d pipe_info:0x%p",
				     __func__, pipe_num, pipe_info);
			ce_send_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_send_done, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
			pipe_info->num_sends_allowed = attr.src_nentries - 1;
		}
		if (attr.dest_nentries) {
			/* pipe used to receive from target */
			ce_recv_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_recv_data, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
		}

		if (attr.src_nentries)
			cdf_spinlock_init(&pipe_info->completion_freeq_lock);
	}

	A_TARGET_ACCESS_UNLIKELY(scn);
	return 0;
}

/*
 * Install pending msg callbacks.
 *
 * TBDXXX: This hack is needed because upper layers install msg callbacks
 * for use with HTC before BMI is done; yet this HIF implementation
 * needs to continue to use BMI msg callbacks. Really, upper layers
 * should not register HTC callbacks until AFTER BMI phase.
 */
static void hif_msg_callbacks_install(struct ol_softc *scn)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;

	cdf_mem_copy(&hif_state->msg_callbacks_current,
		     &hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
}

void hif_claim_device(struct ol_softc *scn, void *claimedContext)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;

	hif_state->claimedContext = claimedContext;
}

void hif_release_device(struct ol_softc *scn)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;

	hif_state->claimedContext = NULL;
}

void
hif_get_default_pipe(struct ol_softc *scn, uint8_t *ULPipe, uint8_t *DLPipe)
{
	int ul_is_polled, dl_is_polled;

	(void)hif_map_service_to_pipe(scn, HTC_CTRL_RSVD_SVC,
		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
}

/**
 * hif_dump_pipe_debug_count() - Log error count
 * @scn: ol_softc pointer.
 *
 * Output the pipe error counts of each pipe to log file
 *
 * Return: N/A
 */
void hif_dump_pipe_debug_count(struct ol_softc *scn)
{
	struct HIF_CE_state *hif_state;
	int pipe_num;

	if (scn == NULL) {
		HIF_ERROR("%s scn is NULL", __func__);
		return;
	}
	hif_state = (struct HIF_CE_state *)scn->hif_hdl;
	if (hif_state == NULL) {
		HIF_ERROR("%s hif_state is NULL", __func__);
		return;
	}
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];

		if (pipe_info->nbuf_alloc_err_count > 0 ||
		    pipe_info->nbuf_dma_err_count > 0 ||
		    pipe_info->nbuf_ce_enqueue_err_count)
			HIF_ERROR(
				"%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_alloc_err_count,
				pipe_info->nbuf_dma_err_count,
				pipe_info->nbuf_ce_enqueue_err_count);
	}
}

static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	cdf_size_t buf_sz;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct ol_softc *scn = hif_state->scn;
	CDF_STATUS ret;
	uint32_t bufs_posted = 0;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return 0;
	}

	ce_hdl = pipe_info->ce_hdl;

	cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
		cdf_dma_addr_t CE_data;      /* CE space buffer address */
		cdf_nbuf_t nbuf;
		int status;

		atomic_dec(&pipe_info->recv_bufs_needed);
		cdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

		nbuf = cdf_nbuf_alloc(scn->cdf_dev, buf_sz, 0, 4, false);
		if (!nbuf) {
			cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
			pipe_info->nbuf_alloc_err_count++;
			cdf_spin_unlock_bh(
					&pipe_info->recv_bufs_needed_lock);
			HIF_ERROR(
				"%s buf alloc error [%d] needed %d, nbuf_alloc_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_alloc_err_count);
			atomic_inc(&pipe_info->recv_bufs_needed);
			return 1;
		}

		/*
		 * cdf_nbuf_peek_header(nbuf, &data, &unused);
		 * CE_data = dma_map_single(dev, data, buf_sz, );
		 * DMA_FROM_DEVICE);
		 */
		ret =
			cdf_nbuf_map_single(scn->cdf_dev, nbuf,
					    CDF_DMA_FROM_DEVICE);

		if (unlikely(ret != CDF_STATUS_SUCCESS)) {
			cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
			pipe_info->nbuf_dma_err_count++;
			cdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
			HIF_ERROR(
				"%s buf map error [%d] needed %d, nbuf_dma_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_dma_err_count);
			cdf_nbuf_free(nbuf);
			atomic_inc(&pipe_info->recv_bufs_needed);
			return 1;
		}

		CE_data = cdf_nbuf_get_frag_paddr_lo(nbuf, 0);

		cdf_os_mem_dma_sync_single_for_device(scn->cdf_dev, CE_data,
					       buf_sz, DMA_FROM_DEVICE);
		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
		CDF_ASSERT(status == CDF_STATUS_SUCCESS);
		if (status != EOK) {
			cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
			pipe_info->nbuf_ce_enqueue_err_count++;
			cdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
			HIF_ERROR(
				"%s buf enqueue error [%d] needed %d, nbuf_ce_enqueue_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_ce_enqueue_err_count);
			atomic_inc(&pipe_info->recv_bufs_needed);
			cdf_nbuf_free(nbuf);
			return 1;
		}

		cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
		bufs_posted++;
	}
	pipe_info->nbuf_alloc_err_count =
		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
	pipe_info->nbuf_dma_err_count =
		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
	pipe_info->nbuf_ce_enqueue_err_count =
		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
		pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;

	cdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

	return 0;
}

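/*
 * Worked example of the error-counter decay above (illustrative numbers):
 * if nbuf_alloc_err_count was 3 from earlier failed passes and this call
 * successfully posts bufs_posted = 5 buffers, the counter resets to 0; had
 * only 2 buffers been posted, it would drop to 1. The counters therefore
 * track only errors that have not yet been made up for by later successful
 * postings.
 */
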
/*
 * Try to post all desired receive buffers for all pipes.
 * Returns 0 if all desired buffers are posted,
 * non-zero if we were unable to completely
 * replenish receive buffers.
 */
static int hif_post_recv_buffers(struct ol_softc *scn)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
	int pipe_num, rv = 0;

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (hif_post_recv_buffers_for_pipe(pipe_info)) {
			rv = 1;
			goto done;
		}
	}

done:
	A_TARGET_ACCESS_UNLIKELY(scn);

	return rv;
}

CDF_STATUS hif_start(struct ol_softc *scn)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;

	hif_msg_callbacks_install(scn);

	if (hif_completion_thread_startup(hif_state))
		return CDF_STATUS_E_FAILURE;

	/* Post buffers once to start things off. */
	(void)hif_post_recv_buffers(scn);

	hif_state->started = true;

	return CDF_STATUS_SUCCESS;
}

#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_enable_fastpath() - record that fastpath mode is enabled
 * @hif_device: HIF context
 *
 * For use in data path
 *
 * Return: void
 */
void
hif_enable_fastpath(struct ol_softc *hif_device)
{
	HIF_INFO("Enabling fastpath mode\n");
	hif_device->fastpath_mode_on = 1;
}
#endif /* WLAN_FEATURE_FASTPATH */

void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct ol_softc *scn;
	struct CE_handle *ce_hdl;
	uint32_t buf_sz;
	struct HIF_CE_state *hif_state;
	cdf_nbuf_t netbuf;
	cdf_dma_addr_t CE_data;
	void *per_CE_context;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started) {
		return;
	}

	scn = hif_state->scn;
	ce_hdl = pipe_info->ce_hdl;

	if (scn->cdf_dev == NULL) {
		return;
	}
	while (ce_revoke_recv_next
		       (ce_hdl, &per_CE_context, (void **)&netbuf,
			&CE_data) == CDF_STATUS_SUCCESS) {
		cdf_nbuf_unmap_single(scn->cdf_dev, netbuf,
				      CDF_DMA_FROM_DEVICE);
		cdf_nbuf_free(netbuf);
	}
}

void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	struct HIF_CE_state *hif_state;
	cdf_nbuf_t netbuf;
	void *per_CE_context;
	cdf_dma_addr_t CE_data;
	unsigned int nbytes;
	unsigned int id;
	uint32_t buf_sz;
	uint32_t toeplitz_hash_result;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started) {
		return;
	}

	ce_hdl = pipe_info->ce_hdl;

	while (ce_cancel_send_next
		       (ce_hdl, &per_CE_context,
			(void **)&netbuf, &CE_data, &nbytes,
			&id, &toeplitz_hash_result) == CDF_STATUS_SUCCESS) {
		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
			/*
			 * Packets enqueued by htt_h2t_ver_req_msg() and
			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
			 * freed in htt_htc_misc_pkt_pool_free() in
			 * wlantl_close(), so do not free them here again
			 * by checking whether it's the endpoint
			 * which they are queued in.
			 */
			if (id == hif_state->scn->htc_endpoint)
				return;
			/* Indicate the completion to higher
			 * layer to free the buffer */
			hif_state->msg_callbacks_current.
			txCompletionHandler(hif_state->
					    msg_callbacks_current.Context,
					    netbuf, id, toeplitz_hash_result);
		}
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
{
	int pipe_num;

	for (pipe_num = 0; pipe_num < hif_state->scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		hif_recv_buffer_cleanup_on_pipe(pipe_info);
		hif_send_buffer_cleanup_on_pipe(pipe_info);
	}
}

void hif_flush_surprise_remove(struct ol_softc *scn)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
	hif_buffer_cleanup(hif_state);
}

void hif_stop(struct ol_softc *scn)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
	int pipe_num;

	scn->hif_init_done = false;

	/*
	 * At this point, asynchronous threads are stopped,
	 * The Target should not DMA nor interrupt, Host code may
	 * not initiate anything more. So we just need to clean
	 * up Host-side state.
	 */

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	hif_buffer_cleanup(hif_state);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}

	if (hif_state->sleep_timer_init) {
		cdf_softirq_timer_cancel(&hif_state->sleep_timer);
		cdf_softirq_timer_free(&hif_state->sleep_timer);
		hif_state->sleep_timer_init = false;
	}

	hif_state->started = false;
}

#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40


static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};


/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
 * mapping of HTC services to HIF pipes.
 */
/*
 * This authoritative table defines Copy Engine configuration and the mapping
 * of services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
 ============================================================================
 Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
            |                      |      | ctio | Size     | Frequency
            |                      |      | n    |          |
 ============================================================================
 tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
 descriptor |                      |      |      | O(100B)  | and regular
 download   |                      |      |      |          |
 ----------------------------------------------------------------------------
 rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
 indication |                      |      |      | O(10B)   | regular
 upload     |                      |      |      |          |
 ----------------------------------------------------------------------------
 MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
 upload     |                      |      |      | O(1000B) | (frequent
 e.g. noise |                      |      |      |          | during IP1.0
 packets    |                      |      |      |          | testing)
 ----------------------------------------------------------------------------
 MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
 download   |                      |      |      | O(1000B) | (frequent
 e.g.       |                      |      |      |          | during IP1.0
 misdirecte |                      |      |      |          | testing)
 d EAPOL    |                      |      |      |          |
 packets    |                      |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
            | DATA_VO (uplink)     |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
            | DATA_VO (downlink)   |      |      |          |
 ----------------------------------------------------------------------------
 WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
            |                      |      |      | O(100B)  |
 ----------------------------------------------------------------------------
 WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
 messages   | (downlink)           |      |      | O(100B)  |
            |                      |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
            | HTC_RAW_STREAMS      |      |      |          |
            | (uplink)             |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
            | HTC_RAW_STREAMS      |      |      |          |
            | (downlink)           |      |      |          |
 ----------------------------------------------------------------------------
 diag       | none (raw CE)        | CE 7 | t<>h |    4     | Diag Window
            |                      |      |      |          | infrequent
 ============================================================================
 */

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		5,
	},
	/* (Additions here) */

	{                       /* Must be last */
		0,
		0,
		0,
	},
};

static struct service_to_pipe *target_service_to_ce_map =
	target_service_to_ce_map_wlan;
static int target_service_to_ce_map_sz = sizeof(target_service_to_ce_map_wlan);

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

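/*
 * Reading the table above (illustrative): a lookup such as
 *
 *	hif_map_service_to_pipe(scn, WMI_CONTROL_SVC, &ul_pipe, &dl_pipe,
 *				&ul_is_polled, &dl_is_polled);
 *
 * would report ul_pipe = 3 (the PIPEDIR_OUT entry) and dl_pipe = 2 (the
 * PIPEDIR_IN entry), i.e. WMI commands go out over CE 3 and WMI events come
 * back on CE 2. The lookup routine itself lives elsewhere in HIF; this
 * sketch only restates what the table encodes.
 */
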
static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
	{0, 0, 0,},             /* Must be last */
};

#ifdef HIF_PCI
/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
void hif_wake_target_cpu(struct ol_softc *scn)
{
	CDF_STATUS rv;
	uint32_t core_ctrl;

	rv = hif_diag_read_access(scn,
				  SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
				  &core_ctrl);
	CDF_ASSERT(rv == CDF_STATUS_SUCCESS);
	/* A_INUM_FIRMWARE interrupt to Target CPU */
	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

	rv = hif_diag_write_access(scn,
				   SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
				   core_ctrl);
	CDF_ASSERT(rv == CDF_STATUS_SUCCESS);
}
#endif

static void hif_sleep_entry(void *arg)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
	struct ol_softc *scn = hif_state->scn;
	uint32_t idle_ms;

	if (scn->recovery)
		return;

	if (cds_is_driver_unloading())
		return;

	cdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
	if (hif_state->verified_awake == false) {
		idle_ms = cdf_system_ticks_to_msecs(cdf_system_ticks()
						    - hif_state->sleep_ticks);
		if (idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
			if (!cdf_atomic_read(&scn->link_suspended)) {
				soc_wake_reset(scn);
				hif_state->fake_sleep = false;
			}
		} else {
			cdf_softirq_timer_cancel(&hif_state->sleep_timer);
			cdf_softirq_timer_start(&hif_state->sleep_timer,
				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
		}
	} else {
		cdf_softirq_timer_cancel(&hif_state->sleep_timer);
		cdf_softirq_timer_start(&hif_state->sleep_timer,
			HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
	}
	cdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
}

#define HIF_HIA_MAX_POLL_LOOP    1000000
#define HIF_HIA_POLLING_DELAY_MS 10

1524#ifndef HIF_PCI
1525int hif_set_hia(struct ol_softc *scn)
1526{
1527 return 0;
1528}
1529#else
1530int hif_set_hia(struct ol_softc *scn)
1531{
1532 CDF_STATUS rv;
1533 uint32_t interconnect_targ_addr = 0;
1534 uint32_t pcie_state_targ_addr = 0;
1535 uint32_t pipe_cfg_targ_addr = 0;
1536 uint32_t svc_to_pipe_map = 0;
1537 uint32_t pcie_config_flags = 0;
1538 uint32_t flag2_value = 0;
1539 uint32_t flag2_targ_addr = 0;
1540#ifdef QCA_WIFI_3_0
1541 uint32_t host_interest_area = 0;
1542 uint8_t i;
1543#else
1544 uint32_t ealloc_value = 0;
1545 uint32_t ealloc_targ_addr = 0;
1546 uint8_t banks_switched = 1;
1547 uint32_t chip_id;
1548#endif
1549 uint32_t pipe_cfg_addr;
1550
1551 HIF_TRACE("%s: E", __func__);
1552
Houston Hoffman06bc4f52015-12-16 18:43:34 -08001553 if (ADRASTEA_BU)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001554 return CDF_STATUS_SUCCESS;
1555
1556#ifdef QCA_WIFI_3_0
1557 i = 0;
1558 while (i < HIF_HIA_MAX_POLL_LOOP) {
1559 host_interest_area = hif_read32_mb(scn->mem +
1560 A_SOC_CORE_SCRATCH_0_ADDRESS);
1561 if ((host_interest_area & 0x01) == 0) {
1562 cdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
1563 host_interest_area = 0;
1564 i++;
1565 if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0)) {
1566 HIF_ERROR("%s: poll timeout(%d)", __func__, i);
1567 }
1568 } else {
1569 host_interest_area &= (~0x01);
1570 hif_write32_mb(scn->mem + 0x113014, 0);
1571 break;
1572 }
1573 }
1574
1575 if (i >= HIF_HIA_MAX_POLL_LOOP) {
1576 HIF_ERROR("%s: hia polling timeout", __func__);
1577 return -EIO;
1578 }
1579
1580 if (host_interest_area == 0) {
1581 HIF_ERROR("%s: host_interest_area = 0", __func__);
1582 return -EIO;
1583 }
1584
1585 interconnect_targ_addr = host_interest_area +
1586 offsetof(struct host_interest_area_t,
1587 hi_interconnect_state);
1588
1589 flag2_targ_addr = host_interest_area +
1590 offsetof(struct host_interest_area_t, hi_option_flag2);
1591
1592#else
1593 interconnect_targ_addr = hif_hia_item_address(scn->target_type,
1594 offsetof(struct host_interest_s, hi_interconnect_state));
1595 ealloc_targ_addr = hif_hia_item_address(scn->target_type,
1596 offsetof(struct host_interest_s, hi_early_alloc));
1597 flag2_targ_addr = hif_hia_item_address(scn->target_type,
1598 offsetof(struct host_interest_s, hi_option_flag2));
1599#endif
1600 /* Supply Target-side CE configuration */
1601 rv = hif_diag_read_access(scn, interconnect_targ_addr,
1602 &pcie_state_targ_addr);
1603 if (rv != CDF_STATUS_SUCCESS) {
1604 HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
1605 __func__, interconnect_targ_addr, rv);
1606 goto done;
1607 }
1608 if (pcie_state_targ_addr == 0) {
1609 rv = CDF_STATUS_E_FAILURE;
1610 HIF_ERROR("%s: pcie state addr is 0", __func__);
1611 goto done;
1612 }
1613 pipe_cfg_addr = pcie_state_targ_addr +
1614 offsetof(struct pcie_state_s,
1615 pipe_cfg_addr);
1616 rv = hif_diag_read_access(scn,
1617 pipe_cfg_addr,
1618 &pipe_cfg_targ_addr);
1619 if (rv != CDF_STATUS_SUCCESS) {
1620 HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
1621 __func__, pipe_cfg_addr, rv);
1622 goto done;
1623 }
1624 if (pipe_cfg_targ_addr == 0) {
1625 rv = CDF_STATUS_E_FAILURE;
1626 HIF_ERROR("%s: pipe cfg addr is 0", __func__);
1627 goto done;
1628 }
1629
1630 rv = hif_diag_write_mem(scn, pipe_cfg_targ_addr,
1631 (uint8_t *) target_ce_config,
1632 target_ce_config_sz);
1633
1634 if (rv != CDF_STATUS_SUCCESS) {
1635 HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
1636 goto done;
1637 }
1638
1639 rv = hif_diag_read_access(scn,
1640 pcie_state_targ_addr +
1641 offsetof(struct pcie_state_s,
1642 svc_to_pipe_map),
1643 &svc_to_pipe_map);
1644 if (rv != CDF_STATUS_SUCCESS) {
1645 HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
1646 goto done;
1647 }
1648 if (svc_to_pipe_map == 0) {
1649 rv = CDF_STATUS_E_FAILURE;
1650 HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
1651 goto done;
1652 }
1653
1654 rv = hif_diag_write_mem(scn,
1655 svc_to_pipe_map,
1656 (uint8_t *) target_service_to_ce_map,
1657 target_service_to_ce_map_sz);
1658 if (rv != CDF_STATUS_SUCCESS) {
1659 HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
1660 goto done;
1661 }
1662
1663 rv = hif_diag_read_access(scn,
1664 pcie_state_targ_addr +
1665 offsetof(struct pcie_state_s,
1666 config_flags),
1667 &pcie_config_flags);
1668 if (rv != CDF_STATUS_SUCCESS) {
1669 HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
1670 goto done;
1671 }
1672#if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
1673 pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
1674#else
1675 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1676#endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
1677 pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
1678#if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
1679 pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
1680#endif
1681 rv = hif_diag_write_mem(scn,
1682 pcie_state_targ_addr +
1683 offsetof(struct pcie_state_s,
1684 config_flags),
1685 (uint8_t *) &pcie_config_flags,
1686 sizeof(pcie_config_flags));
1687 if (rv != CDF_STATUS_SUCCESS) {
1688 HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
1689 goto done;
1690 }
1691
1692#ifndef QCA_WIFI_3_0
1693 /* configure early allocation */
1694 ealloc_targ_addr = hif_hia_item_address(scn->target_type,
1695 offsetof(
1696 struct host_interest_s,
1697 hi_early_alloc));
1698
1699 rv = hif_diag_read_access(scn, ealloc_targ_addr,
1700 &ealloc_value);
1701 if (rv != CDF_STATUS_SUCCESS) {
1702 HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
1703 goto done;
1704 }
1705
1706	/* Set the early-alloc magic; the number of IRAM banks switched depends on the chip revision (see below) */
1707 ealloc_value |=
1708 ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1709 HI_EARLY_ALLOC_MAGIC_MASK);
1710
1711 rv = hif_diag_read_access(scn,
1712 CHIP_ID_ADDRESS |
1713 RTC_SOC_BASE_ADDRESS, &chip_id);
1714 if (rv != CDF_STATUS_SUCCESS) {
1715 HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
1716 goto done;
1717 }
1718 if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
1719 scn->target_revision =
1720 CHIP_ID_REVISION_GET(chip_id);
1721 switch (CHIP_ID_REVISION_GET(chip_id)) {
1722 case 0x2: /* ROME 1.3 */
1723 /* 2 banks are switched to IRAM */
1724 banks_switched = 2;
1725 break;
1726 case 0x4: /* ROME 2.1 */
1727 case 0x5: /* ROME 2.2 */
1728 banks_switched = 6;
1729 break;
1730 case 0x8: /* ROME 3.0 */
1731 case 0x9: /* ROME 3.1 */
1732 case 0xA: /* ROME 3.2 */
1733 banks_switched = 9;
1734 break;
1735 case 0x0: /* ROME 1.0 */
1736 case 0x1: /* ROME 1.1 */
1737 default:
1738 /* 3 banks are switched to IRAM */
1739 banks_switched = 3;
1740 break;
1741 }
1742 }
1743
1744 ealloc_value |=
1745 ((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
1746 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
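	/*
	 * ealloc_value now carries the early-alloc magic in
	 * HI_EARLY_ALLOC_MAGIC_MASK and the number of IRAM banks to switch
	 * in HI_EARLY_ALLOC_IRAM_BANKS_MASK; it is written back to the
	 * target below so firmware picks up the bank-switch configuration.
	 */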
1747
1748 rv = hif_diag_write_access(scn,
1749 ealloc_targ_addr,
1750 ealloc_value);
1751 if (rv != CDF_STATUS_SUCCESS) {
1752 HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
1753 goto done;
1754 }
1755#endif
1756
1757 /* Tell Target to proceed with initialization */
1758 flag2_targ_addr = hif_hia_item_address(scn->target_type,
1759 offsetof(
1760 struct host_interest_s,
1761 hi_option_flag2));
1762
1763 rv = hif_diag_read_access(scn, flag2_targ_addr,
1764 &flag2_value);
1765 if (rv != CDF_STATUS_SUCCESS) {
1766 HIF_ERROR("%s: get option val (%d)", __func__, rv);
1767 goto done;
1768 }
1769
1770 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1771 rv = hif_diag_write_access(scn, flag2_targ_addr,
1772 flag2_value);
1773 if (rv != CDF_STATUS_SUCCESS) {
1774 HIF_ERROR("%s: set option val (%d)", __func__, rv);
1775 goto done;
1776 }
1777
1778 hif_wake_target_cpu(scn);
1779
1780done:
1781
1782 return rv;
1783}
1784#endif
1785
1786/**
1787 * hif_wlan_enable(): call the platform driver to enable wlan
1788 *
1789 * This function passes the con_mode and CE configuration to the
1790 * platform driver to enable wlan.
1791 *
1792 * Return: 0 on success, error code otherwise
1793 */
1794static int hif_wlan_enable(void)
1795{
1796 struct icnss_wlan_enable_cfg cfg;
1797 enum icnss_driver_mode mode;
1798 uint32_t con_mode = cds_get_conparam();
1799
1800 cfg.num_ce_tgt_cfg = target_ce_config_sz /
1801 sizeof(struct CE_pipe_config);
1802 cfg.ce_tgt_cfg = (struct ce_tgt_pipe_cfg *)target_ce_config;
1803 cfg.num_ce_svc_pipe_cfg = target_service_to_ce_map_sz /
1804 sizeof(struct service_to_pipe);
1805 cfg.ce_svc_cfg = (struct ce_svc_pipe_cfg *)target_service_to_ce_map;
1806 cfg.num_shadow_reg_cfg = shadow_cfg_sz / sizeof(struct shadow_reg_cfg);
1807 cfg.shadow_reg_cfg = (struct icnss_shadow_reg_cfg *) target_shadow_reg_cfg;
1808
Peng Xu7b962532015-10-02 17:17:03 -07001809 if (CDF_GLOBAL_FTM_MODE == con_mode)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001810 mode = ICNSS_FTM;
Peng Xu7b962532015-10-02 17:17:03 -07001811 else if (WLAN_IS_EPPING_ENABLED(cds_get_conparam()))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001812 mode = ICNSS_EPPING;
Peng Xu7b962532015-10-02 17:17:03 -07001813 else
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001814 mode = ICNSS_MISSION;
Peng Xu7b962532015-10-02 17:17:03 -07001815
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001816 return icnss_wlan_enable(&cfg, mode, QWLAN_VERSIONSTR);
1817}
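
/*
 * Note: the cfg handed to icnss_wlan_enable() above is built from the same
 * target_ce_config/target_service_to_ce_map/target_shadow_reg_cfg tables
 * used elsewhere in this file when the copy engines are set up (see
 * hif_config_ce() and hif_set_hia()), so the platform driver and the host
 * operate on one CE layout.
 */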
1818
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001819/*
1820 * Called from PCI layer whenever a new PCI device is probed.
1821 * Initializes per-device HIF state and notifies the main
1822 * driver that a new HIF device is present.
1823 */
1824int hif_config_ce(hif_handle_t hif_hdl)
1825{
1826 struct HIF_CE_state *hif_state;
1827 struct HIF_CE_pipe_info *pipe_info;
1828 int pipe_num;
1829#ifdef ADRASTEA_SHADOW_REGISTERS
1830 int i;
1831#endif
1832 CDF_STATUS rv = CDF_STATUS_SUCCESS;
1833 int ret;
1834 struct ol_softc *scn = hif_hdl;
1835 struct icnss_soc_info soc_info;
1836
1837 /* if epping is enabled we need to use the epping configuration. */
1838 if (WLAN_IS_EPPING_ENABLED(cds_get_conparam())) {
1839 if (WLAN_IS_EPPING_IRQ(cds_get_conparam()))
1840 host_ce_config = host_ce_config_wlan_epping_irq;
1841 else
1842 host_ce_config = host_ce_config_wlan_epping_poll;
1843 target_ce_config = target_ce_config_wlan_epping;
1844 target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
1845 target_service_to_ce_map =
1846 target_service_to_ce_map_wlan_epping;
1847 target_service_to_ce_map_sz =
1848 sizeof(target_service_to_ce_map_wlan_epping);
1849 }
1850
1851 ret = hif_wlan_enable();
1852
1853 if (ret) {
1854 HIF_ERROR("%s: hif_wlan_enable error = %d", __func__, ret);
1855 return CDF_STATUS_NOT_INITIALIZED;
1856 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001857
1858 scn->notice_send = true;
1859
1860 cdf_mem_zero(&soc_info, sizeof(soc_info));
1861 ret = icnss_get_soc_info(&soc_info);
1862 if (ret < 0) {
1863 HIF_ERROR("%s: icnss_get_soc_info error = %d", __func__, ret);
1864 return CDF_STATUS_NOT_INITIALIZED;
1865 }
1866
1867 hif_state = (struct HIF_CE_state *)cdf_mem_malloc(sizeof(*hif_state));
1868 if (!hif_state) {
1869 return -ENOMEM;
1870 }
1871 cdf_mem_zero(hif_state, sizeof(*hif_state));
1872
1873 hif_state->scn = scn;
1874 scn->hif_hdl = hif_state;
1875 scn->mem = soc_info.v_addr;
1876 scn->mem_pa = soc_info.p_addr;
1877 scn->soc_version = soc_info.version;
1878
1879 cdf_spinlock_init(&hif_state->keep_awake_lock);
1880
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001881 hif_state->keep_awake_count = 0;
1882
1883 hif_state->fake_sleep = false;
1884 hif_state->sleep_ticks = 0;
1885 cdf_softirq_timer_init(NULL, &hif_state->sleep_timer,
1886 hif_sleep_entry, (void *)hif_state,
1887 CDF_TIMER_TYPE_WAKE_APPS);
1888 hif_state->sleep_timer_init = true;
1889 hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
1890#ifdef HIF_PCI
1891#if CONFIG_ATH_PCIE_MAX_PERF || CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD
1892 /* Force AWAKE forever/till the driver is loaded */
1893 if (hif_target_sleep_state_adjust(scn, false, true) < 0)
1894 return -EACCES;
1895#endif
1896#endif
1897
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08001898 hif_config_rri_on_ddr(scn);
1899
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001900	/* During CE initialization */
1901 scn->ce_count = HOST_CE_COUNT;
1902 A_TARGET_ACCESS_LIKELY(scn);
1903 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1904 struct CE_attr *attr;
1905
1906 pipe_info = &hif_state->pipe_info[pipe_num];
1907 pipe_info->pipe_num = pipe_num;
1908 pipe_info->HIF_CE_state = hif_state;
1909 attr = &host_ce_config[pipe_num];
1910 pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
1911 CDF_ASSERT(pipe_info->ce_hdl != NULL);
1912 if (pipe_info->ce_hdl == NULL) {
1913 rv = CDF_STATUS_E_FAILURE;
1914 A_TARGET_ACCESS_UNLIKELY(scn);
1915 goto err;
1916 }
1917
1918 if (pipe_num == DIAG_CE_ID) {
1919			/* Reserve the last CE for
1920			 * Diagnostic Window support */
1921 hif_state->ce_diag =
1922 hif_state->pipe_info[scn->ce_count - 1].ce_hdl;
1923 continue;
1924 }
1925
1926 pipe_info->buf_sz = (cdf_size_t) (attr->src_sz_max);
1927 cdf_spinlock_init(&pipe_info->recv_bufs_needed_lock);
1928 if (attr->dest_nentries > 0) {
1929 atomic_set(&pipe_info->recv_bufs_needed,
1930 init_buffer_count(attr->dest_nentries - 1));
1931 } else {
1932 atomic_set(&pipe_info->recv_bufs_needed, 0);
1933 }
1934 ce_tasklet_init(hif_state, (1 << pipe_num));
1935 ce_register_irq(hif_state, (1 << pipe_num));
1936 scn->request_irq_done = true;
1937 }
1938
1939 if (athdiag_procfs_init(scn) != 0) {
1940 A_TARGET_ACCESS_UNLIKELY(scn);
1941 goto err;
1942 }
1943 scn->athdiag_procfs_inited = true;
1944
1945 /*
1946 * Initially, establish CE completion handlers for use with BMI.
1947 * These are overwritten with generic handlers after we exit BMI phase.
1948 */
1949 pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_TARG];
1950#ifdef HIF_PCI
1951 ce_send_cb_register(
1952 pipe_info->ce_hdl, hif_bmi_send_done, pipe_info, 0);
1953#ifndef BMI_RSP_POLLING
1954 pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_HOST];
1955 ce_recv_cb_register(
1956 pipe_info->ce_hdl, hif_bmi_recv_data, pipe_info, 0);
1957#endif
1958#endif
1959 HIF_INFO_MED("%s: ce_init done", __func__);
1960
1961 rv = hif_set_hia(scn);
1962
1963 HIF_INFO_MED("%s: hif_set_hia done", __func__);
1964
1965 A_TARGET_ACCESS_UNLIKELY(scn);
1966
1967 if (rv != CDF_STATUS_SUCCESS)
1968 goto err;
1969 else
1970 init_tasklet_workers();
1971
1972 HIF_TRACE("%s: X, ret = %d\n", __func__, rv);
1973
1974#ifdef ADRASTEA_SHADOW_REGISTERS
1975 HIF_ERROR("Using Shadow Registers instead of CE Registers\n");
1976 for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
1977 HIF_ERROR("%s Shadow Register%d is mapped to address %x\n",
1978 __func__, i,
1979 (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
1980 }
1981#endif
1982
1983
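	/* rv == CDF_STATUS_SUCCESS here (all failure paths jumped to err),
	 * so this expression evaluates to 0, i.e. success for callers that
	 * check for a non-zero return. */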
1984 return rv != CDF_STATUS_SUCCESS;
1985
1986err:
1987 /* Failure, so clean up */
1988 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1989 pipe_info = &hif_state->pipe_info[pipe_num];
1990 if (pipe_info->ce_hdl) {
1991 ce_unregister_irq(hif_state, (1 << pipe_num));
1992 scn->request_irq_done = false;
1993 ce_fini(pipe_info->ce_hdl);
1994 pipe_info->ce_hdl = NULL;
1995 pipe_info->buf_sz = 0;
1996 }
1997 }
1998 if (hif_state->sleep_timer_init) {
1999 cdf_softirq_timer_cancel(&hif_state->sleep_timer);
2000 cdf_softirq_timer_free(&hif_state->sleep_timer);
2001 hif_state->sleep_timer_init = false;
2002 }
2003 if (scn->hif_hdl) {
2004 scn->hif_hdl = NULL;
2005 cdf_mem_free(hif_state);
2006 }
2007 athdiag_procfs_remove();
2008 scn->athdiag_procfs_inited = false;
2009 HIF_TRACE("%s: X, ret = %d\n", __func__, rv);
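	/* The two status codes always differ, so this evaluates to a
	 * non-zero value, signalling failure to callers that treat a
	 * non-zero return from hif_config_ce() as an error. */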
2010 return CDF_STATUS_SUCCESS != CDF_STATUS_E_FAILURE;
2011}
2012
2013
2014
2015
2016
2017
2018#ifdef IPA_OFFLOAD
Leo Changd85f78d2015-11-13 10:55:34 -08002019/**
2020 * hif_ipa_get_ce_resource() - get uc resource on hif
2021 * @scn: bus context
2022 * @ce_sr_base_paddr: copyengine source ring base physical address
2023 * @ce_sr_ring_size: copyengine source ring size
2024 * @ce_reg_paddr: copyengine register physical address
2025 *
2026 * When the IPA micro controller data path offload feature is enabled,
2027 * HIF shares the relevant copy engine resource information with the IPA uC,
2028 * which then accesses the hardware directly using that information.
2029 *
2030 * Return: None
2031 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002032void hif_ipa_get_ce_resource(struct ol_softc *scn,
Leo Changd85f78d2015-11-13 10:55:34 -08002033 cdf_dma_addr_t *ce_sr_base_paddr,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002034 uint32_t *ce_sr_ring_size,
2035 cdf_dma_addr_t *ce_reg_paddr)
2036{
2037 struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
2038 struct HIF_CE_pipe_info *pipe_info =
2039 &(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
2040 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
2041
2042 ce_ipa_get_resource(ce_hdl, ce_sr_base_paddr, ce_sr_ring_size,
2043 ce_reg_paddr);
2044 return;
2045}
2046#endif /* IPA_OFFLOAD */
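
/*
 * Usage sketch (illustrative only, not a call site in this file): a
 * datapath with IPA uC offload enabled would typically query the reserved
 * copy engine once at init time,
 *
 *	cdf_dma_addr_t ce_sr_base, ce_reg;
 *	uint32_t ce_sr_size;
 *
 *	hif_ipa_get_ce_resource(scn, &ce_sr_base, &ce_sr_size, &ce_reg);
 *
 * and hand the addresses to the IPA driver; they describe the copy engine
 * reserved as HIF_PCI_IPA_UC_ASSIGNED_CE above.
 */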
2047
2048
2049#ifdef ADRASTEA_SHADOW_REGISTERS
2050
2051/*
2052 Current shadow register config
2053
2054 -----------------------------------------------------------
2055 Shadow Register | CE | src/dst write index
2056 -----------------------------------------------------------
2057 0 | 0 | src
2058 1 No Config - Doesn't point to anything
2059 2 No Config - Doesn't point to anything
2060 3 | 3 | src
2061 4 | 4 | src
2062 5 | 5 | src
2063 6 No Config - Doesn't point to anything
2064 7 | 7 | src
2065 8 No Config - Doesn't point to anything
2066 9 No Config - Doesn't point to anything
2067 10 No Config - Doesn't point to anything
2068 11 No Config - Doesn't point to anything
2069 -----------------------------------------------------------
2070 12 No Config - Doesn't point to anything
2071 13 | 1 | dst
2072 14 | 2 | dst
2073 15 No Config - Doesn't point to anything
2074 16 No Config - Doesn't point to anything
2075 17 No Config - Doesn't point to anything
2076 18 No Config - Doesn't point to anything
2077 19 | 7 | dst
2078 20 | 8 | dst
2079 21 No Config - Doesn't point to anything
2080 22 No Config - Doesn't point to anything
2081 23 No Config - Doesn't point to anything
2082 -----------------------------------------------------------
2083
2084
2085 ToDo - Move shadow register config to following in the future
2086 This helps free up a block of shadow registers towards the end.
2087 Can be used for other purposes
2088
2089 -----------------------------------------------------------
2090 Shadow Register | CE | src/dst write index
2091 -----------------------------------------------------------
2092 0 | 0 | src
2093 1 | 3 | src
2094 2 | 4 | src
2095 3 | 5 | src
2096 4 | 7 | src
2097 -----------------------------------------------------------
2098 5 | 1 | dst
2099 6 | 2 | dst
2100 7 | 7 | dst
2101 8 | 8 | dst
2102 -----------------------------------------------------------
2103 9 No Config - Doesn't point to anything
2104 12 No Config - Doesn't point to anything
2105 13 No Config - Doesn't point to anything
2106 14 No Config - Doesn't point to anything
2107 15 No Config - Doesn't point to anything
2108 16 No Config - Doesn't point to anything
2109 17 No Config - Doesn't point to anything
2110 18 No Config - Doesn't point to anything
2111 19 No Config - Doesn't point to anything
2112 20 No Config - Doesn't point to anything
2113 21 No Config - Doesn't point to anything
2114 22 No Config - Doesn't point to anything
2115 23 No Config - Doesn't point to anything
2116 -----------------------------------------------------------
2117*/
2118
2119u32 shadow_sr_wr_ind_addr(struct ol_softc *scn, u32 ctrl_addr)
2120{
2121 u32 addr = 0;
2122
2123 switch (COPY_ENGINE_ID(ctrl_addr)) {
2124 case 0:
2125 addr = SHADOW_VALUE0;
2126 break;
2127 case 3:
2128 addr = SHADOW_VALUE3;
2129 break;
2130 case 4:
2131 addr = SHADOW_VALUE4;
2132 break;
2133 case 5:
2134 addr = SHADOW_VALUE5;
2135 break;
2136 case 7:
2137 addr = SHADOW_VALUE7;
2138 break;
2139 default:
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002140 HIF_ERROR("invalid CE ctrl_addr\n");
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002141 CDF_ASSERT(0);
2142
2143 }
2144 return addr;
2145
2146}
2147
2148u32 shadow_dst_wr_ind_addr(struct ol_softc *scn, u32 ctrl_addr)
2149{
2150 u32 addr = 0;
2151
2152 switch (COPY_ENGINE_ID(ctrl_addr)) {
2153 case 1:
2154 addr = SHADOW_VALUE13;
2155 break;
2156 case 2:
2157 addr = SHADOW_VALUE14;
2158 break;
2159 case 7:
2160 addr = SHADOW_VALUE19;
2161 break;
2162 case 8:
2163 addr = SHADOW_VALUE20;
2164 break;
2165 default:
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002166 HIF_ERROR("invalid CE ctrl_addr\n");
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002167 CDF_ASSERT(0);
2168 }
2169
2170 return addr;
2171
2172}
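
/*
 * Worked example: a ctrl_addr whose COPY_ENGINE_ID() is 4 resolves to
 * SHADOW_VALUE4 in shadow_sr_wr_ind_addr() (CE 4 source write index), and
 * a ctrl_addr whose COPY_ENGINE_ID() is 2 resolves to SHADOW_VALUE14 in
 * shadow_dst_wr_ind_addr() (CE 2 destination write index), matching the
 * shadow register table above.
 */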
2173#endif
2174
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002175#if defined(FEATURE_LRO)
2176/**
2177 * ce_lro_flush_cb_register() - register the LRO flush
2178 * callback
2179 * @scn: HIF context
2180 * @handler: callback function
2181 * @data: opaque data pointer to be passed back
2182 *
2183 * Store the LRO flush callback provided
2184 *
2185 * Return: none
2186 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002187void ce_lro_flush_cb_register(struct ol_softc *scn,
2188 void (handler)(void *), void *data)
2189{
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002190 uint8_t ul, dl;
2191 int ul_polled, dl_polled;
2192
2193 CDF_ASSERT(scn != NULL);
2194
2195 if (CDF_STATUS_SUCCESS !=
2196 hif_map_service_to_pipe(scn, HTT_DATA_MSG_SVC,
2197 &ul, &dl, &ul_polled, &dl_polled)) {
2198		HIF_ERROR("%s: cannot map service to pipe", __func__);
2199 return;
2200 } else {
2201 struct CE_state *ce_state;
2202 ce_state = scn->ce_id_to_state[dl];
2203 ce_state->lro_flush_cb = handler;
2204 ce_state->lro_data = data;
2205 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002206}
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002207
2208/**
2209 * ce_lro_flush_cb_deregister() - deregister the LRO flush
2210 * callback
2211 * @scn: HIF context
2212 *
2213 * Remove the LRO flush callback
2214 *
2215 * Return: none
2216 */
2217void ce_lro_flush_cb_deregister(struct ol_softc *scn)
2218{
2219 uint8_t ul, dl;
2220 int ul_polled, dl_polled;
2221
2222 CDF_ASSERT(scn != NULL);
2223
2224 if (CDF_STATUS_SUCCESS !=
2225 hif_map_service_to_pipe(scn, HTT_DATA_MSG_SVC,
2226 &ul, &dl, &ul_polled, &dl_polled)) {
2227		HIF_ERROR("%s: cannot map service to pipe", __func__);
2228 return;
2229 } else {
2230 struct CE_state *ce_state;
2231 ce_state = scn->ce_id_to_state[dl];
2232 ce_state->lro_flush_cb = NULL;
2233 ce_state->lro_data = NULL;
2234 }
2235}
2236#endif
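
/*
 * Usage note (a sketch, not mandated here): an LRO-capable datapath would
 * typically call ce_lro_flush_cb_register() with its flush handler once
 * the copy engines are up, and ce_lro_flush_cb_deregister() before that
 * handler's context is torn down. The handler is stored against the
 * HTT_DATA_MSG_SVC download pipe (see the mapping below) so the CE
 * service path can flush aggregated receive traffic.
 */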
Sanjay Devnanic319c822015-11-06 16:44:28 -08002237
2238/**
2239 * hif_map_service_to_pipe() - returns the ce ids pertaining to
2240 * this service
2241 * @scn: ol_softc pointer.
2242 * @svc_id: Service ID for which the mapping is needed.
2243 * @ul_pipe: address of the container in which ul pipe is returned.
2244 * @dl_pipe: address of the container in which dl pipe is returned.
2245 * @ul_is_polled: address of the container in which a bool
2246 * indicating if the UL CE for this service
2247 * is polled is returned.
2248 * @dl_is_polled: address of the container in which a bool
2249 * indicating if the DL CE for this service
2250 * is polled is returned.
2251 *
2252 * Return: Indicates whether this operation was successful.
2253 */
2254
2255int hif_map_service_to_pipe(struct ol_softc *scn, uint16_t svc_id,
2256 uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
2257 int *dl_is_polled)
2258{
2259 int status = CDF_STATUS_SUCCESS;
2260
2261 unsigned int i;
2262 struct service_to_pipe element;
2263
2264 struct service_to_pipe *tgt_svc_map_to_use;
2265 size_t sz_tgt_svc_map_to_use;
2266
2267 if (WLAN_IS_EPPING_ENABLED(cds_get_conparam())) {
2268 tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
2269 sz_tgt_svc_map_to_use =
2270 sizeof(target_service_to_ce_map_wlan_epping);
2271 } else {
2272 tgt_svc_map_to_use = target_service_to_ce_map_wlan;
2273 sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_wlan);
2274 }
2275
2276 *dl_is_polled = 0; /* polling for received messages not supported */
2277
2278 for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
2279
2280 memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
2281 if (element.service_id == svc_id) {
2282
2283 if (element.pipedir == PIPEDIR_OUT)
2284 *ul_pipe = element.pipenum;
2285
2286 else if (element.pipedir == PIPEDIR_IN)
2287 *dl_pipe = element.pipenum;
2288 }
2289 }
2290
2291 *ul_is_polled =
2292 (host_ce_config[*ul_pipe].flags & CE_ATTR_DISABLE_INTR) != 0;
2293
2294 return status;
2295}
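
/*
 * Example (mirrors the call in ce_lro_flush_cb_register() above):
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	if (hif_map_service_to_pipe(scn, HTT_DATA_MSG_SVC, &ul_pipe,
 *				    &dl_pipe, &ul_polled, &dl_polled) ==
 *	    CDF_STATUS_SUCCESS) {
 *		... use ul_pipe/dl_pipe as the CE ids for this service ...
 *	}
 */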
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002296
2297#ifdef SHADOW_REG_DEBUG
2298inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct ol_softc *scn,
2299 uint32_t CE_ctrl_addr)
2300{
2301 uint32_t read_from_hw, srri_from_ddr = 0;
2302
2303 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
2304
2305 srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
2306
2307 if (read_from_hw != srri_from_ddr) {
2308 HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x\n",
2309 srri_from_ddr, read_from_hw,
2310 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
2311 CDF_ASSERT(0);
2312 }
2313 return srri_from_ddr;
2314}
2315
2316
2317inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct ol_softc *scn,
2318 uint32_t CE_ctrl_addr)
2319{
2320 uint32_t read_from_hw, drri_from_ddr = 0;
2321
2322 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
2323
2324 drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
2325
2326 if (read_from_hw != drri_from_ddr) {
2327 HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x\n",
2328 drri_from_ddr, read_from_hw,
2329 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
2330 CDF_ASSERT(0);
2331 }
2332 return drri_from_ddr;
2333}
2334
2335#endif
2336
Houston Hoffman3d0cda82015-12-03 13:25:05 -08002337#ifdef ADRASTEA_RRI_ON_DDR
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002338/**
2339 * hif_get_src_ring_read_index(): Called to get the SRRI
2340 *
2341 * @scn: ol_softc pointer
2342 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2343 *
2344 * This function returns the SRRI to the caller. For CEs that
2345 * don't have interrupts enabled, we look at the DDR-based SRRI
2346 *
2347 * Return: SRRI
2348 */
2349inline unsigned int hif_get_src_ring_read_index(struct ol_softc *scn,
2350 uint32_t CE_ctrl_addr)
2351{
2352 struct CE_attr attr;
2353
2354 attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2355 if (attr.flags & CE_ATTR_DISABLE_INTR)
2356 return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2357 else
2358 return A_TARGET_READ(scn,
2359 (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
2360}
2361
2362/**
2363 * hif_get_dst_ring_read_index(): Called to get the DRRI
2364 *
2365 * @scn: ol_softc pointer
2366 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2367 *
2368 * This function returns the DRRI to the caller. For CEs that
2369 * don't have interrupts enabled, we look at the DDR-based DRRI
2370 *
2371 * Return: DRRI
2372 */
2373inline unsigned int hif_get_dst_ring_read_index(struct ol_softc *scn,
2374 uint32_t CE_ctrl_addr)
2375{
2376 struct CE_attr attr;
2377
2378 attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2379
2380 if (attr.flags & CE_ATTR_DISABLE_INTR)
2381 return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2382 else
2383 return A_TARGET_READ(scn,
2384 (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
2385}
2386
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002387/**
2388 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2389 *
2390 * @scn: ol_softc pointer
2391 *
2392 * This function allocates non-cached memory in DDR and sends
2393 * the physical address of this memory to the CE hardware. The
2394 * hardware updates the RRI at this location.
2395 *
2396 * Return: None
2397 */
2398static inline void hif_config_rri_on_ddr(struct ol_softc *scn)
2399{
2400 unsigned int i;
2401 cdf_dma_addr_t paddr_rri_on_ddr;
2402 uint32_t high_paddr, low_paddr;
2403 scn->vaddr_rri_on_ddr =
2404 (uint32_t *)cdf_os_mem_alloc_consistent(scn->cdf_dev,
2405 (CE_COUNT*sizeof(uint32_t)), &paddr_rri_on_ddr, 0);
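	/* The CE hardware writes its read indices into this buffer (one
	 * uint32_t per copy engine), so the allocation must remain valid
	 * for the lifetime of the device. */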
2406
2407 low_paddr = BITS0_TO_31(paddr_rri_on_ddr);
2408 high_paddr = BITS32_TO_35(paddr_rri_on_ddr);
2409
2410 HIF_ERROR("%s using srri and drri from DDR\n", __func__);
2411
2412 WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
2413 WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
2414
2415 for (i = 0; i < CE_COUNT; i++)
2416 CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
2417
2418 cdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT*sizeof(uint32_t));
2419
2420 return;
2421}
2422#else
2423
2424/**
2425 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2426 *
2427 * @scn: ol_softc pointer
2428 *
2429 * This is a dummy implementation for platforms that don't
2430 * support this functionality.
2431 *
2432 * Return: None
2433 */
2434static inline void hif_config_rri_on_ddr(struct ol_softc *scn)
2435{
2436 return;
2437}
2438#endif