/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */
#include <osdep.h>
#include "a_types.h"
#include "athdefs.h"
#include "osapi_linux.h"
#include "targcfg.h"
#include "cdf_lock.h"
#include "cdf_status.h"
#include <cdf_atomic.h>         /* cdf_atomic_read */
#include <targaddrs.h>
#include <bmi_msg.h>
#include "hif_io32.h"
#include <hif.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#ifdef HIF_PCI
#include "ce_bmi.h"
#endif
#include "ce_api.h"
#include "cdf_trace.h"
#include "cds_api.h"
#ifdef CONFIG_CNSS
#include <net/cnss.h>
#endif
#include "epping_main.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#ifdef HIF_PCI
#include "icnss_stub.h"
#else
#include <soc/qcom/icnss.h>
#endif
#include "qwlan_version.h"
#include "cds_concurrency.h"

#define CE_POLL_TIMEOUT 10      /* ms */

/* Forward references */
static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix EV118783: poll to check whether a BMI response has arrived,
 * rather than waiting only for the interrupt, which may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC  1000


static int hif_post_recv_buffers(struct ol_softc *scn);
static void hif_config_rri_on_ddr(struct ol_softc *scn);

static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;
	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		cdf_softirq_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}

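/*
 * roundup_pwr2() - round a CE ring size up to the next power of two,
 * e.g. roundup_pwr2(100) == 128, while exact powers of two (and zero)
 * are returned unchanged. Ring sizes must be powers of two so that
 * nentries - 1 can serve as the ring index mask in ce_init() below.
 */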
static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	CDF_ASSERT(0); /* n too large */
	return 0;
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings, or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 *
 * This should be called during the initialization sequence, before
 * interrupts are enabled, so we don't have to worry about thread safety.
 */
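/*
 * Note: the per-CE attributes normally come from the host_ce_config
 * table (presumably provided via ce_assignment.h, included above);
 * hif_completion_thread_startup() below walks the same table when it
 * registers the send/recv completion callbacks.
 */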
struct CE_handle *ce_init(struct ol_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr)
{
	struct CE_state *CE_state;
	uint32_t ctrl_addr;
	unsigned int nentries;
	cdf_dma_addr_t base_addr;
	bool malloc_CE_state = false;
	bool malloc_src_ring = false;

	CDF_ASSERT(CE_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(CE_id);
	CE_state = scn->ce_id_to_state[CE_id];

	if (!CE_state) {
		CE_state =
		    (struct CE_state *)cdf_mem_malloc(sizeof(*CE_state));
		if (!CE_state) {
			HIF_ERROR("%s: CE_state has no mem", __func__);
			return NULL;
		}
		malloc_CE_state = true;
		cdf_mem_zero(CE_state, sizeof(*CE_state));
		scn->ce_id_to_state[CE_id] = CE_state;
		cdf_spinlock_init(&CE_state->ce_index_lock);

		CE_state->id = CE_id;
		CE_state->ctrl_addr = ctrl_addr;
		CE_state->state = CE_RUNNING;
		CE_state->attr_flags = attr->flags;
	}
	CE_state->scn = scn;

	cdf_atomic_init(&CE_state->rx_pending);
	if (attr == NULL) {
		/* Already initialized; caller wants the handle */
		return (struct CE_handle *)CE_state;
	}

#ifdef ADRASTEA_SHADOW_REGISTERS
	HIF_ERROR("%s: Using Shadow Registers instead of CE Registers\n",
		  __func__);
#endif

	if (CE_state->src_sz_max)
		CDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
	else
		CE_state->src_sz_max = attr->src_sz_max;

	ce_init_ce_desc_event_log(CE_id,
			attr->src_nentries + attr->dest_nentries);

	/* source ring setup */
	nentries = attr->src_nentries;
	if (nentries) {
		struct CE_ring_state *src_ring;
		unsigned CE_nbytes;
		char *ptr;
		uint64_t dma_addr;
		nentries = roundup_pwr2(nentries);
		if (CE_state->src_ring) {
			CDF_ASSERT(CE_state->src_ring->nentries == nentries);
		} else {
			CE_nbytes = sizeof(struct CE_ring_state)
				    + (nentries * sizeof(void *));
			ptr = cdf_mem_malloc(CE_nbytes);
			if (!ptr) {
				/* cannot allocate src ring. If the
				 * CE_state was allocated locally, free
				 * CE_state and return an error.
				 */
				HIF_ERROR("%s: src ring has no mem", __func__);
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					scn->ce_id_to_state[CE_id] = NULL;
					cdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			} else {
				/* we can allocate src ring.
				 * Mark that the src ring is
				 * allocated locally.
				 */
				malloc_src_ring = true;
			}
			cdf_mem_zero(ptr, CE_nbytes);

			src_ring = CE_state->src_ring =
				(struct CE_ring_state *)ptr;
			ptr += sizeof(struct CE_ring_state);
			src_ring->nentries = nentries;
			src_ring->nentries_mask = nentries - 1;
			A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
			src_ring->hw_index =
				CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
			src_ring->sw_index = src_ring->hw_index;
			src_ring->write_index =
				CE_SRC_RING_WRITE_IDX_GET(scn, ctrl_addr);
			A_TARGET_ACCESS_END_RET_PTR(scn);
			src_ring->low_water_mark_nentries = 0;
			src_ring->high_water_mark_nentries = nentries;
			src_ring->per_transfer_context = (void **)ptr;

			/* Legacy platforms that do not support cache
			 * coherent DMA are unsupported
			 */
			src_ring->base_addr_owner_space_unaligned =
				cdf_os_mem_alloc_consistent(scn->cdf_dev,
						(nentries *
						 sizeof(struct CE_src_desc) +
						 CE_DESC_RING_ALIGN),
						&base_addr, 0);
			if (src_ring->base_addr_owner_space_unaligned
					== NULL) {
				HIF_ERROR("%s: src ring has no DMA mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->base_addr_CE_space_unaligned = base_addr;

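			/*
			 * Align the descriptor ring base to CE_DESC_RING_ALIGN
			 * with the usual round-up-and-mask idiom:
			 * (addr + align - 1) & ~(align - 1). For example, with
			 * an alignment of 8, an unaligned 0x1003 becomes 0x1008.
			 */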
			if (src_ring->
			    base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN
							    - 1)) {
				src_ring->base_addr_CE_space =
					(src_ring->base_addr_CE_space_unaligned
					 + CE_DESC_RING_ALIGN -
					 1) & ~(CE_DESC_RING_ALIGN - 1);

				src_ring->base_addr_owner_space =
					(void
					 *)(((size_t) src_ring->
					     base_addr_owner_space_unaligned +
					     CE_DESC_RING_ALIGN -
					     1) & ~(CE_DESC_RING_ALIGN - 1));
			} else {
				src_ring->base_addr_CE_space =
					src_ring->base_addr_CE_space_unaligned;
				src_ring->base_addr_owner_space =
					src_ring->
					base_addr_owner_space_unaligned;
			}
			/*
			 * Also allocate a shadow src ring in
			 * regular mem to use for faster access.
			 */
			src_ring->shadow_base_unaligned =
				cdf_mem_malloc(nentries *
					       sizeof(struct CE_src_desc) +
					       CE_DESC_RING_ALIGN);
			if (src_ring->shadow_base_unaligned == NULL) {
				HIF_ERROR("%s: src ring no shadow_base mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->shadow_base = (struct CE_src_desc *)
				(((size_t) src_ring->shadow_base_unaligned +
				  CE_DESC_RING_ALIGN - 1) &
				 ~(CE_DESC_RING_ALIGN - 1));

			A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
			dma_addr = src_ring->base_addr_CE_space;
			CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
				 (uint32_t)(dma_addr & 0xFFFFFFFF));
#ifdef WLAN_ENABLE_QCA6180
			{
				uint32_t tmp;
				tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(
				   scn, ctrl_addr);
				tmp &= ~0x1F;
				dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
				CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
					 ctrl_addr, (uint32_t)dma_addr);
			}
#endif
			CE_SRC_RING_SZ_SET(scn, ctrl_addr, nentries);
			CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
#ifdef BIG_ENDIAN_HOST
			/* Enable source ring byte swap for big endian host */
			CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
			CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
			CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
			A_TARGET_ACCESS_END_RET_PTR(scn);
		}
	}

	/* destination ring setup */
	nentries = attr->dest_nentries;
	if (nentries) {
		struct CE_ring_state *dest_ring;
		unsigned CE_nbytes;
		char *ptr;
		uint64_t dma_addr;

		nentries = roundup_pwr2(nentries);
		if (CE_state->dest_ring) {
			CDF_ASSERT(CE_state->dest_ring->nentries == nentries);
		} else {
			CE_nbytes = sizeof(struct CE_ring_state)
				    + (nentries * sizeof(void *));
			ptr = cdf_mem_malloc(CE_nbytes);
			if (!ptr) {
				/* cannot allocate dst ring. If the CE_state
				 * or src ring was allocated locally, free
				 * CE_state and src ring and return an error.
				 */
				HIF_ERROR("%s: dest ring has no mem",
					  __func__);
				if (malloc_src_ring) {
					cdf_mem_free(CE_state->src_ring);
					CE_state->src_ring = NULL;
					malloc_src_ring = false;
				}
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					scn->ce_id_to_state[CE_id] = NULL;
					cdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			}
			cdf_mem_zero(ptr, CE_nbytes);

			dest_ring = CE_state->dest_ring =
				(struct CE_ring_state *)ptr;
			ptr += sizeof(struct CE_ring_state);
			dest_ring->nentries = nentries;
			dest_ring->nentries_mask = nentries - 1;
			A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
			dest_ring->sw_index =
				CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
			dest_ring->write_index =
				CE_DEST_RING_WRITE_IDX_GET(scn, ctrl_addr);
			A_TARGET_ACCESS_END_RET_PTR(scn);
			dest_ring->low_water_mark_nentries = 0;
			dest_ring->high_water_mark_nentries = nentries;
			dest_ring->per_transfer_context = (void **)ptr;

			/* Legacy platforms that do not support cache
			 * coherent DMA are unsupported */
			dest_ring->base_addr_owner_space_unaligned =
				cdf_os_mem_alloc_consistent(scn->cdf_dev,
						(nentries *
						 sizeof(struct CE_dest_desc) +
						 CE_DESC_RING_ALIGN),
						&base_addr, 0);
			if (dest_ring->base_addr_owner_space_unaligned
					== NULL) {
				HIF_ERROR("%s: dest ring has no DMA mem",
					  __func__);
				goto error_no_dma_mem;
			}
			dest_ring->base_addr_CE_space_unaligned = base_addr;

			/* Correctly initialize memory to 0 to
			 * prevent garbage data from crashing the system
			 * when the firmware is downloaded.
			 */
			cdf_mem_zero(dest_ring->base_addr_owner_space_unaligned,
				     nentries * sizeof(struct CE_dest_desc) +
				     CE_DESC_RING_ALIGN);

			if (dest_ring->
			    base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN -
							    1)) {

				dest_ring->base_addr_CE_space =
					(dest_ring->
					 base_addr_CE_space_unaligned +
					 CE_DESC_RING_ALIGN -
					 1) & ~(CE_DESC_RING_ALIGN - 1);

				dest_ring->base_addr_owner_space =
					(void
					 *)(((size_t) dest_ring->
					     base_addr_owner_space_unaligned +
					     CE_DESC_RING_ALIGN -
					     1) & ~(CE_DESC_RING_ALIGN - 1));
			} else {
				dest_ring->base_addr_CE_space =
					dest_ring->base_addr_CE_space_unaligned;
				dest_ring->base_addr_owner_space =
					dest_ring->
					base_addr_owner_space_unaligned;
			}

			A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
			dma_addr = dest_ring->base_addr_CE_space;
			CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
				 (uint32_t)(dma_addr & 0xFFFFFFFF));
#ifdef WLAN_ENABLE_QCA6180
			{
				uint32_t tmp;
				tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn,
						ctrl_addr);
				tmp &= ~0x1F;
				dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
				CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
						ctrl_addr, (uint32_t)dma_addr);
			}
#endif
			CE_DEST_RING_SZ_SET(scn, ctrl_addr, nentries);
#ifdef BIG_ENDIAN_HOST
			/* Enable Dest ring byte swap for big endian host */
			CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
			CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
			CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
			A_TARGET_ACCESS_END_RET_PTR(scn);

			/* epping */
			/* poll timer */
			if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL)) {
				cdf_softirq_timer_init(scn->cdf_dev,
						       &CE_state->poll_timer,
						       ce_poll_timeout,
						       CE_state,
						       CDF_TIMER_TYPE_SW);
				CE_state->timer_inited = true;
				cdf_softirq_timer_mod(&CE_state->poll_timer,
						      CE_POLL_TIMEOUT);
			}
		}
	}

	/* Enable CE error interrupts */
	A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
	CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
	A_TARGET_ACCESS_END_RET_PTR(scn);

	return (struct CE_handle *)CE_state;

error_no_dma_mem:
	ce_fini((struct CE_handle *)CE_state);
	return NULL;
}

#ifdef WLAN_FEATURE_FASTPATH
/**
 * ce_h2t_tx_ce_cleanup() - placeholder for H2T CE cleanup.
 * No processing is required inside this function.
 * @ce_hdl: Copy engine handle
 * Using an assert, this function makes sure that
 * the TX CE has been processed completely.
 *
 * This is called while dismantling CE structures. No other thread
 * should be using these structures while dismantling is occurring,
 * therefore no locking is needed.
 *
 * Return: none
 */
void
ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct ol_softc *sc = ce_state->scn;
	uint32_t sw_index, write_index;

	if (sc->fastpath_mode_on && (ce_state->id == CE_HTT_H2T_MSG)) {
		HIF_INFO("%s %d Fastpath mode ON, Cleaning up HTT Tx CE\n",
			 __func__, __LINE__);
		sw_index = src_ring->sw_index;
		write_index = src_ring->write_index;

		/* At this point Tx CE should be clean */
		cdf_assert_always(sw_index == write_index);
	}
}
#else
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
}
#endif /* WLAN_FEATURE_FASTPATH */

void ce_fini(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int CE_id = CE_state->id;
	struct ol_softc *scn = CE_state->scn;

	CE_state->state = CE_UNUSED;
	scn->ce_id_to_state[CE_id] = NULL;
	if (CE_state->src_ring) {
		/* Cleanup the HTT Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		if (CE_state->src_ring->shadow_base_unaligned)
			cdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
		if (CE_state->src_ring->base_addr_owner_space_unaligned)
			cdf_os_mem_free_consistent(scn->cdf_dev,
					   (CE_state->src_ring->nentries *
					    sizeof(struct CE_src_desc) +
					    CE_DESC_RING_ALIGN),
					   CE_state->src_ring->
					   base_addr_owner_space_unaligned,
					   CE_state->src_ring->
					   base_addr_CE_space, 0);
		cdf_mem_free(CE_state->src_ring);
	}
	if (CE_state->dest_ring) {
		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
			cdf_os_mem_free_consistent(scn->cdf_dev,
					   (CE_state->dest_ring->nentries *
					    sizeof(struct CE_dest_desc) +
					    CE_DESC_RING_ALIGN),
					   CE_state->dest_ring->
					   base_addr_owner_space_unaligned,
					   CE_state->dest_ring->
					   base_addr_CE_space, 0);
		cdf_mem_free(CE_state->dest_ring);

		/* epping */
		if (CE_state->timer_inited) {
			CE_state->timer_inited = false;
			cdf_softirq_timer_free(&CE_state->poll_timer);
		}
	}
	cdf_mem_free(CE_state);
}

void hif_detach_htc(struct ol_softc *scn)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;

	cdf_mem_zero(&hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
	cdf_mem_zero(&hif_state->msg_callbacks_current,
		     sizeof(hif_state->msg_callbacks_current));
}

/* Send the first nbytes bytes of the buffer */
CDF_STATUS
hif_send_head(struct ol_softc *scn,
	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
	      cdf_nbuf_t nbuf, unsigned int data_attr)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	int bytes = nbytes, nfrags = 0;
	struct ce_sendlist sendlist;
	int status, i = 0;
	unsigned int mux_id = 0;

	CDF_ASSERT(nbytes <= cdf_nbuf_len(nbuf));

	transfer_id =
		(mux_id & MUX_ID_MASK) |
		(transfer_id & TRANSACTION_ID_MASK);
	data_attr &= DESC_DATA_FLAG_MASK;
	/*
	 * The common case involves sending multiple fragments within a
	 * single download (the tx descriptor and the tx frame header).
	 * So, optimize for the case of multiple fragments by not even
	 * checking whether it's necessary to use a sendlist.
	 * The overhead of using a sendlist for a single buffer download
	 * is not a big deal, since it happens rarely (for WMI messages).
	 */
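	/*
	 * In the typical data-path case nfrags ends up as 2: one fragment
	 * for the tx descriptor and one for the frame header/payload, per
	 * the comment above; WMI messages usually arrive as a single
	 * fragment.
	 */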
	ce_sendlist_init(&sendlist);
	do {
		uint32_t frag_paddr;
		int frag_bytes;

		frag_paddr = cdf_nbuf_get_frag_paddr_lo(nbuf, nfrags);
		frag_bytes = cdf_nbuf_get_frag_len(nbuf, nfrags);
		/*
		 * Clear the packet offset for all but the first CE desc.
		 */
		if (i++ > 0)
			data_attr &= ~CDF_CE_TX_PKT_OFFSET_BIT_M;

		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
					     frag_bytes >
					     bytes ? bytes : frag_bytes,
					     cdf_nbuf_get_frag_is_wordstream
					     (nbuf,
					      nfrags) ? 0 :
					     CE_SEND_FLAG_SWAP_DISABLE,
					     data_attr);
		if (status != CDF_STATUS_SUCCESS) {
			HIF_ERROR("%s: error, frag_num %d larger than limit",
				  __func__, nfrags);
			return status;
		}
		bytes -= frag_bytes;
		nfrags++;
	} while (bytes > 0);

	/* Make sure we have resources to handle this request */
	cdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	if (pipe_info->num_sends_allowed < nfrags) {
		cdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
		return CDF_STATUS_E_RESOURCES;
	}
	pipe_info->num_sends_allowed -= nfrags;
	cdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);

	if (cdf_unlikely(ce_hdl == NULL)) {
		HIF_ERROR("%s: error CE handle is null", __func__);
		return A_ERROR;
	}

	NBUF_UPDATE_TX_PKT_COUNT(nbuf, NBUF_TX_PKT_HIF);
	DPTRACE(cdf_dp_trace(nbuf, CDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
			     (uint8_t *)(cdf_nbuf_data(nbuf)),
			     sizeof(cdf_nbuf_data(nbuf))));
	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
	CDF_ASSERT(status == CDF_STATUS_SUCCESS);

	return status;
}

void hif_send_complete_check(struct ol_softc *scn, uint8_t pipe, int force)
{
	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance. If there seem to be plenty of
		 * resources left, then just wait, since checking involves
		 * reading a CE register, which is a relatively expensive
		 * operation.
		 */
		resources = hif_get_free_queue_number(scn, pipe);
		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
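		/*
		 * For example, with src_nentries == 32 the reap below is
		 * skipped while more than 16 send slots are still free.
		 */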
		if (resources > (host_ce_config[pipe].src_nentries >> 1)) {
			return;
		}
	}
#ifdef ATH_11AC_TXCOMPACT
	ce_per_engine_servicereap(scn, pipe);
#else
	ce_per_engine_service(scn, pipe);
#endif
}

uint16_t hif_get_free_queue_number(struct ol_softc *scn, uint8_t pipe)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	uint16_t rv;

	cdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	rv = pipe_info->num_sends_allowed;
	cdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
	return rv;
}

/* Called by lower (CE) layer when a send to Target completes. */
void
hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, cdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int sw_index, unsigned int hw_index,
		     unsigned int toeplitz_hash_result)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	unsigned int sw_idx = sw_index, hw_idx = hw_index;
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	do {
		/*
		 * The upper layer callback will be triggered
		 * when the last fragment is completed.
		 */
		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
			if (hif_state->scn->target_status
			    == OL_TRGET_STATUS_RESET)
				cdf_nbuf_free(transfer_context);
			else
				msg_callbacks->txCompletionHandler(
					msg_callbacks->Context,
					transfer_context, transfer_id,
					toeplitz_hash_result);
		}

		cdf_spin_lock(&pipe_info->completion_freeq_lock);
		pipe_info->num_sends_allowed++;
		cdf_spin_unlock(&pipe_info->completion_freeq_lock);
	} while (ce_completed_send_next(copyeng,
			&ce_context, &transfer_context,
			&CE_data, &nbytes, &transfer_id,
			&sw_idx, &hw_idx,
			&toeplitz_hash_result) == CDF_STATUS_SUCCESS);
}

/**
 * hif_ce_do_recv(): send message from copy engine to upper layers
 * @msg_callbacks: structure containing callback and callback context
 * @netbuff: skb containing message
 * @nbytes: number of bytes in the message
 * @pipe_info: used for the pipe_number info
 *
 * Checks the packet length, configures the length in the netbuff,
 * and calls the upper layer callback.
 *
 * return: None
 */
static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
		cdf_nbuf_t netbuf, int nbytes,
		struct HIF_CE_pipe_info *pipe_info) {
	if (nbytes <= pipe_info->buf_sz) {
		cdf_nbuf_set_pktlen(netbuf, nbytes);
		msg_callbacks->
			rxCompletionHandler(msg_callbacks->Context,
					    netbuf, pipe_info->pipe_num);
	} else {
		HIF_ERROR("%s: Invalid Rx msg buf:%p nbytes:%d",
			  __func__, netbuf, nbytes);
		cdf_nbuf_free(netbuf);
	}
}

/* Called by lower (CE) layer when data is received from the Target. */
void
hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, cdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int flags)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct CE_state *ce_state = (struct CE_state *) copyeng;
	struct ol_softc *scn = hif_state->scn;
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	do {
		hif_pm_runtime_mark_last_busy(scn->hif_sc->dev);
		cdf_nbuf_unmap_single(scn->cdf_dev,
				      (cdf_nbuf_t) transfer_context,
				      CDF_DMA_FROM_DEVICE);

		atomic_inc(&pipe_info->recv_bufs_needed);
		hif_post_recv_buffers_for_pipe(pipe_info);
		if (hif_state->scn->target_status == OL_TRGET_STATUS_RESET)
			cdf_nbuf_free(transfer_context);
		else
			hif_ce_do_recv(msg_callbacks, transfer_context,
				       nbytes, pipe_info);

		/* Set the force_break flag if the number of receives
		 * reaches MAX_NUM_OF_RECEIVES */
		ce_state->receive_count++;
		if (cdf_unlikely(hif_max_num_receives_reached(
				ce_state->receive_count))) {
			ce_state->force_break = 1;
			break;
		}
	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
					&CE_data, &nbytes, &transfer_id,
					&flags) == CDF_STATUS_SUCCESS);
}

/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */

void
hif_post_init(struct ol_softc *scn, void *unused,
	      struct hif_msg_callbacks *callbacks)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	spin_lock_init(&pcie_access_log_lock);
#endif
	/* Save callbacks for later installation */
	cdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
		     sizeof(hif_state->msg_callbacks_pending));

}

int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
{
	struct CE_handle *ce_diag = hif_state->ce_diag;
	int pipe_num;
	struct ol_softc *scn = hif_state->scn;
	struct hif_msg_callbacks *hif_msg_callbacks =
		&hif_state->msg_callbacks_current;

	/* daemonize("hif_compl_thread"); */

	if (scn->ce_count == 0) {
		HIF_ERROR("%s: Invalid ce_count\n", __func__);
		return -EINVAL;
	}

	if (!hif_msg_callbacks ||
	    !hif_msg_callbacks->rxCompletionHandler ||
	    !hif_msg_callbacks->txCompletionHandler) {
		HIF_ERROR("%s: no completion handler registered", __func__);
		return -EFAULT;
	}

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr attr;
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl == ce_diag) {
			continue;       /* Handle Diagnostic CE specially */
		}
		attr = host_ce_config[pipe_num];
		if (attr.src_nentries) {
			/* pipe used to send to target */
			HIF_INFO_MED("%s: pipe_num:%d pipe_info:0x%p",
				     __func__, pipe_num, pipe_info);
			ce_send_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_send_done, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
			pipe_info->num_sends_allowed = attr.src_nentries - 1;
		}
		if (attr.dest_nentries) {
			/* pipe used to receive from target */
			ce_recv_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_recv_data, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
		}

		if (attr.src_nentries)
			cdf_spinlock_init(&pipe_info->completion_freeq_lock);
	}

	A_TARGET_ACCESS_UNLIKELY(scn);
	return 0;
}

/*
 * Install pending msg callbacks.
 *
 * TBDXXX: This hack is needed because upper layers install msg callbacks
 * for use with HTC before BMI is done; yet this HIF implementation
 * needs to continue to use BMI msg callbacks. Really, upper layers
 * should not register HTC callbacks until AFTER BMI phase.
 */
static void hif_msg_callbacks_install(struct ol_softc *scn)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;

	cdf_mem_copy(&hif_state->msg_callbacks_current,
		     &hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
}

void hif_claim_device(struct ol_softc *scn, void *claimedContext)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;

	hif_state->claimedContext = claimedContext;
}

void hif_release_device(struct ol_softc *scn)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;

	hif_state->claimedContext = NULL;
}

void
hif_get_default_pipe(struct ol_softc *scn, uint8_t *ULPipe, uint8_t *DLPipe)
{
	int ul_is_polled, dl_is_polled;

	(void)hif_map_service_to_pipe(scn, HTC_CTRL_RSVD_SVC,
		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
}

/**
 * hif_dump_pipe_debug_count() - Log error count
 * @scn: ol_softc pointer.
 *
 * Output the pipe error counts of each pipe to log file
 *
 * Return: N/A
 */
void hif_dump_pipe_debug_count(struct ol_softc *scn)
{
	struct HIF_CE_state *hif_state;
	int pipe_num;

	if (scn == NULL) {
		HIF_ERROR("%s scn is NULL", __func__);
		return;
	}
	hif_state = (struct HIF_CE_state *)scn->hif_hdl;
	if (hif_state == NULL) {
		HIF_ERROR("%s hif_state is NULL", __func__);
		return;
	}
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];

		if (pipe_info->nbuf_alloc_err_count > 0 ||
		    pipe_info->nbuf_dma_err_count > 0 ||
		    pipe_info->nbuf_ce_enqueue_err_count)
			HIF_ERROR(
				"%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_alloc_err_count,
				pipe_info->nbuf_dma_err_count,
				pipe_info->nbuf_ce_enqueue_err_count);
	}
}

static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	cdf_size_t buf_sz;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct ol_softc *scn = hif_state->scn;
	CDF_STATUS ret;
	uint32_t bufs_posted = 0;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return 0;
	}

	ce_hdl = pipe_info->ce_hdl;

	cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
		cdf_dma_addr_t CE_data;      /* CE space buffer address */
		cdf_nbuf_t nbuf;
		int status;

		atomic_dec(&pipe_info->recv_bufs_needed);
		cdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

		nbuf = cdf_nbuf_alloc(scn->cdf_dev, buf_sz, 0, 4, false);
		if (!nbuf) {
			cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
			pipe_info->nbuf_alloc_err_count++;
			cdf_spin_unlock_bh(
				&pipe_info->recv_bufs_needed_lock);
			HIF_ERROR(
				"%s buf alloc error [%d] needed %d, nbuf_alloc_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_alloc_err_count);
			atomic_inc(&pipe_info->recv_bufs_needed);
			return 1;
		}

		/*
		 * cdf_nbuf_peek_header(nbuf, &data, &unused);
		 * CE_data = dma_map_single(dev, data, buf_sz, );
		 * DMA_FROM_DEVICE);
		 */
		ret =
			cdf_nbuf_map_single(scn->cdf_dev, nbuf,
					    CDF_DMA_FROM_DEVICE);

		if (unlikely(ret != CDF_STATUS_SUCCESS)) {
			cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
			pipe_info->nbuf_dma_err_count++;
			cdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
			HIF_ERROR(
				"%s buf map error [%d] needed %d, nbuf_dma_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_dma_err_count);
			cdf_nbuf_free(nbuf);
			atomic_inc(&pipe_info->recv_bufs_needed);
			return 1;
		}

		CE_data = cdf_nbuf_get_frag_paddr_lo(nbuf, 0);

		cdf_os_mem_dma_sync_single_for_device(scn->cdf_dev, CE_data,
					       buf_sz, DMA_FROM_DEVICE);
		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
		CDF_ASSERT(status == CDF_STATUS_SUCCESS);
		if (status != EOK) {
			cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
			pipe_info->nbuf_ce_enqueue_err_count++;
			cdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
			HIF_ERROR(
				"%s buf enqueue error [%d] needed %d, nbuf_ce_enqueue_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_ce_enqueue_err_count);
			atomic_inc(&pipe_info->recv_bufs_needed);
			cdf_nbuf_free(nbuf);
			return 1;
		}

		cdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
		bufs_posted++;
	}
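	/*
	 * Each buffer posted here presumably makes up for one earlier
	 * failure, so trim the error counters by bufs_posted, clamping
	 * at zero.
	 */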
	pipe_info->nbuf_alloc_err_count =
		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
	pipe_info->nbuf_dma_err_count =
		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
	pipe_info->nbuf_ce_enqueue_err_count =
		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
		pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;

	cdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

	return 0;
}

/*
 * Try to post all desired receive buffers for all pipes.
 * Returns 0 if all desired buffers are posted,
 * non-zero if we were unable to completely
 * replenish receive buffers.
 */
static int hif_post_recv_buffers(struct ol_softc *scn)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
	int pipe_num, rv = 0;

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (hif_post_recv_buffers_for_pipe(pipe_info)) {
			rv = 1;
			goto done;
		}
	}

done:
	A_TARGET_ACCESS_UNLIKELY(scn);

	return rv;
}

CDF_STATUS hif_start(struct ol_softc *scn)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;

	hif_msg_callbacks_install(scn);

	if (hif_completion_thread_startup(hif_state))
		return CDF_STATUS_E_FAILURE;

	/* Post buffers once to start things off. */
	(void)hif_post_recv_buffers(scn);

	hif_state->started = true;

	return CDF_STATUS_SUCCESS;
}

#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_enable_fastpath() - record that fastpath mode has been enabled
 * @hif_device: HIF context
 *
 * For use in data path
 *
 * Return: void
 */
void
hif_enable_fastpath(struct ol_softc *hif_device)
{
	HIF_INFO("Enabling fastpath mode\n");
	hif_device->fastpath_mode_on = 1;
}
#endif /* WLAN_FEATURE_FASTPATH */

void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct ol_softc *scn;
	struct CE_handle *ce_hdl;
	uint32_t buf_sz;
	struct HIF_CE_state *hif_state;
	cdf_nbuf_t netbuf;
	cdf_dma_addr_t CE_data;
	void *per_CE_context;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started) {
		return;
	}

	scn = hif_state->scn;
	ce_hdl = pipe_info->ce_hdl;

	if (scn->cdf_dev == NULL) {
		return;
	}
	while (ce_revoke_recv_next
		       (ce_hdl, &per_CE_context, (void **)&netbuf,
			&CE_data) == CDF_STATUS_SUCCESS) {
		cdf_nbuf_unmap_single(scn->cdf_dev, netbuf,
				      CDF_DMA_FROM_DEVICE);
		cdf_nbuf_free(netbuf);
	}
}

void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	struct HIF_CE_state *hif_state;
	cdf_nbuf_t netbuf;
	void *per_CE_context;
	cdf_dma_addr_t CE_data;
	unsigned int nbytes;
	unsigned int id;
	uint32_t buf_sz;
	uint32_t toeplitz_hash_result;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started) {
		return;
	}

	ce_hdl = pipe_info->ce_hdl;

	while (ce_cancel_send_next
		       (ce_hdl, &per_CE_context,
			(void **)&netbuf, &CE_data, &nbytes,
			&id, &toeplitz_hash_result) == CDF_STATUS_SUCCESS) {
		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
			/*
			 * Packets enqueued by htt_h2t_ver_req_msg() and
			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
			 * freed in htt_htc_misc_pkt_pool_free() in
			 * wlantl_close(), so do not free them here again
			 * by checking whether it's the endpoint
			 * which they are queued in.
			 */
			if (id == hif_state->scn->htc_endpoint)
				return;
			/* Indicate the completion to higher
			 * layers to free the buffer */
			hif_state->msg_callbacks_current.
				txCompletionHandler(hif_state->
					msg_callbacks_current.Context,
					netbuf, id, toeplitz_hash_result);
		}
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
{
	int pipe_num;

	for (pipe_num = 0; pipe_num < hif_state->scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		hif_recv_buffer_cleanup_on_pipe(pipe_info);
		hif_send_buffer_cleanup_on_pipe(pipe_info);
	}
}

void hif_flush_surprise_remove(struct ol_softc *scn)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
	hif_buffer_cleanup(hif_state);
}

void hif_stop(struct ol_softc *scn)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
	int pipe_num;

	scn->hif_init_done = false;

	/*
	 * At this point, asynchronous threads are stopped,
	 * The Target should not DMA nor interrupt, Host code may
	 * not initiate anything more. So we just need to clean
	 * up Host-side state.
	 */

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	hif_buffer_cleanup(hif_state);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}

	if (hif_state->sleep_timer_init) {
		cdf_softirq_timer_cancel(&hif_state->sleep_timer);
		cdf_softirq_timer_free(&hif_state->sleep_timer);
		hif_state->sleep_timer_init = false;
	}

	hif_state->started = false;
}

#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40


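/*
 * Each entry pairs a CE id with the offset of its write-index shadow
 * register (ADRASTEA_SRC_WR_INDEX_OFFSET for source rings,
 * ADRASTEA_DST_WR_INDEX_OFFSET for destination rings, going by the
 * offset names). The table is handed to the platform driver through
 * hif_wlan_enable() below.
 */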
static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};



/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
 * mapping of HTC services to HIF pipes.
 */
/*
 * This authoritative table defines Copy Engine configuration and the mapping
 * of services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
   ============================================================================
   Purpose     | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
               |                      |      | ctio | Size     | Frequency
               |                      |      | n    |          |
   ============================================================================
   tx          | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
   descriptor  |                      |      |      | O(100B)  | and regular
   download    |                      |      |      |          |
   ----------------------------------------------------------------------------
   rx          | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
   indication  |                      |      |      | O(10B)   | regular
   upload      |                      |      |      |          |
   ----------------------------------------------------------------------------
   MSDU        | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
   upload      |                      |      |      | O(1000B) | (frequent
   e.g. noise  |                      |      |      |          | during IP1.0
   packets     |                      |      |      |          | testing)
   ----------------------------------------------------------------------------
   MSDU        | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
   download    |                      |      |      | O(1000B) | (frequent
   e.g.        |                      |      |      |          | during IP1.0
   misdirected |                      |      |      |          | testing)
   EAPOL       |                      |      |      |          |
   packets     |                      |      |      |          |
   ----------------------------------------------------------------------------
   n/a         | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
               | DATA_VO (uplink)     |      |      |          |
   ----------------------------------------------------------------------------
   n/a         | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
               | DATA_VO (downlink)   |      |      |          |
   ----------------------------------------------------------------------------
   WMI events  | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
               |                      |      |      | O(100B)  |
   ----------------------------------------------------------------------------
   WMI         | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
   messages    | (downlink)           |      |      | O(100B)  |
               |                      |      |      |          |
   ----------------------------------------------------------------------------
   n/a         | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
               | HTC_RAW_STREAMS      |      |      |          |
               | (uplink)             |      |      |          |
   ----------------------------------------------------------------------------
   n/a         | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
               | HTC_RAW_STREAMS      |      |      |          |
               | (downlink)           |      |      |          |
   ----------------------------------------------------------------------------
   diag        | none (raw CE)        | CE 7 | t<>h |    4     | Diag Window
               |                      |      |      |          | infrequent
   ============================================================================
 */

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		5,
	},
	/* (Additions here) */

	{       /* Must be last */
		0,
		0,
		0,
	},
};

static struct service_to_pipe *target_service_to_ce_map =
	target_service_to_ce_map_wlan;
static int target_service_to_ce_map_sz = sizeof(target_service_to_ce_map_wlan);

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
	{0, 0, 0,},             /* Must be last */
};

#ifdef HIF_PCI
/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
void hif_wake_target_cpu(struct ol_softc *scn)
{
	CDF_STATUS rv;
	uint32_t core_ctrl;

	rv = hif_diag_read_access(scn,
				  SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
				  &core_ctrl);
	CDF_ASSERT(rv == CDF_STATUS_SUCCESS);
	/* A_INUM_FIRMWARE interrupt to Target CPU */
	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

	rv = hif_diag_write_access(scn,
				   SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
				   core_ctrl);
	CDF_ASSERT(rv == CDF_STATUS_SUCCESS);
}
#endif

static void hif_sleep_entry(void *arg)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
	struct ol_softc *scn = hif_state->scn;
	uint32_t idle_ms;
	if (scn->recovery)
		return;

	if (cds_is_driver_unloading())
		return;

	cdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
	if (hif_state->verified_awake == false) {
		idle_ms = cdf_system_ticks_to_msecs(cdf_system_ticks()
						    - hif_state->sleep_ticks);
		if (idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
			if (!cdf_atomic_read(&scn->link_suspended)) {
				soc_wake_reset(scn);
				hif_state->fake_sleep = false;
			}
		} else {
			cdf_softirq_timer_cancel(&hif_state->sleep_timer);
			cdf_softirq_timer_start(&hif_state->sleep_timer,
				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
		}
	} else {
		cdf_softirq_timer_cancel(&hif_state->sleep_timer);
		cdf_softirq_timer_start(&hif_state->sleep_timer,
			HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
	}
	cdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
}
#define HIF_HIA_MAX_POLL_LOOP    1000000
#define HIF_HIA_POLLING_DELAY_MS 10

#ifndef HIF_PCI
int hif_set_hia(struct ol_softc *scn)
{
	return 0;
}
#else
int hif_set_hia(struct ol_softc *scn)
{
	CDF_STATUS rv;
	uint32_t interconnect_targ_addr = 0;
	uint32_t pcie_state_targ_addr = 0;
	uint32_t pipe_cfg_targ_addr = 0;
	uint32_t svc_to_pipe_map = 0;
	uint32_t pcie_config_flags = 0;
	uint32_t flag2_value = 0;
	uint32_t flag2_targ_addr = 0;
#ifdef QCA_WIFI_3_0
	uint32_t host_interest_area = 0;
	uint8_t i;
#else
	uint32_t ealloc_value = 0;
	uint32_t ealloc_targ_addr = 0;
	uint8_t banks_switched = 1;
	uint32_t chip_id;
#endif
	uint32_t pipe_cfg_addr;

	HIF_TRACE("%s: E", __func__);

	if (ADRASTEA_BU)
		return CDF_STATUS_SUCCESS;

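/*
 * For QCA_WIFI_3_0 targets (the #ifdef below), the target publishes the
 * host interest area address through A_SOC_CORE_SCRATCH_0_ADDRESS;
 * bit 0 evidently serves as a ready flag, so the loop polls until it is
 * set and then masks it off to recover the address.
 */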
1557#ifdef QCA_WIFI_3_0
1558 i = 0;
1559 while (i < HIF_HIA_MAX_POLL_LOOP) {
1560 host_interest_area = hif_read32_mb(scn->mem +
1561 A_SOC_CORE_SCRATCH_0_ADDRESS);
1562 if ((host_interest_area & 0x01) == 0) {
1563 cdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
1564 host_interest_area = 0;
1565 i++;
1566 if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0)) {
1567 HIF_ERROR("%s: poll timeout(%d)", __func__, i);
1568 }
1569 } else {
1570 host_interest_area &= (~0x01);
1571 hif_write32_mb(scn->mem + 0x113014, 0);
1572 break;
1573 }
1574 }
1575
1576 if (i >= HIF_HIA_MAX_POLL_LOOP) {
1577 HIF_ERROR("%s: hia polling timeout", __func__);
1578 return -EIO;
1579 }
1580
1581 if (host_interest_area == 0) {
1582 HIF_ERROR("%s: host_interest_area = 0", __func__);
1583 return -EIO;
1584 }
1585
1586 interconnect_targ_addr = host_interest_area +
1587 offsetof(struct host_interest_area_t,
1588 hi_interconnect_state);
1589
1590 flag2_targ_addr = host_interest_area +
1591 offsetof(struct host_interest_area_t, hi_option_flag2);
1592
1593#else
1594 interconnect_targ_addr = hif_hia_item_address(scn->target_type,
1595 offsetof(struct host_interest_s, hi_interconnect_state));
1596 ealloc_targ_addr = hif_hia_item_address(scn->target_type,
1597 offsetof(struct host_interest_s, hi_early_alloc));
1598 flag2_targ_addr = hif_hia_item_address(scn->target_type,
1599 offsetof(struct host_interest_s, hi_option_flag2));
1600#endif
1601 /* Supply Target-side CE configuration */
1602 rv = hif_diag_read_access(scn, interconnect_targ_addr,
1603 &pcie_state_targ_addr);
1604 if (rv != CDF_STATUS_SUCCESS) {
1605 HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
1606 __func__, interconnect_targ_addr, rv);
1607 goto done;
1608 }
1609 if (pcie_state_targ_addr == 0) {
1610 rv = CDF_STATUS_E_FAILURE;
1611 HIF_ERROR("%s: pcie state addr is 0", __func__);
1612 goto done;
1613 }
1614 pipe_cfg_addr = pcie_state_targ_addr +
1615 offsetof(struct pcie_state_s,
1616 pipe_cfg_addr);
1617 rv = hif_diag_read_access(scn,
1618 pipe_cfg_addr,
1619 &pipe_cfg_targ_addr);
1620 if (rv != CDF_STATUS_SUCCESS) {
1621 HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
1622 __func__, pipe_cfg_addr, rv);
1623 goto done;
1624 }
1625 if (pipe_cfg_targ_addr == 0) {
1626 rv = CDF_STATUS_E_FAILURE;
1627 HIF_ERROR("%s: pipe cfg addr is 0", __func__);
1628 goto done;
1629 }
1630
1631 rv = hif_diag_write_mem(scn, pipe_cfg_targ_addr,
1632 (uint8_t *) target_ce_config,
1633 target_ce_config_sz);
1634
1635 if (rv != CDF_STATUS_SUCCESS) {
1636 HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
1637 goto done;
1638 }
1639
1640 rv = hif_diag_read_access(scn,
1641 pcie_state_targ_addr +
1642 offsetof(struct pcie_state_s,
1643 svc_to_pipe_map),
1644 &svc_to_pipe_map);
1645 if (rv != CDF_STATUS_SUCCESS) {
1646 HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
1647 goto done;
1648 }
1649 if (svc_to_pipe_map == 0) {
1650 rv = CDF_STATUS_E_FAILURE;
1651 HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
1652 goto done;
1653 }
1654
1655 rv = hif_diag_write_mem(scn,
1656 svc_to_pipe_map,
1657 (uint8_t *) target_service_to_ce_map,
1658 target_service_to_ce_map_sz);
1659 if (rv != CDF_STATUS_SUCCESS) {
1660 HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
1661 goto done;
1662 }
1663
1664 rv = hif_diag_read_access(scn,
1665 pcie_state_targ_addr +
1666 offsetof(struct pcie_state_s,
1667 config_flags),
1668 &pcie_config_flags);
1669 if (rv != CDF_STATUS_SUCCESS) {
1670 HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
1671 goto done;
1672 }
1673#if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
1674 pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
1675#else
1676 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1677#endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
1678 pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
1679#if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
1680 pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
1681#endif
1682 rv = hif_diag_write_mem(scn,
1683 pcie_state_targ_addr +
1684 offsetof(struct pcie_state_s,
1685 config_flags),
1686 (uint8_t *) &pcie_config_flags,
1687 sizeof(pcie_config_flags));
1688 if (rv != CDF_STATUS_SUCCESS) {
1689 HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
1690 goto done;
1691 }
1692
1693#ifndef QCA_WIFI_3_0
1694 /* configure early allocation */
1695 ealloc_targ_addr = hif_hia_item_address(scn->target_type,
1696 offsetof(
1697 struct host_interest_s,
1698 hi_early_alloc));
1699
1700 rv = hif_diag_read_access(scn, ealloc_targ_addr,
1701 &ealloc_value);
1702 if (rv != CDF_STATUS_SUCCESS) {
1703 HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
1704 goto done;
1705 }
1706
1707	/* Set early alloc magic; IRAM bank count is set below per chip revision */
1708 ealloc_value |=
1709 ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1710 HI_EARLY_ALLOC_MAGIC_MASK);
1711
1712 rv = hif_diag_read_access(scn,
1713 CHIP_ID_ADDRESS |
1714 RTC_SOC_BASE_ADDRESS, &chip_id);
1715 if (rv != CDF_STATUS_SUCCESS) {
1716 HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
1717 goto done;
1718 }
1719 if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
1720 scn->target_revision =
1721 CHIP_ID_REVISION_GET(chip_id);
1722 switch (CHIP_ID_REVISION_GET(chip_id)) {
1723 case 0x2: /* ROME 1.3 */
1724 /* 2 banks are switched to IRAM */
1725 banks_switched = 2;
1726 break;
1727 case 0x4: /* ROME 2.1 */
1728 case 0x5: /* ROME 2.2 */
1729 banks_switched = 6;
1730 break;
1731 case 0x8: /* ROME 3.0 */
1732 case 0x9: /* ROME 3.1 */
1733 case 0xA: /* ROME 3.2 */
1734 banks_switched = 9;
1735 break;
1736 case 0x0: /* ROME 1.0 */
1737 case 0x1: /* ROME 1.1 */
1738 default:
1739 /* 3 banks are switched to IRAM */
1740 banks_switched = 3;
1741 break;
1742 }
1743 }
1744
1745 ealloc_value |=
1746 ((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
1747 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1748
1749 rv = hif_diag_write_access(scn,
1750 ealloc_targ_addr,
1751 ealloc_value);
1752 if (rv != CDF_STATUS_SUCCESS) {
1753 HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
1754 goto done;
1755 }
1756#endif
1757
1758 /* Tell Target to proceed with initialization */
1759 flag2_targ_addr = hif_hia_item_address(scn->target_type,
1760 offsetof(
1761 struct host_interest_s,
1762 hi_option_flag2));
1763
1764 rv = hif_diag_read_access(scn, flag2_targ_addr,
1765 &flag2_value);
1766 if (rv != CDF_STATUS_SUCCESS) {
1767 HIF_ERROR("%s: get option val (%d)", __func__, rv);
1768 goto done;
1769 }
1770
1771 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1772 rv = hif_diag_write_access(scn, flag2_targ_addr,
1773 flag2_value);
1774 if (rv != CDF_STATUS_SUCCESS) {
1775 HIF_ERROR("%s: set option val (%d)", __func__, rv);
1776 goto done;
1777 }
1778
1779 hif_wake_target_cpu(scn);
1780
1781done:
1782
1783 return rv;
1784}
1785#endif
1786
1787/**
1788 * hif_wlan_enable(): call the platform driver to enable wlan
1789 *
1790 * This function passes the con_mode and the CE configuration to the
1791 * platform driver so that it can enable wlan.
1792 *
1793 * Return: 0 on success, a non-zero error code otherwise
1794 */
1795static int hif_wlan_enable(void)
1796{
1797 struct icnss_wlan_enable_cfg cfg;
1798 enum icnss_driver_mode mode;
1799 uint32_t con_mode = cds_get_conparam();
1800
1801 cfg.num_ce_tgt_cfg = target_ce_config_sz /
1802 sizeof(struct CE_pipe_config);
1803 cfg.ce_tgt_cfg = (struct ce_tgt_pipe_cfg *)target_ce_config;
1804 cfg.num_ce_svc_pipe_cfg = target_service_to_ce_map_sz /
1805 sizeof(struct service_to_pipe);
1806 cfg.ce_svc_cfg = (struct ce_svc_pipe_cfg *)target_service_to_ce_map;
1807 cfg.num_shadow_reg_cfg = shadow_cfg_sz / sizeof(struct shadow_reg_cfg);
1808 cfg.shadow_reg_cfg = (struct icnss_shadow_reg_cfg *) target_shadow_reg_cfg;
1809
Peng Xu7b962532015-10-02 17:17:03 -07001810 if (CDF_GLOBAL_FTM_MODE == con_mode)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001811 mode = ICNSS_FTM;
Peng Xu7b962532015-10-02 17:17:03 -07001812 else if (WLAN_IS_EPPING_ENABLED(cds_get_conparam()))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001813 mode = ICNSS_EPPING;
Peng Xu7b962532015-10-02 17:17:03 -07001814 else
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001815 mode = ICNSS_MISSION;
Peng Xu7b962532015-10-02 17:17:03 -07001816
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001817 return icnss_wlan_enable(&cfg, mode, QWLAN_VERSIONSTR);
1818}
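/*
 * Illustrative call sketch: this mirrors the use in hif_config_ce() below
 * and is not a new code path. hif_wlan_enable() has to succeed before any
 * copy engine is initialized:
 *
 *	ret = hif_wlan_enable();
 *	if (ret) {
 *		HIF_ERROR("%s: hif_wlan_enable error = %d", __func__, ret);
 *		return CDF_STATUS_NOT_INITIALIZED;
 *	}
 */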
1819
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001820/*
1821 * Called from PCI layer whenever a new PCI device is probed.
1822 * Initializes per-device HIF state and notifies the main
1823 * driver that a new HIF device is present.
1824 */
1825int hif_config_ce(hif_handle_t hif_hdl)
1826{
1827 struct HIF_CE_state *hif_state;
1828 struct HIF_CE_pipe_info *pipe_info;
1829 int pipe_num;
1830#ifdef ADRASTEA_SHADOW_REGISTERS
1831 int i;
1832#endif
1833 CDF_STATUS rv = CDF_STATUS_SUCCESS;
1834 int ret;
1835 struct ol_softc *scn = hif_hdl;
1836 struct icnss_soc_info soc_info;
1837
1838	/* If epping is enabled, use the epping CE configuration. */
1839 if (WLAN_IS_EPPING_ENABLED(cds_get_conparam())) {
1840 if (WLAN_IS_EPPING_IRQ(cds_get_conparam()))
1841 host_ce_config = host_ce_config_wlan_epping_irq;
1842 else
1843 host_ce_config = host_ce_config_wlan_epping_poll;
1844 target_ce_config = target_ce_config_wlan_epping;
1845 target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
1846 target_service_to_ce_map =
1847 target_service_to_ce_map_wlan_epping;
1848 target_service_to_ce_map_sz =
1849 sizeof(target_service_to_ce_map_wlan_epping);
1850 }
1851
1852 ret = hif_wlan_enable();
1853
1854 if (ret) {
1855 HIF_ERROR("%s: hif_wlan_enable error = %d", __func__, ret);
1856 return CDF_STATUS_NOT_INITIALIZED;
1857 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001858
1859 scn->notice_send = true;
1860
1861 cdf_mem_zero(&soc_info, sizeof(soc_info));
1862 ret = icnss_get_soc_info(&soc_info);
1863 if (ret < 0) {
1864 HIF_ERROR("%s: icnss_get_soc_info error = %d", __func__, ret);
1865 return CDF_STATUS_NOT_INITIALIZED;
1866 }
1867
1868 hif_state = (struct HIF_CE_state *)cdf_mem_malloc(sizeof(*hif_state));
1869 if (!hif_state) {
1870 return -ENOMEM;
1871 }
1872 cdf_mem_zero(hif_state, sizeof(*hif_state));
1873
1874 hif_state->scn = scn;
1875 scn->hif_hdl = hif_state;
1876 scn->mem = soc_info.v_addr;
1877 scn->mem_pa = soc_info.p_addr;
1878 scn->soc_version = soc_info.version;
1879
1880 cdf_spinlock_init(&hif_state->keep_awake_lock);
1881
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001882 hif_state->keep_awake_count = 0;
1883
1884 hif_state->fake_sleep = false;
1885 hif_state->sleep_ticks = 0;
1886 cdf_softirq_timer_init(NULL, &hif_state->sleep_timer,
1887 hif_sleep_entry, (void *)hif_state,
1888 CDF_TIMER_TYPE_WAKE_APPS);
1889 hif_state->sleep_timer_init = true;
1890 hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
1891#ifdef HIF_PCI
1892#if CONFIG_ATH_PCIE_MAX_PERF || CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD
1893 /* Force AWAKE forever/till the driver is loaded */
1894 if (hif_target_sleep_state_adjust(scn, false, true) < 0)
1895 return -EACCES;
1896#endif
1897#endif
1898
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08001899 hif_config_rri_on_ddr(scn);
1900
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001901	/* During CE initialization */
1902 scn->ce_count = HOST_CE_COUNT;
1903 A_TARGET_ACCESS_LIKELY(scn);
1904 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1905 struct CE_attr *attr;
1906
1907 pipe_info = &hif_state->pipe_info[pipe_num];
1908 pipe_info->pipe_num = pipe_num;
1909 pipe_info->HIF_CE_state = hif_state;
1910 attr = &host_ce_config[pipe_num];
1911 pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
1912 CDF_ASSERT(pipe_info->ce_hdl != NULL);
1913 if (pipe_info->ce_hdl == NULL) {
1914 rv = CDF_STATUS_E_FAILURE;
1915 A_TARGET_ACCESS_UNLIKELY(scn);
1916 goto err;
1917 }
1918
1919 if (pipe_num == DIAG_CE_ID) {
1920			/* Reserve the last CE for
1921			 * Diagnostic Window support */
1922 hif_state->ce_diag =
1923 hif_state->pipe_info[scn->ce_count - 1].ce_hdl;
1924 continue;
1925 }
1926
1927 pipe_info->buf_sz = (cdf_size_t) (attr->src_sz_max);
1928 cdf_spinlock_init(&pipe_info->recv_bufs_needed_lock);
1929 if (attr->dest_nentries > 0) {
1930 atomic_set(&pipe_info->recv_bufs_needed,
1931 init_buffer_count(attr->dest_nentries - 1));
1932 } else {
1933 atomic_set(&pipe_info->recv_bufs_needed, 0);
1934 }
1935 ce_tasklet_init(hif_state, (1 << pipe_num));
1936 ce_register_irq(hif_state, (1 << pipe_num));
1937 scn->request_irq_done = true;
1938 }
1939
1940 if (athdiag_procfs_init(scn) != 0) {
1941 A_TARGET_ACCESS_UNLIKELY(scn);
1942 goto err;
1943 }
1944 scn->athdiag_procfs_inited = true;
1945
1946 /*
1947 * Initially, establish CE completion handlers for use with BMI.
1948 * These are overwritten with generic handlers after we exit BMI phase.
1949 */
1950 pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_TARG];
1951#ifdef HIF_PCI
1952 ce_send_cb_register(
1953 pipe_info->ce_hdl, hif_bmi_send_done, pipe_info, 0);
1954#ifndef BMI_RSP_POLLING
1955 pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_HOST];
1956 ce_recv_cb_register(
1957 pipe_info->ce_hdl, hif_bmi_recv_data, pipe_info, 0);
1958#endif
1959#endif
1960 HIF_INFO_MED("%s: ce_init done", __func__);
1961
1962 rv = hif_set_hia(scn);
1963
1964 HIF_INFO_MED("%s: hif_set_hia done", __func__);
1965
1966 A_TARGET_ACCESS_UNLIKELY(scn);
1967
1968 if (rv != CDF_STATUS_SUCCESS)
1969 goto err;
1970 else
1971 init_tasklet_workers();
1972
1973 HIF_TRACE("%s: X, ret = %d\n", __func__, rv);
1974
1975#ifdef ADRASTEA_SHADOW_REGISTERS
1976 HIF_ERROR("Using Shadow Registers instead of CE Registers\n");
1977 for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
1978 HIF_ERROR("%s Shadow Register%d is mapped to address %x\n",
1979 __func__, i,
1980 (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
1981 }
1982#endif
1983
1984
1985	return rv != CDF_STATUS_SUCCESS; /* 0 == success here */
1986
1987err:
1988 /* Failure, so clean up */
1989 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1990 pipe_info = &hif_state->pipe_info[pipe_num];
1991 if (pipe_info->ce_hdl) {
1992 ce_unregister_irq(hif_state, (1 << pipe_num));
1993 scn->request_irq_done = false;
1994 ce_fini(pipe_info->ce_hdl);
1995 pipe_info->ce_hdl = NULL;
1996 pipe_info->buf_sz = 0;
1997 }
1998 }
1999 if (hif_state->sleep_timer_init) {
2000 cdf_softirq_timer_cancel(&hif_state->sleep_timer);
2001 cdf_softirq_timer_free(&hif_state->sleep_timer);
2002 hif_state->sleep_timer_init = false;
2003 }
2004 if (scn->hif_hdl) {
2005 scn->hif_hdl = NULL;
2006 cdf_mem_free(hif_state);
2007 }
2008 athdiag_procfs_remove();
2009 scn->athdiag_procfs_inited = false;
2010 HIF_TRACE("%s: X, ret = %d\n", __func__, rv);
2011	return CDF_STATUS_SUCCESS != CDF_STATUS_E_FAILURE; /* always non-zero: report failure */
2012}
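/*
 * Caller sketch (an assumption, not code from this file): the success path
 * above returns (rv != CDF_STATUS_SUCCESS), i.e. 0, and the error path
 * returns a non-zero value, so a bus-layer caller can treat the result as a
 * plain error flag:
 *
 *	if (hif_config_ce(hif_hdl))
 *		goto err_ce_config;	(err_ce_config is a hypothetical label)
 */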
2013
2014
2015
2016
2017
2018
2019#ifdef IPA_OFFLOAD
Leo Changd85f78d2015-11-13 10:55:34 -08002020/**
2021 * hif_ipa_get_ce_resource() - get uc resource on hif
2022 * @scn: bus context
2023 * @ce_sr_base_paddr: copyengine source ring base physical address
2024 * @ce_sr_ring_size: copyengine source ring size
2025 * @ce_reg_paddr: copyengine register physical address
2026 *
2027 * When the IPA micro controller data path offload feature is enabled,
2028 * HIF releases the copy engine resource information to the IPA UC, which
2029 * then accesses the hardware resources using that information.
2030 *
2031 * Return: None
2032 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002033void hif_ipa_get_ce_resource(struct ol_softc *scn,
Leo Changd85f78d2015-11-13 10:55:34 -08002034 cdf_dma_addr_t *ce_sr_base_paddr,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002035 uint32_t *ce_sr_ring_size,
2036 cdf_dma_addr_t *ce_reg_paddr)
2037{
2038 struct HIF_CE_state *hif_state = (struct HIF_CE_state *)scn->hif_hdl;
2039 struct HIF_CE_pipe_info *pipe_info =
2040 &(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
2041 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
2042
2043 ce_ipa_get_resource(ce_hdl, ce_sr_base_paddr, ce_sr_ring_size,
2044 ce_reg_paddr);
2045 return;
2046}
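/*
 * Illustrative usage sketch (an assumption; the IPA UC client code is not
 * part of this file). The IPA setup path is expected to pull the CE
 * resources roughly as follows:
 *
 *	cdf_dma_addr_t sr_base_paddr, reg_paddr;
 *	uint32_t sr_ring_size;
 *
 *	hif_ipa_get_ce_resource(scn, &sr_base_paddr, &sr_ring_size,
 *				&reg_paddr);
 */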
2047#endif /* IPA_OFFLOAD */
2048
2049
2050#ifdef ADRASTEA_SHADOW_REGISTERS
2051
2052/*
2053 Current shadow register config
2054
2055 -----------------------------------------------------------
2056 Shadow Register | CE | src/dst write index
2057 -----------------------------------------------------------
2058 0 | 0 | src
2059 1 No Config - Doesn't point to anything
2060 2 No Config - Doesn't point to anything
2061 3 | 3 | src
2062 4 | 4 | src
2063 5 | 5 | src
2064 6 No Config - Doesn't point to anything
2065 7 | 7 | src
2066 8 No Config - Doesn't point to anything
2067 9 No Config - Doesn't point to anything
2068 10 No Config - Doesn't point to anything
2069 11 No Config - Doesn't point to anything
2070 -----------------------------------------------------------
2071 12 No Config - Doesn't point to anything
2072 13 | 1 | dst
2073 14 | 2 | dst
2074 15 No Config - Doesn't point to anything
2075 16 No Config - Doesn't point to anything
2076 17 No Config - Doesn't point to anything
2077 18 No Config - Doesn't point to anything
2078 19 | 7 | dst
2079 20 | 8 | dst
2080 21 No Config - Doesn't point to anything
2081 22 No Config - Doesn't point to anything
2082 23 No Config - Doesn't point to anything
2083 -----------------------------------------------------------
2084
2085
2086 ToDo - Move shadow register config to following in the future
2087 This helps free up a block of shadow registers towards the end.
2088 Can be used for other purposes
2089
2090 -----------------------------------------------------------
2091 Shadow Register | CE | src/dst write index
2092 -----------------------------------------------------------
2093 0 | 0 | src
2094 1 | 3 | src
2095 2 | 4 | src
2096 3 | 5 | src
2097 4 | 7 | src
2098 -----------------------------------------------------------
2099 5 | 1 | dst
2100 6 | 2 | dst
2101 7 | 7 | dst
2102 8 | 8 | dst
2103 -----------------------------------------------------------
2104 9 No Config - Doesn't point to anything
2105 12 No Config - Doesn't point to anything
2106 13 No Config - Doesn't point to anything
2107 14 No Config - Doesn't point to anything
2108 15 No Config - Doesn't point to anything
2109 16 No Config - Doesn't point to anything
2110 17 No Config - Doesn't point to anything
2111 18 No Config - Doesn't point to anything
2112 19 No Config - Doesn't point to anything
2113 20 No Config - Doesn't point to anything
2114 21 No Config - Doesn't point to anything
2115 22 No Config - Doesn't point to anything
2116 23 No Config - Doesn't point to anything
2117 -----------------------------------------------------------
2118*/
2119
2120u32 shadow_sr_wr_ind_addr(struct ol_softc *scn, u32 ctrl_addr)
2121{
2122 u32 addr = 0;
2123
2124 switch (COPY_ENGINE_ID(ctrl_addr)) {
2125 case 0:
2126 addr = SHADOW_VALUE0;
2127 break;
2128 case 3:
2129 addr = SHADOW_VALUE3;
2130 break;
2131 case 4:
2132 addr = SHADOW_VALUE4;
2133 break;
2134 case 5:
2135 addr = SHADOW_VALUE5;
2136 break;
2137 case 7:
2138 addr = SHADOW_VALUE7;
2139 break;
2140 default:
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002141 HIF_ERROR("invalid CE ctrl_addr\n");
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002142 CDF_ASSERT(0);
2143
2144 }
2145 return addr;
2146
2147}
2148
2149u32 shadow_dst_wr_ind_addr(struct ol_softc *scn, u32 ctrl_addr)
2150{
2151 u32 addr = 0;
2152
2153 switch (COPY_ENGINE_ID(ctrl_addr)) {
2154 case 1:
2155 addr = SHADOW_VALUE13;
2156 break;
2157 case 2:
2158 addr = SHADOW_VALUE14;
2159 break;
2160 case 7:
2161 addr = SHADOW_VALUE19;
2162 break;
2163 case 8:
2164 addr = SHADOW_VALUE20;
2165 break;
2166 default:
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002167 HIF_ERROR("invalid CE ctrl_addr\n");
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002168 CDF_ASSERT(0);
2169 }
2170
2171 return addr;
2172
2173}
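/*
 * Sketch of how these helpers are meant to be used (an assumption derived
 * from the table above, not code from this file): with
 * ADRASTEA_SHADOW_REGISTERS defined, write-index updates go through the
 * shadow address rather than the CE register block, e.g. with the usual
 * A_TARGET_WRITE() accessor:
 *
 *	A_TARGET_WRITE(scn, shadow_sr_wr_ind_addr(scn, ctrl_addr),
 *		       write_index);
 *	A_TARGET_WRITE(scn, shadow_dst_wr_ind_addr(scn, ctrl_addr),
 *		       write_index);
 */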
2174#endif
2175
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002176#if defined(FEATURE_LRO)
2177/**
2178 * ce_lro_flush_cb_register() - register the LRO flush
2179 * callback
2180 * @scn: HIF context
2181 * @handler: callback function
2182 * @data: opaque data pointer to be passed back
2183 *
2184 * Store the LRO flush callback provided
2185 *
2186 * Return: none
2187 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002188void ce_lro_flush_cb_register(struct ol_softc *scn,
2189 void (handler)(void *), void *data)
2190{
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002191 uint8_t ul, dl;
2192 int ul_polled, dl_polled;
2193
2194 CDF_ASSERT(scn != NULL);
2195
2196 if (CDF_STATUS_SUCCESS !=
2197 hif_map_service_to_pipe(scn, HTT_DATA_MSG_SVC,
2198 &ul, &dl, &ul_polled, &dl_polled)) {
2199		HIF_ERROR("%s: cannot map service to pipe", __func__);
2200 return;
2201 } else {
2202 struct CE_state *ce_state;
2203 ce_state = scn->ce_id_to_state[dl];
2204 ce_state->lro_flush_cb = handler;
2205 ce_state->lro_data = data;
2206 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002207}
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002208
2209/**
2210 * ce_lro_flush_cb_deregister() - deregister the LRO flush
2211 * callback
2212 * @scn: HIF context
2213 *
2214 * Remove the LRO flush callback
2215 *
2216 * Return: none
2217 */
2218void ce_lro_flush_cb_deregister(struct ol_softc *scn)
2219{
2220 uint8_t ul, dl;
2221 int ul_polled, dl_polled;
2222
2223 CDF_ASSERT(scn != NULL);
2224
2225 if (CDF_STATUS_SUCCESS !=
2226 hif_map_service_to_pipe(scn, HTT_DATA_MSG_SVC,
2227 &ul, &dl, &ul_polled, &dl_polled)) {
2228		HIF_ERROR("%s: cannot map service to pipe", __func__);
2229 return;
2230 } else {
2231 struct CE_state *ce_state;
2232 ce_state = scn->ce_id_to_state[dl];
2233 ce_state->lro_flush_cb = NULL;
2234 ce_state->lro_data = NULL;
2235 }
2236}
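/*
 * Illustrative pairing sketch (an assumption; the LRO client lives outside
 * this file): the receive path registers its flush handler once the HTT
 * pipe is known and removes it before teardown. "my_lro_flush" and
 * "my_lro_ctx" are hypothetical names used only for the example:
 *
 *	ce_lro_flush_cb_register(scn, my_lro_flush, my_lro_ctx);
 *	...
 *	ce_lro_flush_cb_deregister(scn);
 */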
2237#endif
Sanjay Devnanic319c822015-11-06 16:44:28 -08002238
2239/**
2240 * hif_map_service_to_pipe() - returns the ce ids pertaining to
2241 * this service
2242 * @scn: ol_softc pointer.
2243 * @svc_id: Service ID for which the mapping is needed.
2244 * @ul_pipe: address of the container in which ul pipe is returned.
2245 * @dl_pipe: address of the container in which dl pipe is returned.
2246 * @ul_is_polled: address of the container in which a bool
2247 * indicating if the UL CE for this service
2248 * is polled is returned.
2249 * @dl_is_polled: address of the container in which a bool
2250 * indicating if the DL CE for this service
2251 * is polled is returned.
2252 *
2253 * Return: Indicates whether this operation was successful.
2254 */
2255
2256int hif_map_service_to_pipe(struct ol_softc *scn, uint16_t svc_id,
2257 uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
2258 int *dl_is_polled)
2259{
2260 int status = CDF_STATUS_SUCCESS;
2261
2262 unsigned int i;
2263 struct service_to_pipe element;
2264
2265 struct service_to_pipe *tgt_svc_map_to_use;
2266 size_t sz_tgt_svc_map_to_use;
2267
2268 if (WLAN_IS_EPPING_ENABLED(cds_get_conparam())) {
2269 tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
2270 sz_tgt_svc_map_to_use =
2271 sizeof(target_service_to_ce_map_wlan_epping);
2272 } else {
2273 tgt_svc_map_to_use = target_service_to_ce_map_wlan;
2274 sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_wlan);
2275 }
2276
2277 *dl_is_polled = 0; /* polling for received messages not supported */
2278
2279 for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
2280
2281 memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
2282 if (element.service_id == svc_id) {
2283
2284 if (element.pipedir == PIPEDIR_OUT)
2285 *ul_pipe = element.pipenum;
2286
2287 else if (element.pipedir == PIPEDIR_IN)
2288 *dl_pipe = element.pipenum;
2289 }
2290 }
2291
2292 *ul_is_polled =
2293 (host_ce_config[*ul_pipe].flags & CE_ATTR_DISABLE_INTR) != 0;
2294
2295 return status;
2296}
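/*
 * Usage sketch, mirroring the call in ce_lro_flush_cb_register() above:
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	if (hif_map_service_to_pipe(scn, HTT_DATA_MSG_SVC, &ul_pipe,
 *				    &dl_pipe, &ul_polled,
 *				    &dl_polled) != CDF_STATUS_SUCCESS)
 *		return;
 */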
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002297
2298#ifdef SHADOW_REG_DEBUG
2299inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct ol_softc *scn,
2300 uint32_t CE_ctrl_addr)
2301{
2302 uint32_t read_from_hw, srri_from_ddr = 0;
2303
2304 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
2305
2306 srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
2307
2308 if (read_from_hw != srri_from_ddr) {
2309 HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x\n",
2310 srri_from_ddr, read_from_hw,
2311 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
2312 CDF_ASSERT(0);
2313 }
2314 return srri_from_ddr;
2315}
2316
2317
2318inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct ol_softc *scn,
2319 uint32_t CE_ctrl_addr)
2320{
2321 uint32_t read_from_hw, drri_from_ddr = 0;
2322
2323 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
2324
2325 drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
2326
2327 if (read_from_hw != drri_from_ddr) {
2328 HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x\n",
2329 drri_from_ddr, read_from_hw,
2330 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
2331 CDF_ASSERT(0);
2332 }
2333 return drri_from_ddr;
2334}
2335
2336#endif
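/*
 * Hookup sketch (an assumption; the actual mapping lives in ce_reg.h): with
 * SHADOW_REG_DEBUG defined, the regular read-index macros are expected to
 * resolve to the DEBUG_ variants above so that every index read
 * cross-checks the DDR copy against the hardware register, e.g.
 *
 *	#define CE_SRC_RING_READ_IDX_GET(scn, CE_ctrl_addr) \
 *		DEBUG_CE_SRC_RING_READ_IDX_GET(scn, CE_ctrl_addr)
 */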
2337
Houston Hoffman3d0cda82015-12-03 13:25:05 -08002338#ifdef ADRASTEA_RRI_ON_DDR
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002339/**
2340 * hif_get_src_ring_read_index(): Called to get the SRRI
2341 *
2342 * @scn: ol_softc pointer
2343 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2344 *
2345 * This function returns the SRRI to the caller. For CEs that
2346 * don't have interrupts enabled, we look at the DDR-based SRRI.
2347 *
2348 * Return: SRRI
2349 */
2350inline unsigned int hif_get_src_ring_read_index(struct ol_softc *scn,
2351 uint32_t CE_ctrl_addr)
2352{
2353 struct CE_attr attr;
2354
2355 attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2356 if (attr.flags & CE_ATTR_DISABLE_INTR)
2357 return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2358 else
2359 return A_TARGET_READ(scn,
2360 (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
2361}
2362
2363/**
2364 * hif_get_dst_ring_read_index(): Called to get the DRRI
2365 *
2366 * @scn: ol_softc pointer
2367 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2368 *
2369 * This function returns the DRRI to the caller. For CEs that
2370 * don't have interrupts enabled, we look at the DDR-based DRRI.
2371 *
2372 * Return: DRRI
2373 */
2374inline unsigned int hif_get_dst_ring_read_index(struct ol_softc *scn,
2375 uint32_t CE_ctrl_addr)
2376{
2377 struct CE_attr attr;
2378
2379 attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2380
2381 if (attr.flags & CE_ATTR_DISABLE_INTR)
2382 return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2383 else
2384 return A_TARGET_READ(scn,
2385 (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
2386}
2387
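/*
 * Read sketch (an assumption, not code from this file): a CE service
 * routine can fetch the current read indices for copy engine "ce_id" with
 *
 *	sw_index = hif_get_src_ring_read_index(scn, CE_BASE_ADDRESS(ce_id));
 *	hw_index = hif_get_dst_ring_read_index(scn, CE_BASE_ADDRESS(ce_id));
 *
 * which transparently picks the DDR copy for interrupt-disabled CEs.
 */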
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002388/**
2389 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2390 *
2391 * @scn: ol_softc pointer
2392 *
2393 * This function allocates non-cached memory in DDR and sends
2394 * the physical address of this memory to the CE hardware. The
2395 * hardware then updates the RRI at this location.
2396 *
2397 * Return: None
2398 */
2399static inline void hif_config_rri_on_ddr(struct ol_softc *scn)
2400{
2401 unsigned int i;
2402 cdf_dma_addr_t paddr_rri_on_ddr;
2403 uint32_t high_paddr, low_paddr;
2404 scn->vaddr_rri_on_ddr =
2405 (uint32_t *)cdf_os_mem_alloc_consistent(scn->cdf_dev,
2406 (CE_COUNT*sizeof(uint32_t)), &paddr_rri_on_ddr, 0);
2407
2408 low_paddr = BITS0_TO_31(paddr_rri_on_ddr);
2409 high_paddr = BITS32_TO_35(paddr_rri_on_ddr);
2410
2411 HIF_ERROR("%s using srri and drri from DDR\n", __func__);
2412
2413 WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
2414 WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
2415
2416 for (i = 0; i < CE_COUNT; i++)
2417 CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
2418
2419 cdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT*sizeof(uint32_t));
2420
2421 return;
2422}
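/*
 * Note on how this memory is consumed (see the SHADOW_REG_DEBUG helpers
 * above): once CE_IDX_UPD_EN_SET() is enabled, the hardware keeps the
 * per-CE read indices current in scn->vaddr_rri_on_ddr, and the host reads
 * them back with, for example,
 *
 *	srri = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
 */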
2423#else
2424
2425/**
2426 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2427 *
2428 * @scn: ol_softc pointer
2429 *
2430 * This is a dummy implementation for platforms that don't
2431 * support this functionality.
2432 *
2433 * Return: None
2434 */
2435static inline void hif_config_rri_on_ddr(struct ol_softc *scn)
2436{
2437 return;
2438}
2439#endif