/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */
#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#ifndef CONFIG_WIN
#include "qwlan_version.h"
#endif

#define CE_POLL_TIMEOUT 10      /* ms */

#define AGC_DUMP         1
#define CHANINFO_DUMP    2
#define BB_WATCHDOG_DUMP 3
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
#define PCIE_ACCESS_DUMP 4
#endif
#include "mp_dev.h"

/* Forward references */
static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Workaround for EV118783: poll to check whether a BMI response has
 * arrived, rather than relying solely on an interrupt that may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC 1000

#ifdef CONFIG_BYPASS_QMI
#define BYPASS_QMI 1
#else
#define BYPASS_QMI 0
#endif

#ifdef CONFIG_WIN
#define WDI_IPA_SERVICE_GROUP 5
#define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
#define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
#define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
#endif

static int hif_post_recv_buffers(struct hif_softc *scn);
static void hif_config_rri_on_ddr(struct hif_softc *scn);

/**
 * hif_target_access_log_dump() - dump the target register access log
 *
 * Return: n/a
 */
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
static void hif_target_access_log_dump(void)
{
	hif_target_dump_access_log();
}
#endif


void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
		      uint8_t cmd_id, bool start)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	switch (cmd_id) {
	case AGC_DUMP:
		if (start)
			priv_start_agc(scn);
		else
			priv_dump_agc(scn);
		break;
	case CHANINFO_DUMP:
		if (start)
			priv_start_cap_chaninfo(scn);
		else
			priv_dump_chaninfo(scn);
		break;
	case BB_WATCHDOG_DUMP:
		priv_dump_bbwatchdog(scn);
		break;
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	case PCIE_ACCESS_DUMP:
		hif_target_access_log_dump();
		break;
#endif
	default:
		HIF_ERROR("%s: Invalid htc dump command", __func__);
		break;
	}
}

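/*
 * Illustrative sketch (not part of the driver): how a caller might use
 * hif_trigger_dump() to capture channel info around a traffic window.
 * The example function name is hypothetical; hif_ctx is assumed to be a
 * valid opaque HIF handle.
 */
#if 0
static void example_chaninfo_capture(struct hif_opaque_softc *hif_ctx)
{
	hif_trigger_dump(hif_ctx, CHANINFO_DUMP, true);  /* start capture */
	/* ... traffic runs here ... */
	hif_trigger_dump(hif_ctx, CHANINFO_DUMP, false); /* dump results */
}
#endif
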
static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;

	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}

static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	QDF_ASSERT(0); /* n too large */
	return 0;
}

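/*
 * Illustrative sketch (not part of the driver): expected results of
 * roundup_pwr2() for a few sample inputs, expressed as assertions. The
 * example function name is hypothetical.
 */
#if 0
static void example_roundup_pwr2(void)
{
	QDF_ASSERT(roundup_pwr2(1) == 1);	/* already a power of 2 */
	QDF_ASSERT(roundup_pwr2(5) == 8);
	QDF_ASSERT(roundup_pwr2(64) == 64);
	QDF_ASSERT(roundup_pwr2(100) == 128);
}
#endif
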
#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40

static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
#ifdef QCA_WIFI_3_0_ADRASTEA
	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
#endif
};

static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};

/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
 * mapping of HTC services to HIF pipes.
 */
/*
 * This authoritative table defines Copy Engine configuration and the mapping
 * of services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 * target_service_to_ce_map - Target-side mapping
 * hif_map_service_to_pipe  - Host-side mapping
 * target_ce_config         - Target-side configuration
 * host_ce_config           - Host-side configuration
 ============================================================================
 Purpose     | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
             |                      |      | ctio | Size     | Frequency
             |                      |      | n    |          |
 ============================================================================
 tx          | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
 descriptor  |                      |      |      | O(100B)  | and regular
 download    |                      |      |      |          |
 ----------------------------------------------------------------------------
 rx          | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
 indication  |                      |      |      | O(10B)   | regular
 upload      |                      |      |      |          |
 ----------------------------------------------------------------------------
 MSDU        | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
 upload      |                      |      |      | O(1000B) | (frequent
 e.g. noise  |                      |      |      |          | during IP1.0
 packets     |                      |      |      |          | testing)
 ----------------------------------------------------------------------------
 MSDU        | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
 download    |                      |      |      | O(1000B) | (frequent
 e.g.        |                      |      |      |          | during IP1.0
 misdirected |                      |      |      |          | testing)
 EAPOL       |                      |      |      |          |
 packets     |                      |      |      |          |
 ----------------------------------------------------------------------------
 n/a         | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
             | DATA_VO (uplink)     |      |      |          |
 ----------------------------------------------------------------------------
 n/a         | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
             | DATA_VO (downlink)   |      |      |          |
 ----------------------------------------------------------------------------
 WMI events  | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
             |                      |      |      | O(100B)  |
 ----------------------------------------------------------------------------
 WMI         | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
 messages    | (downlink)           |      |      | O(100B)  |
             |                      |      |      |          |
 ----------------------------------------------------------------------------
 n/a         | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
             | HTC_RAW_STREAMS      |      |      |          |
             | (uplink)             |      |      |          |
 ----------------------------------------------------------------------------
 n/a         | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
             | HTC_RAW_STREAMS      |      |      |          |
             | (downlink)           |      |      |          |
 ----------------------------------------------------------------------------
 diag        | none (raw CE)        | CE 7 | t<>h | 4        | Diag Window
             |                      |      |      |          | infrequent
 ============================================================================
 */

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		0,		/* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,	/* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		5,
	},
#if defined(QCA_WIFI_3_0_ADRASTEA)
	{
		HTT_DATA2_MSG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		9,
	},
	{
		HTT_DATA3_MSG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		10,
	},
	{
		PACKET_LOG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		11,
	},
#endif
	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		0,		/* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		1,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,	/* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,	/* in = DL = target -> host */
		1,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		4,
	},
#if WLAN_FEATURE_FASTPATH
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		5,
	},
#else /* WLAN_FEATURE_FASTPATH */
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		1,
	},
#endif /* WLAN_FEATURE_FASTPATH */

	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};


static struct service_to_pipe *target_service_to_ce_map =
	target_service_to_ce_map_wlan;
static int target_service_to_ce_map_sz = sizeof(target_service_to_ce_map_wlan);

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},	/* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},	/* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},	/* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},	/* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},	/* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},	/* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},	/* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},	/* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},	/* in = DL = target -> host */
	{0, 0, 0,},		/* Must be last */
};

/**
 * ce_mark_datapath() - mark a CE that serves the HTT datapath
 * @ce_state: pointer to the state context of the CE
 *
 * Description:
 * Sets the htt_rx_data or htt_tx_data attribute of the state structure
 * if the CE serves one of the HTT DATA services.
 *
 * Return:
 * true if the CE serves an HTT DATA service, false otherwise
 */
bool ce_mark_datapath(struct CE_state *ce_state)
{
	struct service_to_pipe *svc_map;
	size_t map_sz;
	int i;
	bool rc = false;
	struct hif_opaque_softc *hif_hdl;
	struct hif_target_info *tgt_info;

	/* check for NULL before dereferencing ce_state->scn */
	if (ce_state == NULL)
		return rc;

	hif_hdl = GET_HIF_OPAQUE_HDL(ce_state->scn);
	tgt_info = hif_get_target_info_handle(hif_hdl);

	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(ce_state->scn))) {
		svc_map = target_service_to_ce_map_wlan_epping;
		map_sz = sizeof(target_service_to_ce_map_wlan_epping) /
			sizeof(struct service_to_pipe);
	} else {
		switch (tgt_info->target_type) {
		default:
			svc_map = target_service_to_ce_map_wlan;
			map_sz = sizeof(target_service_to_ce_map_wlan) /
				sizeof(struct service_to_pipe);
			break;
		case TARGET_TYPE_AR900B:
		case TARGET_TYPE_QCA9984:
		case TARGET_TYPE_IPQ4019:
		case TARGET_TYPE_QCA9888:
		case TARGET_TYPE_AR9888:
		case TARGET_TYPE_AR9888V2:
			svc_map = target_service_to_ce_map_ar900b;
			map_sz = sizeof(target_service_to_ce_map_ar900b) /
				sizeof(struct service_to_pipe);
			break;
		}
	}
	for (i = 0; i < map_sz; i++) {
		if ((svc_map[i].pipenum == ce_state->id) &&
		    ((svc_map[i].service_id == HTT_DATA_MSG_SVC) ||
		     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
		     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
			/* HTT CEs are unidirectional */
			if (svc_map[i].pipedir == PIPEDIR_IN)
				ce_state->htt_rx_data = true;
			else
				ce_state->htt_tx_data = true;
			rc = true;
		}
	}
	return rc;
}

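/*
 * Illustrative sketch (not part of the driver): typical use of
 * ce_mark_datapath() when a CE is brought up, followed by a branch on
 * the attributes it sets. The example function name is hypothetical;
 * ce_state is assumed valid.
 */
#if 0
static void example_mark_and_check(struct CE_state *ce_state)
{
	if (ce_mark_datapath(ce_state)) {
		if (ce_state->htt_rx_data)
			HIF_INFO("CE %d serves HTT rx data", ce_state->id);
		if (ce_state->htt_tx_data)
			HIF_INFO("CE %d serves HTT tx data", ce_state->id);
	}
}
#endif
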
/**
 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
 * @ce_id: ce in question
 * @ring: ring state being examined
 * @type: "src_ring" or "dest_ring" string for identifying the ring
 *
 * Warns on non-zero index values.
 * Causes a kernel panic if the ring is not empty during initialization.
 */
static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
					 char *type)
{
	if (ring->write_index != 0 || ring->sw_index != 0)
		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index = %d",
			  ce_id, type, ring->sw_index, ring->write_index);
	if (ring->write_index != ring->sw_index)
		QDF_BUG(0);
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 *
 * This should be called during the initialization sequence before
 * interrupts are enabled, so we don't have to worry about thread safety.
 */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr)
{
	struct CE_state *CE_state;
	uint32_t ctrl_addr;
	unsigned int nentries;
	qdf_dma_addr_t base_addr;
	bool malloc_CE_state = false;
	bool malloc_src_ring = false;

	QDF_ASSERT(CE_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(CE_id);
	CE_state = scn->ce_id_to_state[CE_id];

	if (!CE_state) {
		CE_state =
		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
		if (!CE_state) {
			HIF_ERROR("%s: CE_state has no mem", __func__);
			return NULL;
		}
		malloc_CE_state = true;
		qdf_mem_zero(CE_state, sizeof(*CE_state));
		scn->ce_id_to_state[CE_id] = CE_state;
		qdf_spinlock_create(&CE_state->ce_index_lock);

		CE_state->id = CE_id;
		CE_state->ctrl_addr = ctrl_addr;
		CE_state->state = CE_RUNNING;
		CE_state->attr_flags = attr->flags;
	}
	CE_state->scn = scn;

	qdf_atomic_init(&CE_state->rx_pending);
	if (attr == NULL) {
		/* Already initialized; caller wants the handle */
		return (struct CE_handle *)CE_state;
	}

	if (CE_state->src_sz_max)
		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
	else
		CE_state->src_sz_max = attr->src_sz_max;

	ce_init_ce_desc_event_log(CE_id,
				  attr->src_nentries + attr->dest_nentries);

	/* source ring setup */
	nentries = attr->src_nentries;
	if (nentries) {
		struct CE_ring_state *src_ring;
		unsigned CE_nbytes;
		char *ptr;
		uint64_t dma_addr;

		nentries = roundup_pwr2(nentries);
		if (CE_state->src_ring) {
			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
		} else {
			CE_nbytes = sizeof(struct CE_ring_state)
				    + (nentries * sizeof(void *));
			ptr = qdf_mem_malloc(CE_nbytes);
			if (!ptr) {
				/* cannot allocate src ring. If the
				 * CE_state is allocated locally free
				 * CE_State and return error.
				 */
				HIF_ERROR("%s: src ring has no mem", __func__);
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					scn->ce_id_to_state[CE_id] = NULL;
					qdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			} else {
				/* we can allocate src ring.
				 * Mark that the src ring is
				 * allocated locally
				 */
				malloc_src_ring = true;
			}
			qdf_mem_zero(ptr, CE_nbytes);

			src_ring = CE_state->src_ring =
					   (struct CE_ring_state *)ptr;
			ptr += sizeof(struct CE_ring_state);
			src_ring->nentries = nentries;
			src_ring->nentries_mask = nentries - 1;
			if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
				goto error_target_access;
			src_ring->hw_index =
				CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn,
						ctrl_addr);
			src_ring->sw_index = src_ring->hw_index;
			src_ring->write_index =
				CE_SRC_RING_WRITE_IDX_GET_FROM_REGISTER(scn,
						ctrl_addr);

			ce_ring_test_initial_indexes(CE_id, src_ring,
						     "src_ring");

			if (Q_TARGET_ACCESS_END(scn) < 0)
				goto error_target_access;

			src_ring->low_water_mark_nentries = 0;
			src_ring->high_water_mark_nentries = nentries;
			src_ring->per_transfer_context = (void **)ptr;

			/* Legacy platforms that do not support cache
			 * coherent DMA are unsupported
			 */
			src_ring->base_addr_owner_space_unaligned =
				qdf_mem_alloc_consistent(scn->qdf_dev,
						scn->qdf_dev->dev,
						(nentries *
						sizeof(struct CE_src_desc) +
						CE_DESC_RING_ALIGN),
						&base_addr);
			if (src_ring->base_addr_owner_space_unaligned
					== NULL) {
				HIF_ERROR("%s: src ring has no DMA mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->base_addr_CE_space_unaligned = base_addr;

			if (src_ring->
			    base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN
							    - 1)) {
				src_ring->base_addr_CE_space =
					(src_ring->base_addr_CE_space_unaligned
					 + CE_DESC_RING_ALIGN -
					 1) & ~(CE_DESC_RING_ALIGN - 1);

				src_ring->base_addr_owner_space =
					(void
					 *)(((size_t) src_ring->
					     base_addr_owner_space_unaligned +
					     CE_DESC_RING_ALIGN -
					     1) & ~(CE_DESC_RING_ALIGN - 1));
			} else {
				src_ring->base_addr_CE_space =
					src_ring->base_addr_CE_space_unaligned;
				src_ring->base_addr_owner_space =
					src_ring->
					base_addr_owner_space_unaligned;
			}
			/*
			 * Also allocate a shadow src ring in
			 * regular mem to use for faster access.
			 */
			src_ring->shadow_base_unaligned =
				qdf_mem_malloc(nentries *
					       sizeof(struct CE_src_desc) +
					       CE_DESC_RING_ALIGN);
			if (src_ring->shadow_base_unaligned == NULL) {
				HIF_ERROR("%s: src ring no shadow_base mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->shadow_base = (struct CE_src_desc *)
				(((size_t) src_ring->shadow_base_unaligned +
				  CE_DESC_RING_ALIGN - 1) &
				 ~(CE_DESC_RING_ALIGN - 1));

			if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
				goto error_target_access;
			dma_addr = src_ring->base_addr_CE_space;
			CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
				 (uint32_t)(dma_addr & 0xFFFFFFFF));

			/* if SR_BA_ADDRESS_HIGH register exists */
			if (is_register_supported(SR_BA_ADDRESS_HIGH)) {
				uint32_t tmp;

				tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(
						scn, ctrl_addr);
				tmp &= ~0x1F;
				dma_addr = ((dma_addr >> 32) & 0x1F) | tmp;
				CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
						ctrl_addr, (uint32_t)dma_addr);
			}
			CE_SRC_RING_SZ_SET(scn, ctrl_addr, nentries);
			CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
#ifdef BIG_ENDIAN_HOST
			/* Enable source ring byte swap for big endian host */
			CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
			CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
			CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
			if (Q_TARGET_ACCESS_END(scn) < 0)
				goto error_target_access;
		}
	}

	/* destination ring setup */
	nentries = attr->dest_nentries;
	if (nentries) {
		struct CE_ring_state *dest_ring;
		unsigned CE_nbytes;
		char *ptr;
		uint64_t dma_addr;

		nentries = roundup_pwr2(nentries);
		if (CE_state->dest_ring) {
			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
		} else {
			CE_nbytes = sizeof(struct CE_ring_state)
				    + (nentries * sizeof(void *));
			ptr = qdf_mem_malloc(CE_nbytes);
			if (!ptr) {
				/* cannot allocate dst ring. If the CE_state
				 * or src ring is allocated locally free
				 * CE_State and src ring and return error.
				 */
				HIF_ERROR("%s: dest ring has no mem",
					  __func__);
				if (malloc_src_ring) {
					qdf_mem_free(CE_state->src_ring);
					CE_state->src_ring = NULL;
					malloc_src_ring = false;
				}
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					scn->ce_id_to_state[CE_id] = NULL;
					qdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			}
			qdf_mem_zero(ptr, CE_nbytes);

			dest_ring = CE_state->dest_ring =
					    (struct CE_ring_state *)ptr;
			ptr += sizeof(struct CE_ring_state);
			dest_ring->nentries = nentries;
			dest_ring->nentries_mask = nentries - 1;
			if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
				goto error_target_access;
			dest_ring->sw_index =
				CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn,
						ctrl_addr);
			dest_ring->write_index =
				CE_DEST_RING_WRITE_IDX_GET_FROM_REGISTER(scn,
						ctrl_addr);

			ce_ring_test_initial_indexes(CE_id, dest_ring,
						     "dest_ring");

			if (Q_TARGET_ACCESS_END(scn) < 0)
				goto error_target_access;

			dest_ring->low_water_mark_nentries = 0;
			dest_ring->high_water_mark_nentries = nentries;
			dest_ring->per_transfer_context = (void **)ptr;

			/* Legacy platforms that do not support cache
			 * coherent DMA are unsupported */
			dest_ring->base_addr_owner_space_unaligned =
				qdf_mem_alloc_consistent(scn->qdf_dev,
						scn->qdf_dev->dev,
						(nentries *
						sizeof(struct CE_dest_desc) +
						CE_DESC_RING_ALIGN),
						&base_addr);
			if (dest_ring->base_addr_owner_space_unaligned
				== NULL) {
				HIF_ERROR("%s: dest ring has no DMA mem",
					  __func__);
				goto error_no_dma_mem;
			}
			dest_ring->base_addr_CE_space_unaligned = base_addr;

			/* Correctly initialize memory to 0 to
			 * prevent garbage data crashing system
			 * when download firmware
			 */
			qdf_mem_zero(dest_ring->base_addr_owner_space_unaligned,
				     nentries * sizeof(struct CE_dest_desc) +
				     CE_DESC_RING_ALIGN);

			if (dest_ring->
			    base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN -
							    1)) {

				dest_ring->base_addr_CE_space =
					(dest_ring->
					 base_addr_CE_space_unaligned +
					 CE_DESC_RING_ALIGN -
					 1) & ~(CE_DESC_RING_ALIGN - 1);

				dest_ring->base_addr_owner_space =
					(void
					 *)(((size_t) dest_ring->
					     base_addr_owner_space_unaligned +
					     CE_DESC_RING_ALIGN -
					     1) & ~(CE_DESC_RING_ALIGN - 1));
			} else {
				dest_ring->base_addr_CE_space =
					dest_ring->base_addr_CE_space_unaligned;
				dest_ring->base_addr_owner_space =
					dest_ring->
					base_addr_owner_space_unaligned;
			}

			if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
				goto error_target_access;
			dma_addr = dest_ring->base_addr_CE_space;
			CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
				 (uint32_t)(dma_addr & 0xFFFFFFFF));

			/* if DR_BA_ADDRESS_HIGH exists */
			if (is_register_supported(DR_BA_ADDRESS_HIGH)) {
				uint32_t tmp;

				tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn,
						ctrl_addr);
				tmp &= ~0x1F;
				dma_addr = ((dma_addr >> 32) & 0x1F) | tmp;
				CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
						ctrl_addr, (uint32_t)dma_addr);
			}

			CE_DEST_RING_SZ_SET(scn, ctrl_addr, nentries);
#ifdef BIG_ENDIAN_HOST
			/* Enable Dest ring byte swap for big endian host */
			CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
			CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
			CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
			if (Q_TARGET_ACCESS_END(scn) < 0)
				goto error_target_access;

			/* epping */
			/* poll timer */
			if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL)) {
				qdf_timer_init(scn->qdf_dev,
					       &CE_state->poll_timer,
					       ce_poll_timeout,
					       CE_state,
					       QDF_TIMER_TYPE_SW);
				CE_state->timer_inited = true;
				qdf_timer_mod(&CE_state->poll_timer,
					      CE_POLL_TIMEOUT);
			}
		}
	}

	/* Enable CE error interrupts */
	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		goto error_target_access;
	CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
	if (Q_TARGET_ACCESS_END(scn) < 0)
		goto error_target_access;

	/* update the htt_data attribute */
	ce_mark_datapath(CE_state);

	return (struct CE_handle *)CE_state;

error_target_access:
error_no_dma_mem:
	ce_fini((struct CE_handle *)CE_state);
	return NULL;
}

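/*
 * Illustrative sketch (not part of the driver): bringing up a single
 * copy engine with ce_init(). The example function name, the CE id and
 * the attribute values below are hypothetical placeholders, not a
 * validated configuration.
 */
#if 0
static struct CE_handle *example_ce_bringup(struct hif_softc *scn)
{
	struct CE_attr attr;

	qdf_mem_zero(&attr, sizeof(attr));
	attr.flags = 0;
	attr.src_nentries = 32;		/* rounded up to a power of 2 */
	attr.src_sz_max = 2048;
	attr.dest_nentries = 32;

	return ce_init(scn, 1 /* CE_id */, &attr);
}
#endif
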
#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_enable_fastpath() - update that we have enabled fastpath mode
 * @hif_ctx: HIF context
 *
 * For use in data path
 *
 * Return: void
 */
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	HIF_INFO("%s, Enabling fastpath mode", __func__);
	scn->fastpath_mode_on = true;
}

/**
 * hif_is_fastpath_mode_enabled() - API to query if fastpath mode is enabled
 * @hif_ctx: HIF Context
 *
 * For use in data path to skip HTC
 *
 * Return: bool
 */
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->fastpath_mode_on;
}

/**
 * hif_get_ce_handle() - API to get CE handle for FastPath mode
 * @hif_ctx: HIF Context
 * @id: CopyEngine Id
 *
 * API to return CE handle for fastpath mode
 *
 * Return: opaque CE handle for the given CopyEngine id
 */
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->ce_id_to_state[id];
}

/**
 * ce_h2t_tx_ce_cleanup() - placeholder function for H2T CE cleanup.
 * No processing is required inside this function.
 * @ce_hdl: Copy engine handle
 * Using an assert, this function makes sure that,
 * the TX CE has been processed completely.
 *
 * This is called while dismantling CE structures. No other thread
 * should be using these structures while dismantling is occurring
 * therefore no locking is needed.
 *
 * Return: none
 */
void
ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct hif_softc *sc = ce_state->scn;
	uint32_t sw_index, write_index;

	if (hif_is_nss_wifi_enabled(sc))
		return;

	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
		HIF_INFO("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
			 __func__, __LINE__);
		sw_index = src_ring->sw_index;
		write_index = src_ring->write_index;

		/* At this point Tx CE should be clean */
		qdf_assert_always(sw_index == write_index);
	}
}

/**
 * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
 * @ce_hdl: Handle to CE
 *
 * These buffers are never allocated on the fly, but
 * are allocated only once during HIF start and freed
 * only once during HIF stop.
 * NOTE:
 * The assumption here is there is no in-flight DMA in progress
 * currently, so that buffers can be freed up safely.
 *
 * Return: NONE
 */
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *dst_ring = ce_state->dest_ring;
	qdf_nbuf_t nbuf;
	int i;

	if (!ce_state->fastpath_handler)
		return;
	/*
	 * when fastpath_mode is on and for datapath CEs. Unlike other CE's,
	 * this CE is completely full: does not leave one blank space, to
	 * distinguish between empty queue & full queue. So free all the
	 * entries.
	 */
	for (i = 0; i < dst_ring->nentries; i++) {
		nbuf = dst_ring->per_transfer_context[i];

		/*
		 * The reasons for doing this check are:
		 * 1) Protect against calling cleanup before allocating buffers
		 * 2) In a corner case, FASTPATH_mode_on may be set, but we
		 *    could have a partially filled ring, because of a memory
		 *    allocation failure in the middle of allocating ring.
		 *    This check accounts for that case, checking
		 *    fastpath_mode_on flag or started flag would not have
		 *    covered that case. This is not in performance path,
		 *    so OK to do this.
		 */
		if (nbuf)
			qdf_nbuf_free(nbuf);
	}
}

/**
 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
 * @scn: HIF handle
 *
 * Datapath Rx CEs are special case, where we reuse all the message buffers.
 * Hence we have to post all the entries in the pipe, even, in the beginning
 * unlike for other CE pipes where one less than dest_nentries are filled in
 * the beginning.
 *
 * Return: None
 */
static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
	int pipe_num;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (scn->fastpath_mode_on == false)
		return;

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info =
			&hif_state->pipe_info[pipe_num];
		struct CE_state *ce_state =
			scn->ce_id_to_state[pipe_info->pipe_num];

		if (ce_state->htt_rx_data)
			atomic_inc(&pipe_info->recv_bufs_needed);
	}
}
#else
static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
}

static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return false;
}

static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
{
	return false;
}
#endif /* WLAN_FEATURE_FASTPATH */

void ce_fini(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int CE_id = CE_state->id;
	struct hif_softc *scn = CE_state->scn;

	CE_state->state = CE_UNUSED;
	scn->ce_id_to_state[CE_id] = NULL;
	if (CE_state->src_ring) {
		/* Cleanup the datapath Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		if (CE_state->src_ring->shadow_base_unaligned)
			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
		if (CE_state->src_ring->base_addr_owner_space_unaligned)
			qdf_mem_free_consistent(scn->qdf_dev,
						scn->qdf_dev->dev,
						(CE_state->src_ring->nentries *
						 sizeof(struct CE_src_desc) +
						 CE_DESC_RING_ALIGN),
						CE_state->src_ring->
						base_addr_owner_space_unaligned,
						CE_state->src_ring->
						base_addr_CE_space, 0);
		qdf_mem_free(CE_state->src_ring);
	}
	if (CE_state->dest_ring) {
		/* Cleanup the datapath Rx ring */
		ce_t2h_msg_ce_cleanup(copyeng);

		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
			qdf_mem_free_consistent(scn->qdf_dev,
						scn->qdf_dev->dev,
						(CE_state->dest_ring->nentries *
						 sizeof(struct CE_dest_desc) +
						 CE_DESC_RING_ALIGN),
						CE_state->dest_ring->
						base_addr_owner_space_unaligned,
						CE_state->dest_ring->
						base_addr_CE_space, 0);
		qdf_mem_free(CE_state->dest_ring);

		/* epping */
		if (CE_state->timer_inited) {
			CE_state->timer_inited = false;
			qdf_timer_free(&CE_state->poll_timer);
		}
	}
	qdf_mem_free(CE_state);
}

void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	qdf_mem_zero(&hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
	qdf_mem_zero(&hif_state->msg_callbacks_current,
		     sizeof(hif_state->msg_callbacks_current));
}

/* Send the first nbytes bytes of the buffer */
QDF_STATUS
hif_send_head(struct hif_opaque_softc *hif_ctx,
	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
	      qdf_nbuf_t nbuf, unsigned int data_attr)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	int bytes = nbytes, nfrags = 0;
	struct ce_sendlist sendlist;
	int status, i = 0;
	unsigned int mux_id = 0;

	QDF_ASSERT(nbytes <= qdf_nbuf_len(nbuf));

	transfer_id =
		(mux_id & MUX_ID_MASK) |
		(transfer_id & TRANSACTION_ID_MASK);
	data_attr &= DESC_DATA_FLAG_MASK;
	/*
	 * The common case involves sending multiple fragments within a
	 * single download (the tx descriptor and the tx frame header).
	 * So, optimize for the case of multiple fragments by not even
	 * checking whether it's necessary to use a sendlist.
	 * The overhead of using a sendlist for a single buffer download
	 * is not a big deal, since it happens rarely (for WMI messages).
	 */
	ce_sendlist_init(&sendlist);
	do {
		qdf_dma_addr_t frag_paddr;
		int frag_bytes;

		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
		/*
		 * Clear the packet offset for all but the first CE desc.
		 */
		if (i++ > 0)
			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;

		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
					     frag_bytes >
					     bytes ? bytes : frag_bytes,
					     qdf_nbuf_get_frag_is_wordstream
					     (nbuf,
					      nfrags) ? 0 :
					     CE_SEND_FLAG_SWAP_DISABLE,
					     data_attr);
		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("%s: error, frag_num %d larger than limit",
				  __func__, nfrags);
			return status;
		}
		bytes -= frag_bytes;
		nfrags++;
	} while (bytes > 0);

	/* Make sure we have resources to handle this request */
	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	if (pipe_info->num_sends_allowed < nfrags) {
		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
		return QDF_STATUS_E_RESOURCES;
	}
	pipe_info->num_sends_allowed -= nfrags;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);

	if (qdf_unlikely(ce_hdl == NULL)) {
		HIF_ERROR("%s: error CE handle is null", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
			     qdf_nbuf_data_addr(nbuf),
			     sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
	QDF_ASSERT(status == QDF_STATUS_SUCCESS);

	return status;
}

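/*
 * Illustrative sketch (not part of the driver): minimal single-buffer
 * send through hif_send_head(). The example function name, pipe number
 * and transfer id below are hypothetical; real callers obtain them from
 * the service-to-pipe mapping.
 */
#if 0
static QDF_STATUS example_send(struct hif_opaque_softc *hif_ctx,
			       qdf_nbuf_t nbuf)
{
	return hif_send_head(hif_ctx, 3 /* pipe */, 0 /* transfer_id */,
			     qdf_nbuf_len(nbuf), nbuf, 0 /* data_attr */);
}
#endif
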
Komal Seelam5584a7c2016-02-24 19:22:48 +05301280void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
1281 int force)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001282{
Komal Seelam644263d2016-02-22 20:45:49 +05301283 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1284
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001285 if (!force) {
1286 int resources;
1287 /*
1288 * Decide whether to actually poll for completions, or just
1289 * wait for a later chance. If there seem to be plenty of
1290 * resources left, then just wait, since checking involves
1291 * reading a CE register, which is a relatively expensive
1292 * operation.
1293 */
Komal Seelam644263d2016-02-22 20:45:49 +05301294 resources = hif_get_free_queue_number(hif_ctx, pipe);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001295 /*
1296 * If at least 50% of the total resources are still available,
1297 * don't bother checking again yet.
1298 */
1299 if (resources > (host_ce_config[pipe].src_nentries >> 1)) {
1300 return;
1301 }
1302 }
Houston Hoffman56e0d702016-05-05 17:48:06 -07001303#if ATH_11AC_TXCOMPACT
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001304 ce_per_engine_servicereap(scn, pipe);
1305#else
1306 ce_per_engine_service(scn, pipe);
1307#endif
1308}
1309
Komal Seelam5584a7c2016-02-24 19:22:48 +05301310uint16_t
1311hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001312{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301313 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001314 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1315 uint16_t rv;
1316
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301317 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001318 rv = pipe_info->num_sends_allowed;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301319 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001320 return rv;
1321}
1322
1323/* Called by lower (CE) layer when a send to Target completes. */
1324void
1325hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301326 void *transfer_context, qdf_dma_addr_t CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001327 unsigned int nbytes, unsigned int transfer_id,
1328 unsigned int sw_index, unsigned int hw_index,
1329 unsigned int toeplitz_hash_result)
1330{
1331 struct HIF_CE_pipe_info *pipe_info =
1332 (struct HIF_CE_pipe_info *)ce_context;
1333 struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
Komal Seelam644263d2016-02-22 20:45:49 +05301334 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001335 unsigned int sw_idx = sw_index, hw_idx = hw_index;
Houston Hoffman85118512015-09-28 14:17:11 -07001336 struct hif_msg_callbacks *msg_callbacks =
1337 &hif_state->msg_callbacks_current;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001338
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001339 do {
1340 /*
Houston Hoffman85118512015-09-28 14:17:11 -07001341 * The upper layer callback will be triggered
1342 * when last fragment is complteted.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001343 */
Houston Hoffman85118512015-09-28 14:17:11 -07001344 if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
Komal Seelam6ee55902016-04-11 17:11:07 +05301345 if (scn->target_status == TARGET_STATUS_RESET)
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301346 qdf_nbuf_free(transfer_context);
Houston Hoffman49794a32015-12-21 12:14:56 -08001347 else
1348 msg_callbacks->txCompletionHandler(
Houston Hoffman85118512015-09-28 14:17:11 -07001349 msg_callbacks->Context,
1350 transfer_context, transfer_id,
1351 toeplitz_hash_result);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001352 }
1353
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301354 qdf_spin_lock(&pipe_info->completion_freeq_lock);
Houston Hoffman85118512015-09-28 14:17:11 -07001355 pipe_info->num_sends_allowed++;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301356 qdf_spin_unlock(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001357 } while (ce_completed_send_next(copyeng,
1358 &ce_context, &transfer_context,
1359 &CE_data, &nbytes, &transfer_id,
1360 &sw_idx, &hw_idx,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301361 &toeplitz_hash_result) == QDF_STATUS_SUCCESS);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001362}
1363
Houston Hoffman910c6262015-09-28 12:56:25 -07001364/**
1365 * hif_ce_do_recv(): send message from copy engine to upper layers
1366 * @msg_callbacks: structure containing callback and callback context
1367 * @netbuff: skb containing message
1368 * @nbytes: number of bytes in the message
1369 * @pipe_info: used for the pipe_number info
1370 *
1371 * Checks the packet length, configures the lenght in the netbuff,
1372 * and calls the upper layer callback.
1373 *
1374 * return: None
1375 */
1376static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301377 qdf_nbuf_t netbuf, int nbytes,
Houston Hoffman910c6262015-09-28 12:56:25 -07001378 struct HIF_CE_pipe_info *pipe_info) {
1379 if (nbytes <= pipe_info->buf_sz) {
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301380 qdf_nbuf_set_pktlen(netbuf, nbytes);
Houston Hoffman910c6262015-09-28 12:56:25 -07001381 msg_callbacks->
1382 rxCompletionHandler(msg_callbacks->Context,
1383 netbuf, pipe_info->pipe_num);
1384 } else {
1385 HIF_ERROR("%s: Invalid Rx msg buf:%p nbytes:%d",
1386 __func__, netbuf, nbytes);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301387 qdf_nbuf_free(netbuf);
Houston Hoffman910c6262015-09-28 12:56:25 -07001388 }
1389}
1390
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001391/* Called by lower (CE) layer when data is received from the Target. */
1392void
1393hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301394 void *transfer_context, qdf_dma_addr_t CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001395 unsigned int nbytes, unsigned int transfer_id,
1396 unsigned int flags)
1397{
1398 struct HIF_CE_pipe_info *pipe_info =
1399 (struct HIF_CE_pipe_info *)ce_context;
1400 struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
Houston Hoffman18c7fc52015-09-02 11:44:42 -07001401 struct CE_state *ce_state = (struct CE_state *) copyeng;
Komal Seelam644263d2016-02-22 20:45:49 +05301402 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffmane02e12d2016-03-14 21:11:36 -07001403#ifdef HIF_PCI
1404 struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
1405#endif
Houston Hoffman910c6262015-09-28 12:56:25 -07001406 struct hif_msg_callbacks *msg_callbacks =
1407 &hif_state->msg_callbacks_current;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001408
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001409 do {
Houston Hoffmane02e12d2016-03-14 21:11:36 -07001410#ifdef HIF_PCI
1411 hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
1412#endif
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301413 qdf_nbuf_unmap_single(scn->qdf_dev,
1414 (qdf_nbuf_t) transfer_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301415 QDF_DMA_FROM_DEVICE);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001416
Houston Hoffman910c6262015-09-28 12:56:25 -07001417 atomic_inc(&pipe_info->recv_bufs_needed);
1418 hif_post_recv_buffers_for_pipe(pipe_info);
Komal Seelam6ee55902016-04-11 17:11:07 +05301419 if (scn->target_status == TARGET_STATUS_RESET)
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301420 qdf_nbuf_free(transfer_context);
Houston Hoffman49794a32015-12-21 12:14:56 -08001421 else
1422 hif_ce_do_recv(msg_callbacks, transfer_context,
Houston Hoffman9c0f80a2015-09-28 18:36:36 -07001423 nbytes, pipe_info);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001424
1425 /* Set up force_break flag if num of receices reaches
1426 * MAX_NUM_OF_RECEIVES */
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001427 ce_state->receive_count++;
Houston Hoffman05652722016-04-29 16:58:59 -07001428 if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
Houston Hoffman18c7fc52015-09-02 11:44:42 -07001429 ce_state->force_break = 1;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001430 break;
1431 }
1432 } while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
1433 &CE_data, &nbytes, &transfer_id,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301434 &flags) == QDF_STATUS_SUCCESS);
Houston Hoffmanf4607852015-12-17 17:14:40 -08001435
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001436}
1437
1438/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
1439
1440void
Komal Seelam5584a7c2016-02-24 19:22:48 +05301441hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001442 struct hif_msg_callbacks *callbacks)
1443{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301444 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001445
1446#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
1447 spin_lock_init(&pcie_access_log_lock);
1448#endif
1449 /* Save callbacks for later installation */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301450 qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001451 sizeof(hif_state->msg_callbacks_pending));
1452
1453}
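/*
 * Example (illustrative, using the hypothetical handlers sketched above):
 * a typical bring-up saves the callbacks first and starts HIF later, which
 * is when hif_msg_callbacks_install() below makes them current.
 *
 *	struct hif_msg_callbacks cbs = {
 *		.Context = my_layer_ctx,	hypothetical context pointer
 *		.txCompletionHandler = example_tx_done,
 *		.rxCompletionHandler = example_rx_done,
 *	};
 *
 *	hif_post_init(hif_ctx, NULL, &cbs);
 *	... BMI phase runs here ...
 *	hif_start(hif_ctx);
 */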
1454
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001455int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
1456{
1457 struct CE_handle *ce_diag = hif_state->ce_diag;
1458 int pipe_num;
Komal Seelam644263d2016-02-22 20:45:49 +05301459 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07001460 struct hif_msg_callbacks *hif_msg_callbacks =
1461 &hif_state->msg_callbacks_current;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001462
1463 /* daemonize("hif_compl_thread"); */
1464
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001465 if (scn->ce_count == 0) {
Houston Hoffmanc50572b2016-06-08 19:49:46 -07001466 HIF_ERROR("%s: Invalid ce_count", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001467 return -EINVAL;
1468 }
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07001469
1470 if (!hif_msg_callbacks ||
1471 !hif_msg_callbacks->rxCompletionHandler ||
1472 !hif_msg_callbacks->txCompletionHandler) {
1473 HIF_ERROR("%s: no completion handler registered", __func__);
1474 return -EFAULT;
1475 }
1476
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001477 A_TARGET_ACCESS_LIKELY(scn);
1478 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1479 struct CE_attr attr;
1480 struct HIF_CE_pipe_info *pipe_info;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001481
1482 pipe_info = &hif_state->pipe_info[pipe_num];
1483 if (pipe_info->ce_hdl == ce_diag) {
1484 continue; /* Handle Diagnostic CE specially */
1485 }
1486 attr = host_ce_config[pipe_num];
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001487 if (attr.src_nentries) {
1488 /* pipe used to send to target */
1489 HIF_INFO_MED("%s: pipe_num:%d pipe_info:0x%p",
1490 __func__, pipe_num, pipe_info);
1491 ce_send_cb_register(pipe_info->ce_hdl,
1492 hif_pci_ce_send_done, pipe_info,
1493 attr.flags & CE_ATTR_DISABLE_INTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001494 pipe_info->num_sends_allowed = attr.src_nentries - 1;
1495 }
1496 if (attr.dest_nentries) {
1497 /* pipe used to receive from target */
1498 ce_recv_cb_register(pipe_info->ce_hdl,
1499 hif_pci_ce_recv_data, pipe_info,
1500 attr.flags & CE_ATTR_DISABLE_INTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001501 }
Houston Hoffman6666df72015-11-30 16:48:35 -08001502
1503 if (attr.src_nentries)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301504 qdf_spinlock_create(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001505 }
Houston Hoffman6666df72015-11-30 16:48:35 -08001506
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001507 A_TARGET_ACCESS_UNLIKELY(scn);
1508 return 0;
1509}
1510
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001511/*
1512 * Install pending msg callbacks.
1513 *
1514 * TBDXXX: This hack is needed because upper layers install msg callbacks
1515 * for use with HTC before BMI is done; yet this HIF implementation
1516 * needs to continue to use BMI msg callbacks. Really, upper layers
1517 * should not register HTC callbacks until AFTER BMI phase.
1518 */
Komal Seelam644263d2016-02-22 20:45:49 +05301519static void hif_msg_callbacks_install(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001520{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301521 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001522
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301523 qdf_mem_copy(&hif_state->msg_callbacks_current,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001524 &hif_state->msg_callbacks_pending,
1525 sizeof(hif_state->msg_callbacks_pending));
1526}
1527
Komal Seelam5584a7c2016-02-24 19:22:48 +05301528void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
1529 uint8_t *DLPipe)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001530{
1531 int ul_is_polled, dl_is_polled;
1532
Komal Seelam644263d2016-02-22 20:45:49 +05301533 (void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001534 ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
1535}
1536
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001537/**
1538 * hif_dump_pipe_debug_count() - Log error count
Komal Seelam644263d2016-02-22 20:45:49 +05301539 * @scn: hif_softc pointer.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001540 *
1541 * Output the pipe error counts of each pipe to log file
1542 *
1543 * Return: N/A
1544 */
Komal Seelam644263d2016-02-22 20:45:49 +05301545void hif_dump_pipe_debug_count(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001546{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301547 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001548 int pipe_num;
1549
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001550 if (hif_state == NULL) {
1551 HIF_ERROR("%s hif_state is NULL", __func__);
1552 return;
1553 }
1554 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1555 struct HIF_CE_pipe_info *pipe_info;
1556
1557 pipe_info = &hif_state->pipe_info[pipe_num];
1558
1559 if (pipe_info->nbuf_alloc_err_count > 0 ||
1560 pipe_info->nbuf_dma_err_count > 0 ||
1561 pipe_info->nbuf_ce_enqueue_err_count)
1562 HIF_ERROR(
1563 "%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
1564 __func__, pipe_info->pipe_num,
1565 atomic_read(&pipe_info->recv_bufs_needed),
1566 pipe_info->nbuf_alloc_err_count,
1567 pipe_info->nbuf_dma_err_count,
1568 pipe_info->nbuf_ce_enqueue_err_count);
1569 }
1570}
1571
1572static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
1573{
1574 struct CE_handle *ce_hdl;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301575 qdf_size_t buf_sz;
Komal Seelam644263d2016-02-22 20:45:49 +05301576 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301577 QDF_STATUS ret;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001578 uint32_t bufs_posted = 0;
1579
1580 buf_sz = pipe_info->buf_sz;
1581 if (buf_sz == 0) {
1582 /* Unused Copy Engine */
1583 return 0;
1584 }
1585
1586 ce_hdl = pipe_info->ce_hdl;
1587
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301588 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001589 while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301590 qdf_dma_addr_t CE_data; /* CE space buffer address */
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301591 qdf_nbuf_t nbuf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001592 int status;
1593
1594 atomic_dec(&pipe_info->recv_bufs_needed);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301595 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001596
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301597 nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001598 if (!nbuf) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301599 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001600 pipe_info->nbuf_alloc_err_count++;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301601 qdf_spin_unlock_bh(
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001602 &pipe_info->recv_bufs_needed_lock);
1603 HIF_ERROR(
1604 "%s buf alloc error [%d] needed %d, nbuf_alloc_err_count = %u",
1605 __func__, pipe_info->pipe_num,
1606 atomic_read(&pipe_info->recv_bufs_needed),
1607 pipe_info->nbuf_alloc_err_count);
1608 atomic_inc(&pipe_info->recv_bufs_needed);
1609 return 1;
1610 }
1611
1612 /*
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301613 * qdf_nbuf_peek_header(nbuf, &data, &unused);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001614		 * CE_data = dma_map_single(dev, data, buf_sz,
 1615		 *				DMA_FROM_DEVICE);
1616 */
1617 ret =
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301618 qdf_nbuf_map_single(scn->qdf_dev, nbuf,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301619 QDF_DMA_FROM_DEVICE);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001620
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301621 if (unlikely(ret != QDF_STATUS_SUCCESS)) {
1622 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001623 pipe_info->nbuf_dma_err_count++;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301624 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001625 HIF_ERROR(
 1626			"%s buf map error [%d] needed %d, nbuf_dma_err_count = %u",
1627 __func__, pipe_info->pipe_num,
1628 atomic_read(&pipe_info->recv_bufs_needed),
1629 pipe_info->nbuf_dma_err_count);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301630 qdf_nbuf_free(nbuf);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001631 atomic_inc(&pipe_info->recv_bufs_needed);
1632 return 1;
1633 }
1634
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301635 CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001636
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301637 qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001638 buf_sz, DMA_FROM_DEVICE);
1639 status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301640 QDF_ASSERT(status == QDF_STATUS_SUCCESS);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001641 if (status != EOK) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301642 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001643 pipe_info->nbuf_ce_enqueue_err_count++;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301644 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001645 HIF_ERROR(
 1646			"%s buf enqueue error [%d] needed %d, nbuf_ce_enqueue_err_count = %u",
1647 __func__, pipe_info->pipe_num,
1648 atomic_read(&pipe_info->recv_bufs_needed),
1649 pipe_info->nbuf_ce_enqueue_err_count);
1650 atomic_inc(&pipe_info->recv_bufs_needed);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301651 qdf_nbuf_free(nbuf);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001652 return 1;
1653 }
1654
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301655 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001656 bufs_posted++;
1657 }
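	/*
	 * Each buffer just posted offsets one earlier failure below, so the
	 * error counters decay across replenish cycles instead of growing
	 * without bound.
	 */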
1658 pipe_info->nbuf_alloc_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07001659 (pipe_info->nbuf_alloc_err_count > bufs_posted) ?
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001660 pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
1661 pipe_info->nbuf_dma_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07001662 (pipe_info->nbuf_dma_err_count > bufs_posted) ?
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001663 pipe_info->nbuf_dma_err_count - bufs_posted : 0;
1664 pipe_info->nbuf_ce_enqueue_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07001665 (pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001666 pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
1667
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301668 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001669
1670 return 0;
1671}
1672
1673/*
1674 * Try to post all desired receive buffers for all pipes.
1675 * Returns 0 if all desired buffers are posted,
 1676 * non-zero if we were unable to completely
1677 * replenish receive buffers.
1678 */
Komal Seelam644263d2016-02-22 20:45:49 +05301679static int hif_post_recv_buffers(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001680{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301681 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001682 int pipe_num, rv = 0;
Houston Hoffman85925072016-05-06 17:02:18 -07001683 struct CE_state *ce_state;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001684
1685 A_TARGET_ACCESS_LIKELY(scn);
1686 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1687 struct HIF_CE_pipe_info *pipe_info;
Houston Hoffman85925072016-05-06 17:02:18 -07001688 ce_state = scn->ce_id_to_state[pipe_num];
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001689 pipe_info = &hif_state->pipe_info[pipe_num];
Houston Hoffman85925072016-05-06 17:02:18 -07001690
1691 if (hif_is_nss_wifi_enabled(scn) &&
1692 ce_state && (ce_state->htt_rx_data)) {
1693 continue;
1694 }
1695
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001696 if (hif_post_recv_buffers_for_pipe(pipe_info)) {
1697 rv = 1;
1698 goto done;
1699 }
1700 }
1701
1702done:
1703 A_TARGET_ACCESS_UNLIKELY(scn);
1704
1705 return rv;
1706}
1707
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301708QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001709{
Komal Seelam644263d2016-02-22 20:45:49 +05301710 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05301711 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001712
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001713 hif_update_fastpath_recv_bufs_cnt(scn);
1714
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07001715 hif_msg_callbacks_install(scn);
1716
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001717 if (hif_completion_thread_startup(hif_state))
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301718 return QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001719
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001720 /* Post buffers once to start things off. */
1721 (void)hif_post_recv_buffers(scn);
1722
1723 hif_state->started = true;
1724
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301725 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001726}
1727
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001728void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
1729{
Komal Seelam644263d2016-02-22 20:45:49 +05301730 struct hif_softc *scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001731 struct CE_handle *ce_hdl;
1732 uint32_t buf_sz;
1733 struct HIF_CE_state *hif_state;
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301734 qdf_nbuf_t netbuf;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301735 qdf_dma_addr_t CE_data;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001736 void *per_CE_context;
1737
1738 buf_sz = pipe_info->buf_sz;
1739 if (buf_sz == 0) {
1740 /* Unused Copy Engine */
1741 return;
1742 }
1743
1744 hif_state = pipe_info->HIF_CE_state;
1745 if (!hif_state->started) {
1746 return;
1747 }
1748
Komal Seelam02cf2f82016-02-22 20:44:25 +05301749 scn = HIF_GET_SOFTC(hif_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001750 ce_hdl = pipe_info->ce_hdl;
1751
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301752 if (scn->qdf_dev == NULL) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001753 return;
1754 }
1755 while (ce_revoke_recv_next
1756 (ce_hdl, &per_CE_context, (void **)&netbuf,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301757 &CE_data) == QDF_STATUS_SUCCESS) {
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301758 qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301759 QDF_DMA_FROM_DEVICE);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301760 qdf_nbuf_free(netbuf);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001761 }
1762}
1763
1764void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
1765{
1766 struct CE_handle *ce_hdl;
1767 struct HIF_CE_state *hif_state;
Komal Seelam644263d2016-02-22 20:45:49 +05301768 struct hif_softc *scn;
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301769 qdf_nbuf_t netbuf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001770 void *per_CE_context;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301771 qdf_dma_addr_t CE_data;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001772 unsigned int nbytes;
1773 unsigned int id;
1774 uint32_t buf_sz;
1775 uint32_t toeplitz_hash_result;
1776
1777 buf_sz = pipe_info->buf_sz;
1778 if (buf_sz == 0) {
1779 /* Unused Copy Engine */
1780 return;
1781 }
1782
1783 hif_state = pipe_info->HIF_CE_state;
1784 if (!hif_state->started) {
1785 return;
1786 }
1787
Komal Seelam02cf2f82016-02-22 20:44:25 +05301788 scn = HIF_GET_SOFTC(hif_state);
1789
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001790 ce_hdl = pipe_info->ce_hdl;
1791
1792 while (ce_cancel_send_next
1793 (ce_hdl, &per_CE_context,
1794 (void **)&netbuf, &CE_data, &nbytes,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301795 &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001796 if (netbuf != CE_SENDLIST_ITEM_CTXT) {
1797 /*
1798 * Packets enqueued by htt_h2t_ver_req_msg() and
1799 * htt_h2t_rx_ring_cfg_msg_ll() have already been
1800 * freed in htt_htc_misc_pkt_pool_free() in
1801 * wlantl_close(), so do not free them here again
Houston Hoffman29573d92015-10-20 17:49:44 -07001802 * by checking whether it's the endpoint
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001803 * which they are queued in.
1804 */
Nirav Shahd7f91592016-04-21 14:18:43 +05301805 if (id == scn->htc_htt_tx_endpoint)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001806 return;
Nirav Shahd7f91592016-04-21 14:18:43 +05301807			/* Indicate the completion to the higher
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001808			 * layer so that it can free the buffer */
1809 hif_state->msg_callbacks_current.
1810 txCompletionHandler(hif_state->
1811 msg_callbacks_current.Context,
1812 netbuf, id, toeplitz_hash_result);
1813 }
1814 }
1815}
1816
1817/*
1818 * Cleanup residual buffers for device shutdown:
1819 * buffers that were enqueued for receive
1820 * buffers that were to be sent
1821 * Note: Buffers that had completed but which were
1822 * not yet processed are on a completion queue. They
1823 * are handled when the completion thread shuts down.
1824 */
1825void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
1826{
1827 int pipe_num;
Komal Seelam644263d2016-02-22 20:45:49 +05301828 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffman85925072016-05-06 17:02:18 -07001829 struct CE_state *ce_state;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001830
Komal Seelam02cf2f82016-02-22 20:44:25 +05301831 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001832 struct HIF_CE_pipe_info *pipe_info;
1833
Houston Hoffman85925072016-05-06 17:02:18 -07001834 ce_state = scn->ce_id_to_state[pipe_num];
1835 if (hif_is_nss_wifi_enabled(scn) && ce_state &&
1836 ((ce_state->htt_tx_data) ||
1837 (ce_state->htt_rx_data))) {
1838 continue;
1839 }
1840
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001841 pipe_info = &hif_state->pipe_info[pipe_num];
1842 hif_recv_buffer_cleanup_on_pipe(pipe_info);
1843 hif_send_buffer_cleanup_on_pipe(pipe_info);
1844 }
1845}
1846
Komal Seelam5584a7c2016-02-24 19:22:48 +05301847void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001848{
Komal Seelam644263d2016-02-22 20:45:49 +05301849 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05301850 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Komal Seelam644263d2016-02-22 20:45:49 +05301851
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001852 hif_buffer_cleanup(hif_state);
1853}
1854
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05301855void hif_ce_stop(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001856{
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05301857 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001858 int pipe_num;
1859
1860 scn->hif_init_done = false;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001861
1862 /*
1863 * At this point, asynchronous threads are stopped,
1864 * The Target should not DMA nor interrupt, Host code may
1865 * not initiate anything more. So we just need to clean
1866 * up Host-side state.
1867 */
1868
1869 if (scn->athdiag_procfs_inited) {
1870 athdiag_procfs_remove();
1871 scn->athdiag_procfs_inited = false;
1872 }
1873
1874 hif_buffer_cleanup(hif_state);
1875
1876 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1877 struct HIF_CE_pipe_info *pipe_info;
1878
1879 pipe_info = &hif_state->pipe_info[pipe_num];
1880 if (pipe_info->ce_hdl) {
1881 ce_fini(pipe_info->ce_hdl);
1882 pipe_info->ce_hdl = NULL;
1883 pipe_info->buf_sz = 0;
1884 }
1885 }
1886
1887 if (hif_state->sleep_timer_init) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301888 qdf_timer_stop(&hif_state->sleep_timer);
1889 qdf_timer_free(&hif_state->sleep_timer);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001890 hif_state->sleep_timer_init = false;
1891 }
1892
1893 hif_state->started = false;
1894}
1895
Houston Hoffman854e67f2016-03-14 21:11:39 -07001896/**
1897 * hif_get_target_ce_config() - get copy engine configuration
1898 * @target_ce_config_ret: basic copy engine configuration
1899 * @target_ce_config_sz_ret: size of the basic configuration in bytes
1900 * @target_service_to_ce_map_ret: service mapping for the copy engines
1901 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
1902 * @target_shadow_reg_cfg_ret: shadow register configuration
1903 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
1904 *
 1905 * Provides accessors to these values outside of this file.
 1906 * Currently these are stored in static pointers to const sections.
 1907 * There are multiple configurations to select from at compile time.
1908 * Runtime selection would need to consider mode, target type and bus type.
1909 *
1910 * Return: return by parameter.
1911 */
1912void hif_get_target_ce_config(struct CE_pipe_config **target_ce_config_ret,
1913 int *target_ce_config_sz_ret,
1914 struct service_to_pipe **target_service_to_ce_map_ret,
1915 int *target_service_to_ce_map_sz_ret,
1916 struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
1917 int *shadow_cfg_sz_ret)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001918{
Houston Hoffman854e67f2016-03-14 21:11:39 -07001919 *target_ce_config_ret = target_ce_config;
1920 *target_ce_config_sz_ret = target_ce_config_sz;
1921 *target_service_to_ce_map_ret = target_service_to_ce_map;
1922 *target_service_to_ce_map_sz_ret = target_service_to_ce_map_sz;
1923
1924 if (target_shadow_reg_cfg_ret)
1925 *target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
1926
1927 if (shadow_cfg_sz_ret)
1928 *shadow_cfg_sz_ret = shadow_cfg_sz;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001929}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001930
1931/**
1932 * hif_wlan_enable(): call the platform driver to enable wlan
Komal Seelambd7c51d2016-02-24 10:27:30 +05301933 * @scn: HIF Context
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001934 *
1935 * This function passes the con_mode and CE configuration to
1936 * platform driver to enable wlan.
1937 *
Houston Hoffman108da402016-03-14 21:11:24 -07001938 * Return: linux error code
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001939 */
Houston Hoffman108da402016-03-14 21:11:24 -07001940int hif_wlan_enable(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001941{
Yuanyuan Liufd594c22016-04-25 13:59:19 -07001942 struct pld_wlan_enable_cfg cfg;
1943 enum pld_driver_mode mode;
Komal Seelambd7c51d2016-02-24 10:27:30 +05301944 uint32_t con_mode = hif_get_conparam(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001945
Houston Hoffman854e67f2016-03-14 21:11:39 -07001946 hif_get_target_ce_config((struct CE_pipe_config **)&cfg.ce_tgt_cfg,
1947 &cfg.num_ce_tgt_cfg,
1948 (struct service_to_pipe **)&cfg.ce_svc_cfg,
1949 &cfg.num_ce_svc_pipe_cfg,
1950 (struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
1951 &cfg.num_shadow_reg_cfg);
1952
1953 /* translate from structure size to array size */
1954 cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
1955 cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
1956 cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
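	/*
	 * e.g. a target_ce_config_sz of 8 * sizeof(struct CE_pipe_config)
	 * leaves num_ce_tgt_cfg holding 8 entries after the division.
	 */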
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001957
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301958 if (QDF_GLOBAL_FTM_MODE == con_mode)
Yuanyuan Liufd594c22016-04-25 13:59:19 -07001959 mode = PLD_FTM;
Houston Hoffman75ef5a52016-04-14 17:15:49 -07001960 else if (QDF_IS_EPPING_ENABLED(con_mode))
Yuanyuan Liufd594c22016-04-25 13:59:19 -07001961 mode = PLD_EPPING;
Peng Xu7b962532015-10-02 17:17:03 -07001962 else
Yuanyuan Liufd594c22016-04-25 13:59:19 -07001963 mode = PLD_MISSION;
Peng Xu7b962532015-10-02 17:17:03 -07001964
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07001965 if (BYPASS_QMI)
1966 return 0;
1967 else
Yuanyuan Liufd594c22016-04-25 13:59:19 -07001968 return pld_wlan_enable(scn->qdf_dev->dev, &cfg,
1969 mode, QWLAN_VERSIONSTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001970}
1971
Houston Hoffman75ef5a52016-04-14 17:15:49 -07001972#define CE_EPPING_USES_IRQ true
1973
Houston Hoffman108da402016-03-14 21:11:24 -07001974/**
1975 * hif_ce_prepare_config() - load the correct static tables.
1976 * @scn: hif context
1977 *
1978 * Epping uses different static attribute tables than mission mode.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001979 */
Houston Hoffman108da402016-03-14 21:11:24 -07001980void hif_ce_prepare_config(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001981{
Komal Seelambd7c51d2016-02-24 10:27:30 +05301982 uint32_t mode = hif_get_conparam(scn);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07001983 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1984 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1985
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001986 /* if epping is enabled we need to use the epping configuration. */
Houston Hoffman75ef5a52016-04-14 17:15:49 -07001987 if (QDF_IS_EPPING_ENABLED(mode)) {
1988 if (CE_EPPING_USES_IRQ)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001989 host_ce_config = host_ce_config_wlan_epping_irq;
1990 else
1991 host_ce_config = host_ce_config_wlan_epping_poll;
1992 target_ce_config = target_ce_config_wlan_epping;
1993 target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
1994 target_service_to_ce_map =
1995 target_service_to_ce_map_wlan_epping;
1996 target_service_to_ce_map_sz =
1997 sizeof(target_service_to_ce_map_wlan_epping);
Vishwajith Upendra70efc752016-04-18 11:23:49 -07001998 target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
1999 shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002000 }
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002001
2002 switch (tgt_info->target_type) {
2003 default:
2004 break;
2005 case TARGET_TYPE_AR900B:
2006 case TARGET_TYPE_QCA9984:
2007 case TARGET_TYPE_IPQ4019:
2008 case TARGET_TYPE_QCA9888:
2009 host_ce_config = host_ce_config_wlan_ar900b;
2010 target_ce_config = target_ce_config_wlan_ar900b;
2011 target_ce_config_sz = sizeof(target_ce_config_wlan_ar900b);
2012
2013 target_service_to_ce_map = target_service_to_ce_map_ar900b;
2014 target_service_to_ce_map_sz =
2015 sizeof(target_service_to_ce_map_ar900b);
2016 break;
2017
2018 case TARGET_TYPE_AR9888:
2019 case TARGET_TYPE_AR9888V2:
2020 host_ce_config = host_ce_config_wlan_ar9888;
2021 target_ce_config = target_ce_config_wlan_ar9888;
2022 target_ce_config_sz = sizeof(target_ce_config_wlan_ar9888);
2023
2024 target_service_to_ce_map = target_service_to_ce_map_ar900b;
2025 target_service_to_ce_map_sz =
2026 sizeof(target_service_to_ce_map_ar900b);
2027 break;
2028 }
Houston Hoffman108da402016-03-14 21:11:24 -07002029}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002030
Houston Hoffman108da402016-03-14 21:11:24 -07002031/**
2032 * hif_ce_open() - do ce specific allocations
2033 * @hif_sc: pointer to hif context
2034 *
 2035 * Return: QDF_STATUS_SUCCESS for success or QDF_STATUS_E_NOMEM
2036 */
2037QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
2038{
2039 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002040
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302041 qdf_spinlock_create(&hif_state->keep_awake_lock);
Houston Hoffman108da402016-03-14 21:11:24 -07002042 return QDF_STATUS_SUCCESS;
2043}
2044
2045/**
2046 * hif_ce_close() - do ce specific free
2047 * @hif_sc: pointer to hif context
2048 */
2049void hif_ce_close(struct hif_softc *hif_sc)
2050{
2051}
2052
2053/**
2054 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
2055 * @hif_sc: hif context
2056 *
2057 * uses state variables to support cleaning up when hif_config_ce fails.
2058 */
2059void hif_unconfig_ce(struct hif_softc *hif_sc)
2060{
2061 int pipe_num;
2062 struct HIF_CE_pipe_info *pipe_info;
2063 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2064
2065 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
2066 pipe_info = &hif_state->pipe_info[pipe_num];
2067 if (pipe_info->ce_hdl) {
2068 ce_unregister_irq(hif_state, (1 << pipe_num));
2069 hif_sc->request_irq_done = false;
2070 ce_fini(pipe_info->ce_hdl);
2071 pipe_info->ce_hdl = NULL;
2072 pipe_info->buf_sz = 0;
2073 }
2074 }
Houston Hoffman108da402016-03-14 21:11:24 -07002075 if (hif_sc->athdiag_procfs_inited) {
2076 athdiag_procfs_remove();
2077 hif_sc->athdiag_procfs_inited = false;
2078 }
2079}
2080
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07002081#ifdef CONFIG_BYPASS_QMI
2082#define FW_SHARED_MEM (2 * 1024 * 1024)
2083
2084/**
2085 * hif_post_static_buf_to_target() - post static buffer to WLAN FW
2086 * @scn: pointer to HIF structure
2087 *
2088 * WLAN FW needs 2MB memory from DDR when QMI is disabled.
2089 *
2090 * Return: void
2091 */
2092static void hif_post_static_buf_to_target(struct hif_softc *scn)
2093{
Hardik Kantilal Patelc5dc5f22016-04-21 14:11:33 -07002094 void *target_va;
2095 phys_addr_t target_pa;
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07002096
Hardik Kantilal Patelc5dc5f22016-04-21 14:11:33 -07002097 target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
2098 FW_SHARED_MEM, &target_pa);
2099 if (NULL == target_va) {
 2100		HIF_TRACE("Memory allocation failed, could not post target buf");
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07002101 return;
2102 }
Hardik Kantilal Patelc5dc5f22016-04-21 14:11:33 -07002103 hif_write32_mb(scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
2104 HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07002105}
2106#else
2107static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
2108{
2109 return;
2110}
2111#endif
2112
Houston Hoffman108da402016-03-14 21:11:24 -07002113/**
2114 * hif_config_ce() - configure copy engines
2115 * @scn: hif context
2116 *
2117 * Prepares fw, copy engine hardware and host sw according
2118 * to the attributes selected by hif_ce_prepare_config.
2119 *
2120 * also calls athdiag_procfs_init
2121 *
2122 * return: 0 for success nonzero for failure.
2123 */
2124int hif_config_ce(struct hif_softc *scn)
2125{
2126 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2127 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2128 struct HIF_CE_pipe_info *pipe_info;
2129 int pipe_num;
Houston Hoffman85925072016-05-06 17:02:18 -07002130 struct CE_state *ce_state;
Houston Hoffman108da402016-03-14 21:11:24 -07002131#ifdef ADRASTEA_SHADOW_REGISTERS
2132 int i;
2133#endif
2134 QDF_STATUS rv = QDF_STATUS_SUCCESS;
2135
2136 scn->notice_send = true;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002137
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07002138 hif_post_static_buf_to_target(scn);
2139
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002140 hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
Houston Hoffman108da402016-03-14 21:11:24 -07002141
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002142 hif_config_rri_on_ddr(scn);
2143
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002144	/* During CE initialization */
2145 scn->ce_count = HOST_CE_COUNT;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002146 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2147 struct CE_attr *attr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002148 pipe_info = &hif_state->pipe_info[pipe_num];
2149 pipe_info->pipe_num = pipe_num;
2150 pipe_info->HIF_CE_state = hif_state;
2151 attr = &host_ce_config[pipe_num];
2152 pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
Houston Hoffman85925072016-05-06 17:02:18 -07002153 ce_state = scn->ce_id_to_state[pipe_num];
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302154 QDF_ASSERT(pipe_info->ce_hdl != NULL);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002155 if (pipe_info->ce_hdl == NULL) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302156 rv = QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002157 A_TARGET_ACCESS_UNLIKELY(scn);
2158 goto err;
2159 }
2160
2161 if (pipe_num == DIAG_CE_ID) {
 2162			/* Reserve the last CE for
 2163			 * Diagnostic Window support */
Houston Hoffmanc1d9a412016-03-30 21:07:57 -07002164 hif_state->ce_diag = pipe_info->ce_hdl;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002165 continue;
2166 }
2167
Houston Hoffman85925072016-05-06 17:02:18 -07002168 if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2169 (ce_state->htt_rx_data))
2170 continue;
2171
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302172 pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
2173 qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002174 if (attr->dest_nentries > 0) {
2175 atomic_set(&pipe_info->recv_bufs_needed,
2176 init_buffer_count(attr->dest_nentries - 1));
2177 } else {
2178 atomic_set(&pipe_info->recv_bufs_needed, 0);
2179 }
2180 ce_tasklet_init(hif_state, (1 << pipe_num));
2181 ce_register_irq(hif_state, (1 << pipe_num));
2182 scn->request_irq_done = true;
2183 }
2184
2185 if (athdiag_procfs_init(scn) != 0) {
2186 A_TARGET_ACCESS_UNLIKELY(scn);
2187 goto err;
2188 }
2189 scn->athdiag_procfs_inited = true;
2190
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002191 HIF_INFO_MED("%s: ce_init done", __func__);
2192
Houston Hoffman108da402016-03-14 21:11:24 -07002193 init_tasklet_workers(hif_hdl);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002194
Houston Hoffmanc50572b2016-06-08 19:49:46 -07002195 HIF_TRACE("%s: X, ret = %d", __func__, rv);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002196
2197#ifdef ADRASTEA_SHADOW_REGISTERS
Houston Hoffmanc50572b2016-06-08 19:49:46 -07002198 HIF_INFO("%s, Using Shadow Registers instead of CE Registers", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002199 for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
Houston Hoffmanc50572b2016-06-08 19:49:46 -07002200 HIF_INFO("%s Shadow Register%d is mapped to address %x",
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002201 __func__, i,
2202 (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
2203 }
2204#endif
2205
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302206 return rv != QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002207
2208err:
2209 /* Failure, so clean up */
Houston Hoffman108da402016-03-14 21:11:24 -07002210 hif_unconfig_ce(scn);
Houston Hoffmanc50572b2016-06-08 19:49:46 -07002211 HIF_TRACE("%s: X, ret = %d", __func__, rv);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302212	return 1; /* failure */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002213}
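/*
 * Example (illustrative; the exact call sites live in the bus layer and
 * may differ per platform): the helpers above are normally used in this
 * order during bring-up.
 *
 *	hif_ce_prepare_config(scn);	select static tables for the target
 *	if (hif_wlan_enable(scn))	hand CE/service config to pld
 *		goto fail;
 *	if (hif_config_ce(scn))		init CEs, diag CE and irqs
 *		goto fail;
 */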
2214
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07002215#ifdef WLAN_FEATURE_FASTPATH
2216/**
2217 * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
 2218 * @handler: Callback function
2219 * @context: handle for callback function
2220 *
2221 * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
2222 */
Houston Hoffman127467f2016-04-26 22:37:14 -07002223int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
2224 fastpath_msg_handler handler,
2225 void *context)
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07002226{
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07002227 struct CE_state *ce_state;
Houston Hoffman127467f2016-04-26 22:37:14 -07002228 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07002229 int i;
2230
Himanshu Agarwal2a924592016-06-30 18:04:14 +05302231 if (!scn) {
2232 HIF_ERROR("%s: scn is NULL", __func__);
2233 QDF_ASSERT(0);
2234 return QDF_STATUS_E_FAILURE;
2235 }
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07002236
2237 if (!scn->fastpath_mode_on) {
Houston Hoffmanc50572b2016-06-08 19:49:46 -07002238 HIF_WARN("%s: Fastpath mode disabled", __func__);
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07002239 return QDF_STATUS_E_FAILURE;
2240 }
2241
Houston Hoffmand6f946c2016-04-06 15:16:00 -07002242 for (i = 0; i < scn->ce_count; i++) {
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07002243 ce_state = scn->ce_id_to_state[i];
2244 if (ce_state->htt_rx_data) {
2245 ce_state->fastpath_handler = handler;
2246 ce_state->context = context;
2247 }
2248 }
2249
2250 return QDF_STATUS_SUCCESS;
2251}
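/*
 * Example (illustrative sketch; the handler and context names are
 * hypothetical): with fastpath_mode_on set, a datapath layer can register
 * its rx message handler for the htt_rx_data copy engines like so.
 *
 *	if (hif_ce_fastpath_cb_register(hif_ctx, example_fastpath_rx,
 *					example_pdev) != QDF_STATUS_SUCCESS)
 *		fall back to the regular rxCompletionHandler path;
 */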
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07002252#endif
2253
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002254#ifdef IPA_OFFLOAD
Leo Changd85f78d2015-11-13 10:55:34 -08002255/**
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05302256 * hif_ce_ipa_get_ce_resource() - get uc resource on hif
Leo Changd85f78d2015-11-13 10:55:34 -08002257 * @scn: bus context
2258 * @ce_sr_base_paddr: copyengine source ring base physical address
2259 * @ce_sr_ring_size: copyengine source ring size
2260 * @ce_reg_paddr: copyengine register physical address
2261 *
2262 * IPA micro controller data path offload feature enabled,
2263 * HIF should release copy engine related resource information to IPA UC
2264 * IPA UC will access hardware resource with released information
2265 *
2266 * Return: None
2267 */
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05302268void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302269 qdf_dma_addr_t *ce_sr_base_paddr,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002270 uint32_t *ce_sr_ring_size,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302271 qdf_dma_addr_t *ce_reg_paddr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002272{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302273 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002274 struct HIF_CE_pipe_info *pipe_info =
2275 &(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
2276 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
2277
2278 ce_ipa_get_resource(ce_hdl, ce_sr_base_paddr, ce_sr_ring_size,
2279 ce_reg_paddr);
2280 return;
2281}
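/*
 * Example (illustrative): the IPA uC bring-up path would query the CE
 * resources once and program the micro controller with the results.
 *
 *	qdf_dma_addr_t sr_base, reg_pa;
 *	uint32_t sr_size;
 *
 *	hif_ce_ipa_get_ce_resource(scn, &sr_base, &sr_size, &reg_pa);
 */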
2282#endif /* IPA_OFFLOAD */
2283
2284
2285#ifdef ADRASTEA_SHADOW_REGISTERS
2286
2287/*
2288 Current shadow register config
2289
2290 -----------------------------------------------------------
2291 Shadow Register | CE | src/dst write index
2292 -----------------------------------------------------------
2293 0 | 0 | src
2294 1 No Config - Doesn't point to anything
2295 2 No Config - Doesn't point to anything
2296 3 | 3 | src
2297 4 | 4 | src
2298 5 | 5 | src
2299 6 No Config - Doesn't point to anything
2300 7 | 7 | src
2301 8 No Config - Doesn't point to anything
2302 9 No Config - Doesn't point to anything
2303 10 No Config - Doesn't point to anything
2304 11 No Config - Doesn't point to anything
2305 -----------------------------------------------------------
2306 12 No Config - Doesn't point to anything
2307 13 | 1 | dst
2308 14 | 2 | dst
2309 15 No Config - Doesn't point to anything
2310 16 No Config - Doesn't point to anything
2311 17 No Config - Doesn't point to anything
2312 18 No Config - Doesn't point to anything
2313 19 | 7 | dst
2314 20 | 8 | dst
2315 21 No Config - Doesn't point to anything
2316 22 No Config - Doesn't point to anything
2317 23 No Config - Doesn't point to anything
2318 -----------------------------------------------------------
2319
2320
2321 ToDo - Move shadow register config to following in the future
2322 This helps free up a block of shadow registers towards the end.
2323 Can be used for other purposes
2324
2325 -----------------------------------------------------------
2326 Shadow Register | CE | src/dst write index
2327 -----------------------------------------------------------
2328 0 | 0 | src
2329 1 | 3 | src
2330 2 | 4 | src
2331 3 | 5 | src
2332 4 | 7 | src
2333 -----------------------------------------------------------
2334 5 | 1 | dst
2335 6 | 2 | dst
2336 7 | 7 | dst
2337 8 | 8 | dst
2338 -----------------------------------------------------------
2339 9 No Config - Doesn't point to anything
2340 12 No Config - Doesn't point to anything
2341 13 No Config - Doesn't point to anything
2342 14 No Config - Doesn't point to anything
2343 15 No Config - Doesn't point to anything
2344 16 No Config - Doesn't point to anything
2345 17 No Config - Doesn't point to anything
2346 18 No Config - Doesn't point to anything
2347 19 No Config - Doesn't point to anything
2348 20 No Config - Doesn't point to anything
2349 21 No Config - Doesn't point to anything
2350 22 No Config - Doesn't point to anything
2351 23 No Config - Doesn't point to anything
2352 -----------------------------------------------------------
2353*/
2354
Komal Seelam644263d2016-02-22 20:45:49 +05302355u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002356{
2357 u32 addr = 0;
Houston Hoffmane6330442016-02-26 12:19:11 -08002358 u32 ce = COPY_ENGINE_ID(ctrl_addr);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002359
Houston Hoffmane6330442016-02-26 12:19:11 -08002360 switch (ce) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002361 case 0:
2362 addr = SHADOW_VALUE0;
2363 break;
2364 case 3:
2365 addr = SHADOW_VALUE3;
2366 break;
2367 case 4:
2368 addr = SHADOW_VALUE4;
2369 break;
2370 case 5:
2371 addr = SHADOW_VALUE5;
2372 break;
2373 case 7:
2374 addr = SHADOW_VALUE7;
2375 break;
2376 default:
Houston Hoffmane6330442016-02-26 12:19:11 -08002377 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302378 QDF_ASSERT(0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002379 }
2380 return addr;
2381
2382}
2383
Komal Seelam644263d2016-02-22 20:45:49 +05302384u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002385{
2386 u32 addr = 0;
Houston Hoffmane6330442016-02-26 12:19:11 -08002387 u32 ce = COPY_ENGINE_ID(ctrl_addr);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002388
Houston Hoffmane6330442016-02-26 12:19:11 -08002389 switch (ce) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002390 case 1:
2391 addr = SHADOW_VALUE13;
2392 break;
2393 case 2:
2394 addr = SHADOW_VALUE14;
2395 break;
Vishwajith Upendra70efc752016-04-18 11:23:49 -07002396 case 5:
2397 addr = SHADOW_VALUE17;
2398 break;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002399 case 7:
2400 addr = SHADOW_VALUE19;
2401 break;
2402 case 8:
2403 addr = SHADOW_VALUE20;
2404 break;
Houston Hoffmane6330442016-02-26 12:19:11 -08002405 case 9:
2406 addr = SHADOW_VALUE21;
2407 break;
2408 case 10:
2409 addr = SHADOW_VALUE22;
2410 break;
Nirav Shah75cc5c82016-05-25 10:52:38 +05302411 case 11:
2412 addr = SHADOW_VALUE23;
2413 break;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002414 default:
Houston Hoffmane6330442016-02-26 12:19:11 -08002415 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302416 QDF_ASSERT(0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002417 }
2418
2419 return addr;
2420
2421}
2422#endif
2423
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002424#if defined(FEATURE_LRO)
2425/**
2426 * ce_lro_flush_cb_register() - register the LRO flush
2427 * callback
 2428 * @hif_hdl: HIF context
2429 * @handler: callback function
2430 * @data: opaque data pointer to be passed back
2431 *
2432 * Store the LRO flush callback provided
2433 *
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002434 * Return: Number of instances the callback is registered for
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002435 */
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002436int ce_lro_flush_cb_register(struct hif_opaque_softc *hif_hdl,
2437 void (handler)(void *), void *data)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002438{
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002439 int rc = 0;
Houston Hoffmanc7d54292016-04-13 18:55:37 -07002440 int i;
2441 struct CE_state *ce_state;
Komal Seelam5584a7c2016-02-24 19:22:48 +05302442 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002443
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302444 QDF_ASSERT(scn != NULL);
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002445
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002446 if (scn != NULL) {
2447 for (i = 0; i < scn->ce_count; i++) {
2448 ce_state = scn->ce_id_to_state[i];
2449 if ((ce_state != NULL) && (ce_state->htt_rx_data)) {
2450 ce_state->lro_flush_cb = handler;
2451 ce_state->lro_data = data;
2452 rc++;
2453 }
Houston Hoffmanc7d54292016-04-13 18:55:37 -07002454 }
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002455 } else {
2456 HIF_ERROR("%s: hif_state NULL!", __func__);
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002457 }
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002458 return rc;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002459}
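/*
 * Example (illustrative sketch; example_lro_flush and lro_ctx are
 * hypothetical): a datapath layer registers its flush routine once and
 * checks how many rx copy engines accepted it.
 *
 *	if (ce_lro_flush_cb_register(hif_hdl, example_lro_flush,
 *				     lro_ctx) == 0)
 *		HIF_ERROR("no htt_rx_data CE took the LRO callback");
 */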
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002460
2461/**
2462 * ce_lro_flush_cb_deregister() - deregister the LRO flush
2463 * callback
 2464 * @hif_hdl: HIF context
2465 *
2466 * Remove the LRO flush callback
2467 *
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002468 * Return: Number of instances the callback is de-registered
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002469 */
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002470int ce_lro_flush_cb_deregister(struct hif_opaque_softc *hif_hdl)
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002471{
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002472 int rc = 0;
Houston Hoffmanc7d54292016-04-13 18:55:37 -07002473 int i;
2474 struct CE_state *ce_state;
Komal Seelam5584a7c2016-02-24 19:22:48 +05302475 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002476
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302477 QDF_ASSERT(scn != NULL);
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002478 if (scn != NULL) {
2479 for (i = 0; i < scn->ce_count; i++) {
2480 ce_state = scn->ce_id_to_state[i];
2481 if ((ce_state != NULL) && (ce_state->htt_rx_data)) {
2482 ce_state->lro_flush_cb = NULL;
2483 ce_state->lro_data = NULL;
2484 rc++;
2485 }
Houston Hoffmanc7d54292016-04-13 18:55:37 -07002486 }
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002487 } else {
2488 HIF_ERROR("%s: hif_state NULL!", __func__);
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002489 }
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002490 return rc;
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002491}
2492#endif
Sanjay Devnanic319c822015-11-06 16:44:28 -08002493
2494/**
2495 * hif_map_service_to_pipe() - returns the ce ids pertaining to
2496 * this service
Komal Seelam644263d2016-02-22 20:45:49 +05302497 * @hif_hdl: hif_opaque_softc pointer.
Sanjay Devnanic319c822015-11-06 16:44:28 -08002498 * @svc_id: Service ID for which the mapping is needed.
2499 * @ul_pipe: address of the container in which ul pipe is returned.
2500 * @dl_pipe: address of the container in which dl pipe is returned.
2501 * @ul_is_polled: address of the container in which a bool
2502 * indicating if the UL CE for this service
2503 * is polled is returned.
2504 * @dl_is_polled: address of the container in which a bool
2505 * indicating if the DL CE for this service
2506 * is polled is returned.
2507 *
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002508 * Return: Indicates whether the service has been found in the table.
2509 * Upon return, ul_is_polled is updated only if ul_pipe is updated.
2510 * There will be warning logs if either leg has not been updated
 2511 * because it missed the entry in the table (but this is not an error).
Sanjay Devnanic319c822015-11-06 16:44:28 -08002512 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05302513int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
Sanjay Devnanic319c822015-11-06 16:44:28 -08002514 uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
2515 int *dl_is_polled)
2516{
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002517 int status = QDF_STATUS_E_INVAL;
Sanjay Devnanic319c822015-11-06 16:44:28 -08002518 unsigned int i;
2519 struct service_to_pipe element;
Sanjay Devnanic319c822015-11-06 16:44:28 -08002520 struct service_to_pipe *tgt_svc_map_to_use;
2521 size_t sz_tgt_svc_map_to_use;
Komal Seelambd7c51d2016-02-24 10:27:30 +05302522 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
2523 uint32_t mode = hif_get_conparam(scn);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002524 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002525 bool dl_updated = false;
2526 bool ul_updated = false;
Sanjay Devnanic319c822015-11-06 16:44:28 -08002527
Houston Hoffman75ef5a52016-04-14 17:15:49 -07002528 if (QDF_IS_EPPING_ENABLED(mode)) {
Sanjay Devnanic319c822015-11-06 16:44:28 -08002529 tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
2530 sz_tgt_svc_map_to_use =
2531 sizeof(target_service_to_ce_map_wlan_epping);
2532 } else {
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002533 switch (tgt_info->target_type) {
2534 default:
2535 tgt_svc_map_to_use = target_service_to_ce_map_wlan;
2536 sz_tgt_svc_map_to_use =
2537 sizeof(target_service_to_ce_map_wlan);
2538 break;
2539 case TARGET_TYPE_AR900B:
2540 case TARGET_TYPE_QCA9984:
2541 case TARGET_TYPE_IPQ4019:
2542 case TARGET_TYPE_QCA9888:
2543 case TARGET_TYPE_AR9888:
2544 case TARGET_TYPE_AR9888V2:
2545 tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
2546 sz_tgt_svc_map_to_use =
2547 sizeof(target_service_to_ce_map_ar900b);
2548 break;
2549 }
Sanjay Devnanic319c822015-11-06 16:44:28 -08002550 }
2551
2552 *dl_is_polled = 0; /* polling for received messages not supported */
2553
2554 for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
2555
2556 memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
2557 if (element.service_id == svc_id) {
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002558 if (element.pipedir == PIPEDIR_OUT) {
Sanjay Devnanic319c822015-11-06 16:44:28 -08002559 *ul_pipe = element.pipenum;
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002560 *ul_is_polled =
2561 (host_ce_config[*ul_pipe].flags &
2562 CE_ATTR_DISABLE_INTR) != 0;
2563 ul_updated = true;
2564 } else if (element.pipedir == PIPEDIR_IN) {
Sanjay Devnanic319c822015-11-06 16:44:28 -08002565 *dl_pipe = element.pipenum;
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002566 dl_updated = true;
2567 }
2568 status = QDF_STATUS_SUCCESS;
Sanjay Devnanic319c822015-11-06 16:44:28 -08002569 }
2570 }
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002571 if (ul_updated == false)
2572 HIF_WARN("%s: ul pipe is NOT updated for service %d",
2573 __func__, svc_id);
2574 if (dl_updated == false)
2575 HIF_WARN("%s: dl pipe is NOT updated for service %d",
2576 __func__, svc_id);
Sanjay Devnanic319c822015-11-06 16:44:28 -08002577
2578 return status;
2579}
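/*
 * Example (illustrative): hif_get_default_pipe() earlier in this file is
 * one caller of this routine; resolving another service looks the same,
 * e.g.
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	if (hif_map_service_to_pipe(hif_hdl, HTT_DATA_MSG_SVC, &ul_pipe,
 *				    &dl_pipe, &ul_polled, &dl_polled)
 *	    != QDF_STATUS_SUCCESS)
 *		the service is missing from the map table;
 */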
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002580
2581#ifdef SHADOW_REG_DEBUG
Komal Seelam644263d2016-02-22 20:45:49 +05302582inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002583 uint32_t CE_ctrl_addr)
2584{
2585 uint32_t read_from_hw, srri_from_ddr = 0;
2586
2587 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
2588
2589 srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
2590
2591 if (read_from_hw != srri_from_ddr) {
Houston Hoffmanc50572b2016-06-08 19:49:46 -07002592 HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
2593 __func__, srri_from_ddr, read_from_hw,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002594 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302595 QDF_ASSERT(0);
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002596 }
2597 return srri_from_ddr;
2598}
2599
2600
Komal Seelam644263d2016-02-22 20:45:49 +05302601inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002602 uint32_t CE_ctrl_addr)
2603{
2604 uint32_t read_from_hw, drri_from_ddr = 0;
2605
2606 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
2607
2608 drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
2609
2610 if (read_from_hw != drri_from_ddr) {
Houston Hoffmanc50572b2016-06-08 19:49:46 -07002611 HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002612 drri_from_ddr, read_from_hw,
2613 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302614 QDF_ASSERT(0);
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002615 }
2616 return drri_from_ddr;
2617}
2618
2619#endif
2620
Houston Hoffman3d0cda82015-12-03 13:25:05 -08002621#ifdef ADRASTEA_RRI_ON_DDR
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002622/**
2623 * hif_get_src_ring_read_index(): Called to get the SRRI
2624 *
Komal Seelam644263d2016-02-22 20:45:49 +05302625 * @scn: hif_softc pointer
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002626 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2627 *
2628 * This function returns the SRRI to the caller. For CEs that
 2629 * don't have interrupts enabled, we look at the DDR based SRRI
2630 *
2631 * Return: SRRI
2632 */
Komal Seelam644263d2016-02-22 20:45:49 +05302633inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002634 uint32_t CE_ctrl_addr)
2635{
2636 struct CE_attr attr;
2637
2638 attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2639 if (attr.flags & CE_ATTR_DISABLE_INTR)
2640 return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2641 else
2642 return A_TARGET_READ(scn,
2643 (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
2644}

/**
 * hif_get_dst_ring_read_index(): Called to get the DRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the DRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR-based DRRI.
 *
 * Return: DRRI
 */
inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;

	attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];

	if (attr.flags & CE_ATTR_DISABLE_INTR)
		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	else
		return A_TARGET_READ(scn,
				(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
}
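
/*
 * Illustrative sketch, assuming the CE_RING_DELTA() helper from
 * ce_internal.h: a polling client can size its completion backlog from
 * the DDR-resident read index without touching target registers, e.g.:
 *
 *	sw_index = src_ring->sw_index;
 *	read_index = hif_get_src_ring_read_index(scn, ctrl_addr);
 *	backlog = CE_RING_DELTA(src_ring->nentries_mask,
 *				sw_index, read_index);
 *
 * Avoiding the register read matters for CE_ATTR_DISABLE_INTR pipes:
 * those are polled from contexts where waking the target for an MMIO
 * read would defeat power save.
 */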

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This function allocates non-cached memory on DDR and sends
 * the physical address of this memory to the CE hardware. The
 * hardware updates the RRI on this particular location.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
	unsigned int i;
	qdf_dma_addr_t paddr_rri_on_ddr;
	uint32_t high_paddr, low_paddr;

	scn->vaddr_rri_on_ddr =
		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
		scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
		&paddr_rri_on_ddr);
	if (scn->vaddr_rri_on_ddr == NULL) {
		HIF_ERROR("%s: RRI on DDR memory allocation failed", __func__);
		return;
	}

	low_paddr = BITS0_TO_31(paddr_rri_on_ddr);
	high_paddr = BITS32_TO_35(paddr_rri_on_ddr);

	HIF_INFO("%s using srri and drri from DDR", __func__);

	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);

	for (i = 0; i < CE_COUNT; i++)
		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));

	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));
}
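
/*
 * Worked example, assuming BITS0_TO_31()/BITS32_TO_35() simply slice
 * a (up to) 36-bit DMA address: for a paddr_rri_on_ddr of 0x923456780,
 *
 *	low_paddr  = BITS0_TO_31(paddr)   ->  0x23456780
 *	high_paddr = BITS32_TO_35(paddr)  ->  0x9
 *
 * The split is needed because the CE register file is 32 bits wide, so
 * the upper nibble of the 36-bit bus address is programmed through a
 * separate register.
 */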
#else

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This is a dummy implementation for platforms that don't
 * support this functionality.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
}
#endif

/**
 * hif_dump_ce_registers() - dump ce registers
 * @scn: hif_softc pointer.
 *
 * Output the copy engine registers
 *
 * Return: 0 for success or error code
 */
int hif_dump_ce_registers(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
	uint16_t i;
	QDF_STATUS status;

	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
		if (scn->ce_id_to_state[i] == NULL) {
			HIF_DBG("CE%d not used.", i);
			continue;
		}

		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
					   (uint8_t *) &ce_reg_values[0],
					   ce_reg_word_size * sizeof(uint32_t));
		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("Dumping CE register failed!");
			return -EACCES;
		}
		HIF_ERROR("CE%d Registers:", i);
		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
				   (uint8_t *) &ce_reg_values[0],
				   ce_reg_word_size * sizeof(uint32_t));
	}
	return 0;
}
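
/*
 * Illustrative usage sketch (hypothetical caller): the dump is meant
 * for failure paths, where one diag read per CE is an acceptable cost:
 *
 *	if (hif_dump_ce_registers(scn) != 0)
 *		HIF_ERROR("%s: unable to dump CE registers", __func__);
 *
 * Because the registers are fetched through hif_diag_read_mem() rather
 * than read directly, the dump only requires a live interconnect to
 * the target, not a responsive target CPU.
 */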

#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
/**
 * hif_get_addl_pipe_info() - collect additional info about a pipe's CE rings
 * @osc: hif opaque context
 * @hif_info: output structure to be filled in
 * @pipe: pipe number
 *
 * Snapshots the source and destination ring state of the copy engine
 * backing the given pipe, along with the PCI memory base and the CE
 * control address.
 *
 * Return: hif_info, filled in
 */
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct CE_ring_state *dest_ring = ce_state->dest_ring;

	if (src_ring) {
		hif_info->ul_pipe.nentries = src_ring->nentries;
		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
		hif_info->ul_pipe.sw_index = src_ring->sw_index;
		hif_info->ul_pipe.write_index = src_ring->write_index;
		hif_info->ul_pipe.hw_index = src_ring->hw_index;
		hif_info->ul_pipe.base_addr_CE_space =
			src_ring->base_addr_CE_space;
		hif_info->ul_pipe.base_addr_owner_space =
			src_ring->base_addr_owner_space;
	}

	if (dest_ring) {
		hif_info->dl_pipe.nentries = dest_ring->nentries;
		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
		hif_info->dl_pipe.write_index = dest_ring->write_index;
		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
		hif_info->dl_pipe.base_addr_CE_space =
			dest_ring->base_addr_CE_space;
		hif_info->dl_pipe.base_addr_owner_space =
			dest_ring->base_addr_owner_space;
	}

	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
	hif_info->ctrl_addr = ce_state->ctrl_addr;

	return hif_info;
}

/**
 * hif_set_nss_wifiol_mode() - enable or disable NSS wifi offload mode
 * @osc: hif opaque context
 * @mode: requested offload mode
 *
 * Return: 0
 */
uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->nss_wifi_ol_mode = mode;
	return 0;
}
#endif
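
/*
 * Illustrative sketch (hypothetical NSS-offload caller): the snapshot
 * returned by hif_get_addl_pipe_info() is self-contained, so a client
 * can compute ring occupancy without dereferencing CE internals:
 *
 *	struct hif_pipe_addl_info info = { 0 };
 *	uint32_t used;
 *
 *	hif_get_addl_pipe_info(osc, &info, pipe);
 *	used = (info.ul_pipe.write_index - info.ul_pipe.sw_index) &
 *	       info.ul_pipe.nentries_mask;
 *
 * The mask works because CE ring sizes are powers of two.
 */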

/**
 * hif_disable_interrupt() - disable copy-complete interrupts for a pipe
 * @osc: hif opaque context
 * @pipe_num: pipe (copy engine) number
 *
 * Return: None
 */
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
	uint32_t ctrl_addr = CE_state->ctrl_addr;

	Q_TARGET_ACCESS_BEGIN(scn);
	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
	Q_TARGET_ACCESS_END(scn);
}

/**
 * hif_fw_event_handler() - hif fw event handler
 * @hif_state: pointer to hif ce state structure
 *
 * Raise the HTC callback, if one is registered, so that fw events
 * can be processed.
 *
 * Return: none
 */
static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
{
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	if (!msg_callbacks->fwEventHandler)
		return;

	msg_callbacks->fwEventHandler(msg_callbacks->Context,
			QDF_STATUS_E_FAILURE);
}

#ifndef QCA_WIFI_3_0
/**
 * hif_fw_interrupt_handler() - FW interrupt handler
 * @irq: irq number
 * @arg: the user pointer
 *
 * Called from the PCI interrupt handler when the Target raises a
 * firmware-generated interrupt to the Host.
 *
 * Return: status of handled irq
 */
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	struct hif_softc *scn = arg;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	uint32_t fw_indicator_address, fw_indicator;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return ATH_ISR_NOSCHED;

	fw_indicator_address = hif_state->fw_indicator_address;
	/* For sudden unplug this will return ~0 */
	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);

	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
		/* ACK: clear Target-side pending event */
		A_TARGET_WRITE(scn, fw_indicator_address,
			       fw_indicator & ~FW_IND_EVENT_PENDING);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;

		if (hif_state->started) {
			hif_fw_event_handler(hif_state);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 */
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
				("%s: Early firmware event indicated\n",
				 __func__));
		}
	} else {
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;
	}

	return ATH_ISR_SCHED;
}
#else
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	return ATH_ISR_SCHED;
}
#endif /* #ifndef QCA_WIFI_3_0 */
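
/*
 * Illustrative sketch (assumption: ATH_ISR_SCHED/ATH_ISR_NOSCHED map to
 * the kernel's IRQ_HANDLED/IRQ_NONE in the bus glue): the handler has
 * the standard irq_handler_t signature, so wiring it up is a plain
 * request_irq() in the bus layer, e.g.:
 *
 *	ret = request_irq(irq, hif_fw_interrupt_handler,
 *			  IRQF_SHARED, "wlan_fw", scn);
 *	if (ret)
 *		HIF_ERROR("%s: request_irq failed: %d", __func__, ret);
 *
 * The registration this driver actually uses lives in the PCI/SNOC
 * glue; the call above only shows the expected shape.
 */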

/**
 * hif_wlan_disable(): call the platform driver to disable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode to the platform driver to
 * disable wlan.
 *
 * Return: void
 */
void hif_wlan_disable(struct hif_softc *scn)
{
	enum pld_driver_mode mode;
	uint32_t con_mode = hif_get_conparam(scn);

	if (QDF_GLOBAL_FTM_MODE == con_mode)
		mode = PLD_FTM;
	else if (QDF_IS_EPPING_ENABLED(con_mode))
		mode = PLD_EPPING;
	else
		mode = PLD_MISSION;

	pld_wlan_disable(scn->qdf_dev->dev, mode);
}
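
/*
 * Sketch of a hypothetical helper (not in the driver): the same
 * con_mode to PLD mode translation is needed wherever wlan is enabled,
 * so factoring it out would keep the enable and disable paths in sync:
 *
 *	static enum pld_driver_mode hif_con_mode_to_pld(uint32_t con_mode)
 *	{
 *		if (QDF_GLOBAL_FTM_MODE == con_mode)
 *			return PLD_FTM;
 *		if (QDF_IS_EPPING_ENABLED(con_mode))
 *			return PLD_EPPING;
 *		return PLD_MISSION;
 *	}
 */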