blob: e55f8234369f147c47922aacb1a20a11fe6ede09 [file] [log] [blame]
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001/*
yeshwanth sriram guntuka78ee68f2016-10-25 11:57:58 +05302 * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080027#include "targcfg.h"
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +053028#include "qdf_lock.h"
29#include "qdf_status.h"
30#include "qdf_status.h"
31#include <qdf_atomic.h> /* qdf_atomic_read */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080032#include <targaddrs.h>
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080033#include "hif_io32.h"
34#include <hif.h>
35#include "regtable.h"
36#define ATH_MODULE_NAME hif
37#include <a_debug.h>
38#include "hif_main.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080039#include "ce_api.h"
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +053040#include "qdf_trace.h"
Yuanyuan Liufd594c22016-04-25 13:59:19 -070041#include "pld_common.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080042#include "hif_debug.h"
43#include "ce_internal.h"
44#include "ce_reg.h"
45#include "ce_assignment.h"
46#include "ce_tasklet.h"
Houston Hoffman56e0d702016-05-05 17:48:06 -070047#ifndef CONFIG_WIN
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080048#include "qwlan_version.h"
Houston Hoffman56e0d702016-05-05 17:48:06 -070049#endif
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080050
51#define CE_POLL_TIMEOUT 10 /* ms */
52
Poddar, Siddarthe41943f2016-04-27 15:33:48 +053053#define AGC_DUMP 1
54#define CHANINFO_DUMP 2
55#define BB_WATCHDOG_DUMP 3
56#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
57#define PCIE_ACCESS_DUMP 4
58#endif
59#include "mp_dev.h"
60
Houston Hoffman5141f9d2017-01-05 10:49:17 -080061#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290)) && \
62 !defined(QCA_WIFI_SUPPORT_SRNG)
63#define QCA_WIFI_SUPPORT_SRNG
64#endif
65
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080066/* Forward references */
67static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);
68
69/*
70 * Fix EV118783, poll to check whether a BMI response comes
71 * other than waiting for the interruption which may be lost.
72 */
73/* #define BMI_RSP_POLLING */
74#define BMI_RSP_TO_MILLISEC 1000
75
Yuanyuan Liua7a282f2016-04-15 12:55:04 -070076#ifdef CONFIG_BYPASS_QMI
77#define BYPASS_QMI 1
78#else
79#define BYPASS_QMI 0
80#endif
81
Houston Hoffmanabd00772016-05-06 17:02:48 -070082#ifdef CONFIG_WIN
Pratik Gandhi424c62e2016-08-23 19:47:09 +053083#if ENABLE_10_4_FW_HDR
Houston Hoffmanabd00772016-05-06 17:02:48 -070084#define WDI_IPA_SERVICE_GROUP 5
85#define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
86#define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
87#define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
Pratik Gandhi424c62e2016-08-23 19:47:09 +053088#endif /* ENABLE_10_4_FW_HDR */
Houston Hoffmanabd00772016-05-06 17:02:48 -070089#endif
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080090
Komal Seelam644263d2016-02-22 20:45:49 +053091static int hif_post_recv_buffers(struct hif_softc *scn);
92static void hif_config_rri_on_ddr(struct hif_softc *scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080093
/**
 * hif_target_access_log_dump() - dump the recorded target access log
 *
 * Dumps the target register access history via
 * hif_target_dump_access_log().  Only compiled in when
 * CONFIG_ATH_PCIE_ACCESS_DEBUG is enabled; invoked from
 * hif_trigger_dump() for the PCIE_ACCESS_DUMP command.
 *
 * Return: n/a
 */
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
static void hif_target_access_log_dump(void)
{
    hif_target_dump_access_log();
}
#endif
107
108
/**
 * hif_trigger_dump() - dispatch a target debug-dump command
 * @hif_ctx: opaque HIF context
 * @cmd_id: AGC_DUMP, CHANINFO_DUMP, BB_WATCHDOG_DUMP, or (when
 *          CONFIG_ATH_PCIE_ACCESS_DEBUG is set) PCIE_ACCESS_DUMP
 * @start: for the AGC and CHANINFO commands, true starts the capture
 *         and false dumps what was captured; ignored by the others
 *
 * Return: n/a
 */
void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
              uint8_t cmd_id, bool start)
{
    struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

    switch (cmd_id) {
    case AGC_DUMP:
        if (start)
            priv_start_agc(scn);
        else
            priv_dump_agc(scn);
        break;
    case CHANINFO_DUMP:
        if (start)
            priv_start_cap_chaninfo(scn);
        else
            priv_dump_chaninfo(scn);
        break;
    case BB_WATCHDOG_DUMP:
        priv_dump_bbwatchdog(scn);
        break;
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
    case PCIE_ACCESS_DUMP:
        hif_target_access_log_dump();
        break;
#endif
    default:
        HIF_ERROR("%s: Invalid htc dump command", __func__);
        break;
    }
}
140
/**
 * ce_poll_timeout() - periodic poll-mode service routine for a CE
 * @arg: opaque pointer to the struct CE_state being polled
 *
 * Services the copy engine once, then re-arms the poll timer for
 * another CE_POLL_TIMEOUT ms.  The timer_inited flag guards against
 * servicing/re-arming after the timer has been torn down.
 *
 * Return: n/a
 */
static void ce_poll_timeout(void *arg)
{
    struct CE_state *CE_state = (struct CE_state *)arg;
    if (CE_state->timer_inited) {
        ce_per_engine_service(CE_state->scn, CE_state->id);
        qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
    }
}
149
/**
 * roundup_pwr2() - round n up to the next power of two
 * @n: value to round
 *
 * A value that already has at most one bit set (which includes 0) is
 * returned unchanged.  Otherwise the smallest power of two greater
 * than n is returned, searching 2^2 through 2^30.
 *
 * Return: rounded value, or 0 (after asserting) if n is too large
 */
static unsigned int roundup_pwr2(unsigned int n)
{
    unsigned int pwr2 = 4;
    int iter;

    if ((n & (n - 1)) == 0)
        return n;   /* already a power of 2 */

    for (iter = 0; iter < 29; iter++, pwr2 <<= 1) {
        if (pwr2 > n)
            return pwr2;
    }

    QDF_ASSERT(0);  /* n too large */
    return 0;
}
168
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700169#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
170#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40
171
/*
 * Shadow register configuration for mission mode: each entry pairs a
 * CE id with the shadow-region offset of its write-index register.
 * Source (host->target) CEs use the SRC offset, destination CEs the
 * DST offset.  CE 7 appears with both offsets (bidirectional diag CE
 * per the table comment above).
 */
static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
    { 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 1, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 2, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 7, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 8, ADRASTEA_DST_WR_INDEX_OFFSET},
#ifdef QCA_WIFI_3_0_ADRASTEA
    { 9, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 10, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 11, ADRASTEA_DST_WR_INDEX_OFFSET},
#endif
};
188
/*
 * Shadow register configuration used when epping (loopback test mode)
 * is enabled; same { CE id, write-index shadow offset } layout as
 * target_shadow_reg_cfg_map but with the epping pipe assignment.
 */
static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
    { 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 1, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 2, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 5, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 7, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700200
201/* CE_PCI TABLE */
202/*
203 * NOTE: the table below is out of date, though still a useful reference.
204 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
205 * mapping of HTC services to HIF pipes.
206 */
207/*
208 * This authoritative table defines Copy Engine configuration and the mapping
209 * of services/endpoints to CEs. A subset of this information is passed to
210 * the Target during startup as a prerequisite to entering BMI phase.
211 * See:
212 * target_service_to_ce_map - Target-side mapping
213 * hif_map_service_to_pipe - Host-side mapping
214 * target_ce_config - Target-side configuration
215 * host_ce_config - Host-side configuration
216 ============================================================================
217 Purpose | Service / Endpoint | CE | Dire | Xfer | Xfer
218 | | | ctio | Size | Frequency
219 | | | n | |
220 ============================================================================
221 tx | HTT_DATA (downlink) | CE 0 | h->t | medium - | very frequent
222 descriptor | | | | O(100B) | and regular
223 download | | | | |
224 ----------------------------------------------------------------------------
225 rx | HTT_DATA (uplink) | CE 1 | t->h | small - | frequent and
226 indication | | | | O(10B) | regular
227 upload | | | | |
228 ----------------------------------------------------------------------------
229 MSDU | DATA_BK (uplink) | CE 2 | t->h | large - | rare
230 upload | | | | O(1000B) | (frequent
231 e.g. noise | | | | | during IP1.0
232 packets | | | | | testing)
233 ----------------------------------------------------------------------------
234 MSDU | DATA_BK (downlink) | CE 3 | h->t | large - | very rare
235 download | | | | O(1000B) | (frequent
236 e.g. | | | | | during IP1.0
237 misdirecte | | | | | testing)
238 d EAPOL | | | | |
239 packets | | | | |
240 ----------------------------------------------------------------------------
241 n/a | DATA_BE, DATA_VI | CE 2 | t->h | | never(?)
242 | DATA_VO (uplink) | | | |
243 ----------------------------------------------------------------------------
244 n/a | DATA_BE, DATA_VI | CE 3 | h->t | | never(?)
245 | DATA_VO (downlink) | | | |
246 ----------------------------------------------------------------------------
247 WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
248 | | | | O(100B) |
249 ----------------------------------------------------------------------------
250 WMI | WMI_CONTROL | CE 5 | h->t | medium - | infrequent
251 messages | (downlink) | | | O(100B) |
252 | | | | |
253 ----------------------------------------------------------------------------
254 n/a | HTC_CTRL_RSVD, | CE 1 | t->h | | never(?)
255 | HTC_RAW_STREAMS | | | |
256 | (uplink) | | | |
257 ----------------------------------------------------------------------------
258 n/a | HTC_CTRL_RSVD, | CE 0 | h->t | | never(?)
259 | HTC_RAW_STREAMS | | | |
260 | (downlink) | | | |
261 ----------------------------------------------------------------------------
262 diag | none (raw CE) | CE 7 | t<>h | 4 | Diag Window
263 | | | | | infrequent
264 ============================================================================
265 */
266
267/*
268 * Map from service/endpoint to Copy Engine.
269 * This table is derived from the CE_PCI TABLE, above.
270 * It is passed to the Target at startup for use by firmware.
271 */
272static struct service_to_pipe target_service_to_ce_map_wlan[] = {
273 {
274 WMI_DATA_VO_SVC,
275 PIPEDIR_OUT, /* out = UL = host -> target */
276 3,
277 },
278 {
279 WMI_DATA_VO_SVC,
280 PIPEDIR_IN, /* in = DL = target -> host */
281 2,
282 },
283 {
284 WMI_DATA_BK_SVC,
285 PIPEDIR_OUT, /* out = UL = host -> target */
286 3,
287 },
288 {
289 WMI_DATA_BK_SVC,
290 PIPEDIR_IN, /* in = DL = target -> host */
291 2,
292 },
293 {
294 WMI_DATA_BE_SVC,
295 PIPEDIR_OUT, /* out = UL = host -> target */
296 3,
297 },
298 {
299 WMI_DATA_BE_SVC,
300 PIPEDIR_IN, /* in = DL = target -> host */
301 2,
302 },
303 {
304 WMI_DATA_VI_SVC,
305 PIPEDIR_OUT, /* out = UL = host -> target */
306 3,
307 },
308 {
309 WMI_DATA_VI_SVC,
310 PIPEDIR_IN, /* in = DL = target -> host */
311 2,
312 },
313 {
314 WMI_CONTROL_SVC,
315 PIPEDIR_OUT, /* out = UL = host -> target */
316 3,
317 },
318 {
319 WMI_CONTROL_SVC,
320 PIPEDIR_IN, /* in = DL = target -> host */
321 2,
322 },
323 {
Kiran Venkatappae17e3b62017-02-10 16:31:49 +0530324 WMI_CONTROL_SVC_WMAC1,
325 PIPEDIR_OUT, /* out = UL = host -> target */
326 7,
327 },
328 {
329 WMI_CONTROL_SVC_WMAC1,
330 PIPEDIR_IN, /* in = DL = target -> host */
331 2,
332 },
333 {
334 WMI_CONTROL_SVC_WMAC2,
335 PIPEDIR_OUT, /* out = UL = host -> target */
336 9,
337 },
338 {
339 WMI_CONTROL_SVC_WMAC2,
340 PIPEDIR_IN, /* in = DL = target -> host */
341 2,
342 },
343 {
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700344 HTC_CTRL_RSVD_SVC,
345 PIPEDIR_OUT, /* out = UL = host -> target */
346 0, /* could be moved to 3 (share with WMI) */
347 },
348 {
349 HTC_CTRL_RSVD_SVC,
350 PIPEDIR_IN, /* in = DL = target -> host */
351 2,
352 },
353 {
354 HTC_RAW_STREAMS_SVC, /* not currently used */
355 PIPEDIR_OUT, /* out = UL = host -> target */
356 0,
357 },
358 {
359 HTC_RAW_STREAMS_SVC, /* not currently used */
360 PIPEDIR_IN, /* in = DL = target -> host */
361 2,
362 },
363 {
364 HTT_DATA_MSG_SVC,
365 PIPEDIR_OUT, /* out = UL = host -> target */
366 4,
367 },
368 {
369 HTT_DATA_MSG_SVC,
370 PIPEDIR_IN, /* in = DL = target -> host */
371 1,
372 },
373 {
374 WDI_IPA_TX_SVC,
375 PIPEDIR_OUT, /* in = DL = target -> host */
376 5,
377 },
Houston Hoffmane6330442016-02-26 12:19:11 -0800378#if defined(QCA_WIFI_3_0_ADRASTEA)
379 {
380 HTT_DATA2_MSG_SVC,
381 PIPEDIR_IN, /* in = DL = target -> host */
382 9,
383 },
384 {
385 HTT_DATA3_MSG_SVC,
386 PIPEDIR_IN, /* in = DL = target -> host */
387 10,
388 },
Nirav Shah75cc5c82016-05-25 10:52:38 +0530389 {
390 PACKET_LOG_SVC,
391 PIPEDIR_IN, /* in = DL = target -> host */
392 11,
393 },
Houston Hoffmane6330442016-02-26 12:19:11 -0800394#endif
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700395 /* (Additions here) */
396
397 { /* Must be last */
398 0,
399 0,
400 0,
401 },
402};
403
/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN = TARGET to HOST */
/*
 * Service-to-pipe map for QCA6290 targets: rows are
 * { service id, direction, CE/pipe number }.
 */
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN , 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN , 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN , 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN , 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN , 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN , 2, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN , 1, },
    /* (Additions here) */
    { 0, 0, 0, },
};
424
Houston Hoffmanfb698ef2016-05-05 19:50:44 -0700425static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
426 {
427 WMI_DATA_VO_SVC,
428 PIPEDIR_OUT, /* out = UL = host -> target */
429 3,
430 },
431 {
432 WMI_DATA_VO_SVC,
433 PIPEDIR_IN, /* in = DL = target -> host */
434 2,
435 },
436 {
437 WMI_DATA_BK_SVC,
438 PIPEDIR_OUT, /* out = UL = host -> target */
439 3,
440 },
441 {
442 WMI_DATA_BK_SVC,
443 PIPEDIR_IN, /* in = DL = target -> host */
444 2,
445 },
446 {
447 WMI_DATA_BE_SVC,
448 PIPEDIR_OUT, /* out = UL = host -> target */
449 3,
450 },
451 {
452 WMI_DATA_BE_SVC,
453 PIPEDIR_IN, /* in = DL = target -> host */
454 2,
455 },
456 {
457 WMI_DATA_VI_SVC,
458 PIPEDIR_OUT, /* out = UL = host -> target */
459 3,
460 },
461 {
462 WMI_DATA_VI_SVC,
463 PIPEDIR_IN, /* in = DL = target -> host */
464 2,
465 },
466 {
467 WMI_CONTROL_SVC,
468 PIPEDIR_OUT, /* out = UL = host -> target */
469 3,
470 },
471 {
472 WMI_CONTROL_SVC,
473 PIPEDIR_IN, /* in = DL = target -> host */
474 2,
475 },
476 {
477 HTC_CTRL_RSVD_SVC,
478 PIPEDIR_OUT, /* out = UL = host -> target */
479 0, /* could be moved to 3 (share with WMI) */
480 },
481 {
482 HTC_CTRL_RSVD_SVC,
483 PIPEDIR_IN, /* in = DL = target -> host */
484 1,
485 },
486 {
487 HTC_RAW_STREAMS_SVC, /* not currently used */
488 PIPEDIR_OUT, /* out = UL = host -> target */
489 0,
490 },
491 {
492 HTC_RAW_STREAMS_SVC, /* not currently used */
493 PIPEDIR_IN, /* in = DL = target -> host */
494 1,
495 },
496 {
497 HTT_DATA_MSG_SVC,
498 PIPEDIR_OUT, /* out = UL = host -> target */
499 4,
500 },
501#if WLAN_FEATURE_FASTPATH
502 {
503 HTT_DATA_MSG_SVC,
504 PIPEDIR_IN, /* in = DL = target -> host */
505 5,
506 },
507#else /* WLAN_FEATURE_FASTPATH */
508 {
509 HTT_DATA_MSG_SVC,
510 PIPEDIR_IN, /* in = DL = target -> host */
511 1,
512 },
513#endif /* WLAN_FEATURE_FASTPATH */
514
515 /* (Additions here) */
516
517 { /* Must be last */
518 0,
519 0,
520 0,
521 },
522};
523
524
/* Active shadow register map and its size in bytes.  Defaults to the
 * mission-mode table; presumably switched to the epping variant by
 * config code outside this view — confirm against the caller that
 * selects epping mode.
 */
static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);
527
/*
 * Service-to-pipe map used when epping (loopback test mode) is
 * enabled; selected by hif_select_service_to_pipe_map().
 */
static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
    {WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */
    {WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
    {WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,}, /* out = UL = host -> target */
    {WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},  /* in = DL = target -> host */
    {WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */
    {WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
    {WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */
    {WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
    {WMI_CONTROL_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */
    {WMI_CONTROL_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
    {HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
    {HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
    {HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
    {HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
    {HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
    {HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
    {0, 0, 0,},     /* Must be last */
};
547
Houston Hoffman748e1a62017-03-30 17:20:42 -0700548static void hif_select_service_to_pipe_map(struct hif_softc *scn,
549 struct service_to_pipe **tgt_svc_map_to_use,
550 uint32_t *sz_tgt_svc_map_to_use)
551{
552 uint32_t mode = hif_get_conparam(scn);
553 struct hif_target_info *tgt_info = &scn->target_info;
554
555 if (QDF_IS_EPPING_ENABLED(mode)) {
556 *tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
557 *sz_tgt_svc_map_to_use =
558 sizeof(target_service_to_ce_map_wlan_epping);
559 } else {
560 switch (tgt_info->target_type) {
561 default:
562 *tgt_svc_map_to_use = target_service_to_ce_map_wlan;
563 *sz_tgt_svc_map_to_use =
564 sizeof(target_service_to_ce_map_wlan);
565 break;
566 case TARGET_TYPE_AR900B:
567 case TARGET_TYPE_QCA9984:
568 case TARGET_TYPE_IPQ4019:
569 case TARGET_TYPE_QCA9888:
570 case TARGET_TYPE_AR9888:
571 case TARGET_TYPE_AR9888V2:
572 *tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
573 *sz_tgt_svc_map_to_use =
574 sizeof(target_service_to_ce_map_ar900b);
575 break;
576 case TARGET_TYPE_QCA6290:
577 *tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
578 *sz_tgt_svc_map_to_use =
579 sizeof(target_service_to_ce_map_qca6290);
580 break;
581 }
582 }
583}
584
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700585/**
586 * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
587 * @ce_state : pointer to the state context of the CE
588 *
589 * Description:
590 * Sets htt_rx_data attribute of the state structure if the
591 * CE serves one of the HTT DATA services.
592 *
593 * Return:
594 * false (attribute set to false)
595 * true (attribute set to true);
596 */
Jeff Johnson6950fdb2016-10-07 13:00:59 -0700597static bool ce_mark_datapath(struct CE_state *ce_state)
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700598{
599 struct service_to_pipe *svc_map;
Houston Hoffman748e1a62017-03-30 17:20:42 -0700600 uint32_t map_sz;
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700601 int i;
602 bool rc = false;
603
604 if (ce_state != NULL) {
Houston Hoffman748e1a62017-03-30 17:20:42 -0700605 hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
606 &map_sz);
Houston Hoffman55fcf5a2016-09-27 23:21:51 -0700607
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700608 for (i = 0; i < map_sz; i++) {
609 if ((svc_map[i].pipenum == ce_state->id) &&
610 ((svc_map[i].service_id == HTT_DATA_MSG_SVC) ||
611 (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
612 (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
613 /* HTT CEs are unidirectional */
614 if (svc_map[i].pipedir == PIPEDIR_IN)
615 ce_state->htt_rx_data = true;
616 else
617 ce_state->htt_tx_data = true;
618 rc = true;
619 }
620 }
621 }
622 return rc;
623}
624
/**
 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
 * @ce_id: ce in question
 * @ring: ring state being examined
 * @type: "src_ring" or "dest_ring" string for identifying the ring
 *
 * Warns on non-zero index values.
 * Causes a kernel panic if the ring is not empty during initialization.
 */
static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
                     char *type)
{
    if (ring->write_index != 0 || ring->sw_index != 0)
        HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
              ce_id, type, ring->sw_index, ring->write_index);
    /* a non-empty ring (write != sw) at init time is fatal */
    if (ring->write_index != ring->sw_index)
        QDF_BUG(0);
}
643
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530644/**
645 * ce_srng_based() - Does this target use srng
646 * @ce_state : pointer to the state context of the CE
647 *
648 * Description:
649 * returns true if the target is SRNG based
650 *
651 * Return:
652 * false (attribute set to false)
653 * true (attribute set to true);
654 */
655bool ce_srng_based(struct hif_softc *scn)
656{
657 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
658 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
659
660 switch (tgt_info->target_type) {
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530661 case TARGET_TYPE_QCA8074:
Houston Hoffman31b25ec2016-09-19 13:12:30 -0700662 case TARGET_TYPE_QCA6290:
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530663 return true;
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530664 default:
665 return false;
666 }
667 return false;
668}
669
Houston Hoffman5141f9d2017-01-05 10:49:17 -0800670#ifdef QCA_WIFI_SUPPORT_SRNG
/* Pick the SRNG CE service for SRNG-based targets, legacy otherwise */
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
    return ce_srng_based(scn) ? ce_services_srng() : ce_services_legacy();
}
Houston Hoffman5141f9d2017-01-05 10:49:17 -0800678
Houston Hoffman5141f9d2017-01-05 10:49:17 -0800679
Venkata Sharath Chandra Manchala837d3232017-01-18 15:11:56 -0800680#else /* QCA_LITHIUM */
/* Without SRNG support compiled in, only legacy CE services exist */
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
    return ce_services_legacy();
}
685#endif /* QCA_LITHIUM */
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530686
Houston Hoffman403c2df2017-01-27 12:51:15 -0800687static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
Houston Hoffman10fedfc2017-01-23 15:23:09 -0800688 struct pld_shadow_reg_v2_cfg **shadow_config,
689 int *num_shadow_registers_configured) {
690 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
691
692 return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
693 scn, shadow_config, num_shadow_registers_configured);
694}
695
/**
 * ce_get_desc_size() - descriptor size for a given ring type
 * @scn: HIF context
 * @ring_type: ring type identifier (e.g. CE_RING_SRC)
 *
 * Delegates to the attached CE service; legacy and SRNG descriptor
 * layouts differ in size.
 *
 * Return: size in bytes of one descriptor of the given ring type
 */
static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
                    uint8_t ring_type)
{
    struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

    return hif_state->ce_services->ce_get_desc_size(ring_type);
}
703
704
Jeff Johnson6950fdb2016-10-07 13:00:59 -0700705static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530706 uint8_t ring_type, uint32_t nentries)
707{
708 uint32_t ce_nbytes;
709 char *ptr;
710 qdf_dma_addr_t base_addr;
711 struct CE_ring_state *ce_ring;
712 uint32_t desc_size;
713 struct hif_softc *scn = CE_state->scn;
714
715 ce_nbytes = sizeof(struct CE_ring_state)
716 + (nentries * sizeof(void *));
717 ptr = qdf_mem_malloc(ce_nbytes);
718 if (!ptr)
719 return NULL;
720
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530721 ce_ring = (struct CE_ring_state *)ptr;
722 ptr += sizeof(struct CE_ring_state);
723 ce_ring->nentries = nentries;
724 ce_ring->nentries_mask = nentries - 1;
725
726 ce_ring->low_water_mark_nentries = 0;
727 ce_ring->high_water_mark_nentries = nentries;
728 ce_ring->per_transfer_context = (void **)ptr;
729
730 desc_size = ce_get_desc_size(scn, ring_type);
731
732 /* Legacy platforms that do not support cache
733 * coherent DMA are unsupported
734 */
735 ce_ring->base_addr_owner_space_unaligned =
736 qdf_mem_alloc_consistent(scn->qdf_dev,
737 scn->qdf_dev->dev,
738 (nentries *
739 desc_size +
740 CE_DESC_RING_ALIGN),
741 &base_addr);
742 if (ce_ring->base_addr_owner_space_unaligned
743 == NULL) {
744 HIF_ERROR("%s: ring has no DMA mem",
745 __func__);
746 qdf_mem_free(ptr);
747 return NULL;
748 }
749 ce_ring->base_addr_CE_space_unaligned = base_addr;
750
751 /* Correctly initialize memory to 0 to
752 * prevent garbage data crashing system
753 * when download firmware
754 */
755 qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
756 nentries * desc_size +
757 CE_DESC_RING_ALIGN);
758
759 if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {
760
761 ce_ring->base_addr_CE_space =
762 (ce_ring->base_addr_CE_space_unaligned +
763 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);
764
765 ce_ring->base_addr_owner_space = (void *)
766 (((size_t) ce_ring->base_addr_owner_space_unaligned +
767 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
768 } else {
769 ce_ring->base_addr_CE_space =
770 ce_ring->base_addr_CE_space_unaligned;
771 ce_ring->base_addr_owner_space =
772 ce_ring->base_addr_owner_space_unaligned;
773 }
774
775 return ce_ring;
776}
777
/**
 * ce_ring_setup() - initialize a CE ring in hardware
 * @scn: HIF context
 * @ring_type: ring type identifier (e.g. CE_RING_SRC)
 * @ce_id: copy engine id
 * @ring: ring state to program
 * @attr: CE attributes
 *
 * Delegates to the attached CE service's ce_ring_setup operation.
 *
 * Return: n/a
 */
static void ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
        uint32_t ce_id, struct CE_ring_state *ring,
        struct CE_attr *attr)
{
    struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

    hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
                          ring, attr);
}
786
Houston Hoffmancbcd8392017-02-08 17:43:13 -0800787int hif_ce_bus_early_suspend(struct hif_softc *scn)
788{
789 uint8_t ul_pipe, dl_pipe;
790 int ce_id, status, ul_is_polled, dl_is_polled;
791 struct CE_state *ce_state;
792 status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
793 &ul_pipe, &dl_pipe,
794 &ul_is_polled, &dl_is_polled);
795 if (status) {
796 HIF_ERROR("%s: pipe_mapping failure", __func__);
797 return status;
798 }
799
800 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
801 if (ce_id == ul_pipe)
802 continue;
803 if (ce_id == dl_pipe)
804 continue;
805
806 ce_state = scn->ce_id_to_state[ce_id];
807 qdf_spin_lock_bh(&ce_state->ce_index_lock);
808 if (ce_state->state == CE_RUNNING)
809 ce_state->state = CE_PAUSED;
810 qdf_spin_unlock_bh(&ce_state->ce_index_lock);
811 }
812
813 return status;
814}
815
/**
 * hif_ce_bus_late_resume() - resume copy engines after bus resume
 * @scn: HIF context
 *
 * Moves paused CEs back to CE_RUNNING.  A CE found in CE_PENDING
 * additionally has its source-ring write index flushed to hardware
 * (NOTE(review): CE_PENDING presumably marks a send deferred while
 * suspended — confirm against the send path) and the update is
 * recorded in the CE descriptor event history.
 *
 * Return: 0 (always)
 */
int hif_ce_bus_late_resume(struct hif_softc *scn)
{
    int ce_id;
    struct CE_state *ce_state;
    int write_index;
    bool index_updated;

    for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
        ce_state = scn->ce_id_to_state[ce_id];
        qdf_spin_lock_bh(&ce_state->ce_index_lock);
        if (ce_state->state == CE_PENDING) {
            /* flush the deferred write index to hardware */
            write_index = ce_state->src_ring->write_index;
            CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
                    write_index);
            ce_state->state = CE_RUNNING;
            index_updated = true;
        } else {
            index_updated = false;
        }

        if (ce_state->state == CE_PAUSED)
            ce_state->state = CE_RUNNING;
        qdf_spin_unlock_bh(&ce_state->ce_index_lock);

        /* record outside the lock; write_index is only read when
         * index_updated was set above
         */
        if (index_updated)
            hif_record_ce_desc_event(scn, ce_id,
                    RESUME_WRITE_INDEX_UPDATE,
                    NULL, NULL, write_index);
    }

    return 0;
}
848
/**
 * ce_oom_recovery() - try to recover rx ce from oom condition
 * @context: CE_state of the CE with oom rx ring
 *
 * The executing work will continue to be rescheduled until
 * at least 1 descriptor is successfully posted to the rx ring.
 *
 * Return: none
 */
static void ce_oom_recovery(void *context)
{
    struct CE_state *ce_state = context;
    struct hif_softc *scn = ce_state->scn;
    struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
    /* pipe_info array is indexed by CE id */
    struct HIF_CE_pipe_info *pipe_info =
        &ce_softc->pipe_info[ce_state->id];

    hif_post_recv_buffers_for_pipe(pipe_info);
}
868
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800869/*
870 * Initialize a Copy Engine based on caller-supplied attributes.
871 * This may be called once to initialize both source and destination
872 * rings or it may be called twice for separate source and destination
873 * initialization. It may be that only one side or the other is
874 * initialized by software/firmware.
Houston Hoffman233e9092015-09-02 13:37:21 -0700875 *
876 * This should be called durring the initialization sequence before
877 * interupts are enabled, so we don't have to worry about thread safety.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800878 */
Komal Seelam644263d2016-02-22 20:45:49 +0530879struct CE_handle *ce_init(struct hif_softc *scn,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800880 unsigned int CE_id, struct CE_attr *attr)
881{
882 struct CE_state *CE_state;
883 uint32_t ctrl_addr;
884 unsigned int nentries;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800885 bool malloc_CE_state = false;
886 bool malloc_src_ring = false;
887
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530888 QDF_ASSERT(CE_id < scn->ce_count);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800889 ctrl_addr = CE_BASE_ADDRESS(CE_id);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800890 CE_state = scn->ce_id_to_state[CE_id];
891
892 if (!CE_state) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800893 CE_state =
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530894 (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800895 if (!CE_state) {
896 HIF_ERROR("%s: CE_state has no mem", __func__);
897 return NULL;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800898 }
Houston Hoffman233e9092015-09-02 13:37:21 -0700899 malloc_CE_state = true;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530900 qdf_spinlock_create(&CE_state->ce_index_lock);
Houston Hoffman233e9092015-09-02 13:37:21 -0700901
902 CE_state->id = CE_id;
903 CE_state->ctrl_addr = ctrl_addr;
904 CE_state->state = CE_RUNNING;
905 CE_state->attr_flags = attr->flags;
Manjunathappa Prakash2146da32016-10-13 14:47:47 -0700906 qdf_spinlock_create(&CE_state->lro_unloading_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800907 }
908 CE_state->scn = scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800909
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530910 qdf_atomic_init(&CE_state->rx_pending);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800911 if (attr == NULL) {
912 /* Already initialized; caller wants the handle */
913 return (struct CE_handle *)CE_state;
914 }
915
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800916 if (CE_state->src_sz_max)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530917 QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800918 else
919 CE_state->src_sz_max = attr->src_sz_max;
920
Houston Hoffman68e837e2015-12-04 12:57:24 -0800921 ce_init_ce_desc_event_log(CE_id,
922 attr->src_nentries + attr->dest_nentries);
923
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800924 /* source ring setup */
925 nentries = attr->src_nentries;
926 if (nentries) {
927 struct CE_ring_state *src_ring;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800928 nentries = roundup_pwr2(nentries);
929 if (CE_state->src_ring) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530930 QDF_ASSERT(CE_state->src_ring->nentries == nentries);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800931 } else {
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530932 src_ring = CE_state->src_ring =
933 ce_alloc_ring_state(CE_state,
934 CE_RING_SRC,
935 nentries);
936 if (!src_ring) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800937 /* cannot allocate src ring. If the
938 * CE_state is allocated locally free
939 * CE_State and return error.
940 */
941 HIF_ERROR("%s: src ring has no mem", __func__);
942 if (malloc_CE_state) {
943 /* allocated CE_state locally */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530944 qdf_mem_free(CE_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800945 malloc_CE_state = false;
946 }
947 return NULL;
948 } else {
949 /* we can allocate src ring.
950 * Mark that the src ring is
951 * allocated locally
952 */
953 malloc_src_ring = true;
954 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800955 /*
956 * Also allocate a shadow src ring in
957 * regular mem to use for faster access.
958 */
959 src_ring->shadow_base_unaligned =
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530960 qdf_mem_malloc(nentries *
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800961 sizeof(struct CE_src_desc) +
962 CE_DESC_RING_ALIGN);
963 if (src_ring->shadow_base_unaligned == NULL) {
964 HIF_ERROR("%s: src ring no shadow_base mem",
965 __func__);
966 goto error_no_dma_mem;
967 }
968 src_ring->shadow_base = (struct CE_src_desc *)
969 (((size_t) src_ring->shadow_base_unaligned +
970 CE_DESC_RING_ALIGN - 1) &
971 ~(CE_DESC_RING_ALIGN - 1));
972
Houston Hoffman4411ad42016-03-14 21:12:04 -0700973 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
974 goto error_target_access;
Houston Hoffmanf789c662016-04-12 15:39:04 -0700975
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530976 ce_ring_setup(scn, CE_RING_SRC, CE_id, src_ring, attr);
977
Houston Hoffman4411ad42016-03-14 21:12:04 -0700978 if (Q_TARGET_ACCESS_END(scn) < 0)
979 goto error_target_access;
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530980 ce_ring_test_initial_indexes(CE_id, src_ring,
981 "src_ring");
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800982 }
983 }
984
985 /* destination ring setup */
986 nentries = attr->dest_nentries;
987 if (nentries) {
988 struct CE_ring_state *dest_ring;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800989
990 nentries = roundup_pwr2(nentries);
991 if (CE_state->dest_ring) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530992 QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800993 } else {
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530994 dest_ring = CE_state->dest_ring =
995 ce_alloc_ring_state(CE_state,
996 CE_RING_DEST,
997 nentries);
998 if (!dest_ring) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800999 /* cannot allocate dst ring. If the CE_state
1000 * or src ring is allocated locally free
1001 * CE_State and src ring and return error.
1002 */
1003 HIF_ERROR("%s: dest ring has no mem",
1004 __func__);
1005 if (malloc_src_ring) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301006 qdf_mem_free(CE_state->src_ring);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001007 CE_state->src_ring = NULL;
1008 malloc_src_ring = false;
1009 }
1010 if (malloc_CE_state) {
1011 /* allocated CE_state locally */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301012 qdf_mem_free(CE_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001013 malloc_CE_state = false;
1014 }
1015 return NULL;
1016 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001017
Houston Hoffman4411ad42016-03-14 21:12:04 -07001018 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1019 goto error_target_access;
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301020
1021 ce_ring_setup(scn, CE_RING_DEST, CE_id, dest_ring, attr);
1022
1023 if (Q_TARGET_ACCESS_END(scn) < 0)
1024 goto error_target_access;
Houston Hoffman47808172016-05-06 10:04:21 -07001025
1026 ce_ring_test_initial_indexes(CE_id, dest_ring,
1027 "dest_ring");
1028
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301029 /* For srng based target, init status ring here */
1030 if (ce_srng_based(CE_state->scn)) {
1031 CE_state->status_ring =
1032 ce_alloc_ring_state(CE_state,
1033 CE_RING_STATUS,
1034 nentries);
1035 if (CE_state->status_ring == NULL) {
1036 /*Allocation failed. Cleanup*/
1037 qdf_mem_free(CE_state->dest_ring);
1038 if (malloc_src_ring) {
1039 qdf_mem_free
1040 (CE_state->src_ring);
1041 CE_state->src_ring = NULL;
1042 malloc_src_ring = false;
1043 }
1044 if (malloc_CE_state) {
1045 /* allocated CE_state locally */
1046 scn->ce_id_to_state[CE_id] =
1047 NULL;
1048 qdf_mem_free(CE_state);
1049 malloc_CE_state = false;
1050 }
Houston Hoffman4411ad42016-03-14 21:12:04 -07001051
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301052 return NULL;
1053 }
1054 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1055 goto error_target_access;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001056
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301057 ce_ring_setup(scn, CE_RING_STATUS, CE_id,
1058 CE_state->status_ring, attr);
1059
1060 if (Q_TARGET_ACCESS_END(scn) < 0)
1061 goto error_target_access;
1062
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001063 }
Houston Hoffman31b25ec2016-09-19 13:12:30 -07001064
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001065 /* epping */
1066 /* poll timer */
1067 if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL)) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301068 qdf_timer_init(scn->qdf_dev,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001069 &CE_state->poll_timer,
1070 ce_poll_timeout,
1071 CE_state,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301072 QDF_TIMER_TYPE_SW);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001073 CE_state->timer_inited = true;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301074 qdf_timer_mod(&CE_state->poll_timer,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001075 CE_POLL_TIMEOUT);
1076 }
1077 }
1078 }
1079
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301080 if (!ce_srng_based(scn)) {
1081 /* Enable CE error interrupts */
1082 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1083 goto error_target_access;
1084 CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
1085 if (Q_TARGET_ACCESS_END(scn) < 0)
1086 goto error_target_access;
1087 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001088
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08001089 qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
1090 ce_oom_recovery, CE_state);
1091
Houston Hoffmanc7d54292016-04-13 18:55:37 -07001092 /* update the htt_data attribute */
1093 ce_mark_datapath(CE_state);
Houston Hoffmanb01db182017-03-13 14:38:09 -07001094 scn->ce_id_to_state[CE_id] = CE_state;
Houston Hoffmanc7d54292016-04-13 18:55:37 -07001095
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001096 return (struct CE_handle *)CE_state;
1097
Houston Hoffman4411ad42016-03-14 21:12:04 -07001098error_target_access:
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001099error_no_dma_mem:
1100 ce_fini((struct CE_handle *)CE_state);
1101 return NULL;
1102}
1103
1104#ifdef WLAN_FEATURE_FASTPATH
1105/**
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001106 * hif_enable_fastpath() Update that we have enabled fastpath mode
1107 * @hif_ctx: HIF context
1108 *
1109 * For use in data path
1110 *
1111 * Retrun: void
1112 */
1113void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
1114{
1115 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1116
Houston Hoffmand63cd742016-12-05 11:59:56 -08001117 if (ce_srng_based(scn)) {
1118 HIF_INFO("%s, srng rings do not support fastpath", __func__);
1119 return;
1120 }
Houston Hoffmanc50572b2016-06-08 19:49:46 -07001121 HIF_INFO("%s, Enabling fastpath mode", __func__);
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001122 scn->fastpath_mode_on = true;
1123}
1124
1125/**
1126 * hif_is_fastpath_mode_enabled - API to query if fasthpath mode is enabled
1127 * @hif_ctx: HIF Context
1128 *
1129 * For use in data path to skip HTC
1130 *
1131 * Return: bool
1132 */
1133bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
1134{
1135 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1136
1137 return scn->fastpath_mode_on;
1138}
1139
1140/**
1141 * hif_get_ce_handle - API to get CE handle for FastPath mode
1142 * @hif_ctx: HIF Context
1143 * @id: CopyEngine Id
1144 *
1145 * API to return CE handle for fastpath mode
1146 *
1147 * Return: void
1148 */
1149void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
1150{
1151 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1152
1153 return scn->ce_id_to_state[id];
1154}
1155
1156/**
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001157 * ce_h2t_tx_ce_cleanup() Place holder function for H2T CE cleanup.
1158 * No processing is required inside this function.
1159 * @ce_hdl: Cope engine handle
1160 * Using an assert, this function makes sure that,
1161 * the TX CE has been processed completely.
Houston Hoffman9a831ef2015-09-03 14:42:40 -07001162 *
1163 * This is called while dismantling CE structures. No other thread
1164 * should be using these structures while dismantling is occuring
1165 * therfore no locking is needed.
1166 *
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001167 * Return: none
1168 */
1169void
1170ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
1171{
1172 struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1173 struct CE_ring_state *src_ring = ce_state->src_ring;
Komal Seelam644263d2016-02-22 20:45:49 +05301174 struct hif_softc *sc = ce_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001175 uint32_t sw_index, write_index;
Houston Hoffman85925072016-05-06 17:02:18 -07001176 if (hif_is_nss_wifi_enabled(sc))
1177 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001178
Houston Hoffmanc7d54292016-04-13 18:55:37 -07001179 if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
Houston Hoffman85925072016-05-06 17:02:18 -07001180 HIF_INFO("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
1181 __func__, __LINE__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001182 sw_index = src_ring->sw_index;
1183 write_index = src_ring->sw_index;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001184
1185 /* At this point Tx CE should be clean */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301186 qdf_assert_always(sw_index == write_index);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001187 }
1188}
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001189
1190/**
1191 * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
1192 * @ce_hdl: Handle to CE
1193 *
1194 * These buffers are never allocated on the fly, but
1195 * are allocated only once during HIF start and freed
1196 * only once during HIF stop.
1197 * NOTE:
1198 * The assumption here is there is no in-flight DMA in progress
1199 * currently, so that buffers can be freed up safely.
1200 *
1201 * Return: NONE
1202 */
1203void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
1204{
1205 struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1206 struct CE_ring_state *dst_ring = ce_state->dest_ring;
1207 qdf_nbuf_t nbuf;
1208 int i;
1209
Houston Hoffman7fe51b12016-11-14 18:01:05 -08001210 if (ce_state->scn->fastpath_mode_on == false)
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001211 return;
Houston Hoffman7fe51b12016-11-14 18:01:05 -08001212
1213 if (!ce_state->htt_rx_data)
1214 return;
1215
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001216 /*
1217 * when fastpath_mode is on and for datapath CEs. Unlike other CE's,
1218 * this CE is completely full: does not leave one blank space, to
1219 * distinguish between empty queue & full queue. So free all the
1220 * entries.
1221 */
1222 for (i = 0; i < dst_ring->nentries; i++) {
1223 nbuf = dst_ring->per_transfer_context[i];
1224
1225 /*
1226 * The reasons for doing this check are:
1227 * 1) Protect against calling cleanup before allocating buffers
1228 * 2) In a corner case, FASTPATH_mode_on may be set, but we
1229 * could have a partially filled ring, because of a memory
1230 * allocation failure in the middle of allocating ring.
1231 * This check accounts for that case, checking
1232 * fastpath_mode_on flag or started flag would not have
1233 * covered that case. This is not in performance path,
1234 * so OK to do this.
1235 */
1236 if (nbuf)
1237 qdf_nbuf_free(nbuf);
1238 }
1239}
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001240
1241/**
1242 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
1243 * @scn: HIF handle
1244 *
1245 * Datapath Rx CEs are special case, where we reuse all the message buffers.
1246 * Hence we have to post all the entries in the pipe, even, in the beginning
1247 * unlike for other CE pipes where one less than dest_nentries are filled in
1248 * the beginning.
1249 *
1250 * Return: None
1251 */
1252static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1253{
1254 int pipe_num;
1255 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1256
1257 if (scn->fastpath_mode_on == false)
1258 return;
1259
1260 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1261 struct HIF_CE_pipe_info *pipe_info =
1262 &hif_state->pipe_info[pipe_num];
1263 struct CE_state *ce_state =
1264 scn->ce_id_to_state[pipe_info->pipe_num];
1265
1266 if (ce_state->htt_rx_data)
1267 atomic_inc(&pipe_info->recv_bufs_needed);
1268 }
1269}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001270#else
/* WLAN_FEATURE_FASTPATH disabled: fastpath Rx buffer accounting is a no-op */
static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
}
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001274
/* WLAN_FEATURE_FASTPATH disabled: fastpath can never be enabled */
static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return false;
}
1279
/* WLAN_FEATURE_FASTPATH disabled: no fastpath handler can be registered */
static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
{
	return false;
}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001284#endif /* WLAN_FEATURE_FASTPATH */
1285
/**
 * ce_fini() - tear down a copy engine created by ce_init
 * @copyeng: opaque CE handle (struct CE_state *)
 *
 * Marks the CE unused, unhooks it from scn->ce_id_to_state, drains the
 * datapath rings, releases the DMA-coherent ring memory, the shadow
 * rings, the epping poll timer, the locks, and finally the CE_state
 * itself. Must not race with any other user of this CE.
 */
void ce_fini(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int CE_id = CE_state->id;
	struct hif_softc *scn = CE_state->scn;

	/* unpublish first so no new lookups can find this CE */
	CE_state->state = CE_UNUSED;
	scn->ce_id_to_state[CE_id] = NULL;

	qdf_spinlock_destroy(&CE_state->lro_unloading_lock);

	if (CE_state->src_ring) {
		/* Cleanup the datapath Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		if (CE_state->src_ring->shadow_base_unaligned)
			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
		if (CE_state->src_ring->base_addr_owner_space_unaligned)
			qdf_mem_free_consistent(scn->qdf_dev,
						scn->qdf_dev->dev,
						(CE_state->src_ring->nentries *
						 sizeof(struct CE_src_desc) +
						 CE_DESC_RING_ALIGN),
						CE_state->src_ring->
						base_addr_owner_space_unaligned,
						CE_state->src_ring->
						base_addr_CE_space, 0);
		qdf_mem_free(CE_state->src_ring);
	}
	if (CE_state->dest_ring) {
		/* Cleanup the datapath Rx ring */
		ce_t2h_msg_ce_cleanup(copyeng);

		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
			qdf_mem_free_consistent(scn->qdf_dev,
						scn->qdf_dev->dev,
						(CE_state->dest_ring->nentries *
						 sizeof(struct CE_dest_desc) +
						 CE_DESC_RING_ALIGN),
						CE_state->dest_ring->
						base_addr_owner_space_unaligned,
						CE_state->dest_ring->
						base_addr_CE_space, 0);
		qdf_mem_free(CE_state->dest_ring);

		/* epping */
		/* the poll timer is only armed in the dest-ring path,
		 * so it is freed here with the dest ring
		 */
		if (CE_state->timer_inited) {
			CE_state->timer_inited = false;
			qdf_timer_free(&CE_state->poll_timer);
		}
	}
	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
		/* Cleanup the datapath Tx ring */
		/* NOTE(review): this calls ce_h2t_tx_ce_cleanup() a second
		 * time (already done in the src_ring branch above) although
		 * this branch frees the status ring — looks copy-pasted;
		 * confirm whether a status-ring drain was intended.
		 */
		ce_h2t_tx_ce_cleanup(copyeng);

		if (CE_state->status_ring->shadow_base_unaligned)
			qdf_mem_free(
				CE_state->status_ring->shadow_base_unaligned);

		if (CE_state->status_ring->base_addr_owner_space_unaligned)
			qdf_mem_free_consistent(scn->qdf_dev,
						scn->qdf_dev->dev,
						(CE_state->status_ring->nentries *
						 sizeof(struct CE_src_desc) +
						 CE_DESC_RING_ALIGN),
						CE_state->status_ring->
						base_addr_owner_space_unaligned,
						CE_state->status_ring->
						base_addr_CE_space, 0);
		qdf_mem_free(CE_state->status_ring);
	}

	qdf_spinlock_destroy(&CE_state->ce_index_lock);
	qdf_mem_free(CE_state);
}
1361
Komal Seelam5584a7c2016-02-24 19:22:48 +05301362void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001363{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301364 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001365
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301366 qdf_mem_zero(&hif_state->msg_callbacks_pending,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001367 sizeof(hif_state->msg_callbacks_pending));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301368 qdf_mem_zero(&hif_state->msg_callbacks_current,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001369 sizeof(hif_state->msg_callbacks_current));
1370}
1371
1372/* Send the first nbytes bytes of the buffer */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301373QDF_STATUS
Komal Seelam5584a7c2016-02-24 19:22:48 +05301374hif_send_head(struct hif_opaque_softc *hif_ctx,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001375 uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301376 qdf_nbuf_t nbuf, unsigned int data_attr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001377{
Komal Seelam644263d2016-02-22 20:45:49 +05301378 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05301379 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001380 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1381 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
1382 int bytes = nbytes, nfrags = 0;
1383 struct ce_sendlist sendlist;
1384 int status, i = 0;
1385 unsigned int mux_id = 0;
1386
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301387 QDF_ASSERT(nbytes <= qdf_nbuf_len(nbuf));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001388
1389 transfer_id =
1390 (mux_id & MUX_ID_MASK) |
1391 (transfer_id & TRANSACTION_ID_MASK);
1392 data_attr &= DESC_DATA_FLAG_MASK;
1393 /*
1394 * The common case involves sending multiple fragments within a
1395 * single download (the tx descriptor and the tx frame header).
1396 * So, optimize for the case of multiple fragments by not even
1397 * checking whether it's necessary to use a sendlist.
1398 * The overhead of using a sendlist for a single buffer download
1399 * is not a big deal, since it happens rarely (for WMI messages).
1400 */
1401 ce_sendlist_init(&sendlist);
1402 do {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301403 qdf_dma_addr_t frag_paddr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001404 int frag_bytes;
1405
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301406 frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
1407 frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001408 /*
1409 * Clear the packet offset for all but the first CE desc.
1410 */
1411 if (i++ > 0)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301412 data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001413
1414 status = ce_sendlist_buf_add(&sendlist, frag_paddr,
1415 frag_bytes >
1416 bytes ? bytes : frag_bytes,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301417 qdf_nbuf_get_frag_is_wordstream
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001418 (nbuf,
1419 nfrags) ? 0 :
1420 CE_SEND_FLAG_SWAP_DISABLE,
1421 data_attr);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301422 if (status != QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001423 HIF_ERROR("%s: error, frag_num %d larger than limit",
1424 __func__, nfrags);
1425 return status;
1426 }
1427 bytes -= frag_bytes;
1428 nfrags++;
1429 } while (bytes > 0);
1430
1431 /* Make sure we have resources to handle this request */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301432 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001433 if (pipe_info->num_sends_allowed < nfrags) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301434 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001435 ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301436 return QDF_STATUS_E_RESOURCES;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001437 }
1438 pipe_info->num_sends_allowed -= nfrags;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301439 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001440
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301441 if (qdf_unlikely(ce_hdl == NULL)) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001442 HIF_ERROR("%s: error CE handle is null", __func__);
1443 return A_ERROR;
1444 }
1445
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301446 QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301447 DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
Nirav Shaheaa20d82016-04-25 18:01:05 +05301448 qdf_nbuf_data_addr(nbuf),
Nirav Shah29beae02016-04-26 22:58:54 +05301449 sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001450 status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301451 QDF_ASSERT(status == QDF_STATUS_SUCCESS);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001452
1453 return status;
1454}
1455
Komal Seelam5584a7c2016-02-24 19:22:48 +05301456void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
1457 int force)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001458{
Komal Seelam644263d2016-02-22 20:45:49 +05301459 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05301460 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Komal Seelam644263d2016-02-22 20:45:49 +05301461
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001462 if (!force) {
1463 int resources;
1464 /*
1465 * Decide whether to actually poll for completions, or just
1466 * wait for a later chance. If there seem to be plenty of
1467 * resources left, then just wait, since checking involves
1468 * reading a CE register, which is a relatively expensive
1469 * operation.
1470 */
Komal Seelam644263d2016-02-22 20:45:49 +05301471 resources = hif_get_free_queue_number(hif_ctx, pipe);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001472 /*
1473 * If at least 50% of the total resources are still available,
1474 * don't bother checking again yet.
1475 */
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05301476 if (resources > (hif_state->host_ce_config[pipe].src_nentries >> 1)) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001477 return;
1478 }
1479 }
Houston Hoffman56e0d702016-05-05 17:48:06 -07001480#if ATH_11AC_TXCOMPACT
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001481 ce_per_engine_servicereap(scn, pipe);
1482#else
1483 ce_per_engine_service(scn, pipe);
1484#endif
1485}
1486
Komal Seelam5584a7c2016-02-24 19:22:48 +05301487uint16_t
1488hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001489{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301490 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001491 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1492 uint16_t rv;
1493
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301494 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001495 rv = pipe_info->num_sends_allowed;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301496 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001497 return rv;
1498}
1499
/* Called by lower (CE) layer when a send to Target completes.
 *
 * Drains all available send completions from the CE: for each completed
 * transfer, either frees the nbuf (target in reset) or hands it to the
 * upper layer's txCompletionHandler, then returns one send credit to
 * the pipe under the completion freeq lock.
 */
static void
hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int sw_index, unsigned int hw_index,
		     unsigned int toeplitz_hash_result)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	unsigned int sw_idx = sw_index, hw_idx = hw_index;
	struct hif_msg_callbacks *msg_callbacks =
		&pipe_info->pipe_callbacks;

	do {
		/*
		 * The upper layer callback will be triggered
		 * when last fragment is completed; intermediate
		 * fragments carry the CE_SENDLIST_ITEM_CTXT marker.
		 */
		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
			if (scn->target_status == TARGET_STATUS_RESET)
				qdf_nbuf_free(transfer_context);
			else
				msg_callbacks->txCompletionHandler(
					msg_callbacks->Context,
					transfer_context, transfer_id,
					toeplitz_hash_result);
		}

		/* return one send credit per reaped descriptor */
		qdf_spin_lock(&pipe_info->completion_freeq_lock);
		pipe_info->num_sends_allowed++;
		qdf_spin_unlock(&pipe_info->completion_freeq_lock);
	} while (ce_completed_send_next(copyeng,
			&ce_context, &transfer_context,
			&CE_data, &nbytes, &transfer_id,
			&sw_idx, &hw_idx,
			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
}
1540
Houston Hoffman910c6262015-09-28 12:56:25 -07001541/**
1542 * hif_ce_do_recv(): send message from copy engine to upper layers
1543 * @msg_callbacks: structure containing callback and callback context
1544 * @netbuff: skb containing message
1545 * @nbytes: number of bytes in the message
1546 * @pipe_info: used for the pipe_number info
1547 *
1548 * Checks the packet length, configures the lenght in the netbuff,
1549 * and calls the upper layer callback.
1550 *
1551 * return: None
1552 */
1553static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301554 qdf_nbuf_t netbuf, int nbytes,
Houston Hoffman910c6262015-09-28 12:56:25 -07001555 struct HIF_CE_pipe_info *pipe_info) {
1556 if (nbytes <= pipe_info->buf_sz) {
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301557 qdf_nbuf_set_pktlen(netbuf, nbytes);
Houston Hoffman910c6262015-09-28 12:56:25 -07001558 msg_callbacks->
1559 rxCompletionHandler(msg_callbacks->Context,
1560 netbuf, pipe_info->pipe_num);
1561 } else {
1562 HIF_ERROR("%s: Invalid Rx msg buf:%p nbytes:%d",
1563 __func__, netbuf, nbytes);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301564 qdf_nbuf_free(netbuf);
Houston Hoffman910c6262015-09-28 12:56:25 -07001565 }
1566}
1567
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001568/* Called by lower (CE) layer when data is received from the Target. */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001569static void
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001570hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301571 void *transfer_context, qdf_dma_addr_t CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001572 unsigned int nbytes, unsigned int transfer_id,
1573 unsigned int flags)
1574{
1575 struct HIF_CE_pipe_info *pipe_info =
1576 (struct HIF_CE_pipe_info *)ce_context;
1577 struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
Houston Hoffman18c7fc52015-09-02 11:44:42 -07001578 struct CE_state *ce_state = (struct CE_state *) copyeng;
Komal Seelam644263d2016-02-22 20:45:49 +05301579 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffmane02e12d2016-03-14 21:11:36 -07001580#ifdef HIF_PCI
1581 struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
1582#endif
Houston Hoffman910c6262015-09-28 12:56:25 -07001583 struct hif_msg_callbacks *msg_callbacks =
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05301584 &pipe_info->pipe_callbacks;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001585
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001586 do {
Houston Hoffmane02e12d2016-03-14 21:11:36 -07001587#ifdef HIF_PCI
1588 hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
1589#endif
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301590 qdf_nbuf_unmap_single(scn->qdf_dev,
1591 (qdf_nbuf_t) transfer_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301592 QDF_DMA_FROM_DEVICE);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001593
Houston Hoffman910c6262015-09-28 12:56:25 -07001594 atomic_inc(&pipe_info->recv_bufs_needed);
1595 hif_post_recv_buffers_for_pipe(pipe_info);
Komal Seelam6ee55902016-04-11 17:11:07 +05301596 if (scn->target_status == TARGET_STATUS_RESET)
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301597 qdf_nbuf_free(transfer_context);
Houston Hoffman49794a32015-12-21 12:14:56 -08001598 else
1599 hif_ce_do_recv(msg_callbacks, transfer_context,
Houston Hoffman9c0f80a2015-09-28 18:36:36 -07001600 nbytes, pipe_info);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001601
1602 /* Set up force_break flag if num of receices reaches
1603 * MAX_NUM_OF_RECEIVES */
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001604 ce_state->receive_count++;
Houston Hoffman05652722016-04-29 16:58:59 -07001605 if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
Houston Hoffman18c7fc52015-09-02 11:44:42 -07001606 ce_state->force_break = 1;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001607 break;
1608 }
1609 } while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
1610 &CE_data, &nbytes, &transfer_id,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301611 &flags) == QDF_STATUS_SUCCESS);
Houston Hoffmanf4607852015-12-17 17:14:40 -08001612
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001613}
1614
1615/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
1616
1617void
Komal Seelam5584a7c2016-02-24 19:22:48 +05301618hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001619 struct hif_msg_callbacks *callbacks)
1620{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301621 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001622
1623#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
1624 spin_lock_init(&pcie_access_log_lock);
1625#endif
1626 /* Save callbacks for later installation */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301627 qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001628 sizeof(hif_state->msg_callbacks_pending));
1629
1630}
1631
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001632static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001633{
1634 struct CE_handle *ce_diag = hif_state->ce_diag;
1635 int pipe_num;
Komal Seelam644263d2016-02-22 20:45:49 +05301636 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07001637 struct hif_msg_callbacks *hif_msg_callbacks =
1638 &hif_state->msg_callbacks_current;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001639
1640 /* daemonize("hif_compl_thread"); */
1641
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001642 if (scn->ce_count == 0) {
Houston Hoffmanc50572b2016-06-08 19:49:46 -07001643 HIF_ERROR("%s: Invalid ce_count", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001644 return -EINVAL;
1645 }
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07001646
1647 if (!hif_msg_callbacks ||
1648 !hif_msg_callbacks->rxCompletionHandler ||
1649 !hif_msg_callbacks->txCompletionHandler) {
1650 HIF_ERROR("%s: no completion handler registered", __func__);
1651 return -EFAULT;
1652 }
1653
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001654 A_TARGET_ACCESS_LIKELY(scn);
1655 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1656 struct CE_attr attr;
1657 struct HIF_CE_pipe_info *pipe_info;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001658
1659 pipe_info = &hif_state->pipe_info[pipe_num];
1660 if (pipe_info->ce_hdl == ce_diag) {
1661 continue; /* Handle Diagnostic CE specially */
1662 }
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05301663 attr = hif_state->host_ce_config[pipe_num];
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001664 if (attr.src_nentries) {
1665 /* pipe used to send to target */
1666 HIF_INFO_MED("%s: pipe_num:%d pipe_info:0x%p",
1667 __func__, pipe_num, pipe_info);
1668 ce_send_cb_register(pipe_info->ce_hdl,
1669 hif_pci_ce_send_done, pipe_info,
1670 attr.flags & CE_ATTR_DISABLE_INTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001671 pipe_info->num_sends_allowed = attr.src_nentries - 1;
1672 }
1673 if (attr.dest_nentries) {
1674 /* pipe used to receive from target */
1675 ce_recv_cb_register(pipe_info->ce_hdl,
1676 hif_pci_ce_recv_data, pipe_info,
1677 attr.flags & CE_ATTR_DISABLE_INTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001678 }
Houston Hoffman6666df72015-11-30 16:48:35 -08001679
1680 if (attr.src_nentries)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301681 qdf_spinlock_create(&pipe_info->completion_freeq_lock);
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05301682
1683 qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
1684 sizeof(pipe_info->pipe_callbacks));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001685 }
Houston Hoffman6666df72015-11-30 16:48:35 -08001686
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001687 A_TARGET_ACCESS_UNLIKELY(scn);
1688 return 0;
1689}
1690
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001691/*
1692 * Install pending msg callbacks.
1693 *
1694 * TBDXXX: This hack is needed because upper layers install msg callbacks
1695 * for use with HTC before BMI is done; yet this HIF implementation
1696 * needs to continue to use BMI msg callbacks. Really, upper layers
1697 * should not register HTC callbacks until AFTER BMI phase.
1698 */
Komal Seelam644263d2016-02-22 20:45:49 +05301699static void hif_msg_callbacks_install(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001700{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301701 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001702
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301703 qdf_mem_copy(&hif_state->msg_callbacks_current,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001704 &hif_state->msg_callbacks_pending,
1705 sizeof(hif_state->msg_callbacks_pending));
1706}
1707
Komal Seelam5584a7c2016-02-24 19:22:48 +05301708void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
1709 uint8_t *DLPipe)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001710{
1711 int ul_is_polled, dl_is_polled;
1712
Komal Seelam644263d2016-02-22 20:45:49 +05301713 (void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001714 ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
1715}
1716
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001717/**
1718 * hif_dump_pipe_debug_count() - Log error count
Komal Seelam644263d2016-02-22 20:45:49 +05301719 * @scn: hif_softc pointer.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001720 *
1721 * Output the pipe error counts of each pipe to log file
1722 *
1723 * Return: N/A
1724 */
Komal Seelam644263d2016-02-22 20:45:49 +05301725void hif_dump_pipe_debug_count(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001726{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301727 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001728 int pipe_num;
1729
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001730 if (hif_state == NULL) {
1731 HIF_ERROR("%s hif_state is NULL", __func__);
1732 return;
1733 }
1734 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1735 struct HIF_CE_pipe_info *pipe_info;
1736
1737 pipe_info = &hif_state->pipe_info[pipe_num];
1738
1739 if (pipe_info->nbuf_alloc_err_count > 0 ||
1740 pipe_info->nbuf_dma_err_count > 0 ||
1741 pipe_info->nbuf_ce_enqueue_err_count)
1742 HIF_ERROR(
1743 "%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
1744 __func__, pipe_info->pipe_num,
1745 atomic_read(&pipe_info->recv_bufs_needed),
1746 pipe_info->nbuf_alloc_err_count,
1747 pipe_info->nbuf_dma_err_count,
1748 pipe_info->nbuf_ce_enqueue_err_count);
1749 }
1750}
1751
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08001752static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
1753 void *nbuf, uint32_t *error_cnt,
1754 enum hif_ce_event_type failure_type,
1755 const char *failure_type_string)
1756{
1757 int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
1758 struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
1759 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
1760 int ce_id = CE_state->id;
1761 uint32_t error_cnt_tmp;
1762
1763 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
1764 error_cnt_tmp = ++(*error_cnt);
1765 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Himanshu Agarwal38cea4a2017-03-30 19:02:52 +05301766 HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08001767 __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
1768 failure_type_string);
1769 hif_record_ce_desc_event(scn, ce_id, failure_type,
1770 NULL, nbuf, bufs_needed_tmp);
1771 /* if we fail to allocate the last buffer for an rx pipe,
1772 * there is no trigger to refill the ce and we will
1773 * eventually crash
1774 */
Himanshu Agarwalbedeed92017-03-21 14:05:10 +05301775 if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08001776 qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
Himanshu Agarwalbedeed92017-03-21 14:05:10 +05301777
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08001778}
1779
1780
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08001781
1782
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001783static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
1784{
1785 struct CE_handle *ce_hdl;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301786 qdf_size_t buf_sz;
Komal Seelam644263d2016-02-22 20:45:49 +05301787 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301788 QDF_STATUS ret;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001789 uint32_t bufs_posted = 0;
1790
1791 buf_sz = pipe_info->buf_sz;
1792 if (buf_sz == 0) {
1793 /* Unused Copy Engine */
1794 return 0;
1795 }
1796
1797 ce_hdl = pipe_info->ce_hdl;
1798
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301799 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001800 while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301801 qdf_dma_addr_t CE_data; /* CE space buffer address */
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301802 qdf_nbuf_t nbuf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001803 int status;
1804
1805 atomic_dec(&pipe_info->recv_bufs_needed);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301806 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001807
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301808 nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001809 if (!nbuf) {
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08001810 hif_post_recv_buffers_failure(pipe_info, nbuf,
1811 &pipe_info->nbuf_alloc_err_count,
1812 HIF_RX_NBUF_ALLOC_FAILURE,
1813 "HIF_RX_NBUF_ALLOC_FAILURE");
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001814 return 1;
1815 }
1816
1817 /*
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301818 * qdf_nbuf_peek_header(nbuf, &data, &unused);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001819 * CE_data = dma_map_single(dev, data, buf_sz, );
1820 * DMA_FROM_DEVICE);
1821 */
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08001822 ret = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301823 QDF_DMA_FROM_DEVICE);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001824
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301825 if (unlikely(ret != QDF_STATUS_SUCCESS)) {
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08001826 hif_post_recv_buffers_failure(pipe_info, nbuf,
1827 &pipe_info->nbuf_dma_err_count,
1828 HIF_RX_NBUF_MAP_FAILURE,
1829 "HIF_RX_NBUF_MAP_FAILURE");
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301830 qdf_nbuf_free(nbuf);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001831 return 1;
1832 }
1833
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301834 CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001835
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301836 qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001837 buf_sz, DMA_FROM_DEVICE);
1838 status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301839 QDF_ASSERT(status == QDF_STATUS_SUCCESS);
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08001840 if (unlikely(status != EOK)) {
1841 hif_post_recv_buffers_failure(pipe_info, nbuf,
1842 &pipe_info->nbuf_ce_enqueue_err_count,
1843 HIF_RX_NBUF_ENQUEUE_FAILURE,
1844 "HIF_RX_NBUF_ENQUEUE_FAILURE");
1845
Govind Singh4fcafd42016-08-08 12:37:31 +05301846 qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
1847 QDF_DMA_FROM_DEVICE);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301848 qdf_nbuf_free(nbuf);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001849 return 1;
1850 }
1851
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301852 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001853 bufs_posted++;
1854 }
1855 pipe_info->nbuf_alloc_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07001856 (pipe_info->nbuf_alloc_err_count > bufs_posted) ?
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001857 pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
1858 pipe_info->nbuf_dma_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07001859 (pipe_info->nbuf_dma_err_count > bufs_posted) ?
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001860 pipe_info->nbuf_dma_err_count - bufs_posted : 0;
1861 pipe_info->nbuf_ce_enqueue_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07001862 (pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08001863 pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001864
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301865 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001866
1867 return 0;
1868}
1869
1870/*
1871 * Try to post all desired receive buffers for all pipes.
1872 * Returns 0 if all desired buffers are posted,
1873 * non-zero if were were unable to completely
1874 * replenish receive buffers.
1875 */
Komal Seelam644263d2016-02-22 20:45:49 +05301876static int hif_post_recv_buffers(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001877{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301878 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001879 int pipe_num, rv = 0;
Houston Hoffman85925072016-05-06 17:02:18 -07001880 struct CE_state *ce_state;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001881
1882 A_TARGET_ACCESS_LIKELY(scn);
1883 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1884 struct HIF_CE_pipe_info *pipe_info;
Houston Hoffman85925072016-05-06 17:02:18 -07001885 ce_state = scn->ce_id_to_state[pipe_num];
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001886 pipe_info = &hif_state->pipe_info[pipe_num];
Houston Hoffman85925072016-05-06 17:02:18 -07001887
1888 if (hif_is_nss_wifi_enabled(scn) &&
1889 ce_state && (ce_state->htt_rx_data)) {
1890 continue;
1891 }
1892
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001893 if (hif_post_recv_buffers_for_pipe(pipe_info)) {
1894 rv = 1;
1895 goto done;
1896 }
1897 }
1898
1899done:
1900 A_TARGET_ACCESS_UNLIKELY(scn);
1901
1902 return rv;
1903}
1904
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301905QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001906{
Komal Seelam644263d2016-02-22 20:45:49 +05301907 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05301908 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001909
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001910 hif_update_fastpath_recv_bufs_cnt(scn);
1911
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07001912 hif_msg_callbacks_install(scn);
1913
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001914 if (hif_completion_thread_startup(hif_state))
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301915 return QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001916
Houston Hoffman271951f2016-11-12 15:24:27 -08001917 /* enable buffer cleanup */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001918 hif_state->started = true;
1919
Houston Hoffman271951f2016-11-12 15:24:27 -08001920 /* Post buffers once to start things off. */
1921 if (hif_post_recv_buffers(scn)) {
1922 /* cleanup is done in hif_ce_disable */
1923 HIF_ERROR("%s:failed to post buffers", __func__);
1924 return QDF_STATUS_E_FAILURE;
1925 }
1926
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301927 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001928}
1929
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001930static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001931{
Komal Seelam644263d2016-02-22 20:45:49 +05301932 struct hif_softc *scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001933 struct CE_handle *ce_hdl;
1934 uint32_t buf_sz;
1935 struct HIF_CE_state *hif_state;
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301936 qdf_nbuf_t netbuf;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301937 qdf_dma_addr_t CE_data;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001938 void *per_CE_context;
1939
1940 buf_sz = pipe_info->buf_sz;
1941 if (buf_sz == 0) {
1942 /* Unused Copy Engine */
1943 return;
1944 }
1945
1946 hif_state = pipe_info->HIF_CE_state;
1947 if (!hif_state->started) {
1948 return;
1949 }
1950
Komal Seelam02cf2f82016-02-22 20:44:25 +05301951 scn = HIF_GET_SOFTC(hif_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001952 ce_hdl = pipe_info->ce_hdl;
1953
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301954 if (scn->qdf_dev == NULL) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001955 return;
1956 }
1957 while (ce_revoke_recv_next
1958 (ce_hdl, &per_CE_context, (void **)&netbuf,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301959 &CE_data) == QDF_STATUS_SUCCESS) {
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301960 qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301961 QDF_DMA_FROM_DEVICE);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301962 qdf_nbuf_free(netbuf);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001963 }
1964}
1965
/**
 * hif_send_buffer_cleanup_on_pipe() - cancel and complete pending tx buffers
 * @pipe_info: pipe to drain
 *
 * Cancels every send still queued on the pipe's copy engine.  Buffers not
 * owned by HTT's misc packet pool are handed to the registered
 * txCompletionHandler so the upper layer can free them.  No-op for unused
 * pipes or before hif_start().
 */
static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	struct HIF_CE_state *hif_state;
	struct hif_softc *scn;
	qdf_nbuf_t netbuf;
	void *per_CE_context;
	qdf_dma_addr_t CE_data;
	unsigned int nbytes;
	unsigned int id;		/* transfer id == HTC endpoint */
	uint32_t buf_sz;
	uint32_t toeplitz_hash_result;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	/* before hif_start() nothing has been queued for send */
	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started) {
		return;
	}

	scn = HIF_GET_SOFTC(hif_state);

	ce_hdl = pipe_info->ce_hdl;

	while (ce_cancel_send_next
		       (ce_hdl, &per_CE_context,
		       (void **)&netbuf, &CE_data, &nbytes,
		       &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
		/* CE_SENDLIST_ITEM_CTXT entries carry no nbuf to complete */
		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
			/*
			 * Packets enqueued by htt_h2t_ver_req_msg() and
			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
			 * freed in htt_htc_misc_pkt_pool_free() in
			 * wlantl_close(), so do not free them here again
			 * by checking whether it's the endpoint
			 * which they are queued in.
			 */
			/* NOTE(review): this returns (abandoning the rest of
			 * the cancel loop) rather than skipping just this
			 * buffer with a continue — looks intentional since
			 * the whole pool is freed elsewhere, but confirm.
			 */
			if (id == scn->htc_htt_tx_endpoint)
				return;
			/* Indicate the completion to higher
			 * layer to free the buffer */
			if (pipe_info->pipe_callbacks.
			    txCompletionHandler)
				pipe_info->pipe_callbacks.
				txCompletionHandler(pipe_info->
					pipe_callbacks.Context,
					netbuf, id, toeplitz_hash_result);
		}
	}
}
2020
2021/*
2022 * Cleanup residual buffers for device shutdown:
2023 * buffers that were enqueued for receive
2024 * buffers that were to be sent
2025 * Note: Buffers that had completed but which were
2026 * not yet processed are on a completion queue. They
2027 * are handled when the completion thread shuts down.
2028 */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002029static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002030{
2031 int pipe_num;
Komal Seelam644263d2016-02-22 20:45:49 +05302032 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffman85925072016-05-06 17:02:18 -07002033 struct CE_state *ce_state;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002034
Komal Seelam02cf2f82016-02-22 20:44:25 +05302035 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002036 struct HIF_CE_pipe_info *pipe_info;
2037
Houston Hoffman85925072016-05-06 17:02:18 -07002038 ce_state = scn->ce_id_to_state[pipe_num];
2039 if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2040 ((ce_state->htt_tx_data) ||
2041 (ce_state->htt_rx_data))) {
2042 continue;
2043 }
2044
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002045 pipe_info = &hif_state->pipe_info[pipe_num];
2046 hif_recv_buffer_cleanup_on_pipe(pipe_info);
2047 hif_send_buffer_cleanup_on_pipe(pipe_info);
2048 }
2049}
2050
/**
 * hif_flush_surprise_remove() - drop buffers held by the copy engines
 * @hif_ctx: opaque hif handle
 *
 * Used on surprise removal, when the target is already gone and pending
 * buffers can only be released, not completed.
 */
void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_buffer_cleanup(HIF_GET_CE_STATE(scn));
}
2058
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08002059static void hif_destroy_oom_work(struct hif_softc *scn)
2060{
2061 struct CE_state *ce_state;
2062 int ce_id;
2063
2064 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2065 ce_state = scn->ce_id_to_state[ce_id];
2066 if (ce_state)
2067 qdf_destroy_work(scn->qdf_dev,
2068 &ce_state->oom_allocation_work);
2069 }
2070}
2071
/**
 * hif_ce_stop() - quiesce and tear down the copy-engine layer
 * @scn: hif context
 *
 * Ordering here is deliberate: interrupts and the OOM work are stopped
 * first so no context can touch CE state while it is being freed; then
 * outstanding buffers are released, the CEs finalized, and the sleep
 * timer destroyed.
 */
void hif_ce_stop(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num;

	/*
	 * before cleaning up any memory, ensure irq &
	 * bottom half contexts will not be re-entered
	 */
	hif_nointrs(scn);
	hif_destroy_oom_work(scn);
	scn->hif_init_done = false;

	/*
	 * At this point, asynchronous threads are stopped,
	 * The Target should not DMA nor interrupt, Host code may
	 * not initiate anything more. So we just need to clean
	 * up Host-side state.
	 */

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	/* release rx buffers still posted and complete pending sends */
	hif_buffer_cleanup(hif_state);

	/* finalize every copy engine and drop its handle */
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}

	/* stop before free: the timer must not fire on freed state */
	if (hif_state->sleep_timer_init) {
		qdf_timer_stop(&hif_state->sleep_timer);
		qdf_timer_free(&hif_state->sleep_timer);
		hif_state->sleep_timer_init = false;
	}

	hif_state->started = false;
}
2118
Houston Hoffman748e1a62017-03-30 17:20:42 -07002119
Houston Hoffman854e67f2016-03-14 21:11:39 -07002120/**
2121 * hif_get_target_ce_config() - get copy engine configuration
2122 * @target_ce_config_ret: basic copy engine configuration
2123 * @target_ce_config_sz_ret: size of the basic configuration in bytes
2124 * @target_service_to_ce_map_ret: service mapping for the copy engines
2125 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
2126 * @target_shadow_reg_cfg_ret: shadow register configuration
2127 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
2128 *
2129 * providing accessor to these values outside of this file.
2130 * currently these are stored in static pointers to const sections.
2131 * there are multiple configurations that are selected from at compile time.
2132 * Runtime selection would need to consider mode, target type and bus type.
2133 *
2134 * Return: return by parameter.
2135 */
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302136void hif_get_target_ce_config(struct hif_softc *scn,
2137 struct CE_pipe_config **target_ce_config_ret,
Houston Hoffman748e1a62017-03-30 17:20:42 -07002138 uint32_t *target_ce_config_sz_ret,
Houston Hoffman854e67f2016-03-14 21:11:39 -07002139 struct service_to_pipe **target_service_to_ce_map_ret,
Houston Hoffman748e1a62017-03-30 17:20:42 -07002140 uint32_t *target_service_to_ce_map_sz_ret,
Houston Hoffman854e67f2016-03-14 21:11:39 -07002141 struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
Houston Hoffman748e1a62017-03-30 17:20:42 -07002142 uint32_t *shadow_cfg_sz_ret)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002143{
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302144 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2145
2146 *target_ce_config_ret = hif_state->target_ce_config;
2147 *target_ce_config_sz_ret = hif_state->target_ce_config_sz;
Houston Hoffman748e1a62017-03-30 17:20:42 -07002148
2149 hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
2150 target_service_to_ce_map_sz_ret);
Houston Hoffman854e67f2016-03-14 21:11:39 -07002151
2152 if (target_shadow_reg_cfg_ret)
2153 *target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
2154
2155 if (shadow_cfg_sz_ret)
2156 *shadow_cfg_sz_ret = shadow_cfg_sz;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002157}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002158
Houston Hoffmanf60a3482017-01-31 10:45:07 -08002159#ifdef CONFIG_SHADOW_V2
Houston Hoffman403c2df2017-01-27 12:51:15 -08002160static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
Houston Hoffman5141f9d2017-01-05 10:49:17 -08002161{
2162 int i;
2163 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2164 "%s: num_config %d\n", __func__, cfg->num_shadow_reg_v2_cfg);
2165
2166 for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
2167 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2168 "%s: i %d, val %x\n", __func__, i,
2169 cfg->shadow_reg_v2_cfg[i].addr);
2170 }
2171}
2172
Houston Hoffmanf60a3482017-01-31 10:45:07 -08002173#else
2174static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2175{
2176 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2177 "%s: CONFIG_SHADOW_V2 not defined\n", __func__);
2178}
2179#endif
2180
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002181/**
2182 * hif_wlan_enable(): call the platform driver to enable wlan
Komal Seelambd7c51d2016-02-24 10:27:30 +05302183 * @scn: HIF Context
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002184 *
2185 * This function passes the con_mode and CE configuration to
2186 * platform driver to enable wlan.
2187 *
Houston Hoffman108da402016-03-14 21:11:24 -07002188 * Return: linux error code
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002189 */
Houston Hoffman108da402016-03-14 21:11:24 -07002190int hif_wlan_enable(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002191{
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002192 struct pld_wlan_enable_cfg cfg;
2193 enum pld_driver_mode mode;
Komal Seelambd7c51d2016-02-24 10:27:30 +05302194 uint32_t con_mode = hif_get_conparam(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002195
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302196 hif_get_target_ce_config(scn,
2197 (struct CE_pipe_config **)&cfg.ce_tgt_cfg,
Houston Hoffman854e67f2016-03-14 21:11:39 -07002198 &cfg.num_ce_tgt_cfg,
2199 (struct service_to_pipe **)&cfg.ce_svc_cfg,
2200 &cfg.num_ce_svc_pipe_cfg,
2201 (struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
2202 &cfg.num_shadow_reg_cfg);
2203
2204 /* translate from structure size to array size */
2205 cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
2206 cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
2207 cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002208
Houston Hoffman5141f9d2017-01-05 10:49:17 -08002209 hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
2210 &cfg.num_shadow_reg_v2_cfg);
2211
2212 hif_print_hal_shadow_register_cfg(&cfg);
2213
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302214 if (QDF_GLOBAL_FTM_MODE == con_mode)
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002215 mode = PLD_FTM;
Houston Hoffman75ef5a52016-04-14 17:15:49 -07002216 else if (QDF_IS_EPPING_ENABLED(con_mode))
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002217 mode = PLD_EPPING;
Peng Xu7b962532015-10-02 17:17:03 -07002218 else
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002219 mode = PLD_MISSION;
Peng Xu7b962532015-10-02 17:17:03 -07002220
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07002221 if (BYPASS_QMI)
2222 return 0;
2223 else
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002224 return pld_wlan_enable(scn->qdf_dev->dev, &cfg,
2225 mode, QWLAN_VERSIONSTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002226}
2227
Houston Hoffman75ef5a52016-04-14 17:15:49 -07002228#define CE_EPPING_USES_IRQ true
2229
Houston Hoffman108da402016-03-14 21:11:24 -07002230/**
2231 * hif_ce_prepare_config() - load the correct static tables.
2232 * @scn: hif context
2233 *
2234 * Epping uses different static attribute tables than mission mode.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002235 */
Houston Hoffman108da402016-03-14 21:11:24 -07002236void hif_ce_prepare_config(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002237{
Komal Seelambd7c51d2016-02-24 10:27:30 +05302238 uint32_t mode = hif_get_conparam(scn);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002239 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2240 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302241 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002242
Houston Hoffman10fedfc2017-01-23 15:23:09 -08002243 hif_state->ce_services = ce_services_attach(scn);
2244
Houston Hoffman710af5a2016-11-22 21:59:03 -08002245 scn->ce_count = HOST_CE_COUNT;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002246 /* if epping is enabled we need to use the epping configuration. */
Houston Hoffman75ef5a52016-04-14 17:15:49 -07002247 if (QDF_IS_EPPING_ENABLED(mode)) {
2248 if (CE_EPPING_USES_IRQ)
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302249 hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002250 else
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302251 hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
2252 hif_state->target_ce_config = target_ce_config_wlan_epping;
2253 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
Vishwajith Upendra70efc752016-04-18 11:23:49 -07002254 target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
2255 shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002256 }
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002257
2258 switch (tgt_info->target_type) {
2259 default:
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302260 hif_state->host_ce_config = host_ce_config_wlan;
2261 hif_state->target_ce_config = target_ce_config_wlan;
2262 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002263 break;
2264 case TARGET_TYPE_AR900B:
2265 case TARGET_TYPE_QCA9984:
2266 case TARGET_TYPE_IPQ4019:
2267 case TARGET_TYPE_QCA9888:
Venkateswara Swamy Bandaru5432c1b2016-10-12 19:00:40 +05302268 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
2269 hif_state->host_ce_config =
2270 host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
2271 } else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
2272 hif_state->host_ce_config =
2273 host_lowdesc_ce_cfg_wlan_ar900b;
2274 } else {
2275 hif_state->host_ce_config = host_ce_config_wlan_ar900b;
2276 }
2277
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302278 hif_state->target_ce_config = target_ce_config_wlan_ar900b;
2279 hif_state->target_ce_config_sz =
2280 sizeof(target_ce_config_wlan_ar900b);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002281
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002282 break;
2283
2284 case TARGET_TYPE_AR9888:
2285 case TARGET_TYPE_AR9888V2:
Venkateswara Swamy Bandaru5432c1b2016-10-12 19:00:40 +05302286 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
2287 hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888;
2288 } else {
2289 hif_state->host_ce_config = host_ce_config_wlan_ar9888;
2290 }
2291
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302292 hif_state->target_ce_config = target_ce_config_wlan_ar9888;
2293 hif_state->target_ce_config_sz =
2294 sizeof(target_ce_config_wlan_ar9888);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002295
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002296 break;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07002297
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05302298 case TARGET_TYPE_QCA8074:
Karunakar Dasinenif61cb072016-09-29 11:50:45 -07002299 if (scn->bus_type == QDF_BUS_TYPE_PCI) {
2300 hif_state->host_ce_config =
2301 host_ce_config_wlan_qca8074_pci;
2302 hif_state->target_ce_config =
2303 target_ce_config_wlan_qca8074_pci;
2304 hif_state->target_ce_config_sz =
2305 sizeof(target_ce_config_wlan_qca8074_pci);
2306 } else {
2307 hif_state->host_ce_config = host_ce_config_wlan_qca8074;
2308 hif_state->target_ce_config =
2309 target_ce_config_wlan_qca8074;
2310 hif_state->target_ce_config_sz =
2311 sizeof(target_ce_config_wlan_qca8074);
2312 }
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05302313 break;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07002314 case TARGET_TYPE_QCA6290:
2315 hif_state->host_ce_config = host_ce_config_wlan_qca6290;
2316 hif_state->target_ce_config = target_ce_config_wlan_qca6290;
2317 hif_state->target_ce_config_sz =
2318 sizeof(target_ce_config_wlan_qca6290);
Houston Hoffman748e1a62017-03-30 17:20:42 -07002319
Houston Hoffman710af5a2016-11-22 21:59:03 -08002320 scn->ce_count = QCA_6290_CE_COUNT;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07002321 break;
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002322 }
Houston Hoffman108da402016-03-14 21:11:24 -07002323}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002324
Houston Hoffman108da402016-03-14 21:11:24 -07002325/**
2326 * hif_ce_open() - do ce specific allocations
2327 * @hif_sc: pointer to hif context
2328 *
2329 * return: 0 for success or QDF_STATUS_E_NOMEM
2330 */
2331QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
2332{
2333 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002334
Venkateswara Swamy Bandaru9fd9af02016-09-20 20:27:31 +05302335 qdf_spinlock_create(&hif_state->irq_reg_lock);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302336 qdf_spinlock_create(&hif_state->keep_awake_lock);
Houston Hoffman108da402016-03-14 21:11:24 -07002337 return QDF_STATUS_SUCCESS;
2338}
2339
2340/**
2341 * hif_ce_close() - do ce specific free
2342 * @hif_sc: pointer to hif context
2343 */
2344void hif_ce_close(struct hif_softc *hif_sc)
2345{
Venkateswara Swamy Bandaru9fd9af02016-09-20 20:27:31 +05302346 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2347
2348 qdf_spinlock_destroy(&hif_state->irq_reg_lock);
Houston Hoffman108da402016-03-14 21:11:24 -07002349}
2350
2351/**
2352 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
2353 * @hif_sc: hif context
2354 *
2355 * uses state variables to support cleaning up when hif_config_ce fails.
2356 */
2357void hif_unconfig_ce(struct hif_softc *hif_sc)
2358{
2359 int pipe_num;
2360 struct HIF_CE_pipe_info *pipe_info;
2361 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2362
2363 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
2364 pipe_info = &hif_state->pipe_info[pipe_num];
2365 if (pipe_info->ce_hdl) {
2366 ce_unregister_irq(hif_state, (1 << pipe_num));
Houston Hoffman108da402016-03-14 21:11:24 -07002367 ce_fini(pipe_info->ce_hdl);
2368 pipe_info->ce_hdl = NULL;
2369 pipe_info->buf_sz = 0;
Houston Hoffman03f46572016-12-12 12:53:56 -08002370 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
Houston Hoffman108da402016-03-14 21:11:24 -07002371 }
2372 }
Houston Hoffman108da402016-03-14 21:11:24 -07002373 if (hif_sc->athdiag_procfs_inited) {
2374 athdiag_procfs_remove();
2375 hif_sc->athdiag_procfs_inited = false;
2376 }
2377}
2378
#ifdef CONFIG_BYPASS_QMI
#define FW_SHARED_MEM (2 * 1024 * 1024)

/**
 * hif_post_static_buf_to_target() - post static buffer to WLAN FW
 * @scn: pointer to HIF structure
 *
 * WLAN FW needs 2MB memory from DDR when QMI is disabled.
 *
 * Return: void
 */
static void hif_post_static_buf_to_target(struct hif_softc *scn)
{
	phys_addr_t target_pa;
	void *target_va = qdf_mem_alloc_consistent(scn->qdf_dev,
						   scn->qdf_dev->dev,
						   FW_SHARED_MEM, &target_pa);

	if (NULL == target_va) {
		HIF_TRACE("Memory allocation failed could not post target buf");
		return;
	}

	/* hand the buffer's physical address to the firmware via the
	 * scratch register; the allocation intentionally stays live for
	 * the firmware's lifetime.
	 */
	hif_write32_mb(scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
	HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
}
#else
static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
{
}
#endif
2410
#ifdef WLAN_SUSPEND_RESUME_TEST
/* Initialize the fake-apps suspend/resume test context: bind the resume
 * work item to its handler so it can be scheduled later by the test path.
 */
static void hif_fake_apps_init_ctx(struct hif_softc *scn)
{
	INIT_WORK(&scn->fake_apps_ctx.resume_work,
		  hif_fake_apps_resume_work);
}
#else
/* Suspend/resume test disabled: nothing to initialize. */
static inline void hif_fake_apps_init_ctx(struct hif_softc *scn) {}
#endif
2420
/**
 * hif_config_ce() - configure copy engines
 * @scn: hif context
 *
 * Prepares fw, copy engine hardware and host sw according
 * to the attributes selected by hif_ce_prepare_config.
 *
 * also calls athdiag_procfs_init
 *
 * return: 0 for success nonzero for failure.
 */
int hif_config_ce(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct HIF_CE_pipe_info *pipe_info;
	int pipe_num;
	struct CE_state *ce_state;
#ifdef ADRASTEA_SHADOW_REGISTERS
	int i;
#endif
	QDF_STATUS rv = QDF_STATUS_SUCCESS;

	scn->notice_send = true;

	/* only acts when CONFIG_BYPASS_QMI is set; otherwise a no-op */
	hif_post_static_buf_to_target(scn);

	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;

	hif_config_rri_on_ddr(scn);

	/* bring up each copy engine described by the selected config */
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr *attr;
		pipe_info = &hif_state->pipe_info[pipe_num];
		pipe_info->pipe_num = pipe_num;
		pipe_info->HIF_CE_state = hif_state;
		attr = &hif_state->host_ce_config[pipe_num];

		pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
		ce_state = scn->ce_id_to_state[pipe_num];
		qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
		QDF_ASSERT(pipe_info->ce_hdl != NULL);
		if (pipe_info->ce_hdl == NULL) {
			rv = QDF_STATUS_E_FAILURE;
			A_TARGET_ACCESS_UNLIKELY(scn);
			goto err;
		}

		if (attr->flags & CE_ATTR_DIAG) {
			/* Reserve the ultimate CE for
			 * Diagnostic Window support */
			hif_state->ce_diag = pipe_info->ce_hdl;
			continue;
		}

		/* NSS wifi offload owns the htt rx data CEs; skip the
		 * host-side tasklet/irq setup for them.
		 */
		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
				(ce_state->htt_rx_data))
			continue;

		pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
		if (attr->dest_nentries > 0) {
			atomic_set(&pipe_info->recv_bufs_needed,
				   init_buffer_count(attr->dest_nentries - 1));
			/*SRNG based CE has one entry less */
			if (ce_srng_based(scn))
				atomic_dec(&pipe_info->recv_bufs_needed);
		} else {
			atomic_set(&pipe_info->recv_bufs_needed, 0);
		}
		ce_tasklet_init(hif_state, (1 << pipe_num));
		ce_register_irq(hif_state, (1 << pipe_num));
	}

	if (athdiag_procfs_init(scn) != 0) {
		/* NOTE(review): rv is still QDF_STATUS_SUCCESS here; the
		 * err path's return value does not depend on rv, so this
		 * still reports failure (see comment at err:).
		 */
		A_TARGET_ACCESS_UNLIKELY(scn);
		goto err;
	}
	scn->athdiag_procfs_inited = true;

	HIF_INFO_MED("%s: ce_init done", __func__);

	init_tasklet_workers(hif_hdl);
	hif_fake_apps_init_ctx(scn);

	HIF_TRACE("%s: X, ret = %d", __func__, rv);

#ifdef ADRASTEA_SHADOW_REGISTERS
	HIF_INFO("%s, Using Shadow Registers instead of CE Registers", __func__);
	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
		HIF_INFO("%s Shadow Register%d is mapped to address %x",
			 __func__, i,
			 (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
	}
#endif

	/* convert QDF status to the documented 0-success / nonzero-failure
	 * int; rv is SUCCESS on this path, so this returns 0.
	 */
	return rv != QDF_STATUS_SUCCESS;

err:
	/* Failure, so clean up */
	hif_unconfig_ce(scn);
	HIF_TRACE("%s: X, ret = %d", __func__, rv);
	/* constant-true expression: deliberately returns nonzero (1) to
	 * signal failure regardless of which path reached err.
	 */
	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
}
2524
#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
 * @hif_ctx: hif opaque context
 * @handler: Callback function
 * @context: handle for callback function
 *
 * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
 */
int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
				fastpath_msg_handler handler,
				void *context)
{
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	int i;

	if (!scn) {
		HIF_ERROR("%s: scn is NULL", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	if (!scn->fastpath_mode_on) {
		HIF_WARN("%s: Fastpath mode disabled", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	for (i = 0; i < scn->ce_count; i++) {
		ce_state = scn->ce_id_to_state[i];
		/* ce_id_to_state[] entries may be NULL; guard before
		 * dereferencing, matching the NULL checks in the LRO
		 * register/deregister loops in this file.
		 */
		if (ce_state && ce_state->htt_rx_data) {
			ce_state->fastpath_handler = handler;
			ce_state->context = context;
		}
	}

	return QDF_STATUS_SUCCESS;
}
#endif
2563
#ifdef IPA_OFFLOAD
/**
 * hif_ce_ipa_get_ce_resource() - get uc resource on hif
 * @scn: bus context
 * @ce_sr_base_paddr: copyengine source ring base physical address
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * IPA micro controller data path offload feature enabled,
 * HIF should release copy engine related resource information to IPA UC
 * IPA UC will access hardware resource with released information
 *
 * Return: None
 */
void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
				qdf_dma_addr_t *ce_sr_base_paddr,
				uint32_t *ce_sr_ring_size,
				qdf_dma_addr_t *ce_reg_paddr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_handle *ce_hdl =
		hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE].ce_hdl;

	ce_ipa_get_resource(ce_hdl, ce_sr_base_paddr, ce_sr_ring_size,
			    ce_reg_paddr);
}
#endif /* IPA_OFFLOAD */
2593
2594
2595#ifdef ADRASTEA_SHADOW_REGISTERS
2596
2597/*
2598 Current shadow register config
2599
2600 -----------------------------------------------------------
2601 Shadow Register | CE | src/dst write index
2602 -----------------------------------------------------------
2603 0 | 0 | src
2604 1 No Config - Doesn't point to anything
2605 2 No Config - Doesn't point to anything
2606 3 | 3 | src
2607 4 | 4 | src
2608 5 | 5 | src
2609 6 No Config - Doesn't point to anything
2610 7 | 7 | src
2611 8 No Config - Doesn't point to anything
2612 9 No Config - Doesn't point to anything
2613 10 No Config - Doesn't point to anything
2614 11 No Config - Doesn't point to anything
2615 -----------------------------------------------------------
2616 12 No Config - Doesn't point to anything
2617 13 | 1 | dst
2618 14 | 2 | dst
2619 15 No Config - Doesn't point to anything
2620 16 No Config - Doesn't point to anything
2621 17 No Config - Doesn't point to anything
2622 18 No Config - Doesn't point to anything
2623 19 | 7 | dst
2624 20 | 8 | dst
2625 21 No Config - Doesn't point to anything
2626 22 No Config - Doesn't point to anything
2627 23 No Config - Doesn't point to anything
2628 -----------------------------------------------------------
2629
2630
2631 ToDo - Move shadow register config to following in the future
2632 This helps free up a block of shadow registers towards the end.
2633 Can be used for other purposes
2634
2635 -----------------------------------------------------------
2636 Shadow Register | CE | src/dst write index
2637 -----------------------------------------------------------
2638 0 | 0 | src
2639 1 | 3 | src
2640 2 | 4 | src
2641 3 | 5 | src
2642 4 | 7 | src
2643 -----------------------------------------------------------
2644 5 | 1 | dst
2645 6 | 2 | dst
2646 7 | 7 | dst
2647 8 | 8 | dst
2648 -----------------------------------------------------------
2649 9 No Config - Doesn't point to anything
2650 12 No Config - Doesn't point to anything
2651 13 No Config - Doesn't point to anything
2652 14 No Config - Doesn't point to anything
2653 15 No Config - Doesn't point to anything
2654 16 No Config - Doesn't point to anything
2655 17 No Config - Doesn't point to anything
2656 18 No Config - Doesn't point to anything
2657 19 No Config - Doesn't point to anything
2658 20 No Config - Doesn't point to anything
2659 21 No Config - Doesn't point to anything
2660 22 No Config - Doesn't point to anything
2661 23 No Config - Doesn't point to anything
2662 -----------------------------------------------------------
2663*/
2664
Komal Seelam644263d2016-02-22 20:45:49 +05302665u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002666{
2667 u32 addr = 0;
Houston Hoffmane6330442016-02-26 12:19:11 -08002668 u32 ce = COPY_ENGINE_ID(ctrl_addr);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002669
Houston Hoffmane6330442016-02-26 12:19:11 -08002670 switch (ce) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002671 case 0:
2672 addr = SHADOW_VALUE0;
2673 break;
2674 case 3:
2675 addr = SHADOW_VALUE3;
2676 break;
2677 case 4:
2678 addr = SHADOW_VALUE4;
2679 break;
2680 case 5:
2681 addr = SHADOW_VALUE5;
2682 break;
2683 case 7:
2684 addr = SHADOW_VALUE7;
2685 break;
2686 default:
Houston Hoffmane6330442016-02-26 12:19:11 -08002687 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302688 QDF_ASSERT(0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002689 }
2690 return addr;
2691
2692}
2693
Komal Seelam644263d2016-02-22 20:45:49 +05302694u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002695{
2696 u32 addr = 0;
Houston Hoffmane6330442016-02-26 12:19:11 -08002697 u32 ce = COPY_ENGINE_ID(ctrl_addr);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002698
Houston Hoffmane6330442016-02-26 12:19:11 -08002699 switch (ce) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002700 case 1:
2701 addr = SHADOW_VALUE13;
2702 break;
2703 case 2:
2704 addr = SHADOW_VALUE14;
2705 break;
Vishwajith Upendra70efc752016-04-18 11:23:49 -07002706 case 5:
2707 addr = SHADOW_VALUE17;
2708 break;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002709 case 7:
2710 addr = SHADOW_VALUE19;
2711 break;
2712 case 8:
2713 addr = SHADOW_VALUE20;
2714 break;
Houston Hoffmane6330442016-02-26 12:19:11 -08002715 case 9:
2716 addr = SHADOW_VALUE21;
2717 break;
2718 case 10:
2719 addr = SHADOW_VALUE22;
2720 break;
Nirav Shah75cc5c82016-05-25 10:52:38 +05302721 case 11:
2722 addr = SHADOW_VALUE23;
2723 break;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002724 default:
Houston Hoffmane6330442016-02-26 12:19:11 -08002725 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302726 QDF_ASSERT(0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002727 }
2728
2729 return addr;
2730
2731}
2732#endif
2733
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002734#if defined(FEATURE_LRO)
Manjunathappa Prakash2146da32016-10-13 14:47:47 -07002735void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
2736{
2737 struct CE_state *ce_state;
2738 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
2739
Manjunathappa Prakash2146da32016-10-13 14:47:47 -07002740 ce_state = scn->ce_id_to_state[ctx_id];
2741
2742 return ce_state->lro_data;
2743}
2744
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002745/**
2746 * ce_lro_flush_cb_register() - register the LRO flush
2747 * callback
2748 * @scn: HIF context
2749 * @handler: callback function
2750 * @data: opaque data pointer to be passed back
2751 *
2752 * Store the LRO flush callback provided
2753 *
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002754 * Return: Number of instances the callback is registered for
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002755 */
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002756int ce_lro_flush_cb_register(struct hif_opaque_softc *hif_hdl,
Manjunathappa Prakash2146da32016-10-13 14:47:47 -07002757 void (handler)(void *),
2758 void *(lro_init_handler)(void))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002759{
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002760 int rc = 0;
Houston Hoffmanc7d54292016-04-13 18:55:37 -07002761 int i;
2762 struct CE_state *ce_state;
Komal Seelam5584a7c2016-02-24 19:22:48 +05302763 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
Manjunathappa Prakash2146da32016-10-13 14:47:47 -07002764 void *data = NULL;
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002765
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302766 QDF_ASSERT(scn != NULL);
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002767
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002768 if (scn != NULL) {
2769 for (i = 0; i < scn->ce_count; i++) {
2770 ce_state = scn->ce_id_to_state[i];
2771 if ((ce_state != NULL) && (ce_state->htt_rx_data)) {
Manjunathappa Prakash2146da32016-10-13 14:47:47 -07002772 data = lro_init_handler();
2773 if (data == NULL) {
2774 HIF_ERROR("%s: Failed to init LRO for CE %d",
2775 __func__, i);
2776 continue;
2777 }
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002778 ce_state->lro_flush_cb = handler;
2779 ce_state->lro_data = data;
2780 rc++;
2781 }
Houston Hoffmanc7d54292016-04-13 18:55:37 -07002782 }
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002783 } else {
2784 HIF_ERROR("%s: hif_state NULL!", __func__);
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002785 }
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002786 return rc;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002787}
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002788
2789/**
2790 * ce_lro_flush_cb_deregister() - deregister the LRO flush
2791 * callback
2792 * @scn: HIF context
2793 *
2794 * Remove the LRO flush callback
2795 *
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002796 * Return: Number of instances the callback is de-registered
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002797 */
Manjunathappa Prakash2146da32016-10-13 14:47:47 -07002798int ce_lro_flush_cb_deregister(struct hif_opaque_softc *hif_hdl,
2799 void (lro_deinit_cb)(void *))
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002800{
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002801 int rc = 0;
Houston Hoffmanc7d54292016-04-13 18:55:37 -07002802 int i;
2803 struct CE_state *ce_state;
Komal Seelam5584a7c2016-02-24 19:22:48 +05302804 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002805
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302806 QDF_ASSERT(scn != NULL);
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002807 if (scn != NULL) {
2808 for (i = 0; i < scn->ce_count; i++) {
2809 ce_state = scn->ce_id_to_state[i];
2810 if ((ce_state != NULL) && (ce_state->htt_rx_data)) {
Manjunathappa Prakash2146da32016-10-13 14:47:47 -07002811 qdf_spin_lock_bh(
2812 &ce_state->lro_unloading_lock);
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002813 ce_state->lro_flush_cb = NULL;
Manjunathappa Prakash2146da32016-10-13 14:47:47 -07002814 lro_deinit_cb(ce_state->lro_data);
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002815 ce_state->lro_data = NULL;
Manjunathappa Prakash2146da32016-10-13 14:47:47 -07002816 qdf_spin_unlock_bh(
2817 &ce_state->lro_unloading_lock);
2818 qdf_spinlock_destroy(
2819 &ce_state->lro_unloading_lock);
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002820 rc++;
2821 }
Houston Hoffmanc7d54292016-04-13 18:55:37 -07002822 }
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002823 } else {
2824 HIF_ERROR("%s: hif_state NULL!", __func__);
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002825 }
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002826 return rc;
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002827}
2828#endif
Sanjay Devnanic319c822015-11-06 16:44:28 -08002829
2830/**
2831 * hif_map_service_to_pipe() - returns the ce ids pertaining to
2832 * this service
Komal Seelam644263d2016-02-22 20:45:49 +05302833 * @scn: hif_softc pointer.
Sanjay Devnanic319c822015-11-06 16:44:28 -08002834 * @svc_id: Service ID for which the mapping is needed.
2835 * @ul_pipe: address of the container in which ul pipe is returned.
2836 * @dl_pipe: address of the container in which dl pipe is returned.
2837 * @ul_is_polled: address of the container in which a bool
2838 * indicating if the UL CE for this service
2839 * is polled is returned.
2840 * @dl_is_polled: address of the container in which a bool
2841 * indicating if the DL CE for this service
2842 * is polled is returned.
2843 *
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002844 * Return: Indicates whether the service has been found in the table.
2845 * Upon return, ul_is_polled is updated only if ul_pipe is updated.
2846 * There will be warning logs if either leg has not been updated
2847 * because it missed the entry in the table (but this is not an err).
Sanjay Devnanic319c822015-11-06 16:44:28 -08002848 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05302849int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
Sanjay Devnanic319c822015-11-06 16:44:28 -08002850 uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
2851 int *dl_is_polled)
2852{
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002853 int status = QDF_STATUS_E_INVAL;
Sanjay Devnanic319c822015-11-06 16:44:28 -08002854 unsigned int i;
2855 struct service_to_pipe element;
Sanjay Devnanic319c822015-11-06 16:44:28 -08002856 struct service_to_pipe *tgt_svc_map_to_use;
Houston Hoffman748e1a62017-03-30 17:20:42 -07002857 uint32_t sz_tgt_svc_map_to_use;
Komal Seelambd7c51d2016-02-24 10:27:30 +05302858 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
Houston Hoffman748e1a62017-03-30 17:20:42 -07002859 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002860 bool dl_updated = false;
2861 bool ul_updated = false;
Sanjay Devnanic319c822015-11-06 16:44:28 -08002862
Houston Hoffman748e1a62017-03-30 17:20:42 -07002863 hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
2864 &sz_tgt_svc_map_to_use);
Sanjay Devnanic319c822015-11-06 16:44:28 -08002865
2866 *dl_is_polled = 0; /* polling for received messages not supported */
2867
2868 for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
2869
2870 memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
2871 if (element.service_id == svc_id) {
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002872 if (element.pipedir == PIPEDIR_OUT) {
Sanjay Devnanic319c822015-11-06 16:44:28 -08002873 *ul_pipe = element.pipenum;
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002874 *ul_is_polled =
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302875 (hif_state->host_ce_config[*ul_pipe].flags &
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002876 CE_ATTR_DISABLE_INTR) != 0;
2877 ul_updated = true;
2878 } else if (element.pipedir == PIPEDIR_IN) {
Sanjay Devnanic319c822015-11-06 16:44:28 -08002879 *dl_pipe = element.pipenum;
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002880 dl_updated = true;
2881 }
2882 status = QDF_STATUS_SUCCESS;
Sanjay Devnanic319c822015-11-06 16:44:28 -08002883 }
2884 }
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002885 if (ul_updated == false)
Poddar, Siddarthf53a9b02017-03-14 20:30:17 +05302886 HIF_INFO("%s: ul pipe is NOT updated for service %d",
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002887 __func__, svc_id);
2888 if (dl_updated == false)
Poddar, Siddarthf53a9b02017-03-14 20:30:17 +05302889 HIF_INFO("%s: dl pipe is NOT updated for service %d",
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002890 __func__, svc_id);
Sanjay Devnanic319c822015-11-06 16:44:28 -08002891
2892 return status;
2893}
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002894
2895#ifdef SHADOW_REG_DEBUG
/* Debug variant of the source-ring read-index getter: reads the SRRI
 * from both the CE register and its DDR shadow, asserts if they
 * disagree, and returns the DDR value.
 */
inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, srri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);

	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != srri_from_ddr) {
		/* shadow out of sync with hardware: fatal in debug builds */
		HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
			  __func__, srri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return srri_from_ddr;
}
2913
2914
/* Debug variant of the destination-ring read-index getter: reads the
 * DRRI from both the CE register and its DDR shadow, asserts if they
 * disagree, and returns the DDR value.
 */
inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, drri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);

	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != drri_from_ddr) {
		/* shadow out of sync with hardware: fatal in debug builds */
		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
			  drri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return drri_from_ddr;
}
2932
2933#endif
2934
Houston Hoffman3d0cda82015-12-03 13:25:05 -08002935#ifdef ADRASTEA_RRI_ON_DDR
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002936/**
2937 * hif_get_src_ring_read_index(): Called to get the SRRI
2938 *
Komal Seelam644263d2016-02-22 20:45:49 +05302939 * @scn: hif_softc pointer
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002940 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2941 *
2942 * This function returns the SRRI to the caller. For CEs that
2943 * dont have interrupts enabled, we look at the DDR based SRRI
2944 *
2945 * Return: SRRI
2946 */
Komal Seelam644263d2016-02-22 20:45:49 +05302947inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002948 uint32_t CE_ctrl_addr)
2949{
2950 struct CE_attr attr;
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302951 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002952
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302953 attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002954 if (attr.flags & CE_ATTR_DISABLE_INTR)
2955 return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2956 else
2957 return A_TARGET_READ(scn,
2958 (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
2959}
2960
2961/**
2962 * hif_get_dst_ring_read_index(): Called to get the DRRI
2963 *
Komal Seelam644263d2016-02-22 20:45:49 +05302964 * @scn: hif_softc pointer
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002965 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2966 *
2967 * This function returns the DRRI to the caller. For CEs that
2968 * dont have interrupts enabled, we look at the DDR based DRRI
2969 *
2970 * Return: DRRI
2971 */
Komal Seelam644263d2016-02-22 20:45:49 +05302972inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002973 uint32_t CE_ctrl_addr)
2974{
2975 struct CE_attr attr;
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302976 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002977
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302978 attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002979
2980 if (attr.flags & CE_ATTR_DISABLE_INTR)
2981 return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2982 else
2983 return A_TARGET_READ(scn,
2984 (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
2985}
2986
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002987/**
2988 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2989 *
Komal Seelam644263d2016-02-22 20:45:49 +05302990 * @scn: hif_softc pointer
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002991 *
2992 * This function allocates non cached memory on ddr and sends
2993 * the physical address of this memory to the CE hardware. The
2994 * hardware updates the RRI on this particular location.
2995 *
2996 * Return: None
2997 */
Komal Seelam644263d2016-02-22 20:45:49 +05302998static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002999{
3000 unsigned int i;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303001 qdf_dma_addr_t paddr_rri_on_ddr;
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003002 uint32_t high_paddr, low_paddr;
3003 scn->vaddr_rri_on_ddr =
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303004 (uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
3005 scn->qdf_dev->dev, (CE_COUNT*sizeof(uint32_t)),
3006 &paddr_rri_on_ddr);
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003007
3008 low_paddr = BITS0_TO_31(paddr_rri_on_ddr);
3009 high_paddr = BITS32_TO_35(paddr_rri_on_ddr);
3010
Houston Hoffmanc50572b2016-06-08 19:49:46 -07003011 HIF_INFO("%s using srri and drri from DDR", __func__);
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003012
3013 WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
3014 WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
3015
3016 for (i = 0; i < CE_COUNT; i++)
3017 CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
3018
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303019 qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT*sizeof(uint32_t));
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003020
3021 return;
3022}
3023#else
3024
/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * Empty stub for platforms that don't support the RRI-on-DDR
 * functionality.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
}
3039#endif
Govind Singh2443fb32016-01-13 17:44:48 +05303040
3041/**
3042 * hif_dump_ce_registers() - dump ce registers
Komal Seelam5584a7c2016-02-24 19:22:48 +05303043 * @scn: hif_opaque_softc pointer.
Govind Singh2443fb32016-01-13 17:44:48 +05303044 *
3045 * Output the copy engine registers
3046 *
3047 * Return: 0 for success or error code
3048 */
Komal Seelam644263d2016-02-22 20:45:49 +05303049int hif_dump_ce_registers(struct hif_softc *scn)
Govind Singh2443fb32016-01-13 17:44:48 +05303050{
Komal Seelam5584a7c2016-02-24 19:22:48 +05303051 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
Govind Singh2443fb32016-01-13 17:44:48 +05303052 uint32_t ce_reg_address = CE0_BASE_ADDRESS;
Houston Hoffman6296c3e2016-07-12 18:43:32 -07003053 uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
Govind Singh2443fb32016-01-13 17:44:48 +05303054 uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
3055 uint16_t i;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303056 QDF_STATUS status;
Govind Singh2443fb32016-01-13 17:44:48 +05303057
Houston Hoffmand6f946c2016-04-06 15:16:00 -07003058 for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
3059 if (scn->ce_id_to_state[i] == NULL) {
3060 HIF_DBG("CE%d not used.", i);
3061 continue;
3062 }
3063
Komal Seelam644263d2016-02-22 20:45:49 +05303064 status = hif_diag_read_mem(hif_hdl, ce_reg_address,
Houston Hoffman6296c3e2016-07-12 18:43:32 -07003065 (uint8_t *) &ce_reg_values[0],
Govind Singh2443fb32016-01-13 17:44:48 +05303066 ce_reg_word_size * sizeof(uint32_t));
3067
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303068 if (status != QDF_STATUS_SUCCESS) {
Govind Singh2443fb32016-01-13 17:44:48 +05303069 HIF_ERROR("Dumping CE register failed!");
3070 return -EACCES;
3071 }
Venkateswara Swamy Bandaru772377c2016-10-03 14:17:28 +05303072 HIF_ERROR("CE%d=>\n", i);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303073 qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
Houston Hoffman6296c3e2016-07-12 18:43:32 -07003074 (uint8_t *) &ce_reg_values[0],
Govind Singh2443fb32016-01-13 17:44:48 +05303075 ce_reg_word_size * sizeof(uint32_t));
Venkateswara Swamy Bandaru772377c2016-10-03 14:17:28 +05303076 qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d\n", (ce_reg_address
3077 + SR_WR_INDEX_ADDRESS),
3078 ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
3079 qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d\n", (ce_reg_address
3080 + CURRENT_SRRI_ADDRESS),
3081 ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
3082 qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d\n", (ce_reg_address
3083 + DST_WR_INDEX_ADDRESS),
3084 ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
3085 qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d\n", (ce_reg_address
3086 + CURRENT_DRRI_ADDRESS),
3087 ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
3088 qdf_print("---\n");
Govind Singh2443fb32016-01-13 17:44:48 +05303089 }
Govind Singh2443fb32016-01-13 17:44:48 +05303090 return 0;
3091}
Houston Hoffman85925072016-05-06 17:02:18 -07003092#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
3093struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
3094 struct hif_pipe_addl_info *hif_info, uint32_t pipe)
3095{
3096 struct hif_softc *scn = HIF_GET_SOFTC(osc);
3097 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3098 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
3099 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
3100 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3101 struct CE_state *ce_state = (struct CE_state *)ce_hdl;
3102 struct CE_ring_state *src_ring = ce_state->src_ring;
3103 struct CE_ring_state *dest_ring = ce_state->dest_ring;
3104
3105 if (src_ring) {
3106 hif_info->ul_pipe.nentries = src_ring->nentries;
3107 hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
3108 hif_info->ul_pipe.sw_index = src_ring->sw_index;
3109 hif_info->ul_pipe.write_index = src_ring->write_index;
3110 hif_info->ul_pipe.hw_index = src_ring->hw_index;
3111 hif_info->ul_pipe.base_addr_CE_space =
3112 src_ring->base_addr_CE_space;
3113 hif_info->ul_pipe.base_addr_owner_space =
3114 src_ring->base_addr_owner_space;
3115 }
3116
3117
3118 if (dest_ring) {
3119 hif_info->dl_pipe.nentries = dest_ring->nentries;
3120 hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
3121 hif_info->dl_pipe.sw_index = dest_ring->sw_index;
3122 hif_info->dl_pipe.write_index = dest_ring->write_index;
3123 hif_info->dl_pipe.hw_index = dest_ring->hw_index;
3124 hif_info->dl_pipe.base_addr_CE_space =
3125 dest_ring->base_addr_CE_space;
3126 hif_info->dl_pipe.base_addr_owner_space =
3127 dest_ring->base_addr_owner_space;
3128 }
3129
3130 hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
3131 hif_info->ctrl_addr = ce_state->ctrl_addr;
3132
3133 return hif_info;
3134}
3135
3136uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
3137{
3138 struct hif_softc *scn = HIF_GET_SOFTC(osc);
3139
3140 scn->nss_wifi_ol_mode = mode;
3141 return 0;
3142}
3143
3144#endif
3145
Venkateswara Swamy Bandaru5432c1b2016-10-12 19:00:40 +05303146void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
3147{
3148 struct hif_softc *scn = HIF_GET_SOFTC(osc);
3149 scn->hif_attribute = hif_attrib;
3150}
3151
Houston Hoffman85925072016-05-06 17:02:18 -07003152void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
3153{
3154 struct hif_softc *scn = HIF_GET_SOFTC(osc);
3155 struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
3156 uint32_t ctrl_addr = CE_state->ctrl_addr;
3157
3158 Q_TARGET_ACCESS_BEGIN(scn);
3159 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
3160 Q_TARGET_ACCESS_END(scn);
3161}
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05303162
3163/**
3164 * hif_fw_event_handler() - hif fw event handler
3165 * @hif_state: pointer to hif ce state structure
3166 *
3167 * Process fw events and raise HTC callback to process fw events.
3168 *
3169 * Return: none
3170 */
3171static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
3172{
3173 struct hif_msg_callbacks *msg_callbacks =
3174 &hif_state->msg_callbacks_current;
3175
3176 if (!msg_callbacks->fwEventHandler)
3177 return;
3178
3179 msg_callbacks->fwEventHandler(msg_callbacks->Context,
3180 QDF_STATUS_E_FAILURE);
3181}
3182
3183#ifndef QCA_WIFI_3_0
/**
 * hif_fw_interrupt_handler() - FW interrupt handler
 * @irq: irq number
 * @arg: the user pointer (struct hif_softc *)
 *
 * Called from the PCI interrupt handler when the Target raises a
 * firmware-generated interrupt to the Host. Reads the FW indicator
 * register and, if an event is pending, acks it and dispatches it to
 * the HTC layer via hif_fw_event_handler().
 *
 * Return: status of handled irq (ATH_ISR_SCHED / ATH_ISR_NOSCHED)
 */
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	struct hif_softc *scn = arg;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	uint32_t fw_indicator_address, fw_indicator;

	/* Target must be awake before any register access */
	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return ATH_ISR_NOSCHED;

	fw_indicator_address = hif_state->fw_indicator_address;
	/* For sudden unplug this will return ~0 */
	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);

	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
		/* ACK: clear Target-side pending event */
		A_TARGET_WRITE(scn, fw_indicator_address,
			       fw_indicator & ~FW_IND_EVENT_PENDING);
		/* END must precede the (potentially slow) event dispatch */
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;

		if (hif_state->started) {
			hif_fw_event_handler(hif_state);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 */
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
					("%s: Early firmware event indicated\n",
					 __func__));
		}
	} else {
		/* No event pending (or device gone): just balance BEGIN */
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;
	}

	return ATH_ISR_SCHED;
}
3232#else
/* QCA_WIFI_3_0 stub: no per-irq work here; unconditionally reports scheduled */
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	return ATH_ISR_SCHED;
}
3237#endif /* #ifdef QCA_WIFI_3_0 */
3238
3239
3240/**
3241 * hif_wlan_disable(): call the platform driver to disable wlan
3242 * @scn: HIF Context
3243 *
3244 * This function passes the con_mode to platform driver to disable
3245 * wlan.
3246 *
3247 * Return: void
3248 */
3249void hif_wlan_disable(struct hif_softc *scn)
3250{
Yuanyuan Liufd594c22016-04-25 13:59:19 -07003251 enum pld_driver_mode mode;
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05303252 uint32_t con_mode = hif_get_conparam(scn);
3253
3254 if (QDF_GLOBAL_FTM_MODE == con_mode)
Yuanyuan Liufd594c22016-04-25 13:59:19 -07003255 mode = PLD_FTM;
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05303256 else if (QDF_IS_EPPING_ENABLED(con_mode))
Yuanyuan Liufd594c22016-04-25 13:59:19 -07003257 mode = PLD_EPPING;
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05303258 else
Yuanyuan Liufd594c22016-04-25 13:59:19 -07003259 mode = PLD_MISSION;
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05303260
Yuanyuan Liufd594c22016-04-25 13:59:19 -07003261 pld_wlan_disable(scn->qdf_dev->dev, mode);
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05303262}
Dustin Brown6bdbda52016-09-27 15:52:30 -07003263
Dustin Brown6834d322017-03-20 15:02:48 -07003264int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
3265{
3266 QDF_STATUS status;
3267 uint8_t ul_pipe, dl_pipe;
3268 int ul_is_polled, dl_is_polled;
3269
3270 /* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
3271 status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
3272 HTC_CTRL_RSVD_SVC,
3273 &ul_pipe, &dl_pipe,
3274 &ul_is_polled, &dl_is_polled);
3275 if (status) {
3276 HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
3277 return qdf_status_to_os_return(status);
3278 }
3279
3280 *ce_id = dl_pipe;
3281
3282 return 0;
3283}