/*
 * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */
#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#ifndef CONFIG_WIN
#include "qwlan_version.h"
#endif

#define CE_POLL_TIMEOUT 10      /* ms */

#define AGC_DUMP         1
#define CHANINFO_DUMP    2
#define BB_WATCHDOG_DUMP 3
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
#define PCIE_ACCESS_DUMP 4
#endif
#include "mp_dev.h"

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290)) && \
	!defined(QCA_WIFI_SUPPORT_SRNG)
#define QCA_WIFI_SUPPORT_SRNG
#endif

/* Forward references */
QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix EV118783: poll for a BMI response instead of relying solely on
 * the completion interrupt, which may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC  1000

#ifdef CONFIG_BYPASS_QMI
#define BYPASS_QMI 1
#else
#define BYPASS_QMI 0
#endif

#ifdef CONFIG_WIN
#if ENABLE_10_4_FW_HDR
#define WDI_IPA_SERVICE_GROUP 5
#define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
#define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
#define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
#endif /* ENABLE_10_4_FW_HDR */
#endif

QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn);
static void hif_config_rri_on_ddr(struct hif_softc *scn);

/**
 * hif_target_access_log_dump() - dump access log
 *
 * dump access log
 *
 * Return: n/a
 */
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
static void hif_target_access_log_dump(void)
{
	hif_target_dump_access_log();
}
#endif

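/**
 * hif_trigger_dump() - trigger a debug dump command
 * @hif_ctx: opaque HIF context
 * @cmd_id: one of AGC_DUMP, CHANINFO_DUMP, BB_WATCHDOG_DUMP or
 *	PCIE_ACCESS_DUMP
 * @start: true to start a capture, false to dump the captured data
 *
 * Return: n/a
 */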
void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
		      uint8_t cmd_id, bool start)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	switch (cmd_id) {
	case AGC_DUMP:
		if (start)
			priv_start_agc(scn);
		else
			priv_dump_agc(scn);
		break;
	case CHANINFO_DUMP:
		if (start)
			priv_start_cap_chaninfo(scn);
		else
			priv_dump_chaninfo(scn);
		break;
	case BB_WATCHDOG_DUMP:
		priv_dump_bbwatchdog(scn);
		break;
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	case PCIE_ACCESS_DUMP:
		hif_target_access_log_dump();
		break;
#endif
	default:
		HIF_ERROR("%s: Invalid htc dump command", __func__);
		break;
	}
}

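/*
 * ce_poll_timeout() - CE poll-mode timer handler: service the CE once,
 * then re-arm the timer while the CE remains in poll mode.
 */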
static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;

	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}

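/*
 * roundup_pwr2() - round n up to the next power of two. Returns n
 * unchanged when it is already a power of two (or zero); asserts and
 * returns 0 when n is too large to round up within 32 bits.
 */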
static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	QDF_ASSERT(0); /* n too large */
	return 0;
}

#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40

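/*
 * Per-CE shadow register offsets for the ring write indexes: source
 * CEs use the src write-index offset, destination CEs the dst one.
 */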
static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
#ifdef QCA_WIFI_3_0_ADRASTEA
	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
#endif
};

static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};

/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
 * mapping of HTC services to HIF pipes.
 */
/*
 * This authoritative table defines Copy Engine configuration and the mapping
 * of services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
 ============================================================================
 Purpose    | Service / Endpoint   | CE   | Direction | Xfer     | Xfer
            |                      |      |           | Size     | Frequency
 ============================================================================
 tx         | HTT_DATA (downlink)  | CE 0 | h->t      | medium - | very frequent
 descriptor |                      |      |           | O(100B)  | and regular
 download   |                      |      |           |          |
 ----------------------------------------------------------------------------
 rx         | HTT_DATA (uplink)    | CE 1 | t->h      | small -  | frequent and
 indication |                      |      |           | O(10B)   | regular
 upload     |                      |      |           |          |
 ----------------------------------------------------------------------------
 MSDU       | DATA_BK (uplink)     | CE 2 | t->h      | large -  | rare
 upload     |                      |      |           | O(1000B) | (frequent
 e.g. noise |                      |      |           |          | during IP1.0
 packets    |                      |      |           |          | testing)
 ----------------------------------------------------------------------------
 MSDU       | DATA_BK (downlink)   | CE 3 | h->t      | large -  | very rare
 download   |                      |      |           | O(1000B) | (frequent
 e.g.       |                      |      |           |          | during IP1.0
 misdirected|                      |      |           |          | testing)
 EAPOL      |                      |      |           |          |
 packets    |                      |      |           |          |
 ----------------------------------------------------------------------------
 n/a        | DATA_BE, DATA_VI     | CE 2 | t->h      |          | never(?)
            | DATA_VO (uplink)     |      |           |          |
 ----------------------------------------------------------------------------
 n/a        | DATA_BE, DATA_VI     | CE 3 | h->t      |          | never(?)
            | DATA_VO (downlink)   |      |           |          |
 ----------------------------------------------------------------------------
 WMI events | WMI_CONTROL (uplink) | CE 4 | t->h      | medium - | infrequent
            |                      |      |           | O(100B)  |
 ----------------------------------------------------------------------------
 WMI        | WMI_CONTROL          | CE 5 | h->t      | medium - | infrequent
 messages   | (downlink)           |      |           | O(100B)  |
 ----------------------------------------------------------------------------
 n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h      |          | never(?)
            | HTC_RAW_STREAMS      |      |           |          |
            | (uplink)             |      |           |          |
 ----------------------------------------------------------------------------
 n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t      |          | never(?)
            | HTC_RAW_STREAMS      |      |           |          |
            | (downlink)           |      |           |          |
 ----------------------------------------------------------------------------
 diag       | none (raw CE)        | CE 7 | t<>h      | 4        | Diag Window
            |                      |      |           |          | infrequent
 ============================================================================
 */

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC_WMAC1,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		7,
	},
	{
		WMI_CONTROL_SVC_WMAC1,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC_WMAC2,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		9,
	},
	{
		WMI_CONTROL_SVC_WMAC2,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		5,
	},
#if defined(QCA_WIFI_3_0_ADRASTEA)
	{
		HTT_DATA2_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		9,
	},
	{
		HTT_DATA3_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		10,
	},
	{
		PACKET_LOG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		11,
	},
#endif
	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN  = TARGET to HOST */
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};

static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	/* (Additions here) */
	{ 0, 0, 0, },
};

static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
#if WLAN_FEATURE_FASTPATH
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		5,
	},
#else /* WLAN_FEATURE_FASTPATH */
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
#endif /* WLAN_FEATURE_FASTPATH */

	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
	{0, 0, 0,},                             /* Must be last */
};

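/*
 * hif_select_service_to_pipe_map() - select the service-to-pipe map
 * (and its size in bytes) appropriate for the current mode
 * (epping vs. mission) and target type.
 */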
static void hif_select_service_to_pipe_map(struct hif_softc *scn,
		struct service_to_pipe **tgt_svc_map_to_use,
		uint32_t *sz_tgt_svc_map_to_use)
{
	uint32_t mode = hif_get_conparam(scn);
	struct hif_target_info *tgt_info = &scn->target_info;

	if (QDF_IS_EPPING_ENABLED(mode)) {
		*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
		*sz_tgt_svc_map_to_use =
			sizeof(target_service_to_ce_map_wlan_epping);
	} else {
		switch (tgt_info->target_type) {
		default:
			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_wlan);
			break;
		case TARGET_TYPE_AR900B:
		case TARGET_TYPE_QCA9984:
		case TARGET_TYPE_IPQ4019:
		case TARGET_TYPE_QCA9888:
		case TARGET_TYPE_AR9888:
		case TARGET_TYPE_AR9888V2:
			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_ar900b);
			break;
		case TARGET_TYPE_QCA6290:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6290);
			break;
		case TARGET_TYPE_QCA8074:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074);
			break;
		}
	}
}

/**
 * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
 * @ce_state : pointer to the state context of the CE
 *
 * Description:
 *   Sets the htt_rx_data or htt_tx_data attribute of the state
 *   structure if the CE serves one of the HTT DATA services.
 *
 * Return:
 *   true if the CE serves an HTT DATA service, false otherwise
 */
static bool ce_mark_datapath(struct CE_state *ce_state)
{
	struct service_to_pipe *svc_map;
	uint32_t map_sz, map_len;
	int i;
	bool rc = false;

	if (ce_state != NULL) {
		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
					       &map_sz);

		map_len = map_sz / sizeof(struct service_to_pipe);
		for (i = 0; i < map_len; i++) {
			if ((svc_map[i].pipenum == ce_state->id) &&
			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
				/* HTT CEs are unidirectional */
				if (svc_map[i].pipedir == PIPEDIR_IN)
					ce_state->htt_rx_data = true;
				else
					ce_state->htt_tx_data = true;
				rc = true;
			}
		}
	}
	return rc;
}

/**
 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
 * @ce_id: ce in question
 * @ring: ring state being examined
 * @type: "src_ring" or "dest_ring" string for identifying the ring
 *
 * Warns on non-zero index values.
 * Causes a kernel panic if the ring is not empty during initialization.
 */
static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
					 char *type)
{
	if (ring->write_index != 0 || ring->sw_index != 0)
		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
			  ce_id, type, ring->sw_index, ring->write_index);
	if (ring->write_index != ring->sw_index)
		QDF_BUG(0);
}

/**
 * ce_srng_based() - Does this target use srng
 * @scn: pointer to the HIF context
 *
 * Description:
 *   returns true if the target is SRNG based
 *
 * Return:
 *   true if the target uses SRNG copy engines, false otherwise
 */
bool ce_srng_based(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);

	switch (tgt_info->target_type) {
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA6290:
		return true;
	default:
		return false;
	}
	return false;
}

#ifdef QCA_WIFI_SUPPORT_SRNG
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	if (ce_srng_based(scn))
		return ce_services_srng();

	return ce_services_legacy();
}
#else /* QCA_WIFI_SUPPORT_SRNG */
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	return ce_services_legacy();
}
#endif /* QCA_WIFI_SUPPORT_SRNG */

static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
		struct pld_shadow_reg_v2_cfg **shadow_config,
		int *num_shadow_registers_configured)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
			scn, shadow_config, num_shadow_registers_configured);
}

static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
					uint8_t ring_type)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_get_desc_size(ring_type);
}

static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
		uint8_t ring_type, uint32_t nentries)
{
	uint32_t ce_nbytes;
	char *ptr;
	qdf_dma_addr_t base_addr;
	struct CE_ring_state *ce_ring;
	uint32_t desc_size;
	struct hif_softc *scn = CE_state->scn;

	ce_nbytes = sizeof(struct CE_ring_state)
		+ (nentries * sizeof(void *));
	ptr = qdf_mem_malloc(ce_nbytes);
	if (!ptr)
		return NULL;

	ce_ring = (struct CE_ring_state *)ptr;
	ptr += sizeof(struct CE_ring_state);
	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	ce_ring->low_water_mark_nentries = 0;
	ce_ring->high_water_mark_nentries = nentries;
	ce_ring->per_transfer_context = (void **)ptr;

	desc_size = ce_get_desc_size(scn, ring_type);

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	ce_ring->base_addr_owner_space_unaligned =
		qdf_mem_alloc_consistent(scn->qdf_dev,
					 scn->qdf_dev->dev,
					 (nentries * desc_size +
					  CE_DESC_RING_ALIGN),
					 &base_addr);
	if (ce_ring->base_addr_owner_space_unaligned == NULL) {
		HIF_ERROR("%s: ring has no DMA mem", __func__);
		qdf_mem_free(ce_ring);
		return NULL;
	}
	ce_ring->base_addr_CE_space_unaligned = base_addr;

	/* Correctly initialize memory to 0 to
	 * prevent garbage data crashing system
	 * when downloading firmware
	 */
	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
		     nentries * desc_size +
		     CE_DESC_RING_ALIGN);

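	/*
	 * Align the ring base to CE_DESC_RING_ALIGN using the usual
	 * align-up idiom ((addr + align - 1) & ~(align - 1)); the extra
	 * CE_DESC_RING_ALIGN bytes allocated above guarantee the aligned
	 * base still leaves room for all nentries descriptors.
	 */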
	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {

		ce_ring->base_addr_CE_space =
			(ce_ring->base_addr_CE_space_unaligned +
			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);

		ce_ring->base_addr_owner_space = (void *)
			(((size_t) ce_ring->base_addr_owner_space_unaligned +
			  CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
	} else {
		ce_ring->base_addr_CE_space =
				ce_ring->base_addr_CE_space_unaligned;
		ce_ring->base_addr_owner_space =
				ce_ring->base_addr_owner_space_unaligned;
	}

	return ce_ring;
}

static void ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
			  uint32_t ce_id, struct CE_ring_state *ring,
			  struct CE_attr *attr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
					      ring, attr);
}

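/*
 * hif_ce_bus_early_suspend() - pause all copy engines except the WMI
 * control pipes so no new traffic is serviced while the bus suspends.
 *
 * Return: 0 on success, else the pipe-mapping error code.
 */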
int hif_ce_bus_early_suspend(struct hif_softc *scn)
{
	uint8_t ul_pipe, dl_pipe;
	int ce_id, status, ul_is_polled, dl_is_polled;
	struct CE_state *ce_state;

	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		HIF_ERROR("%s: pipe_mapping failure", __func__);
		return status;
	}

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (ce_id == ul_pipe)
			continue;
		if (ce_id == dl_pipe)
			continue;

		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_RUNNING)
			ce_state->state = CE_PAUSED;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
	}

	return status;
}

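/*
 * hif_ce_bus_late_resume() - undo hif_ce_bus_early_suspend(): for CEs
 * left in CE_PENDING, flush the cached source ring write index to
 * hardware, then return all paused CEs to CE_RUNNING.
 *
 * Return: 0 (always succeeds).
 */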
int hif_ce_bus_late_resume(struct hif_softc *scn)
{
	int ce_id;
	struct CE_state *ce_state;
	int write_index;
	bool index_updated;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_PENDING) {
			write_index = ce_state->src_ring->write_index;
			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
						  write_index);
			ce_state->state = CE_RUNNING;
			index_updated = true;
		} else {
			index_updated = false;
		}

		if (ce_state->state == CE_PAUSED)
			ce_state->state = CE_RUNNING;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);

		if (index_updated)
			hif_record_ce_desc_event(scn, ce_id,
						 RESUME_WRITE_INDEX_UPDATE,
						 NULL, NULL, write_index);
	}

	return 0;
}

/**
 * ce_oom_recovery() - try to recover rx ce from oom condition
 * @context: CE_state of the CE with oom rx ring
 *
 * The executing work will continue to be rescheduled until
 * at least 1 descriptor is successfully posted to the rx ring.
 *
 * Return: none
 */
static void ce_oom_recovery(void *context)
{
	struct CE_state *ce_state = context;
	struct hif_softc *scn = ce_state->scn;
	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&ce_softc->pipe_info[ce_state->id];

	hif_post_recv_buffers_for_pipe(pipe_info);
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 *
 * This should be called during the initialization sequence before
 * interrupts are enabled, so we don't have to worry about thread safety.
 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800913 unsigned int CE_id, struct CE_attr *attr)
914{
915 struct CE_state *CE_state;
916 uint32_t ctrl_addr;
917 unsigned int nentries;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800918 bool malloc_CE_state = false;
919 bool malloc_src_ring = false;
920
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530921 QDF_ASSERT(CE_id < scn->ce_count);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800922 ctrl_addr = CE_BASE_ADDRESS(CE_id);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800923 CE_state = scn->ce_id_to_state[CE_id];
924
925 if (!CE_state) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800926 CE_state =
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530927 (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800928 if (!CE_state) {
929 HIF_ERROR("%s: CE_state has no mem", __func__);
930 return NULL;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800931 }
Houston Hoffman233e9092015-09-02 13:37:21 -0700932 malloc_CE_state = true;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530933 qdf_spinlock_create(&CE_state->ce_index_lock);
Houston Hoffman233e9092015-09-02 13:37:21 -0700934
935 CE_state->id = CE_id;
936 CE_state->ctrl_addr = ctrl_addr;
937 CE_state->state = CE_RUNNING;
938 CE_state->attr_flags = attr->flags;
Manjunathappa Prakash2146da32016-10-13 14:47:47 -0700939 qdf_spinlock_create(&CE_state->lro_unloading_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800940 }
941 CE_state->scn = scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800942
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530943 qdf_atomic_init(&CE_state->rx_pending);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800944 if (attr == NULL) {
945 /* Already initialized; caller wants the handle */
946 return (struct CE_handle *)CE_state;
947 }
948
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800949 if (CE_state->src_sz_max)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530950 QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800951 else
952 CE_state->src_sz_max = attr->src_sz_max;
953
Houston Hoffman68e837e2015-12-04 12:57:24 -0800954 ce_init_ce_desc_event_log(CE_id,
955 attr->src_nentries + attr->dest_nentries);
956
	/* source ring setup */
	nentries = attr->src_nentries;
	if (nentries) {
		struct CE_ring_state *src_ring;

		nentries = roundup_pwr2(nentries);
		if (CE_state->src_ring) {
			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
		} else {
			src_ring = CE_state->src_ring =
				ce_alloc_ring_state(CE_state,
						    CE_RING_SRC,
						    nentries);
			if (!src_ring) {
				/* cannot allocate src ring. If the
				 * CE_state is allocated locally free
				 * CE_State and return error.
				 */
				HIF_ERROR("%s: src ring has no mem", __func__);
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					qdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			}
			/* we can allocate src ring. Mark that the src ring is
			 * allocated locally
			 */
			malloc_src_ring = true;

			/*
			 * Also allocate a shadow src ring in
			 * regular mem to use for faster access.
			 */
			src_ring->shadow_base_unaligned =
				qdf_mem_malloc(nentries *
					       sizeof(struct CE_src_desc) +
					       CE_DESC_RING_ALIGN);
			if (src_ring->shadow_base_unaligned == NULL) {
				HIF_ERROR("%s: src ring no shadow_base mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->shadow_base = (struct CE_src_desc *)
				(((size_t) src_ring->shadow_base_unaligned +
				CE_DESC_RING_ALIGN - 1) &
				 ~(CE_DESC_RING_ALIGN - 1));

			if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
				goto error_target_access;

			ce_ring_setup(scn, CE_RING_SRC, CE_id, src_ring, attr);

			if (Q_TARGET_ACCESS_END(scn) < 0)
				goto error_target_access;
			ce_ring_test_initial_indexes(CE_id, src_ring,
						     "src_ring");
		}
	}

	/* destination ring setup */
	nentries = attr->dest_nentries;
	if (nentries) {
		struct CE_ring_state *dest_ring;

		nentries = roundup_pwr2(nentries);
		if (CE_state->dest_ring) {
			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
		} else {
			dest_ring = CE_state->dest_ring =
				ce_alloc_ring_state(CE_state,
						    CE_RING_DEST,
						    nentries);
			if (!dest_ring) {
				/* cannot allocate dst ring. If the CE_state
				 * or src ring is allocated locally free
				 * CE_State and src ring and return error.
				 */
				HIF_ERROR("%s: dest ring has no mem",
					  __func__);
				goto error_no_dma_mem;
			}

			if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
				goto error_target_access;

			ce_ring_setup(scn, CE_RING_DEST, CE_id,
				      dest_ring, attr);

			if (Q_TARGET_ACCESS_END(scn) < 0)
				goto error_target_access;

			ce_ring_test_initial_indexes(CE_id, dest_ring,
						     "dest_ring");

			/* For srng based target, init status ring here */
			if (ce_srng_based(CE_state->scn)) {
				CE_state->status_ring =
					ce_alloc_ring_state(CE_state,
							    CE_RING_STATUS,
							    nentries);
				if (CE_state->status_ring == NULL) {
					/* Allocation failed. Cleanup. */
					qdf_mem_free(CE_state->dest_ring);
					if (malloc_src_ring) {
						qdf_mem_free
							(CE_state->src_ring);
						CE_state->src_ring = NULL;
						malloc_src_ring = false;
					}
					if (malloc_CE_state) {
						/* allocated CE_state locally */
						scn->ce_id_to_state[CE_id] =
							NULL;
						qdf_mem_free(CE_state);
						malloc_CE_state = false;
					}

					return NULL;
				}
				if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
					goto error_target_access;

				ce_ring_setup(scn, CE_RING_STATUS, CE_id,
					      CE_state->status_ring, attr);

				if (Q_TARGET_ACCESS_END(scn) < 0)
					goto error_target_access;

			}

			/* epping */
			/* poll timer */
			if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL)) {
				qdf_timer_init(scn->qdf_dev,
					       &CE_state->poll_timer,
					       ce_poll_timeout,
					       CE_state,
					       QDF_TIMER_TYPE_SW);
				CE_state->timer_inited = true;
				qdf_timer_mod(&CE_state->poll_timer,
					      CE_POLL_TIMEOUT);
			}
		}
	}

	if (!ce_srng_based(scn)) {
		/* Enable CE error interrupts */
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			goto error_target_access;
		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			goto error_target_access;
	}

	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
			ce_oom_recovery, CE_state);

	/* update the htt_data attribute */
	ce_mark_datapath(CE_state);
	scn->ce_id_to_state[CE_id] = CE_state;

	return (struct CE_handle *)CE_state;

error_target_access:
error_no_dma_mem:
	ce_fini((struct CE_handle *)CE_state);
	return NULL;
}

#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_enable_fastpath() - update that we have enabled fastpath mode
 * @hif_ctx: HIF context
 *
 * For use in data path
 *
 * Return: void
 */
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (ce_srng_based(scn)) {
		HIF_INFO("%s, srng rings do not support fastpath", __func__);
		return;
	}
	HIF_DBG("%s, Enabling fastpath mode", __func__);
	scn->fastpath_mode_on = true;
}

1150 * hif_is_fastpath_mode_enabled - API to query if fasthpath mode is enabled
1151 * @hif_ctx: HIF Context
1152 *
1153 * For use in data path to skip HTC
1154 *
1155 * Return: bool
1156 */
1157bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
1158{
1159 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1160
1161 return scn->fastpath_mode_on;
1162}
1163
1164/**
1165 * hif_get_ce_handle - API to get CE handle for FastPath mode
1166 * @hif_ctx: HIF Context
1167 * @id: CopyEngine Id
1168 *
1169 * API to return CE handle for fastpath mode
1170 *
1171 * Return: void
1172 */
1173void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
1174{
1175 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1176
1177 return scn->ce_id_to_state[id];
1178}
1179
1180/**
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001181 * ce_h2t_tx_ce_cleanup() Place holder function for H2T CE cleanup.
1182 * No processing is required inside this function.
1183 * @ce_hdl: Cope engine handle
1184 * Using an assert, this function makes sure that,
1185 * the TX CE has been processed completely.
Houston Hoffman9a831ef2015-09-03 14:42:40 -07001186 *
1187 * This is called while dismantling CE structures. No other thread
1188 * should be using these structures while dismantling is occuring
1189 * therfore no locking is needed.
1190 *
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001191 * Return: none
1192 */
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001193void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001194{
1195 struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1196 struct CE_ring_state *src_ring = ce_state->src_ring;
Komal Seelam644263d2016-02-22 20:45:49 +05301197 struct hif_softc *sc = ce_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001198 uint32_t sw_index, write_index;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001199
Houston Hoffman85925072016-05-06 17:02:18 -07001200 if (hif_is_nss_wifi_enabled(sc))
1201 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001202
Houston Hoffmanc7d54292016-04-13 18:55:37 -07001203 if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -08001204 HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
Houston Hoffman85925072016-05-06 17:02:18 -07001205 __func__, __LINE__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001206 sw_index = src_ring->sw_index;
1207 write_index = src_ring->sw_index;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001208
1209 /* At this point Tx CE should be clean */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301210 qdf_assert_always(sw_index == write_index);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001211 }
1212}

/**
 * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
 * @ce_hdl: Handle to CE
 *
 * These buffers are never allocated on the fly, but
 * are allocated only once during HIF start and freed
 * only once during HIF stop.
 * NOTE:
 * The assumption here is there is no in-flight DMA in progress
 * currently, so that buffers can be freed up safely.
 *
 * Return: NONE
 */
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *dst_ring = ce_state->dest_ring;
	qdf_nbuf_t nbuf;
	int i;

	if (ce_state->scn->fastpath_mode_on == false)
		return;

	if (!ce_state->htt_rx_data)
		return;

	/*
	 * when fastpath_mode is on and for datapath CEs. Unlike other CE's,
	 * this CE is completely full: does not leave one blank space, to
	 * distinguish between empty queue & full queue. So free all the
	 * entries.
	 */
	for (i = 0; i < dst_ring->nentries; i++) {
		nbuf = dst_ring->per_transfer_context[i];

		/*
		 * The reasons for doing this check are:
		 * 1) Protect against calling cleanup before allocating buffers
		 * 2) In a corner case, FASTPATH_mode_on may be set, but we
		 *    could have a partially filled ring, because of a memory
		 *    allocation failure in the middle of allocating ring.
		 *    This check accounts for that case, checking
		 *    fastpath_mode_on flag or started flag would not have
		 *    covered that case. This is not in performance path,
		 *    so OK to do this.
		 */
		if (nbuf) {
			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
					      QDF_DMA_FROM_DEVICE);
			qdf_nbuf_free(nbuf);
		}
	}
}

/**
 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
 * @scn: HIF handle
 *
 * Datapath Rx CEs are special case, where we reuse all the message buffers.
 * Hence we have to post all the entries in the pipe, even, in the beginning
 * unlike for other CE pipes where one less than dest_nentries are filled in
 * the beginning.
 *
 * Return: None
 */
static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
	int pipe_num;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (scn->fastpath_mode_on == false)
		return;

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info =
			&hif_state->pipe_info[pipe_num];
		struct CE_state *ce_state =
			scn->ce_id_to_state[pipe_info->pipe_num];

		if (ce_state->htt_rx_data)
			atomic_inc(&pipe_info->recv_bufs_needed);
	}
}
#else
static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
}

static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return false;
}

static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
{
	return false;
}
#endif /* WLAN_FEATURE_FASTPATH */

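/*
 * ce_fini() - undo ce_init(): drain the rings, free the ring
 * descriptor memory and shadow copies, stop the poll timer and
 * release the CE state.
 */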
void ce_fini(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int CE_id = CE_state->id;
	struct hif_softc *scn = CE_state->scn;

	CE_state->state = CE_UNUSED;
	scn->ce_id_to_state[CE_id] = NULL;

	qdf_spinlock_destroy(&CE_state->lro_unloading_lock);

	qdf_lro_deinit(CE_state->lro_data);

	if (CE_state->src_ring) {
		/* Cleanup the datapath Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		if (CE_state->src_ring->shadow_base_unaligned)
			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
		if (CE_state->src_ring->base_addr_owner_space_unaligned)
			qdf_mem_free_consistent(scn->qdf_dev,
						scn->qdf_dev->dev,
						(CE_state->src_ring->nentries *
						 sizeof(struct CE_src_desc) +
						 CE_DESC_RING_ALIGN),
						CE_state->src_ring->
						base_addr_owner_space_unaligned,
						CE_state->src_ring->
						base_addr_CE_space, 0);
		qdf_mem_free(CE_state->src_ring);
	}
	if (CE_state->dest_ring) {
		/* Cleanup the datapath Rx ring */
		ce_t2h_msg_ce_cleanup(copyeng);

		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
			qdf_mem_free_consistent(scn->qdf_dev,
						scn->qdf_dev->dev,
						(CE_state->dest_ring->nentries *
						 sizeof(struct CE_dest_desc) +
						 CE_DESC_RING_ALIGN),
						CE_state->dest_ring->
						base_addr_owner_space_unaligned,
						CE_state->dest_ring->
						base_addr_CE_space, 0);
		qdf_mem_free(CE_state->dest_ring);

		/* epping */
		if (CE_state->timer_inited) {
			CE_state->timer_inited = false;
			qdf_timer_free(&CE_state->poll_timer);
		}
	}
	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
		/* Cleanup the status ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		if (CE_state->status_ring->shadow_base_unaligned)
			qdf_mem_free(
				CE_state->status_ring->shadow_base_unaligned);

		if (CE_state->status_ring->base_addr_owner_space_unaligned)
			qdf_mem_free_consistent(scn->qdf_dev,
						scn->qdf_dev->dev,
						(CE_state->status_ring->nentries *
						 sizeof(struct CE_src_desc) +
						 CE_DESC_RING_ALIGN),
						CE_state->status_ring->
						base_addr_owner_space_unaligned,
						CE_state->status_ring->
						base_addr_CE_space, 0);
		qdf_mem_free(CE_state->status_ring);
	}

	qdf_spinlock_destroy(&CE_state->ce_index_lock);
	qdf_mem_free(CE_state);
}

void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	qdf_mem_zero(&hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
	qdf_mem_zero(&hif_state->msg_callbacks_current,
		     sizeof(hif_state->msg_callbacks_current));
}

1401/* Send the first nbytes bytes of the buffer */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301402QDF_STATUS
Komal Seelam5584a7c2016-02-24 19:22:48 +05301403hif_send_head(struct hif_opaque_softc *hif_ctx,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001404 uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301405 qdf_nbuf_t nbuf, unsigned int data_attr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001406{
Komal Seelam644263d2016-02-22 20:45:49 +05301407 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05301408 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001409 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1410 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
1411 int bytes = nbytes, nfrags = 0;
1412 struct ce_sendlist sendlist;
1413 int status, i = 0;
1414 unsigned int mux_id = 0;
1415
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301416 QDF_ASSERT(nbytes <= qdf_nbuf_len(nbuf));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001417
1418 transfer_id =
1419 (mux_id & MUX_ID_MASK) |
1420 (transfer_id & TRANSACTION_ID_MASK);
1421 data_attr &= DESC_DATA_FLAG_MASK;
1422 /*
1423 * The common case involves sending multiple fragments within a
1424 * single download (the tx descriptor and the tx frame header).
1425 * So, optimize for the case of multiple fragments by not even
1426 * checking whether it's necessary to use a sendlist.
1427 * The overhead of using a sendlist for a single buffer download
1428 * is not a big deal, since it happens rarely (for WMI messages).
1429 */
1430 ce_sendlist_init(&sendlist);
1431 do {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301432 qdf_dma_addr_t frag_paddr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001433 int frag_bytes;
1434
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301435 frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
1436 frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001437 /*
1438 * Clear the packet offset for all but the first CE desc.
1439 */
1440 if (i++ > 0)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301441 data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001442
1443 status = ce_sendlist_buf_add(&sendlist, frag_paddr,
1444 frag_bytes >
1445 bytes ? bytes : frag_bytes,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301446 qdf_nbuf_get_frag_is_wordstream
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001447 (nbuf,
1448 nfrags) ? 0 :
1449 CE_SEND_FLAG_SWAP_DISABLE,
1450 data_attr);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301451 if (status != QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001452 HIF_ERROR("%s: error, frag_num %d larger than limit",
1453 __func__, nfrags);
1454 return status;
1455 }
1456 bytes -= frag_bytes;
1457 nfrags++;
1458 } while (bytes > 0);
1459
1460 /* Make sure we have resources to handle this request */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301461 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001462 if (pipe_info->num_sends_allowed < nfrags) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301463 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001464 ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301465 return QDF_STATUS_E_RESOURCES;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001466 }
1467 pipe_info->num_sends_allowed -= nfrags;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301468 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001469
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301470 if (qdf_unlikely(ce_hdl == NULL)) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001471 HIF_ERROR("%s: error CE handle is null", __func__);
1472 return A_ERROR;
1473 }
1474
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301475 QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301476 DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
Nandha Kishore Easwarane43583f2017-05-15 21:01:13 +05301477 QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
1478 sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001479 status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301480 QDF_ASSERT(status == QDF_STATUS_SUCCESS);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001481
1482 return status;
1483}
1484
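/*
 * Example (illustrative sketch, not part of the driver): one way a
 * caller could feed a frame to the multi-fragment send path above. The
 * hif_send_head() prototype is assumed from the parameter usage in this
 * file, and the transfer id value is a hypothetical placeholder.
 */
static QDF_STATUS example_tx_one_frame(struct hif_opaque_softc *hif_ctx,
				       uint8_t pipe, qdf_nbuf_t nbuf)
{
	uint32_t data_attr = 0;		/* no per-descriptor flags */
	unsigned int transfer_id = 1;	/* hypothetical endpoint id */

	/* cheap flow-control check; the send path itself also fails with
	 * QDF_STATUS_E_RESOURCES when fewer slots than fragments remain
	 */
	if (!hif_get_free_queue_number(hif_ctx, pipe))
		return QDF_STATUS_E_RESOURCES;

	return hif_send_head(hif_ctx, pipe, transfer_id,
			     qdf_nbuf_len(nbuf), nbuf, data_attr);
}
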
Komal Seelam5584a7c2016-02-24 19:22:48 +05301485void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
1486 int force)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001487{
Komal Seelam644263d2016-02-22 20:45:49 +05301488 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05301489 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Komal Seelam644263d2016-02-22 20:45:49 +05301490
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001491 if (!force) {
1492 int resources;
1493 /*
1494 * Decide whether to actually poll for completions, or just
1495 * wait for a later chance. If there seem to be plenty of
1496 * resources left, then just wait, since checking involves
1497 * reading a CE register, which is a relatively expensive
1498 * operation.
1499 */
Komal Seelam644263d2016-02-22 20:45:49 +05301500 resources = hif_get_free_queue_number(hif_ctx, pipe);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001501 /*
1502 * If at least 50% of the total resources are still available,
1503 * don't bother checking again yet.
1504 */
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001505 if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
1506 1))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001507 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001508 }
Houston Hoffman56e0d702016-05-05 17:48:06 -07001509#if ATH_11AC_TXCOMPACT
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001510 ce_per_engine_servicereap(scn, pipe);
1511#else
1512 ce_per_engine_service(scn, pipe);
1513#endif
1514}
1515
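/*
 * Example (illustrative sketch): callers normally pass force=0 so the
 * relatively expensive CE register read is skipped while at least half
 * of the send slots remain free, and force=1 only when every pending
 * completion must be reaped, e.g. while flushing a pipe.
 */
static void example_tx_completion_poll(struct hif_opaque_softc *hif_ctx,
				       uint8_t pipe, bool flushing)
{
	hif_send_complete_check(hif_ctx, pipe, flushing ? 1 : 0);
}
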
Komal Seelam5584a7c2016-02-24 19:22:48 +05301516uint16_t
1517hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001518{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301519 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001520 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1521 uint16_t rv;
1522
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301523 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001524 rv = pipe_info->num_sends_allowed;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301525 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001526 return rv;
1527}
1528
1529/* Called by lower (CE) layer when a send to Target completes. */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001530static void
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001531hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301532 void *transfer_context, qdf_dma_addr_t CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001533 unsigned int nbytes, unsigned int transfer_id,
1534 unsigned int sw_index, unsigned int hw_index,
1535 unsigned int toeplitz_hash_result)
1536{
1537 struct HIF_CE_pipe_info *pipe_info =
1538 (struct HIF_CE_pipe_info *)ce_context;
1539 struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
Komal Seelam644263d2016-02-22 20:45:49 +05301540 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001541 unsigned int sw_idx = sw_index, hw_idx = hw_index;
Houston Hoffman85118512015-09-28 14:17:11 -07001542 struct hif_msg_callbacks *msg_callbacks =
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05301543 &pipe_info->pipe_callbacks;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001544
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001545 do {
1546 /*
Houston Hoffman85118512015-09-28 14:17:11 -07001547 * The upper layer callback will be triggered
 1548 * when last fragment is completed.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001549 */
Houston Hoffman85118512015-09-28 14:17:11 -07001550 if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
Houston Hoffman1c728302017-03-10 16:58:49 -08001551 if (scn->target_status == TARGET_STATUS_RESET) {
1552
1553 qdf_nbuf_unmap_single(scn->qdf_dev,
1554 transfer_context,
1555 QDF_DMA_TO_DEVICE);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301556 qdf_nbuf_free(transfer_context);
Houston Hoffman1c728302017-03-10 16:58:49 -08001557 } else
Houston Hoffman49794a32015-12-21 12:14:56 -08001558 msg_callbacks->txCompletionHandler(
Houston Hoffman85118512015-09-28 14:17:11 -07001559 msg_callbacks->Context,
1560 transfer_context, transfer_id,
1561 toeplitz_hash_result);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001562 }
1563
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301564 qdf_spin_lock(&pipe_info->completion_freeq_lock);
Houston Hoffman85118512015-09-28 14:17:11 -07001565 pipe_info->num_sends_allowed++;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301566 qdf_spin_unlock(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001567 } while (ce_completed_send_next(copyeng,
1568 &ce_context, &transfer_context,
1569 &CE_data, &nbytes, &transfer_id,
1570 &sw_idx, &hw_idx,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301571 &toeplitz_hash_result) == QDF_STATUS_SUCCESS);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001572}
1573
Houston Hoffman910c6262015-09-28 12:56:25 -07001574/**
1575 * hif_ce_do_recv(): send message from copy engine to upper layers
1576 * @msg_callbacks: structure containing callback and callback context
 1577 * @netbuf: skb containing message
1578 * @nbytes: number of bytes in the message
1579 * @pipe_info: used for the pipe_number info
1580 *
 1581 * Checks the packet length, configures the length in the netbuf,
1582 * and calls the upper layer callback.
1583 *
 1584 * Return: None
1585 */
1586static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301587 qdf_nbuf_t netbuf, int nbytes,
Houston Hoffman910c6262015-09-28 12:56:25 -07001588 struct HIF_CE_pipe_info *pipe_info) {
1589 if (nbytes <= pipe_info->buf_sz) {
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301590 qdf_nbuf_set_pktlen(netbuf, nbytes);
Houston Hoffman910c6262015-09-28 12:56:25 -07001591 msg_callbacks->
1592 rxCompletionHandler(msg_callbacks->Context,
1593 netbuf, pipe_info->pipe_num);
1594 } else {
Jeff Johnsonb9450212017-09-18 10:12:38 -07001595 HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d",
Houston Hoffman910c6262015-09-28 12:56:25 -07001596 __func__, netbuf, nbytes);
Houston Hoffman1c728302017-03-10 16:58:49 -08001597
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301598 qdf_nbuf_free(netbuf);
Houston Hoffman910c6262015-09-28 12:56:25 -07001599 }
1600}
1601
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001602/* Called by lower (CE) layer when data is received from the Target. */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001603static void
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001604hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301605 void *transfer_context, qdf_dma_addr_t CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001606 unsigned int nbytes, unsigned int transfer_id,
1607 unsigned int flags)
1608{
1609 struct HIF_CE_pipe_info *pipe_info =
1610 (struct HIF_CE_pipe_info *)ce_context;
1611 struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
Houston Hoffman18c7fc52015-09-02 11:44:42 -07001612 struct CE_state *ce_state = (struct CE_state *) copyeng;
Komal Seelam644263d2016-02-22 20:45:49 +05301613 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffmane02e12d2016-03-14 21:11:36 -07001614#ifdef HIF_PCI
1615 struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
1616#endif
Houston Hoffman910c6262015-09-28 12:56:25 -07001617 struct hif_msg_callbacks *msg_callbacks =
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05301618 &pipe_info->pipe_callbacks;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001619
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001620 do {
Houston Hoffmane02e12d2016-03-14 21:11:36 -07001621#ifdef HIF_PCI
1622 hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
1623#endif
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301624 qdf_nbuf_unmap_single(scn->qdf_dev,
1625 (qdf_nbuf_t) transfer_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301626 QDF_DMA_FROM_DEVICE);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001627
Houston Hoffman910c6262015-09-28 12:56:25 -07001628 atomic_inc(&pipe_info->recv_bufs_needed);
1629 hif_post_recv_buffers_for_pipe(pipe_info);
Komal Seelam6ee55902016-04-11 17:11:07 +05301630 if (scn->target_status == TARGET_STATUS_RESET)
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301631 qdf_nbuf_free(transfer_context);
Houston Hoffman49794a32015-12-21 12:14:56 -08001632 else
1633 hif_ce_do_recv(msg_callbacks, transfer_context,
Houston Hoffman9c0f80a2015-09-28 18:36:36 -07001634 nbytes, pipe_info);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001635
 1636 /* Set up force_break flag if num of receives reaches
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001637 * MAX_NUM_OF_RECEIVES
1638 */
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001639 ce_state->receive_count++;
Houston Hoffman05652722016-04-29 16:58:59 -07001640 if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
Houston Hoffman18c7fc52015-09-02 11:44:42 -07001641 ce_state->force_break = 1;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001642 break;
1643 }
1644 } while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
1645 &CE_data, &nbytes, &transfer_id,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301646 &flags) == QDF_STATUS_SUCCESS);
Houston Hoffmanf4607852015-12-17 17:14:40 -08001647
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001648}
1649
1650/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
1651
1652void
Komal Seelam5584a7c2016-02-24 19:22:48 +05301653hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001654 struct hif_msg_callbacks *callbacks)
1655{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301656 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001657
1658#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
1659 spin_lock_init(&pcie_access_log_lock);
1660#endif
1661 /* Save callbacks for later installation */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301662 qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001663 sizeof(hif_state->msg_callbacks_pending));
1664
1665}
1666
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001667static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001668{
1669 struct CE_handle *ce_diag = hif_state->ce_diag;
1670 int pipe_num;
Komal Seelam644263d2016-02-22 20:45:49 +05301671 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07001672 struct hif_msg_callbacks *hif_msg_callbacks =
1673 &hif_state->msg_callbacks_current;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001674
1675 /* daemonize("hif_compl_thread"); */
1676
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001677 if (scn->ce_count == 0) {
Houston Hoffmanc50572b2016-06-08 19:49:46 -07001678 HIF_ERROR("%s: Invalid ce_count", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001679 return -EINVAL;
1680 }
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07001681
1682 if (!hif_msg_callbacks ||
1683 !hif_msg_callbacks->rxCompletionHandler ||
1684 !hif_msg_callbacks->txCompletionHandler) {
1685 HIF_ERROR("%s: no completion handler registered", __func__);
1686 return -EFAULT;
1687 }
1688
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001689 A_TARGET_ACCESS_LIKELY(scn);
1690 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1691 struct CE_attr attr;
1692 struct HIF_CE_pipe_info *pipe_info;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001693
1694 pipe_info = &hif_state->pipe_info[pipe_num];
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001695 if (pipe_info->ce_hdl == ce_diag)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001696 continue; /* Handle Diagnostic CE specially */
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05301697 attr = hif_state->host_ce_config[pipe_num];
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001698 if (attr.src_nentries) {
1699 /* pipe used to send to target */
Jeff Johnsonb9450212017-09-18 10:12:38 -07001700 HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK",
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001701 __func__, pipe_num, pipe_info);
1702 ce_send_cb_register(pipe_info->ce_hdl,
1703 hif_pci_ce_send_done, pipe_info,
1704 attr.flags & CE_ATTR_DISABLE_INTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001705 pipe_info->num_sends_allowed = attr.src_nentries - 1;
1706 }
1707 if (attr.dest_nentries) {
1708 /* pipe used to receive from target */
1709 ce_recv_cb_register(pipe_info->ce_hdl,
1710 hif_pci_ce_recv_data, pipe_info,
1711 attr.flags & CE_ATTR_DISABLE_INTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001712 }
Houston Hoffman6666df72015-11-30 16:48:35 -08001713
1714 if (attr.src_nentries)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301715 qdf_spinlock_create(&pipe_info->completion_freeq_lock);
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05301716
1717 qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
1718 sizeof(pipe_info->pipe_callbacks));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001719 }
Houston Hoffman6666df72015-11-30 16:48:35 -08001720
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001721 A_TARGET_ACCESS_UNLIKELY(scn);
1722 return 0;
1723}
1724
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001725/*
1726 * Install pending msg callbacks.
1727 *
1728 * TBDXXX: This hack is needed because upper layers install msg callbacks
1729 * for use with HTC before BMI is done; yet this HIF implementation
1730 * needs to continue to use BMI msg callbacks. Really, upper layers
1731 * should not register HTC callbacks until AFTER BMI phase.
1732 */
Komal Seelam644263d2016-02-22 20:45:49 +05301733static void hif_msg_callbacks_install(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001734{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301735 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001736
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301737 qdf_mem_copy(&hif_state->msg_callbacks_current,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001738 &hif_state->msg_callbacks_pending,
1739 sizeof(hif_state->msg_callbacks_pending));
1740}
1741
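/*
 * Example (illustrative sketch): how an upper layer might register its
 * handlers through hif_post_init() and then bring the interface up with
 * hif_start(), which performs the deferred install described above. The
 * handler signatures are assumptions based on the call sites in this
 * file; example_ctx is a hypothetical cookie.
 */
static QDF_STATUS example_rx_done(void *context, qdf_nbuf_t netbuf,
				  uint8_t pipe_id)
{
	qdf_nbuf_free(netbuf);	/* ownership passed up by hif_ce_do_recv */
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS example_tx_done(void *context, qdf_nbuf_t netbuf,
				  uint32_t transfer_id, uint32_t toeplitz_hash)
{
	qdf_nbuf_free(netbuf);
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS example_register_and_start(struct hif_opaque_softc *hif_ctx,
					     void *example_ctx)
{
	struct hif_msg_callbacks cbs = { 0 };

	cbs.Context = example_ctx;
	cbs.rxCompletionHandler = example_rx_done;
	cbs.txCompletionHandler = example_tx_done;

	/* saved into msg_callbacks_pending; not live until hif_start() */
	hif_post_init(hif_ctx, NULL, &cbs);

	return hif_start(hif_ctx);
}
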
Komal Seelam5584a7c2016-02-24 19:22:48 +05301742void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
1743 uint8_t *DLPipe)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001744{
1745 int ul_is_polled, dl_is_polled;
1746
Komal Seelam644263d2016-02-22 20:45:49 +05301747 (void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001748 ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
1749}
1750
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001751/**
1752 * hif_dump_pipe_debug_count() - Log error count
Komal Seelam644263d2016-02-22 20:45:49 +05301753 * @scn: hif_softc pointer.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001754 *
1755 * Output the pipe error counts of each pipe to log file
1756 *
1757 * Return: N/A
1758 */
Komal Seelam644263d2016-02-22 20:45:49 +05301759void hif_dump_pipe_debug_count(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001760{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301761 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001762 int pipe_num;
1763
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001764 if (hif_state == NULL) {
1765 HIF_ERROR("%s hif_state is NULL", __func__);
1766 return;
1767 }
1768 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1769 struct HIF_CE_pipe_info *pipe_info;
1770
1771 pipe_info = &hif_state->pipe_info[pipe_num];
1772
1773 if (pipe_info->nbuf_alloc_err_count > 0 ||
1774 pipe_info->nbuf_dma_err_count > 0 ||
1775 pipe_info->nbuf_ce_enqueue_err_count)
1776 HIF_ERROR(
1777 "%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
1778 __func__, pipe_info->pipe_num,
1779 atomic_read(&pipe_info->recv_bufs_needed),
1780 pipe_info->nbuf_alloc_err_count,
1781 pipe_info->nbuf_dma_err_count,
1782 pipe_info->nbuf_ce_enqueue_err_count);
1783 }
1784}
1785
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08001786static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
1787 void *nbuf, uint32_t *error_cnt,
1788 enum hif_ce_event_type failure_type,
1789 const char *failure_type_string)
1790{
1791 int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
1792 struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
1793 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
1794 int ce_id = CE_state->id;
1795 uint32_t error_cnt_tmp;
1796
1797 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
1798 error_cnt_tmp = ++(*error_cnt);
1799 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Himanshu Agarwal38cea4a2017-03-30 19:02:52 +05301800 HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08001801 __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
1802 failure_type_string);
1803 hif_record_ce_desc_event(scn, ce_id, failure_type,
1804 NULL, nbuf, bufs_needed_tmp);
 1805 /* If we fail to allocate the last buffer for an rx pipe,
 1806 * there is no trigger to refill the CE and we will
 1807 * eventually crash.
1808 */
Himanshu Agarwalbedeed92017-03-21 14:05:10 +05301809 if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08001810 qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
Himanshu Agarwalbedeed92017-03-21 14:05:10 +05301811
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08001812}
1813
Nachiket Kukadee5738b52017-09-07 17:16:12 +05301817QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001818{
1819 struct CE_handle *ce_hdl;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301820 qdf_size_t buf_sz;
Komal Seelam644263d2016-02-22 20:45:49 +05301821 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05301822 QDF_STATUS status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001823 uint32_t bufs_posted = 0;
1824
1825 buf_sz = pipe_info->buf_sz;
1826 if (buf_sz == 0) {
1827 /* Unused Copy Engine */
Nachiket Kukadee5738b52017-09-07 17:16:12 +05301828 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001829 }
1830
1831 ce_hdl = pipe_info->ce_hdl;
1832
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301833 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001834 while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301835 qdf_dma_addr_t CE_data; /* CE space buffer address */
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301836 qdf_nbuf_t nbuf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001837
1838 atomic_dec(&pipe_info->recv_bufs_needed);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301839 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001840
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301841 nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001842 if (!nbuf) {
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08001843 hif_post_recv_buffers_failure(pipe_info, nbuf,
1844 &pipe_info->nbuf_alloc_err_count,
1845 HIF_RX_NBUF_ALLOC_FAILURE,
1846 "HIF_RX_NBUF_ALLOC_FAILURE");
Nachiket Kukadee5738b52017-09-07 17:16:12 +05301847 return QDF_STATUS_E_NOMEM;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001848 }
1849
1850 /*
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301851 * qdf_nbuf_peek_header(nbuf, &data, &unused);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001852 * CE_data = dma_map_single(dev, data, buf_sz, );
1853 * DMA_FROM_DEVICE);
1854 */
Nachiket Kukadee5738b52017-09-07 17:16:12 +05301855 status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301856 QDF_DMA_FROM_DEVICE);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001857
Nachiket Kukadee5738b52017-09-07 17:16:12 +05301858 if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08001859 hif_post_recv_buffers_failure(pipe_info, nbuf,
1860 &pipe_info->nbuf_dma_err_count,
1861 HIF_RX_NBUF_MAP_FAILURE,
1862 "HIF_RX_NBUF_MAP_FAILURE");
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301863 qdf_nbuf_free(nbuf);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05301864 return status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001865 }
1866
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301867 CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001868
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301869 qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001870 buf_sz, DMA_FROM_DEVICE);
1871 status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05301872 if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08001873 hif_post_recv_buffers_failure(pipe_info, nbuf,
1874 &pipe_info->nbuf_ce_enqueue_err_count,
1875 HIF_RX_NBUF_ENQUEUE_FAILURE,
1876 "HIF_RX_NBUF_ENQUEUE_FAILURE");
1877
Govind Singh4fcafd42016-08-08 12:37:31 +05301878 qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
1879 QDF_DMA_FROM_DEVICE);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301880 qdf_nbuf_free(nbuf);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05301881 return status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001882 }
1883
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301884 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001885 bufs_posted++;
1886 }
1887 pipe_info->nbuf_alloc_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07001888 (pipe_info->nbuf_alloc_err_count > bufs_posted) ?
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001889 pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
1890 pipe_info->nbuf_dma_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07001891 (pipe_info->nbuf_dma_err_count > bufs_posted) ?
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001892 pipe_info->nbuf_dma_err_count - bufs_posted : 0;
1893 pipe_info->nbuf_ce_enqueue_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07001894 (pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08001895 pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001896
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301897 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001898
Nachiket Kukadee5738b52017-09-07 17:16:12 +05301899 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001900}
1901
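/*
 * Example (illustrative sketch): the oom_allocation_work queued by
 * hif_post_recv_buffers_failure() ultimately needs to retry the refill;
 * a minimal worker, assuming the work item context carries the starved
 * pipe, could look like this. If the retry fails again, the failure
 * path above reschedules the work, so no extra looping is needed here.
 */
static void example_oom_refill_work(void *context)
{
	struct HIF_CE_pipe_info *pipe_info = context;

	/* recv_bufs_needed was already incremented on the failure path */
	hif_post_recv_buffers_for_pipe(pipe_info);
}
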
1902/*
1903 * Try to post all desired receive buffers for all pipes.
Govind Singhcaa850e2017-04-20 16:41:36 +05301904 * Returns QDF_STATUS_SUCCESS for a non-fastpath rx copy engine, since
 1905 * oom_allocation_work is scheduled to recover from any failures;
 1906 * returns an error status if unable to completely replenish
 1907 * receive buffers for a fastpath rx copy engine.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001908 */
Nachiket Kukadee5738b52017-09-07 17:16:12 +05301909QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001910{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301911 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05301912 int pipe_num;
Houston Hoffman85925072016-05-06 17:02:18 -07001913 struct CE_state *ce_state;
Nachiket Kukadee5738b52017-09-07 17:16:12 +05301914 QDF_STATUS qdf_status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001915
1916 A_TARGET_ACCESS_LIKELY(scn);
1917 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1918 struct HIF_CE_pipe_info *pipe_info;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001919
Houston Hoffman85925072016-05-06 17:02:18 -07001920 ce_state = scn->ce_id_to_state[pipe_num];
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001921 pipe_info = &hif_state->pipe_info[pipe_num];
Houston Hoffman85925072016-05-06 17:02:18 -07001922
1923 if (hif_is_nss_wifi_enabled(scn) &&
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001924 ce_state && (ce_state->htt_rx_data))
Houston Hoffman85925072016-05-06 17:02:18 -07001925 continue;
Houston Hoffman85925072016-05-06 17:02:18 -07001926
Nachiket Kukadee5738b52017-09-07 17:16:12 +05301927 qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
1928 if (!QDF_IS_STATUS_SUCCESS(qdf_status) &&
Govind Singhcaa850e2017-04-20 16:41:36 +05301929 ce_state->htt_rx_data &&
1930 scn->fastpath_mode_on) {
Nachiket Kukadee5738b52017-09-07 17:16:12 +05301931 A_TARGET_ACCESS_UNLIKELY(scn);
1932 return qdf_status;
Govind Singhcaa850e2017-04-20 16:41:36 +05301933 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001934 }
1935
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001936 A_TARGET_ACCESS_UNLIKELY(scn);
1937
Nachiket Kukadee5738b52017-09-07 17:16:12 +05301938 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001939}
1940
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301941QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001942{
Komal Seelam644263d2016-02-22 20:45:49 +05301943 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05301944 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05301945 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001946
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001947 hif_update_fastpath_recv_bufs_cnt(scn);
1948
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07001949 hif_msg_callbacks_install(scn);
1950
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001951 if (hif_completion_thread_startup(hif_state))
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301952 return QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001953
Houston Hoffman271951f2016-11-12 15:24:27 -08001954 /* enable buffer cleanup */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001955 hif_state->started = true;
1956
Houston Hoffman271951f2016-11-12 15:24:27 -08001957 /* Post buffers once to start things off. */
Nachiket Kukadee5738b52017-09-07 17:16:12 +05301958 qdf_status = hif_post_recv_buffers(scn);
1959 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
Houston Hoffman271951f2016-11-12 15:24:27 -08001960 /* cleanup is done in hif_ce_disable */
1961 HIF_ERROR("%s:failed to post buffers", __func__);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05301962 return qdf_status;
Houston Hoffman271951f2016-11-12 15:24:27 -08001963 }
1964
Nachiket Kukadee5738b52017-09-07 17:16:12 +05301965 return qdf_status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001966}
1967
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001968static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001969{
Komal Seelam644263d2016-02-22 20:45:49 +05301970 struct hif_softc *scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001971 struct CE_handle *ce_hdl;
1972 uint32_t buf_sz;
1973 struct HIF_CE_state *hif_state;
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301974 qdf_nbuf_t netbuf;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301975 qdf_dma_addr_t CE_data;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001976 void *per_CE_context;
1977
1978 buf_sz = pipe_info->buf_sz;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001979 /* Unused Copy Engine */
1980 if (buf_sz == 0)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001981 return;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001982
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001983
1984 hif_state = pipe_info->HIF_CE_state;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001985 if (!hif_state->started)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001986 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001987
Komal Seelam02cf2f82016-02-22 20:44:25 +05301988 scn = HIF_GET_SOFTC(hif_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001989 ce_hdl = pipe_info->ce_hdl;
1990
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001991 if (scn->qdf_dev == NULL)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001992 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001993 while (ce_revoke_recv_next
1994 (ce_hdl, &per_CE_context, (void **)&netbuf,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301995 &CE_data) == QDF_STATUS_SUCCESS) {
Govind Singhcaa850e2017-04-20 16:41:36 +05301996 if (netbuf) {
1997 qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
1998 QDF_DMA_FROM_DEVICE);
1999 qdf_nbuf_free(netbuf);
2000 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002001 }
2002}
2003
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002004static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002005{
2006 struct CE_handle *ce_hdl;
2007 struct HIF_CE_state *hif_state;
Komal Seelam644263d2016-02-22 20:45:49 +05302008 struct hif_softc *scn;
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302009 qdf_nbuf_t netbuf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002010 void *per_CE_context;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302011 qdf_dma_addr_t CE_data;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002012 unsigned int nbytes;
2013 unsigned int id;
2014 uint32_t buf_sz;
2015 uint32_t toeplitz_hash_result;
2016
2017 buf_sz = pipe_info->buf_sz;
2018 if (buf_sz == 0) {
2019 /* Unused Copy Engine */
2020 return;
2021 }
2022
2023 hif_state = pipe_info->HIF_CE_state;
2024 if (!hif_state->started) {
2025 return;
2026 }
2027
Komal Seelam02cf2f82016-02-22 20:44:25 +05302028 scn = HIF_GET_SOFTC(hif_state);
2029
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002030 ce_hdl = pipe_info->ce_hdl;
2031
2032 while (ce_cancel_send_next
2033 (ce_hdl, &per_CE_context,
2034 (void **)&netbuf, &CE_data, &nbytes,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302035 &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002036 if (netbuf != CE_SENDLIST_ITEM_CTXT) {
2037 /*
2038 * Packets enqueued by htt_h2t_ver_req_msg() and
2039 * htt_h2t_rx_ring_cfg_msg_ll() have already been
2040 * freed in htt_htc_misc_pkt_pool_free() in
2041 * wlantl_close(), so do not free them here again
Houston Hoffman29573d92015-10-20 17:49:44 -07002042 * by checking whether it's the endpoint
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002043 * which they are queued in.
2044 */
Nirav Shahd7f91592016-04-21 14:18:43 +05302045 if (id == scn->htc_htt_tx_endpoint)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002046 return;
Nirav Shahd7f91592016-04-21 14:18:43 +05302047 /* Indicate the completion to higher
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002048 * layer to free the buffer
2049 */
2050 if (pipe_info->pipe_callbacks.txCompletionHandler)
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05302051 pipe_info->pipe_callbacks.
2052 txCompletionHandler(pipe_info->
2053 pipe_callbacks.Context,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002054 netbuf, id, toeplitz_hash_result);
2055 }
2056 }
2057}
2058
2059/*
2060 * Cleanup residual buffers for device shutdown:
2061 * buffers that were enqueued for receive
2062 * buffers that were to be sent
2063 * Note: Buffers that had completed but which were
2064 * not yet processed are on a completion queue. They
2065 * are handled when the completion thread shuts down.
2066 */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002067static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002068{
2069 int pipe_num;
Komal Seelam644263d2016-02-22 20:45:49 +05302070 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffman85925072016-05-06 17:02:18 -07002071 struct CE_state *ce_state;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002072
Komal Seelam02cf2f82016-02-22 20:44:25 +05302073 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002074 struct HIF_CE_pipe_info *pipe_info;
2075
Houston Hoffman85925072016-05-06 17:02:18 -07002076 ce_state = scn->ce_id_to_state[pipe_num];
2077 if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2078 ((ce_state->htt_tx_data) ||
2079 (ce_state->htt_rx_data))) {
2080 continue;
2081 }
2082
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002083 pipe_info = &hif_state->pipe_info[pipe_num];
2084 hif_recv_buffer_cleanup_on_pipe(pipe_info);
2085 hif_send_buffer_cleanup_on_pipe(pipe_info);
2086 }
2087}
2088
Komal Seelam5584a7c2016-02-24 19:22:48 +05302089void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002090{
Komal Seelam644263d2016-02-22 20:45:49 +05302091 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05302092 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Komal Seelam644263d2016-02-22 20:45:49 +05302093
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002094 hif_buffer_cleanup(hif_state);
2095}
2096
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08002097static void hif_destroy_oom_work(struct hif_softc *scn)
2098{
2099 struct CE_state *ce_state;
2100 int ce_id;
2101
2102 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2103 ce_state = scn->ce_id_to_state[ce_id];
2104 if (ce_state)
2105 qdf_destroy_work(scn->qdf_dev,
2106 &ce_state->oom_allocation_work);
2107 }
2108}
2109
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05302110void hif_ce_stop(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002111{
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05302112 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002113 int pipe_num;
2114
Houston Hoffmana69581e2016-11-14 18:03:19 -08002115 /*
2116 * before cleaning up any memory, ensure irq &
2117 * bottom half contexts will not be re-entered
2118 */
Houston Hoffman7622cd32017-04-06 14:17:49 -07002119 hif_disable_isr(&scn->osc);
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08002120 hif_destroy_oom_work(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002121 scn->hif_init_done = false;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002122
2123 /*
2124 * At this point, asynchronous threads are stopped,
2125 * The Target should not DMA nor interrupt, Host code may
2126 * not initiate anything more. So we just need to clean
2127 * up Host-side state.
2128 */
2129
2130 if (scn->athdiag_procfs_inited) {
2131 athdiag_procfs_remove();
2132 scn->athdiag_procfs_inited = false;
2133 }
2134
2135 hif_buffer_cleanup(hif_state);
2136
2137 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2138 struct HIF_CE_pipe_info *pipe_info;
Poddar, Siddarth725e9f52017-07-19 15:18:28 +05302139 struct CE_attr attr;
2140 struct CE_handle *ce_diag = hif_state->ce_diag;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002141
2142 pipe_info = &hif_state->pipe_info[pipe_num];
2143 if (pipe_info->ce_hdl) {
Poddar, Siddarth725e9f52017-07-19 15:18:28 +05302144 if (pipe_info->ce_hdl != ce_diag) {
2145 attr = hif_state->host_ce_config[pipe_num];
2146 if (attr.src_nentries)
2147 qdf_spinlock_destroy(&pipe_info->
2148 completion_freeq_lock);
2149 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002150 ce_fini(pipe_info->ce_hdl);
2151 pipe_info->ce_hdl = NULL;
2152 pipe_info->buf_sz = 0;
Poddar, Siddarth725e9f52017-07-19 15:18:28 +05302153 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002154 }
2155 }
2156
2157 if (hif_state->sleep_timer_init) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302158 qdf_timer_stop(&hif_state->sleep_timer);
2159 qdf_timer_free(&hif_state->sleep_timer);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002160 hif_state->sleep_timer_init = false;
2161 }
2162
2163 hif_state->started = false;
2164}
2165
Houston Hoffman748e1a62017-03-30 17:20:42 -07002166
Houston Hoffman854e67f2016-03-14 21:11:39 -07002167/**
2168 * hif_get_target_ce_config() - get copy engine configuration
2169 * @target_ce_config_ret: basic copy engine configuration
2170 * @target_ce_config_sz_ret: size of the basic configuration in bytes
2171 * @target_service_to_ce_map_ret: service mapping for the copy engines
2172 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
2173 * @target_shadow_reg_cfg_ret: shadow register configuration
2174 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
2175 *
 2176 * Provides an accessor to these values outside of this file.
 2177 * Currently they are stored in static pointers to const sections, and
 2178 * there are multiple configurations that are selected from at compile time.
 2179 * Runtime selection would need to consider mode, target type and bus type.
2180 *
2181 * Return: return by parameter.
2182 */
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302183void hif_get_target_ce_config(struct hif_softc *scn,
2184 struct CE_pipe_config **target_ce_config_ret,
Houston Hoffman748e1a62017-03-30 17:20:42 -07002185 uint32_t *target_ce_config_sz_ret,
Houston Hoffman854e67f2016-03-14 21:11:39 -07002186 struct service_to_pipe **target_service_to_ce_map_ret,
Houston Hoffman748e1a62017-03-30 17:20:42 -07002187 uint32_t *target_service_to_ce_map_sz_ret,
Houston Hoffman854e67f2016-03-14 21:11:39 -07002188 struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
Houston Hoffman748e1a62017-03-30 17:20:42 -07002189 uint32_t *shadow_cfg_sz_ret)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002190{
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302191 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2192
2193 *target_ce_config_ret = hif_state->target_ce_config;
2194 *target_ce_config_sz_ret = hif_state->target_ce_config_sz;
Houston Hoffman748e1a62017-03-30 17:20:42 -07002195
2196 hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
2197 target_service_to_ce_map_sz_ret);
Houston Hoffman854e67f2016-03-14 21:11:39 -07002198
2199 if (target_shadow_reg_cfg_ret)
2200 *target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
2201
2202 if (shadow_cfg_sz_ret)
2203 *shadow_cfg_sz_ret = shadow_cfg_sz;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002204}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002205
Houston Hoffmanf60a3482017-01-31 10:45:07 -08002206#ifdef CONFIG_SHADOW_V2
Houston Hoffman403c2df2017-01-27 12:51:15 -08002207static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
Houston Hoffman5141f9d2017-01-05 10:49:17 -08002208{
2209 int i;
2210 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2211 "%s: num_config %d\n", __func__, cfg->num_shadow_reg_v2_cfg);
2212
2213 for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
2214 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2215 "%s: i %d, val %x\n", __func__, i,
2216 cfg->shadow_reg_v2_cfg[i].addr);
2217 }
2218}
2219
Houston Hoffmanf60a3482017-01-31 10:45:07 -08002220#else
2221static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2222{
2223 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2224 "%s: CONFIG_SHADOW_V2 not defined\n", __func__);
2225}
2226#endif
2227
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002228/**
2229 * hif_wlan_enable(): call the platform driver to enable wlan
Komal Seelambd7c51d2016-02-24 10:27:30 +05302230 * @scn: HIF Context
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002231 *
2232 * This function passes the con_mode and CE configuration to
2233 * platform driver to enable wlan.
2234 *
Houston Hoffman108da402016-03-14 21:11:24 -07002235 * Return: linux error code
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002236 */
Houston Hoffman108da402016-03-14 21:11:24 -07002237int hif_wlan_enable(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002238{
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002239 struct pld_wlan_enable_cfg cfg;
2240 enum pld_driver_mode mode;
Komal Seelambd7c51d2016-02-24 10:27:30 +05302241 uint32_t con_mode = hif_get_conparam(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002242
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302243 hif_get_target_ce_config(scn,
2244 (struct CE_pipe_config **)&cfg.ce_tgt_cfg,
Houston Hoffman854e67f2016-03-14 21:11:39 -07002245 &cfg.num_ce_tgt_cfg,
2246 (struct service_to_pipe **)&cfg.ce_svc_cfg,
2247 &cfg.num_ce_svc_pipe_cfg,
2248 (struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
2249 &cfg.num_shadow_reg_cfg);
2250
2251 /* translate from structure size to array size */
2252 cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
2253 cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
2254 cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002255
Houston Hoffman5141f9d2017-01-05 10:49:17 -08002256 hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
2257 &cfg.num_shadow_reg_v2_cfg);
2258
2259 hif_print_hal_shadow_register_cfg(&cfg);
2260
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302261 if (QDF_GLOBAL_FTM_MODE == con_mode)
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002262 mode = PLD_FTM;
Houston Hoffman75ef5a52016-04-14 17:15:49 -07002263 else if (QDF_IS_EPPING_ENABLED(con_mode))
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002264 mode = PLD_EPPING;
Peng Xu7b962532015-10-02 17:17:03 -07002265 else
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002266 mode = PLD_MISSION;
Peng Xu7b962532015-10-02 17:17:03 -07002267
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07002268 if (BYPASS_QMI)
2269 return 0;
2270 else
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002271 return pld_wlan_enable(scn->qdf_dev->dev, &cfg,
2272 mode, QWLAN_VERSIONSTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002273}
2274
Houston Hoffman75ef5a52016-04-14 17:15:49 -07002275#define CE_EPPING_USES_IRQ true
2276
Houston Hoffman108da402016-03-14 21:11:24 -07002277/**
2278 * hif_ce_prepare_config() - load the correct static tables.
2279 * @scn: hif context
2280 *
2281 * Epping uses different static attribute tables than mission mode.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002282 */
Houston Hoffman108da402016-03-14 21:11:24 -07002283void hif_ce_prepare_config(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002284{
Komal Seelambd7c51d2016-02-24 10:27:30 +05302285 uint32_t mode = hif_get_conparam(scn);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002286 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2287 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302288 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002289
Houston Hoffman10fedfc2017-01-23 15:23:09 -08002290 hif_state->ce_services = ce_services_attach(scn);
2291
Houston Hoffman710af5a2016-11-22 21:59:03 -08002292 scn->ce_count = HOST_CE_COUNT;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002293 /* if epping is enabled we need to use the epping configuration. */
Houston Hoffman75ef5a52016-04-14 17:15:49 -07002294 if (QDF_IS_EPPING_ENABLED(mode)) {
2295 if (CE_EPPING_USES_IRQ)
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302296 hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002297 else
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302298 hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
2299 hif_state->target_ce_config = target_ce_config_wlan_epping;
2300 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
Vishwajith Upendra70efc752016-04-18 11:23:49 -07002301 target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
2302 shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002303 }
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002304
2305 switch (tgt_info->target_type) {
2306 default:
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302307 hif_state->host_ce_config = host_ce_config_wlan;
2308 hif_state->target_ce_config = target_ce_config_wlan;
2309 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002310 break;
2311 case TARGET_TYPE_AR900B:
2312 case TARGET_TYPE_QCA9984:
2313 case TARGET_TYPE_IPQ4019:
2314 case TARGET_TYPE_QCA9888:
Venkateswara Swamy Bandaru5432c1b2016-10-12 19:00:40 +05302315 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
2316 hif_state->host_ce_config =
2317 host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
2318 } else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
2319 hif_state->host_ce_config =
2320 host_lowdesc_ce_cfg_wlan_ar900b;
2321 } else {
2322 hif_state->host_ce_config = host_ce_config_wlan_ar900b;
2323 }
2324
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302325 hif_state->target_ce_config = target_ce_config_wlan_ar900b;
2326 hif_state->target_ce_config_sz =
2327 sizeof(target_ce_config_wlan_ar900b);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002328
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002329 break;
2330
2331 case TARGET_TYPE_AR9888:
2332 case TARGET_TYPE_AR9888V2:
Venkateswara Swamy Bandaru5432c1b2016-10-12 19:00:40 +05302333 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
2334 hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888;
2335 } else {
2336 hif_state->host_ce_config = host_ce_config_wlan_ar9888;
2337 }
2338
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302339 hif_state->target_ce_config = target_ce_config_wlan_ar9888;
2340 hif_state->target_ce_config_sz =
2341 sizeof(target_ce_config_wlan_ar9888);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002342
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002343 break;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07002344
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05302345 case TARGET_TYPE_QCA8074:
Karunakar Dasinenif61cb072016-09-29 11:50:45 -07002346 if (scn->bus_type == QDF_BUS_TYPE_PCI) {
2347 hif_state->host_ce_config =
2348 host_ce_config_wlan_qca8074_pci;
2349 hif_state->target_ce_config =
2350 target_ce_config_wlan_qca8074_pci;
2351 hif_state->target_ce_config_sz =
2352 sizeof(target_ce_config_wlan_qca8074_pci);
2353 } else {
2354 hif_state->host_ce_config = host_ce_config_wlan_qca8074;
2355 hif_state->target_ce_config =
2356 target_ce_config_wlan_qca8074;
2357 hif_state->target_ce_config_sz =
2358 sizeof(target_ce_config_wlan_qca8074);
2359 }
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05302360 break;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07002361 case TARGET_TYPE_QCA6290:
2362 hif_state->host_ce_config = host_ce_config_wlan_qca6290;
2363 hif_state->target_ce_config = target_ce_config_wlan_qca6290;
2364 hif_state->target_ce_config_sz =
2365 sizeof(target_ce_config_wlan_qca6290);
Houston Hoffman748e1a62017-03-30 17:20:42 -07002366
Houston Hoffman710af5a2016-11-22 21:59:03 -08002367 scn->ce_count = QCA_6290_CE_COUNT;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07002368 break;
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002369 }
Houston Hoffman108da402016-03-14 21:11:24 -07002370}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002371
Houston Hoffman108da402016-03-14 21:11:24 -07002372/**
2373 * hif_ce_open() - do ce specific allocations
2374 * @hif_sc: pointer to hif context
2375 *
 2376 * Return: QDF_STATUS_SUCCESS for success or QDF_STATUS_E_NOMEM
2377 */
2378QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
2379{
2380 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002381
Venkateswara Swamy Bandaru9fd9af02016-09-20 20:27:31 +05302382 qdf_spinlock_create(&hif_state->irq_reg_lock);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302383 qdf_spinlock_create(&hif_state->keep_awake_lock);
Houston Hoffman108da402016-03-14 21:11:24 -07002384 return QDF_STATUS_SUCCESS;
2385}
2386
2387/**
2388 * hif_ce_close() - do ce specific free
2389 * @hif_sc: pointer to hif context
2390 */
2391void hif_ce_close(struct hif_softc *hif_sc)
2392{
Venkateswara Swamy Bandaru9fd9af02016-09-20 20:27:31 +05302393 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2394
2395 qdf_spinlock_destroy(&hif_state->irq_reg_lock);
Poddar, Siddarth725e9f52017-07-19 15:18:28 +05302396 qdf_spinlock_destroy(&hif_state->keep_awake_lock);
Houston Hoffman108da402016-03-14 21:11:24 -07002397}
2398
2399/**
2400 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
2401 * @hif_sc: hif context
2402 *
2403 * uses state variables to support cleaning up when hif_config_ce fails.
2404 */
2405void hif_unconfig_ce(struct hif_softc *hif_sc)
2406{
2407 int pipe_num;
2408 struct HIF_CE_pipe_info *pipe_info;
2409 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2410
2411 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
2412 pipe_info = &hif_state->pipe_info[pipe_num];
2413 if (pipe_info->ce_hdl) {
2414 ce_unregister_irq(hif_state, (1 << pipe_num));
Houston Hoffman108da402016-03-14 21:11:24 -07002415 ce_fini(pipe_info->ce_hdl);
2416 pipe_info->ce_hdl = NULL;
2417 pipe_info->buf_sz = 0;
Houston Hoffman03f46572016-12-12 12:53:56 -08002418 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
Houston Hoffman108da402016-03-14 21:11:24 -07002419 }
2420 }
Houston Hoffman108da402016-03-14 21:11:24 -07002421 if (hif_sc->athdiag_procfs_inited) {
2422 athdiag_procfs_remove();
2423 hif_sc->athdiag_procfs_inited = false;
2424 }
2425}
2426
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07002427#ifdef CONFIG_BYPASS_QMI
2428#define FW_SHARED_MEM (2 * 1024 * 1024)
2429
2430/**
2431 * hif_post_static_buf_to_target() - post static buffer to WLAN FW
2432 * @scn: pointer to HIF structure
2433 *
2434 * WLAN FW needs 2MB memory from DDR when QMI is disabled.
2435 *
2436 * Return: void
2437 */
2438static void hif_post_static_buf_to_target(struct hif_softc *scn)
2439{
Hardik Kantilal Patelc5dc5f22016-04-21 14:11:33 -07002440 void *target_va;
2441 phys_addr_t target_pa;
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07002442
Hardik Kantilal Patelc5dc5f22016-04-21 14:11:33 -07002443 target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
2444 FW_SHARED_MEM, &target_pa);
2445 if (NULL == target_va) {
2446 HIF_TRACE("Memory allocation failed could not post target buf");
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07002447 return;
2448 }
Hardik Kantilal Patelc5dc5f22016-04-21 14:11:33 -07002449 hif_write32_mb(scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
2450 HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07002451}
2452#else
2453static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
2454{
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07002455}
2456#endif
2457
Houston Hoffman579c02f2017-08-02 01:57:38 -07002458static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
2459 bool wait_for_it)
2460{
2461 /* todo */
2462 return 0;
2463}
2464
Houston Hoffman108da402016-03-14 21:11:24 -07002465/**
2466 * hif_config_ce() - configure copy engines
2467 * @scn: hif context
2468 *
2469 * Prepares fw, copy engine hardware and host sw according
2470 * to the attributes selected by hif_ce_prepare_config.
2471 *
2472 * also calls athdiag_procfs_init
2473 *
 2474 * Return: 0 for success, nonzero for failure.
2475 */
2476int hif_config_ce(struct hif_softc *scn)
2477{
2478 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2479 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2480 struct HIF_CE_pipe_info *pipe_info;
2481 int pipe_num;
Houston Hoffman85925072016-05-06 17:02:18 -07002482 struct CE_state *ce_state;
Houston Hoffman108da402016-03-14 21:11:24 -07002483#ifdef ADRASTEA_SHADOW_REGISTERS
2484 int i;
2485#endif
2486 QDF_STATUS rv = QDF_STATUS_SUCCESS;
2487
2488 scn->notice_send = true;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002489
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07002490 hif_post_static_buf_to_target(scn);
2491
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002492 hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
Houston Hoffman108da402016-03-14 21:11:24 -07002493
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002494 hif_config_rri_on_ddr(scn);
2495
Houston Hoffman579c02f2017-08-02 01:57:38 -07002496 if (ce_srng_based(scn))
2497 scn->bus_ops.hif_target_sleep_state_adjust =
2498 &hif_srng_sleep_state_adjust;
2499
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002500 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2501 struct CE_attr *attr;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002502
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002503 pipe_info = &hif_state->pipe_info[pipe_num];
2504 pipe_info->pipe_num = pipe_num;
2505 pipe_info->HIF_CE_state = hif_state;
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302506 attr = &hif_state->host_ce_config[pipe_num];
Karunakar Dasinenif61cb072016-09-29 11:50:45 -07002507
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002508 pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
Houston Hoffman85925072016-05-06 17:02:18 -07002509 ce_state = scn->ce_id_to_state[pipe_num];
Houston Hoffman03f46572016-12-12 12:53:56 -08002510 qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302511 QDF_ASSERT(pipe_info->ce_hdl != NULL);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002512 if (pipe_info->ce_hdl == NULL) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302513 rv = QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002514 A_TARGET_ACCESS_UNLIKELY(scn);
2515 goto err;
2516 }
2517
Dhanashri Atre991ee4d2017-05-03 19:03:10 -07002518 ce_state->lro_data = qdf_lro_init();
2519
Kiran Venkatappae17e3b62017-02-10 16:31:49 +05302520 if (attr->flags & CE_ATTR_DIAG) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002521 /* Reserve the ultimate CE for
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002522 * Diagnostic Window support
2523 */
			hif_state->ce_diag = pipe_info->ce_hdl;
			continue;
		}

		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
		    (ce_state->htt_rx_data))
			continue;

		pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
		if (attr->dest_nentries > 0) {
			atomic_set(&pipe_info->recv_bufs_needed,
				   init_buffer_count(attr->dest_nentries - 1));
			/* SRNG based CE has one entry less */
			if (ce_srng_based(scn))
				atomic_dec(&pipe_info->recv_bufs_needed);
		} else {
			atomic_set(&pipe_info->recv_bufs_needed, 0);
		}
		ce_tasklet_init(hif_state, (1 << pipe_num));
		ce_register_irq(hif_state, (1 << pipe_num));
	}

	if (athdiag_procfs_init(scn) != 0) {
		A_TARGET_ACCESS_UNLIKELY(scn);
		goto err;
	}
	scn->athdiag_procfs_inited = true;

	HIF_DBG("%s: ce_init done", __func__);

	init_tasklet_workers(hif_hdl);

	HIF_DBG("%s: X, ret = %d", __func__, rv);

#ifdef ADRASTEA_SHADOW_REGISTERS
	HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__);
	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
		HIF_DBG("%s Shadow Register%d is mapped to address %x",
			__func__, i,
			(A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
	}
#endif

	/* 0 when rv == QDF_STATUS_SUCCESS, non-zero otherwise */
	return rv != QDF_STATUS_SUCCESS;

err:
	/* Failure, so clean up */
	hif_unconfig_ce(scn);
	HIF_TRACE("%s: X, ret = %d", __func__, rv);
	/* always non-zero: report failure to the caller */
	return 1;
}
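
/*
 * Illustrative sketch (not compiled): how a bus-layer caller might consume
 * the non-zero-on-failure return convention above. The names of the
 * enclosing function being configured (hif_config_ce()) and of the caller
 * below are assumptions for illustration.
 */
#if 0
static int example_bus_configure(struct hif_softc *scn)
{
	if (hif_config_ce(scn)) {
		HIF_ERROR("%s: CE configuration failed", __func__);
		return -EIO;
	}
	return 0;
}
#endif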
#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
 * @hif_ctx: hif opaque context
 * @handler: Callback function
 * @context: handle for callback function
 *
 * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
 */
int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
				fastpath_msg_handler handler,
				void *context)
{
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	int i;

	if (!scn) {
		HIF_ERROR("%s: scn is NULL", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	if (!scn->fastpath_mode_on) {
		HIF_WARN("%s: Fastpath mode disabled", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	for (i = 0; i < scn->ce_count; i++) {
		ce_state = scn->ce_id_to_state[i];
		if (ce_state->htt_rx_data) {
			ce_state->fastpath_handler = handler;
			ce_state->context = context;
		}
	}

	return QDF_STATUS_SUCCESS;
}
#endif
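
/*
 * Illustrative sketch (not compiled): registering a fastpath handler from a
 * datapath client. The handler signature shown is an assumption for
 * illustration; the real prototype is the fastpath_msg_handler typedef.
 */
#if 0
static void example_htt_rx_fastpath(void *context, qdf_nbuf_t *cmpl_msdus,
				    uint32_t num_cmpls)
{
	/* consume HTT messages delivered straight from the CE tasklet */
}

static void example_enable_fastpath(struct hif_opaque_softc *hif_ctx,
				    void *htt_pdev)
{
	if (hif_ce_fastpath_cb_register(hif_ctx, example_htt_rx_fastpath,
					htt_pdev) != QDF_STATUS_SUCCESS)
		HIF_ERROR("%s: fastpath registration failed", __func__);
}
#endif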
2614
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002615#ifdef IPA_OFFLOAD
/**
 * hif_ce_ipa_get_ce_resource() - get uc resource on hif
 * @scn: bus context
 * @ce_sr_base_paddr: copyengine source ring base physical address
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * When the IPA micro controller data path offload feature is enabled,
 * HIF releases the copy engine resource information to the IPA UC,
 * which then accesses the hardware using that information.
 *
 * Return: None
 */
void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
				qdf_dma_addr_t *ce_sr_base_paddr,
				uint32_t *ce_sr_ring_size,
				qdf_dma_addr_t *ce_reg_paddr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;

	ce_ipa_get_resource(ce_hdl, ce_sr_base_paddr, ce_sr_ring_size,
			    ce_reg_paddr);
}
#endif /* IPA_OFFLOAD */
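
/*
 * Illustrative sketch (not compiled): how an IPA setup path might pull the
 * CE resources before handing them to the IPA micro controller. The
 * consumer function and variable names are assumptions for illustration.
 */
#if 0
static void example_ipa_uc_setup(struct hif_softc *scn)
{
	qdf_dma_addr_t ce_sr_base_paddr, ce_reg_paddr;
	uint32_t ce_sr_ring_size;

	hif_ce_ipa_get_ce_resource(scn, &ce_sr_base_paddr,
				   &ce_sr_ring_size, &ce_reg_paddr);
	/* hand ce_sr_base_paddr/ce_sr_ring_size/ce_reg_paddr to the IPA UC */
}
#endif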

#ifdef ADRASTEA_SHADOW_REGISTERS

/*
 * Current shadow register config
 *
 * -----------------------------------------------------------
 * Shadow Register      |     CE   |    src/dst write index
 * -----------------------------------------------------------
 *     0            |     0    |           src
 *     1     No Config - Doesn't point to anything
 *     2     No Config - Doesn't point to anything
 *     3            |     3    |           src
 *     4            |     4    |           src
 *     5            |     5    |           src
 *     6     No Config - Doesn't point to anything
 *     7            |     7    |           src
 *     8     No Config - Doesn't point to anything
 *     9     No Config - Doesn't point to anything
 *     10    No Config - Doesn't point to anything
 *     11    No Config - Doesn't point to anything
 * -----------------------------------------------------------
 *     12    No Config - Doesn't point to anything
 *     13           |     1    |           dst
 *     14           |     2    |           dst
 *     15    No Config - Doesn't point to anything
 *     16    No Config - Doesn't point to anything
 *     17           |     5    |           dst
 *     18    No Config - Doesn't point to anything
 *     19           |     7    |           dst
 *     20           |     8    |           dst
 *     21           |     9    |           dst
 *     22           |    10    |           dst
 *     23           |    11    |           dst
 * -----------------------------------------------------------
 *
 *
 * ToDo - Move shadow register config to following in the future
 * This helps free up a block of shadow registers towards the end.
 * Can be used for other purposes
 *
 * -----------------------------------------------------------
 * Shadow Register      |     CE   |    src/dst write index
 * -----------------------------------------------------------
 *     0            |     0    |           src
 *     1            |     3    |           src
 *     2            |     4    |           src
 *     3            |     5    |           src
 *     4            |     7    |           src
 * -----------------------------------------------------------
 *     5            |     1    |           dst
 *     6            |     2    |           dst
 *     7            |     7    |           dst
 *     8            |     8    |           dst
 * -----------------------------------------------------------
 *     9     No Config - Doesn't point to anything
 *     10    No Config - Doesn't point to anything
 *     11    No Config - Doesn't point to anything
 *     12    No Config - Doesn't point to anything
 *     13    No Config - Doesn't point to anything
 *     14    No Config - Doesn't point to anything
 *     15    No Config - Doesn't point to anything
 *     16    No Config - Doesn't point to anything
 *     17    No Config - Doesn't point to anything
 *     18    No Config - Doesn't point to anything
 *     19    No Config - Doesn't point to anything
 *     20    No Config - Doesn't point to anything
 *     21    No Config - Doesn't point to anything
 *     22    No Config - Doesn't point to anything
 *     23    No Config - Doesn't point to anything
 * -----------------------------------------------------------
 */

u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 0:
		addr = SHADOW_VALUE0;
		break;
	case 3:
		addr = SHADOW_VALUE3;
		break;
	case 4:
		addr = SHADOW_VALUE4;
		break;
	case 5:
		addr = SHADOW_VALUE5;
		break;
	case 7:
		addr = SHADOW_VALUE7;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}
	return addr;
}

u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 1:
		addr = SHADOW_VALUE13;
		break;
	case 2:
		addr = SHADOW_VALUE14;
		break;
	case 5:
		addr = SHADOW_VALUE17;
		break;
	case 7:
		addr = SHADOW_VALUE19;
		break;
	case 8:
		addr = SHADOW_VALUE20;
		break;
	case 9:
		addr = SHADOW_VALUE21;
		break;
	case 10:
		addr = SHADOW_VALUE22;
		break;
	case 11:
		addr = SHADOW_VALUE23;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}

	return addr;
}
#endif
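
/*
 * Illustrative sketch (not compiled): routing a source-ring write index
 * update through the shadow block rather than the direct CE register.
 * A_TARGET_WRITE is used elsewhere in this file; the wrapper below is an
 * assumption for illustration only.
 */
#if 0
static inline void example_shadow_src_wr_idx_set(struct hif_softc *scn,
						 u32 ctrl_addr,
						 unsigned int write_index)
{
	A_TARGET_WRITE(scn, shadow_sr_wr_ind_addr(scn, ctrl_addr),
		       write_index);
}
#endif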

#if defined(FEATURE_LRO)
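/**
 * hif_ce_get_lro_ctx() - get the LRO context set up for a copy engine
 * @hif_hdl: hif opaque context
 * @ctx_id: copy engine id whose LRO context is needed
 *
 * Return: the opaque LRO context allocated by qdf_lro_init() for this CE
 */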
void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
{
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	ce_state = scn->ce_id_to_state[ctx_id];

	return ce_state->lro_data;
}
#endif

/**
 * hif_map_service_to_pipe() - returns the ce ids pertaining to
 * this service
 * @hif_hdl: hif_opaque_softc pointer.
 * @svc_id: Service ID for which the mapping is needed.
 * @ul_pipe: address of the container in which ul pipe is returned.
 * @dl_pipe: address of the container in which dl pipe is returned.
 * @ul_is_polled: address of the container in which a bool
 *			indicating if the UL CE for this service
 *			is polled is returned.
 * @dl_is_polled: address of the container in which a bool
 *			indicating if the DL CE for this service
 *			is polled is returned.
 *
 * Return: Indicates whether the service has been found in the table.
 *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
 *         There will be warning logs if either leg has not been updated
 *         because it missed the entry in the table (but this is not an err).
 */
int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
			    uint8_t *ul_pipe, uint8_t *dl_pipe,
			    int *ul_is_polled, int *dl_is_polled)
{
	int status = QDF_STATUS_E_INVAL;
	unsigned int i;
	struct service_to_pipe element;
	struct service_to_pipe *tgt_svc_map_to_use;
	uint32_t sz_tgt_svc_map_to_use;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	bool dl_updated = false;
	bool ul_updated = false;

	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
				       &sz_tgt_svc_map_to_use);

	*dl_is_polled = 0;  /* polling for received messages not supported */

	for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
		if (element.service_id == svc_id) {
			if (element.pipedir == PIPEDIR_OUT) {
				*ul_pipe = element.pipenum;
				*ul_is_polled =
					(hif_state->host_ce_config[*ul_pipe].flags &
					 CE_ATTR_DISABLE_INTR) != 0;
				ul_updated = true;
			} else if (element.pipedir == PIPEDIR_IN) {
				*dl_pipe = element.pipenum;
				dl_updated = true;
			}
			status = QDF_STATUS_SUCCESS;
		}
	}
	if (ul_updated == false)
		HIF_INFO("%s: ul pipe is NOT updated for service %d",
			 __func__, svc_id);
	if (dl_updated == false)
		HIF_INFO("%s: dl pipe is NOT updated for service %d",
			 __func__, svc_id);

	return status;
}
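
/*
 * For an in-file example of consuming this mapping, see hif_get_wake_ce_id()
 * below, which resolves the wake CE from the HTC_CTRL_RSVD_SVC DL pipe.
 */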

#ifdef SHADOW_REG_DEBUG
inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
					       uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, srri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);

	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != srri_from_ddr) {
		HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
			  __func__, srri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return srri_from_ddr;
}

inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
						uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, drri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);

	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != drri_from_ddr) {
		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
			  drri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return drri_from_ddr;
}
#endif

#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_get_src_ring_read_index(): Called to get the SRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the SRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based SRRI
 *
 * Return: SRRI
 */
inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
						uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
	if (attr.flags & CE_ATTR_DISABLE_INTR)
		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	else
		return A_TARGET_READ(scn,
				     (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
}

/**
 * hif_get_dst_ring_read_index(): Called to get the DRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the DRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based DRRI
 *
 * Return: DRRI
 */
inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
						uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];

	if (attr.flags & CE_ATTR_DISABLE_INTR)
		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	else
		return A_TARGET_READ(scn,
				     (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
}

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This function allocates non cached memory on ddr and sends
 * the physical address of this memory to the CE hardware. The
 * hardware updates the RRI on this particular location.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
	unsigned int i;
	qdf_dma_addr_t paddr_rri_on_ddr;
	uint32_t high_paddr, low_paddr;

	scn->vaddr_rri_on_ddr =
		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
		scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
		&paddr_rri_on_ddr);

	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
	low_paddr  = BITS0_TO_31(paddr_rri_on_ddr);
	high_paddr = BITS32_TO_35(paddr_rri_on_ddr);

	HIF_DBG("%s using srri and drri from DDR", __func__);

	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);

	for (i = 0; i < CE_COUNT; i++)
		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));

	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));
}
#else

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This is a dummy implementation for platforms that don't
 * support this functionality.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
}
#endif

/**
 * hif_dump_ce_registers() - dump ce registers
 * @scn: hif_softc pointer.
 *
 * Output the copy engine registers
 *
 * Return: 0 for success or error code
 */
int hif_dump_ce_registers(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
	uint16_t i;
	QDF_STATUS status;

	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
		if (scn->ce_id_to_state[i] == NULL) {
			HIF_DBG("CE%d not used.", i);
			continue;
		}

		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
					   (uint8_t *) &ce_reg_values[0],
					   ce_reg_word_size * sizeof(uint32_t));

		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("Dumping CE register failed!");
			return -EACCES;
		}
		HIF_ERROR("CE%d=>\n", i);
		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
				   (uint8_t *) &ce_reg_values[0],
				   ce_reg_word_size * sizeof(uint32_t));
		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d\n", (ce_reg_address
			  + SR_WR_INDEX_ADDRESS),
			  ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d\n", (ce_reg_address
			  + CURRENT_SRRI_ADDRESS),
			  ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d\n", (ce_reg_address
			  + DST_WR_INDEX_ADDRESS),
			  ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d\n", (ce_reg_address
			  + CURRENT_DRRI_ADDRESS),
			  ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
		qdf_print("---\n");
	}
	return 0;
}
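
/*
 * Illustrative sketch (not compiled): a typical caller dumps the CE
 * registers while handling a target failure. The surrounding recovery hook
 * is an assumption for illustration.
 */
#if 0
static void example_on_target_assert(struct hif_softc *scn)
{
	if (hif_dump_ce_registers(scn))
		HIF_ERROR("%s: CE register dump failed", __func__);
}
#endif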

#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
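/**
 * hif_get_addl_pipe_info() - collect additional info about a pipe's CE rings
 * @osc: hif opaque context
 * @hif_info: container that is filled in with the ring state
 * @pipe: pipe (copy engine) id to report on
 *
 * Copies the source and destination ring state (entries, indices, base
 * addresses) of the given pipe into @hif_info, along with the PCI memory
 * base and the CE control address.
 *
 * Return: the @hif_info pointer that was passed in
 */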
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct CE_ring_state *dest_ring = ce_state->dest_ring;

	if (src_ring) {
		hif_info->ul_pipe.nentries = src_ring->nentries;
		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
		hif_info->ul_pipe.sw_index = src_ring->sw_index;
		hif_info->ul_pipe.write_index = src_ring->write_index;
		hif_info->ul_pipe.hw_index = src_ring->hw_index;
		hif_info->ul_pipe.base_addr_CE_space =
			src_ring->base_addr_CE_space;
		hif_info->ul_pipe.base_addr_owner_space =
			src_ring->base_addr_owner_space;
	}

	if (dest_ring) {
		hif_info->dl_pipe.nentries = dest_ring->nentries;
		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
		hif_info->dl_pipe.write_index = dest_ring->write_index;
		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
		hif_info->dl_pipe.base_addr_CE_space =
			dest_ring->base_addr_CE_space;
		hif_info->dl_pipe.base_addr_owner_space =
			dest_ring->base_addr_owner_space;
	}

	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
	hif_info->ctrl_addr = ce_state->ctrl_addr;

	return hif_info;
}

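/**
 * hif_set_nss_wifiol_mode() - set the NSS wifi offload mode
 * @osc: hif opaque context
 * @mode: NSS wifi offload mode to record in the hif context
 *
 * Return: 0 (always)
 */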
uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->nss_wifi_ol_mode = mode;
	return 0;
}

#endif

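/**
 * hif_set_attribute() - store the bus attribute in the hif context
 * @osc: hif opaque context
 * @hif_attrib: attribute value to store
 *
 * Return: None
 */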
void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->hif_attribute = hif_attrib;
}
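/**
 * hif_disable_interrupt() - disable copy-complete interrupts for a pipe
 * @osc: hif opaque context
 * @pipe_num: pipe (copy engine) id whose interrupt is to be disabled
 *
 * Writes the CE control register within a target-access window to turn
 * off the copy-complete interrupt for the given pipe.
 *
 * Return: None
 */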
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
	uint32_t ctrl_addr = CE_state->ctrl_addr;

	Q_TARGET_ACCESS_BEGIN(scn);
	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
	Q_TARGET_ACCESS_END(scn);
}

/**
 * hif_fw_event_handler() - hif fw event handler
 * @hif_state: pointer to hif ce state structure
 *
 * Process fw events and raise HTC callback to process fw events.
 *
 * Return: none
 */
static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
{
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	if (!msg_callbacks->fwEventHandler)
		return;

	msg_callbacks->fwEventHandler(msg_callbacks->Context,
				      QDF_STATUS_E_FAILURE);
}

#ifndef QCA_WIFI_3_0
/**
 * hif_fw_interrupt_handler() - FW interrupt handler
 * @irq: irq number
 * @arg: the user pointer
 *
 * Called from the PCI interrupt handler when the target raises a
 * firmware-generated interrupt to the host.
 *
 * Return: status of handled irq
 */
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	struct hif_softc *scn = arg;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	uint32_t fw_indicator_address, fw_indicator;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return ATH_ISR_NOSCHED;

	fw_indicator_address = hif_state->fw_indicator_address;
	/* For sudden unplug this will return ~0 */
	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);

	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
		/* ACK: clear Target-side pending event */
		A_TARGET_WRITE(scn, fw_indicator_address,
			       fw_indicator & ~FW_IND_EVENT_PENDING);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;

		if (hif_state->started) {
			hif_fw_event_handler(hif_state);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 */
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
					("%s: Early firmware event indicated\n",
					 __func__));
		}
	} else {
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;
	}

	return ATH_ISR_SCHED;
}
#else
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	return ATH_ISR_SCHED;
}
#endif /* #ifndef QCA_WIFI_3_0 */

/**
 * hif_wlan_disable(): call the platform driver to disable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode to the platform driver to disable
 * wlan.
 *
 * Return: void
 */
void hif_wlan_disable(struct hif_softc *scn)
{
	enum pld_driver_mode mode;
	uint32_t con_mode = hif_get_conparam(scn);

	if (QDF_GLOBAL_FTM_MODE == con_mode)
		mode = PLD_FTM;
	else if (QDF_IS_EPPING_ENABLED(con_mode))
		mode = PLD_EPPING;
	else
		mode = PLD_MISSION;

	pld_wlan_disable(scn->qdf_dev->dev, mode);
}
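/**
 * hif_get_wake_ce_id() - resolve the copy engine id used as the wake CE
 * @scn: HIF Context
 * @ce_id: output; the wake CE id, valid only when 0 is returned
 *
 * The DL pipe for HTC_CTRL_RSVD_SVC maps to the wake CE, so this simply
 * queries hif_map_service_to_pipe() for that service.
 *
 * Return: 0 on success; an OS error code otherwise
 */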
int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
{
	QDF_STATUS status;
	uint8_t ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;

	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
					 HTC_CTRL_RSVD_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
		return qdf_status_to_os_return(status);
	}

	*ce_id = dl_pipe;

	return 0;
}
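
/*
 * Illustrative sketch (not compiled): a caller resolving the wake CE id,
 * for instance before configuring a wake interrupt. The surrounding
 * function is an assumption for illustration.
 */
#if 0
static int example_resolve_wake_ce(struct hif_softc *scn)
{
	uint8_t wake_ce_id;
	int ret = hif_get_wake_ce_id(scn, &wake_ce_id);

	if (ret)
		return ret;

	HIF_DBG("%s: wake CE is %d", __func__, wake_ce_id);
	return 0;
}
#endif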