/*
 * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */
#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#ifndef CONFIG_WIN
#include "qwlan_version.h"
#endif

#define CE_POLL_TIMEOUT 10      /* ms */

#define AGC_DUMP         1
#define CHANINFO_DUMP    2
#define BB_WATCHDOG_DUMP 3
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
#define PCIE_ACCESS_DUMP 4
#endif
#include "mp_dev.h"

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290)) && \
	!defined(QCA_WIFI_SUPPORT_SRNG)
#define QCA_WIFI_SUPPORT_SRNG
#endif

/* Forward references */
QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix EV118783: poll to check whether a BMI response has arrived,
 * rather than waiting for an interrupt that may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC  1000

#ifdef CONFIG_BYPASS_QMI
#define BYPASS_QMI 1
#else
#define BYPASS_QMI 0
#endif

#ifdef CONFIG_WIN
#if ENABLE_10_4_FW_HDR
#define WDI_IPA_SERVICE_GROUP 5
#define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
#define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
#define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
#endif /* ENABLE_10_4_FW_HDR */
#endif

QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn);
static void hif_config_rri_on_ddr(struct hif_softc *scn);

/**
 * hif_target_access_log_dump() - dump the target access log
 *
 * Return: n/a
 */
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
static void hif_target_access_log_dump(void)
{
	hif_target_dump_access_log();
}
#endif

void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
		      uint8_t cmd_id, bool start)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	switch (cmd_id) {
	case AGC_DUMP:
		if (start)
			priv_start_agc(scn);
		else
			priv_dump_agc(scn);
		break;
	case CHANINFO_DUMP:
		if (start)
			priv_start_cap_chaninfo(scn);
		else
			priv_dump_chaninfo(scn);
		break;
	case BB_WATCHDOG_DUMP:
		priv_dump_bbwatchdog(scn);
		break;
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	case PCIE_ACCESS_DUMP:
		hif_target_access_log_dump();
		break;
#endif
	default:
		HIF_ERROR("%s: Invalid htc dump command", __func__);
		break;
	}
}

static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;

	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}

static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	QDF_ASSERT(0); /* n too large */
	return 0;
}
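
/*
 * Worked example (illustrative only, not part of the driver): ring sizes
 * must be powers of two so that nentries_mask (nentries - 1) gives cheap
 * index wrap-around:
 *
 *	roundup_pwr2(8)   -> 8    (already a power of 2, returned as-is)
 *	roundup_pwr2(100) -> 128  (smallest power of 2 >= 100)
 *	roundup_pwr2(0)   -> 0    (0 & (0 - 1) == 0, so 0 passes the
 *				   power-of-2 test and is returned unchanged)
 */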

#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40

static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
#ifdef QCA_WIFI_3_0_ADRASTEA
	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
#endif
};

static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};

/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
 * mapping of HTC services to HIF pipes.
 */
/*
 * This table defines Copy Engine configuration and the mapping
 * of services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
   ============================================================================
   Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
              |                      |      | ctio | Size     | Frequency
              |                      |      | n    |          |
   ============================================================================
   tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
   descriptor |                      |      |      | O(100B)  | and regular
   download   |                      |      |      |          |
   ----------------------------------------------------------------------------
   rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
   indication |                      |      |      | O(10B)   | regular
   upload     |                      |      |      |          |
   ----------------------------------------------------------------------------
   MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
   upload     |                      |      |      | O(1000B) | (frequent
   e.g. noise |                      |      |      |          | during IP1.0
   packets    |                      |      |      |          | testing)
   ----------------------------------------------------------------------------
   MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
   download   |                      |      |      | O(1000B) | (frequent
   e.g.       |                      |      |      |          | during IP1.0
   misdirecte |                      |      |      |          | testing)
   d EAPOL    |                      |      |      |          |
   packets    |                      |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
              | DATA_VO (uplink)     |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
              | DATA_VO (downlink)   |      |      |          |
   ----------------------------------------------------------------------------
   WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
              |                      |      |      | O(100B)  |
   ----------------------------------------------------------------------------
   WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
   messages   | (downlink)           |      |      | O(100B)  |
              |                      |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
              | HTC_RAW_STREAMS      |      |      |          |
              | (uplink)             |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
              | HTC_RAW_STREAMS      |      |      |          |
              | (downlink)           |      |      |          |
   ----------------------------------------------------------------------------
   diag       | none (raw CE)        | CE 7 | t<>h | 4        | Diag Window
              |                      |      |      |          | infrequent
   ============================================================================
 */
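
/*
 * Reading the table (illustrative): the "WMI events | WMI_CONTROL
 * (uplink) | CE 4" row would correspond to a map entry such as
 * { WMI_CONTROL_SVC, PIPEDIR_IN, 4 }. The current maps below route
 * WMI_CONTROL_SVC uplink traffic through CE 2 instead, which is one
 * reason the table above is kept only as a reference.
 */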

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		5,
	},
#if defined(QCA_WIFI_3_0_ADRASTEA)
	{
		HTT_DATA2_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		9,
	},
	{
		HTT_DATA3_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		10,
	},
	{
		PACKET_LOG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		11,
	},
#endif
	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN  = TARGET to HOST */
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};

static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	/* (Additions here) */
	{ 0, 0, 0, },
};

static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
#if WLAN_FEATURE_FASTPATH
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		5,
	},
#else /* WLAN_FEATURE_FASTPATH */
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
#endif /* WLAN_FEATURE_FASTPATH */

	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
	{0, 0, 0,},                             /* Must be last */
};

static void hif_select_service_to_pipe_map(struct hif_softc *scn,
				    struct service_to_pipe **tgt_svc_map_to_use,
				    uint32_t *sz_tgt_svc_map_to_use)
{
	uint32_t mode = hif_get_conparam(scn);
	struct hif_target_info *tgt_info = &scn->target_info;

	if (QDF_IS_EPPING_ENABLED(mode)) {
		*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
		*sz_tgt_svc_map_to_use =
			sizeof(target_service_to_ce_map_wlan_epping);
	} else {
		switch (tgt_info->target_type) {
		default:
			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_wlan);
			break;
		case TARGET_TYPE_AR900B:
		case TARGET_TYPE_QCA9984:
		case TARGET_TYPE_IPQ4019:
		case TARGET_TYPE_QCA9888:
		case TARGET_TYPE_AR9888:
		case TARGET_TYPE_AR9888V2:
			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_ar900b);
			break;
		case TARGET_TYPE_QCA6290:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6290);
			break;
		case TARGET_TYPE_QCA8074:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074);
			break;
		}
	}
}

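/*
 * Note (illustrative, not part of the driver): *sz_tgt_svc_map_to_use is
 * a byte count, not an entry count. A caller converts it the way
 * ce_mark_datapath() does below:
 *
 *	struct service_to_pipe *svc_map;
 *	uint32_t map_sz, map_len;
 *
 *	hif_select_service_to_pipe_map(scn, &svc_map, &map_sz);
 *	map_len = map_sz / sizeof(struct service_to_pipe);
 */
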
/**
 * ce_mark_datapath() - marks a CE that serves an HTT DATA service
 * @ce_state: pointer to the state context of the CE
 *
 * Description:
 *   Sets the htt_rx_data (PIPEDIR_IN) or htt_tx_data (PIPEDIR_OUT)
 *   attribute of the state structure if the CE serves one of the HTT
 *   DATA services.
 *
 * Return:
 *   true if the CE serves an HTT DATA service, false otherwise
 */
static bool ce_mark_datapath(struct CE_state *ce_state)
{
	struct service_to_pipe *svc_map;
	uint32_t map_sz, map_len;
	int i;
	bool rc = false;

	if (ce_state != NULL) {
		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
					       &map_sz);

		map_len = map_sz / sizeof(struct service_to_pipe);
		for (i = 0; i < map_len; i++) {
			if ((svc_map[i].pipenum == ce_state->id) &&
			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
				/* HTT CEs are unidirectional */
				if (svc_map[i].pipedir == PIPEDIR_IN)
					ce_state->htt_rx_data = true;
				else
					ce_state->htt_tx_data = true;
				rc = true;
			}
		}
	}
	return rc;
}

/**
 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
 * @ce_id: ce in question
 * @ring: ring state being examined
 * @type: "src_ring" or "dest_ring" string for identifying the ring
 *
 * Warns on non-zero index values.
 * Causes a kernel panic if the ring is not empty during initialization.
 */
static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
					 char *type)
{
	if (ring->write_index != 0 || ring->sw_index != 0)
		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
			  ce_id, type, ring->sw_index, ring->write_index);
	if (ring->write_index != ring->sw_index)
		QDF_BUG(0);
}

/**
 * ce_srng_based() - Does this target use srng
 * @scn: pointer to the HIF context
 *
 * Description:
 *   Returns true if the target is SRNG based.
 *
 * Return:
 *   true if the target uses SRNG copy engines, false otherwise
 */
bool ce_srng_based(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);

	switch (tgt_info->target_type) {
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA6290:
		return true;
	default:
		return false;
	}
	return false;
}

#ifdef QCA_WIFI_SUPPORT_SRNG
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	if (ce_srng_based(scn))
		return ce_services_srng();

	return ce_services_legacy();
}

#else /* QCA_WIFI_SUPPORT_SRNG */
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	return ce_services_legacy();
}
#endif /* QCA_WIFI_SUPPORT_SRNG */

static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
		struct pld_shadow_reg_v2_cfg **shadow_config,
		int *num_shadow_registers_configured)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
			scn, shadow_config, num_shadow_registers_configured);
}

static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
					uint8_t ring_type)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_get_desc_size(ring_type);
}

static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
		uint8_t ring_type, uint32_t nentries)
{
	uint32_t ce_nbytes;
	char *ptr;
	qdf_dma_addr_t base_addr;
	struct CE_ring_state *ce_ring;
	uint32_t desc_size;
	struct hif_softc *scn = CE_state->scn;

	ce_nbytes = sizeof(struct CE_ring_state)
		+ (nentries * sizeof(void *));
	ptr = qdf_mem_malloc(ce_nbytes);
	if (!ptr)
		return NULL;

	ce_ring = (struct CE_ring_state *)ptr;
	ptr += sizeof(struct CE_ring_state);
	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	ce_ring->low_water_mark_nentries = 0;
	ce_ring->high_water_mark_nentries = nentries;
	ce_ring->per_transfer_context = (void **)ptr;

	desc_size = ce_get_desc_size(scn, ring_type);

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	ce_ring->base_addr_owner_space_unaligned =
		qdf_mem_alloc_consistent(scn->qdf_dev,
					 scn->qdf_dev->dev,
					 (nentries *
					  desc_size +
					  CE_DESC_RING_ALIGN),
					 &base_addr);
	if (ce_ring->base_addr_owner_space_unaligned == NULL) {
		HIF_ERROR("%s: ring has no DMA mem", __func__);
		qdf_mem_free(ptr);
		return NULL;
	}
	ce_ring->base_addr_CE_space_unaligned = base_addr;

	/* Zero-initialize the descriptor memory so stale garbage
	 * cannot crash the system during firmware download.
	 */
	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
		     nentries * desc_size +
		     CE_DESC_RING_ALIGN);

	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {

		ce_ring->base_addr_CE_space =
			(ce_ring->base_addr_CE_space_unaligned +
			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);

		ce_ring->base_addr_owner_space = (void *)
			(((size_t) ce_ring->base_addr_owner_space_unaligned +
			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
	} else {
		ce_ring->base_addr_CE_space =
				ce_ring->base_addr_CE_space_unaligned;
		ce_ring->base_addr_owner_space =
				ce_ring->base_addr_owner_space_unaligned;
	}

	return ce_ring;
}
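
/*
 * Worked example of the alignment fix-up above (illustrative values):
 * with CE_DESC_RING_ALIGN == 8 and an unaligned DMA address of 0x1003,
 * (0x1003 + 8 - 1) & ~(8 - 1) == 0x1008, the first 8-byte-aligned
 * address at or above the allocation. The extra CE_DESC_RING_ALIGN
 * bytes requested from qdf_mem_alloc_consistent() guarantee the aligned
 * base still has room for all nentries descriptors.
 */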

static void ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
			  uint32_t ce_id, struct CE_ring_state *ring,
			  struct CE_attr *attr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
					      ring, attr);
}

int hif_ce_bus_early_suspend(struct hif_softc *scn)
{
	uint8_t ul_pipe, dl_pipe;
	int ce_id, status, ul_is_polled, dl_is_polled;
	struct CE_state *ce_state;

	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		HIF_ERROR("%s: pipe_mapping failure", __func__);
		return status;
	}

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (ce_id == ul_pipe)
			continue;
		if (ce_id == dl_pipe)
			continue;

		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_RUNNING)
			ce_state->state = CE_PAUSED;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
	}

	return status;
}

int hif_ce_bus_late_resume(struct hif_softc *scn)
{
	int ce_id;
	struct CE_state *ce_state;
	int write_index;
	bool index_updated;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_PENDING) {
			write_index = ce_state->src_ring->write_index;
			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
					write_index);
			ce_state->state = CE_RUNNING;
			index_updated = true;
		} else {
			index_updated = false;
		}

		if (ce_state->state == CE_PAUSED)
			ce_state->state = CE_RUNNING;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);

		if (index_updated)
			hif_record_ce_desc_event(scn, ce_id,
				RESUME_WRITE_INDEX_UPDATE,
				NULL, NULL, write_index);
	}

	return 0;
}
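
/*
 * Sketch of the suspend/resume pairing (illustrative): CEs other than the
 * WMI control pipes move CE_RUNNING -> CE_PAUSED in
 * hif_ce_bus_early_suspend(). A CE found in CE_PENDING here had a send
 * queued while paused, so its cached write_index is flushed to hardware
 * with CE_SRC_RING_WRITE_IDX_SET() before the state returns to
 * CE_RUNNING.
 */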

/**
 * ce_oom_recovery() - try to recover rx ce from oom condition
 * @context: CE_state of the CE with oom rx ring
 *
 * The executing work will continue to be rescheduled until
 * at least 1 descriptor is successfully posted to the rx ring.
 *
 * Return: none
 */
static void ce_oom_recovery(void *context)
{
	struct CE_state *ce_state = context;
	struct hif_softc *scn = ce_state->scn;
	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&ce_softc->pipe_info[ce_state->id];

	hif_post_recv_buffers_for_pipe(pipe_info);
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 *
 * This should be called during the initialization sequence before
 * interrupts are enabled, so we don't have to worry about thread safety.
 */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr)
{
	struct CE_state *CE_state;
	uint32_t ctrl_addr;
	unsigned int nentries;
	bool malloc_CE_state = false;
	bool malloc_src_ring = false;

	QDF_ASSERT(CE_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(CE_id);
	CE_state = scn->ce_id_to_state[CE_id];

	if (!CE_state) {
		CE_state =
		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
		if (!CE_state) {
			HIF_ERROR("%s: CE_state has no mem", __func__);
			return NULL;
		}
		malloc_CE_state = true;
		qdf_spinlock_create(&CE_state->ce_index_lock);

		CE_state->id = CE_id;
		CE_state->ctrl_addr = ctrl_addr;
		CE_state->state = CE_RUNNING;
		CE_state->attr_flags = attr->flags;
		qdf_spinlock_create(&CE_state->lro_unloading_lock);
	}
	CE_state->scn = scn;

	qdf_atomic_init(&CE_state->rx_pending);
	if (attr == NULL) {
		/* Already initialized; caller wants the handle */
		return (struct CE_handle *)CE_state;
	}

	if (CE_state->src_sz_max)
		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
	else
		CE_state->src_sz_max = attr->src_sz_max;

	ce_init_ce_desc_event_log(CE_id,
				  attr->src_nentries + attr->dest_nentries);

	/* source ring setup */
	nentries = attr->src_nentries;
	if (nentries) {
		struct CE_ring_state *src_ring;

		nentries = roundup_pwr2(nentries);
		if (CE_state->src_ring) {
			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
		} else {
			src_ring = CE_state->src_ring =
				ce_alloc_ring_state(CE_state,
						    CE_RING_SRC,
						    nentries);
			if (!src_ring) {
				/* cannot allocate src ring. If the
				 * CE_state is allocated locally free
				 * CE_State and return error.
				 */
				HIF_ERROR("%s: src ring has no mem", __func__);
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					qdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			}
			/* we can allocate src ring. Mark that the src ring is
			 * allocated locally
			 */
			malloc_src_ring = true;

			/*
			 * Also allocate a shadow src ring in
			 * regular mem to use for faster access.
			 */
			src_ring->shadow_base_unaligned =
				qdf_mem_malloc(nentries *
					       sizeof(struct CE_src_desc) +
					       CE_DESC_RING_ALIGN);
			if (src_ring->shadow_base_unaligned == NULL) {
				HIF_ERROR("%s: src ring no shadow_base mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->shadow_base = (struct CE_src_desc *)
				(((size_t) src_ring->shadow_base_unaligned +
				CE_DESC_RING_ALIGN - 1) &
				 ~(CE_DESC_RING_ALIGN - 1));

			if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
				goto error_target_access;

			ce_ring_setup(scn, CE_RING_SRC, CE_id, src_ring, attr);

			if (Q_TARGET_ACCESS_END(scn) < 0)
				goto error_target_access;
			ce_ring_test_initial_indexes(CE_id, src_ring,
						     "src_ring");
		}
	}

	/* destination ring setup */
	nentries = attr->dest_nentries;
	if (nentries) {
		struct CE_ring_state *dest_ring;

		nentries = roundup_pwr2(nentries);
		if (CE_state->dest_ring) {
			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
		} else {
			dest_ring = CE_state->dest_ring =
				ce_alloc_ring_state(CE_state,
						    CE_RING_DEST,
						    nentries);
			if (!dest_ring) {
				/* cannot allocate dst ring. If the CE_state
				 * or src ring is allocated locally free
				 * CE_State and src ring and return error.
				 */
				HIF_ERROR("%s: dest ring has no mem",
					  __func__);
				goto error_no_dma_mem;
			}

			if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
				goto error_target_access;

			ce_ring_setup(scn, CE_RING_DEST, CE_id,
				      dest_ring, attr);

			if (Q_TARGET_ACCESS_END(scn) < 0)
				goto error_target_access;

			ce_ring_test_initial_indexes(CE_id, dest_ring,
						     "dest_ring");

			/* For srng based target, init status ring here */
			if (ce_srng_based(CE_state->scn)) {
				CE_state->status_ring =
					ce_alloc_ring_state(CE_state,
							CE_RING_STATUS,
							nentries);
				if (CE_state->status_ring == NULL) {
					/* Allocation failed. Cleanup. */
					qdf_mem_free(CE_state->dest_ring);
					if (malloc_src_ring) {
						qdf_mem_free
							(CE_state->src_ring);
						CE_state->src_ring = NULL;
						malloc_src_ring = false;
					}
					if (malloc_CE_state) {
						/* allocated CE_state locally */
						scn->ce_id_to_state[CE_id] =
							NULL;
						qdf_mem_free(CE_state);
						malloc_CE_state = false;
					}

					return NULL;
				}
				if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
					goto error_target_access;

				ce_ring_setup(scn, CE_RING_STATUS, CE_id,
					      CE_state->status_ring, attr);

				if (Q_TARGET_ACCESS_END(scn) < 0)
					goto error_target_access;

			}

			/* epping */
			/* poll timer */
			if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL)) {
				qdf_timer_init(scn->qdf_dev,
					       &CE_state->poll_timer,
					       ce_poll_timeout,
					       CE_state,
					       QDF_TIMER_TYPE_SW);
				CE_state->timer_inited = true;
				qdf_timer_mod(&CE_state->poll_timer,
					      CE_POLL_TIMEOUT);
			}
		}
	}

	if (!ce_srng_based(scn)) {
		/* Enable CE error interrupts */
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			goto error_target_access;
		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			goto error_target_access;
	}

	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
			ce_oom_recovery, CE_state);

	/* update the htt_data attribute */
	ce_mark_datapath(CE_state);
	scn->ce_id_to_state[CE_id] = CE_state;

	return (struct CE_handle *)CE_state;

error_target_access:
error_no_dma_mem:
	ce_fini((struct CE_handle *)CE_state);
	return NULL;
}
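
/*
 * Illustrative usage sketch (not part of the driver): a host-side caller
 * typically drives ce_init() from its CE configuration table, roughly:
 *
 *	struct CE_attr *attr = &hif_state->host_ce_config[CE_id];
 *	struct CE_handle *ce_hdl = ce_init(scn, CE_id, attr);
 *
 * where indexing host_ce_config by CE_id is an assumption for the sketch.
 * The attr fields consumed above are flags, src_sz_max, src_nentries and
 * dest_nentries. Passing attr == NULL for an already-initialized CE just
 * returns the existing handle, as the early return above shows.
 */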

#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_enable_fastpath() - update that we have enabled fastpath mode
 * @hif_ctx: HIF context
 *
 * For use in data path
 *
 * Return: void
 */
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (ce_srng_based(scn)) {
		HIF_INFO("%s, srng rings do not support fastpath", __func__);
		return;
	}
	HIF_DBG("%s, Enabling fastpath mode", __func__);
	scn->fastpath_mode_on = true;
}

/**
 * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
 * @hif_ctx: HIF Context
 *
 * For use in data path to skip HTC
 *
 * Return: bool
 */
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->fastpath_mode_on;
}

/**
 * hif_get_ce_handle - API to get CE handle for FastPath mode
 * @hif_ctx: HIF Context
 * @id: CopyEngine Id
 *
 * API to return CE handle for fastpath mode
 *
 * Return: void * (the CE state for @id)
 */
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->ce_id_to_state[id];
}

/**
 * ce_h2t_tx_ce_cleanup() - Placeholder function for H2T CE cleanup.
 * @ce_hdl: Copy engine handle
 *
 * No processing is required inside this function; using an assert,
 * it only makes sure that the TX CE has been processed completely.
 *
 * This is called while dismantling CE structures. No other thread
 * should be using these structures while dismantling is occurring,
 * therefore no locking is needed.
 *
 * Return: none
 */
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct hif_softc *sc = ce_state->scn;
	uint32_t sw_index, write_index;

	if (hif_is_nss_wifi_enabled(sc))
		return;

	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
		HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
			__func__, __LINE__);
		sw_index = src_ring->sw_index;
		write_index = src_ring->sw_index;

		/* At this point Tx CE should be clean */
		qdf_assert_always(sw_index == write_index);
	}
}

/**
 * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
 * @ce_hdl: Handle to CE
 *
 * These buffers are never allocated on the fly, but
 * are allocated only once during HIF start and freed
 * only once during HIF stop.
 * NOTE:
 * The assumption here is there is no in-flight DMA in progress
 * currently, so that buffers can be freed up safely.
 *
 * Return: NONE
 */
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *dst_ring = ce_state->dest_ring;
	qdf_nbuf_t nbuf;
	int i;

	if (ce_state->scn->fastpath_mode_on == false)
		return;

	if (!ce_state->htt_rx_data)
		return;

Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001220 /*
1221 * when fastpath_mode is on and for datapath CEs. Unlike other CE's,
1222 * this CE is completely full: does not leave one blank space, to
1223 * distinguish between empty queue & full queue. So free all the
1224 * entries.
1225 */
	for (i = 0; i < dst_ring->nentries; i++) {
		nbuf = dst_ring->per_transfer_context[i];

		/*
		 * The reasons for doing this check are:
		 * 1) Protect against calling cleanup before allocating buffers
		 * 2) In a corner case, FASTPATH_mode_on may be set, but we
		 *    could have a partially filled ring, because of a memory
		 *    allocation failure in the middle of allocating ring.
		 *    This check accounts for that case, checking
		 *    fastpath_mode_on flag or started flag would not have
		 *    covered that case. This is not in performance path,
		 *    so OK to do this.
		 */
		if (nbuf) {
			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
					      QDF_DMA_FROM_DEVICE);
			qdf_nbuf_free(nbuf);
		}
	}
}

/**
 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
 * @scn: HIF handle
 *
 * Datapath Rx CEs are a special case, where we reuse all the message buffers.
 * Hence we have to post all the entries in the pipe, even in the beginning,
 * unlike for other CE pipes where one less than dest_nentries are filled in
 * the beginning.
 *
 * Return: None
 */
1259static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1260{
1261 int pipe_num;
1262 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1263
1264 if (scn->fastpath_mode_on == false)
1265 return;
1266
1267 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1268 struct HIF_CE_pipe_info *pipe_info =
1269 &hif_state->pipe_info[pipe_num];
1270 struct CE_state *ce_state =
1271 scn->ce_id_to_state[pipe_info->pipe_num];
1272
1273 if (ce_state->htt_rx_data)
1274 atomic_inc(&pipe_info->recv_bufs_needed);
1275 }
1276}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001277#else
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001278static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001279{
1280}
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001281
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001282static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001283{
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001284 return false;
1285}
1286
1287static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
1288{
1289 return false;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001290}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001291#endif /* WLAN_FEATURE_FASTPATH */
1292
1293void ce_fini(struct CE_handle *copyeng)
1294{
1295 struct CE_state *CE_state = (struct CE_state *)copyeng;
1296 unsigned int CE_id = CE_state->id;
Komal Seelam644263d2016-02-22 20:45:49 +05301297 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001298
1299 CE_state->state = CE_UNUSED;
1300 scn->ce_id_to_state[CE_id] = NULL;
Houston Hoffman03f46572016-12-12 12:53:56 -08001301
1302 qdf_spinlock_destroy(&CE_state->lro_unloading_lock);
1303
Dhanashri Atre991ee4d2017-05-03 19:03:10 -07001304 qdf_lro_deinit(CE_state->lro_data);
1305
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001306 if (CE_state->src_ring) {
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001307 /* Cleanup the datapath Tx ring */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001308 ce_h2t_tx_ce_cleanup(copyeng);
1309
1310 if (CE_state->src_ring->shadow_base_unaligned)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301311 qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001312 if (CE_state->src_ring->base_addr_owner_space_unaligned)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301313 qdf_mem_free_consistent(scn->qdf_dev,
1314 scn->qdf_dev->dev,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001315 (CE_state->src_ring->nentries *
1316 sizeof(struct CE_src_desc) +
1317 CE_DESC_RING_ALIGN),
1318 CE_state->src_ring->
1319 base_addr_owner_space_unaligned,
1320 CE_state->src_ring->
1321 base_addr_CE_space, 0);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301322 qdf_mem_free(CE_state->src_ring);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001323 }
1324 if (CE_state->dest_ring) {
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001325 /* Cleanup the datapath Rx ring */
1326 ce_t2h_msg_ce_cleanup(copyeng);
1327
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001328 if (CE_state->dest_ring->base_addr_owner_space_unaligned)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301329 qdf_mem_free_consistent(scn->qdf_dev,
1330 scn->qdf_dev->dev,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001331 (CE_state->dest_ring->nentries *
1332 sizeof(struct CE_dest_desc) +
1333 CE_DESC_RING_ALIGN),
1334 CE_state->dest_ring->
1335 base_addr_owner_space_unaligned,
1336 CE_state->dest_ring->
1337 base_addr_CE_space, 0);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301338 qdf_mem_free(CE_state->dest_ring);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001339
1340 /* epping */
1341 if (CE_state->timer_inited) {
1342 CE_state->timer_inited = false;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301343 qdf_timer_free(&CE_state->poll_timer);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001344 }
1345 }
Houston Hoffman31b25ec2016-09-19 13:12:30 -07001346 if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301347 /* Cleanup the datapath Tx ring */
1348 ce_h2t_tx_ce_cleanup(copyeng);
1349
1350 if (CE_state->status_ring->shadow_base_unaligned)
1351 qdf_mem_free(
1352 CE_state->status_ring->shadow_base_unaligned);
1353
1354 if (CE_state->status_ring->base_addr_owner_space_unaligned)
1355 qdf_mem_free_consistent(scn->qdf_dev,
1356 scn->qdf_dev->dev,
1357 (CE_state->status_ring->nentries *
1358 sizeof(struct CE_src_desc) +
1359 CE_DESC_RING_ALIGN),
1360 CE_state->status_ring->
1361 base_addr_owner_space_unaligned,
1362 CE_state->status_ring->
1363 base_addr_CE_space, 0);
1364 qdf_mem_free(CE_state->status_ring);
1365 }
Houston Hoffman03f46572016-12-12 12:53:56 -08001366
1367 qdf_spinlock_destroy(&CE_state->ce_index_lock);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301368 qdf_mem_free(CE_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001369}
1370
Komal Seelam5584a7c2016-02-24 19:22:48 +05301371void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001372{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301373 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001374
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301375 qdf_mem_zero(&hif_state->msg_callbacks_pending,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001376 sizeof(hif_state->msg_callbacks_pending));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301377 qdf_mem_zero(&hif_state->msg_callbacks_current,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001378 sizeof(hif_state->msg_callbacks_current));
1379}
1380
1381/* Send the first nbytes bytes of the buffer */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301382QDF_STATUS
Komal Seelam5584a7c2016-02-24 19:22:48 +05301383hif_send_head(struct hif_opaque_softc *hif_ctx,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001384 uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301385 qdf_nbuf_t nbuf, unsigned int data_attr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001386{
Komal Seelam644263d2016-02-22 20:45:49 +05301387 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05301388 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001389 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1390 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
1391 int bytes = nbytes, nfrags = 0;
1392 struct ce_sendlist sendlist;
1393 int status, i = 0;
	unsigned int mux_id = 0;

	QDF_ASSERT(nbytes <= qdf_nbuf_len(nbuf));

	transfer_id =
		(mux_id & MUX_ID_MASK) |
		(transfer_id & TRANSACTION_ID_MASK);
	data_attr &= DESC_DATA_FLAG_MASK;
	/*
	 * The common case involves sending multiple fragments within a
	 * single download (the tx descriptor and the tx frame header).
	 * So, optimize for the case of multiple fragments by not even
	 * checking whether it's necessary to use a sendlist.
	 * The overhead of using a sendlist for a single buffer download
	 * is not a big deal, since it happens rarely (for WMI messages).
	 */
	ce_sendlist_init(&sendlist);
	do {
		qdf_dma_addr_t frag_paddr;
		int frag_bytes;

		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
		/*
		 * Clear the packet offset for all but the first CE desc.
		 */
		if (i++ > 0)
			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;

		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
					     frag_bytes >
					     bytes ? bytes : frag_bytes,
					     qdf_nbuf_get_frag_is_wordstream
					     (nbuf,
					      nfrags) ? 0 :
					     CE_SEND_FLAG_SWAP_DISABLE,
					     data_attr);
		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("%s: error, frag_num %d larger than limit",
				  __func__, nfrags);
			return status;
		}
		bytes -= frag_bytes;
		nfrags++;
	} while (bytes > 0);

	/* Make sure we have resources to handle this request */
	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	if (pipe_info->num_sends_allowed < nfrags) {
		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
		return QDF_STATUS_E_RESOURCES;
	}
	pipe_info->num_sends_allowed -= nfrags;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);

	if (qdf_unlikely(ce_hdl == NULL)) {
		HIF_ERROR("%s: error CE handle is null", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
		QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
		sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
	QDF_ASSERT(status == QDF_STATUS_SUCCESS);

	return status;
}
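/*
 * Illustrative sketch (not driver code): a two-fragment download, such as
 * a tx descriptor followed by the frame header, passes through the loop
 * above roughly as below; "desc_paddr"/"frame_paddr" and their lengths
 * are hypothetical names.
 *
 *	ce_sendlist_init(&sendlist);
 *	ce_sendlist_buf_add(&sendlist, desc_paddr, desc_len,
 *			    CE_SEND_FLAG_SWAP_DISABLE, data_attr);
 *	ce_sendlist_buf_add(&sendlist, frame_paddr, frame_len,
 *			    CE_SEND_FLAG_SWAP_DISABLE,
 *			    data_attr & ~QDF_CE_TX_PKT_OFFSET_BIT_M);
 *	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
 */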

void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
			     int force)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance. If there seem to be plenty of
		 * resources left, then just wait, since checking involves
		 * reading a CE register, which is a relatively expensive
		 * operation.
		 */
		resources = hif_get_free_queue_number(hif_ctx, pipe);
		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
				 1))
			return;
	}
#if ATH_11AC_TXCOMPACT
	ce_per_engine_servicereap(scn, pipe);
#else
	ce_per_engine_service(scn, pipe);
#endif
}
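/*
 * Usage sketch (illustrative): callers invoke this opportunistically after
 * queueing traffic, letting the 50% threshold above skip the expensive CE
 * register read on the fast path, and pass force when completions must be
 * reaped immediately, e.g. on a flush:
 *
 *	hif_send_complete_check(hif_ctx, pipe, 0);	// cheap, may skip
 *	hif_send_complete_check(hif_ctx, pipe, 1);	// always services CE
 */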

uint16_t
hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	uint16_t rv;

	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	rv = pipe_info->num_sends_allowed;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
	return rv;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void
hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int sw_index, unsigned int hw_index,
		     unsigned int toeplitz_hash_result)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	unsigned int sw_idx = sw_index, hw_idx = hw_index;
	struct hif_msg_callbacks *msg_callbacks =
		&pipe_info->pipe_callbacks;

	do {
		/*
		 * The upper layer callback is only triggered
		 * when the last fragment is completed.
		 */
		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
			if (scn->target_status == TARGET_STATUS_RESET) {
				qdf_nbuf_unmap_single(scn->qdf_dev,
						      transfer_context,
						      QDF_DMA_TO_DEVICE);
				qdf_nbuf_free(transfer_context);
			} else
				msg_callbacks->txCompletionHandler(
					msg_callbacks->Context,
					transfer_context, transfer_id,
					toeplitz_hash_result);
		}

		qdf_spin_lock(&pipe_info->completion_freeq_lock);
		pipe_info->num_sends_allowed++;
		qdf_spin_unlock(&pipe_info->completion_freeq_lock);
	} while (ce_completed_send_next(copyeng,
			&ce_context, &transfer_context,
			&CE_data, &nbytes, &transfer_id,
			&sw_idx, &hw_idx,
			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
}

/**
 * hif_ce_do_recv(): send message from copy engine to upper layers
 * @msg_callbacks: structure containing callback and callback context
 * @netbuf: skb containing message
 * @nbytes: number of bytes in the message
 * @pipe_info: used for the pipe_number info
 *
 * Checks the packet length, configures the length in the netbuf,
 * and calls the upper layer callback.
 *
 * Return: None
 */
static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
		qdf_nbuf_t netbuf, int nbytes,
		struct HIF_CE_pipe_info *pipe_info) {
	if (nbytes <= pipe_info->buf_sz) {
		qdf_nbuf_set_pktlen(netbuf, nbytes);
		msg_callbacks->
			rxCompletionHandler(msg_callbacks->Context,
					    netbuf, pipe_info->pipe_num);
	} else {
		HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d",
			  __func__, netbuf, nbytes);

		qdf_nbuf_free(netbuf);
	}
}

/* Called by lower (CE) layer when data is received from the Target. */
static void
hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int flags)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct CE_state *ce_state = (struct CE_state *) copyeng;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
#ifdef HIF_PCI
	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
#endif
	struct hif_msg_callbacks *msg_callbacks =
		&pipe_info->pipe_callbacks;

	do {
#ifdef HIF_PCI
		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
#endif
		qdf_nbuf_unmap_single(scn->qdf_dev,
				      (qdf_nbuf_t) transfer_context,
				      QDF_DMA_FROM_DEVICE);

		atomic_inc(&pipe_info->recv_bufs_needed);
		hif_post_recv_buffers_for_pipe(pipe_info);
		if (scn->target_status == TARGET_STATUS_RESET)
			qdf_nbuf_free(transfer_context);
		else
			hif_ce_do_recv(msg_callbacks, transfer_context,
				       nbytes, pipe_info);

		/* Set up the force_break flag if the number of receives
		 * reaches MAX_NUM_OF_RECEIVES
		 */
		ce_state->receive_count++;
		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
			ce_state->force_break = 1;
			break;
		}
	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
					&CE_data, &nbytes, &transfer_id,
					&flags) == QDF_STATUS_SUCCESS);
}

/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */

void
hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
	      struct hif_msg_callbacks *callbacks)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	spin_lock_init(&pcie_access_log_lock);
#endif
	/* Save callbacks for later installation */
	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
		     sizeof(hif_state->msg_callbacks_pending));
}
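/*
 * Usage sketch (illustrative, hypothetical handler names): the HTC layer
 * fills a struct hif_msg_callbacks and hands it over before hif_start();
 * the pending callbacks are only installed later by
 * hif_msg_callbacks_install():
 *
 *	struct hif_msg_callbacks cbs = {
 *		.Context = htc_handle,
 *		.txCompletionHandler = ex_tx_completion,
 *		.rxCompletionHandler = ex_rx_completion,
 *	};
 *	hif_post_init(hif_ctx, NULL, &cbs);
 */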

static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
{
	struct CE_handle *ce_diag = hif_state->ce_diag;
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	struct hif_msg_callbacks *hif_msg_callbacks =
		&hif_state->msg_callbacks_current;

	/* daemonize("hif_compl_thread"); */

	if (scn->ce_count == 0) {
		HIF_ERROR("%s: Invalid ce_count", __func__);
		return -EINVAL;
	}

	if (!hif_msg_callbacks ||
	    !hif_msg_callbacks->rxCompletionHandler ||
	    !hif_msg_callbacks->txCompletionHandler) {
		HIF_ERROR("%s: no completion handler registered", __func__);
		return -EFAULT;
	}

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr attr;
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl == ce_diag)
			continue;	/* Handle Diagnostic CE specially */
		attr = hif_state->host_ce_config[pipe_num];
		if (attr.src_nentries) {
			/* pipe used to send to target */
			HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK",
				__func__, pipe_num, pipe_info);
			ce_send_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_send_done, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
			pipe_info->num_sends_allowed = attr.src_nentries - 1;
		}
		if (attr.dest_nentries) {
			/* pipe used to receive from target */
			ce_recv_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_recv_data, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
		}

		if (attr.src_nentries)
			qdf_spinlock_create(&pipe_info->completion_freeq_lock);

		qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
			     sizeof(pipe_info->pipe_callbacks));
	}

	A_TARGET_ACCESS_UNLIKELY(scn);
	return 0;
}

/*
 * Install pending msg callbacks.
 *
 * TBDXXX: This hack is needed because upper layers install msg callbacks
 * for use with HTC before BMI is done; yet this HIF implementation
 * needs to continue to use BMI msg callbacks. Really, upper layers
 * should not register HTC callbacks until AFTER BMI phase.
 */
static void hif_msg_callbacks_install(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	qdf_mem_copy(&hif_state->msg_callbacks_current,
		     &hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
}

void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
			  uint8_t *DLPipe)
{
	int ul_is_polled, dl_is_polled;

	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
}
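/*
 * Usage sketch (illustrative): callers that need the mapping for a
 * specific service query hif_map_service_to_pipe() directly, e.g.:
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	if (hif_map_service_to_pipe(hif_hdl, WMI_CONTROL_SVC,
 *				    &ul_pipe, &dl_pipe,
 *				    &ul_polled, &dl_polled))
 *		HIF_ERROR("no pipe mapping for WMI_CONTROL_SVC");
 */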

/**
 * hif_dump_pipe_debug_count() - Log error count
 * @scn: hif_softc pointer.
 *
 * Output the pipe error counts of each pipe to log file
 *
 * Return: N/A
 */
void hif_dump_pipe_debug_count(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num;

	if (hif_state == NULL) {
		HIF_ERROR("%s hif_state is NULL", __func__);
		return;
	}
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];

		if (pipe_info->nbuf_alloc_err_count > 0 ||
		    pipe_info->nbuf_dma_err_count > 0 ||
		    pipe_info->nbuf_ce_enqueue_err_count)
			HIF_ERROR(
				"%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_alloc_err_count,
				pipe_info->nbuf_dma_err_count,
				pipe_info->nbuf_ce_enqueue_err_count);
	}
}

static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
					  void *nbuf, uint32_t *error_cnt,
					  enum hif_ce_event_type failure_type,
					  const char *failure_type_string)
{
	int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
	struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
	int ce_id = CE_state->id;
	uint32_t error_cnt_tmp;

	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
	error_cnt_tmp = ++(*error_cnt);
	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
	HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
		__func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
		failure_type_string);
	hif_record_ce_desc_event(scn, ce_id, failure_type,
				 NULL, nbuf, bufs_needed_tmp);
	/* if we fail to allocate the last buffer for an rx pipe,
	 * there is no trigger to refill the ce and we will
	 * eventually crash
	 */
	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
		qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
}
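/*
 * The last-buffer case above matters because the refill path is normally
 * re-armed from an rx completion; once every buffer is gone no completion
 * can arrive. A minimal sketch of the recovery worker (hypothetical name,
 * assumed to simply replenish the pipe once memory pressure eases):
 *
 *	static void ex_oom_recovery(void *context)
 *	{
 *		struct HIF_CE_pipe_info *pipe_info = context;
 *
 *		hif_post_recv_buffers_for_pipe(pipe_info);
 *	}
 */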

QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	qdf_size_t buf_sz;
	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
	QDF_STATUS status;
	uint32_t bufs_posted = 0;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return QDF_STATUS_SUCCESS;
	}

	ce_hdl = pipe_info->ce_hdl;

	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
		qdf_dma_addr_t CE_data;	/* CE space buffer address */
		qdf_nbuf_t nbuf;

		atomic_dec(&pipe_info->recv_bufs_needed);
		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
		if (!nbuf) {
			hif_post_recv_buffers_failure(pipe_info, nbuf,
					&pipe_info->nbuf_alloc_err_count,
					HIF_RX_NBUF_ALLOC_FAILURE,
					"HIF_RX_NBUF_ALLOC_FAILURE");
			return QDF_STATUS_E_NOMEM;
		}

		/*
		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
		 * CE_data = dma_map_single(dev, data, buf_sz, );
		 * DMA_FROM_DEVICE);
		 */
		status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
					     QDF_DMA_FROM_DEVICE);

		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
			hif_post_recv_buffers_failure(pipe_info, nbuf,
					&pipe_info->nbuf_dma_err_count,
					HIF_RX_NBUF_MAP_FAILURE,
					"HIF_RX_NBUF_MAP_FAILURE");
			qdf_nbuf_free(nbuf);
			return status;
		}

		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);

		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
						   buf_sz, DMA_FROM_DEVICE);
		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
			hif_post_recv_buffers_failure(pipe_info, nbuf,
					&pipe_info->nbuf_ce_enqueue_err_count,
					HIF_RX_NBUF_ENQUEUE_FAILURE,
					"HIF_RX_NBUF_ENQUEUE_FAILURE");

			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
					      QDF_DMA_FROM_DEVICE);
			qdf_nbuf_free(nbuf);
			return status;
		}

		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
		bufs_posted++;
	}
	pipe_info->nbuf_alloc_err_count =
		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
	pipe_info->nbuf_dma_err_count =
		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
	pipe_info->nbuf_ce_enqueue_err_count =
		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
		pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;

	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

	return QDF_STATUS_SUCCESS;
}

/*
 * Try to post all desired receive buffers for all pipes.
 * Returns QDF_STATUS_SUCCESS for a non-fastpath rx copy engine, since
 * oom_allocation_work will be scheduled to recover any failures; returns
 * an error status if unable to completely replenish receive buffers for
 * a fastpath rx copy engine.
 */
QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num;
	struct CE_state *ce_state;
	QDF_STATUS qdf_status;

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		ce_state = scn->ce_id_to_state[pipe_num];
		pipe_info = &hif_state->pipe_info[pipe_num];

		if (hif_is_nss_wifi_enabled(scn) &&
		    ce_state && (ce_state->htt_rx_data))
			continue;

		qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
		if (!QDF_IS_STATUS_SUCCESS(qdf_status) &&
		    ce_state->htt_rx_data &&
		    scn->fastpath_mode_on) {
			A_TARGET_ACCESS_UNLIKELY(scn);
			return qdf_status;
		}
	}

	A_TARGET_ACCESS_UNLIKELY(scn);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	hif_update_fastpath_recv_bufs_cnt(scn);

	hif_msg_callbacks_install(scn);

	if (hif_completion_thread_startup(hif_state))
		return QDF_STATUS_E_FAILURE;

	/* enable buffer cleanup */
	hif_state->started = true;

	/* Post buffers once to start things off. */
	qdf_status = hif_post_recv_buffers(scn);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
		/* cleanup is done in hif_ce_disable */
		HIF_ERROR("%s: failed to post buffers", __func__);
		return qdf_status;
	}

	return qdf_status;
}

static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct hif_softc *scn;
	struct CE_handle *ce_hdl;
	uint32_t buf_sz;
	struct HIF_CE_state *hif_state;
	qdf_nbuf_t netbuf;
	qdf_dma_addr_t CE_data;
	void *per_CE_context;

	buf_sz = pipe_info->buf_sz;
	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started)
		return;

	scn = HIF_GET_SOFTC(hif_state);
	ce_hdl = pipe_info->ce_hdl;

	if (scn->qdf_dev == NULL)
		return;
	while (ce_revoke_recv_next
		       (ce_hdl, &per_CE_context, (void **)&netbuf,
			&CE_data) == QDF_STATUS_SUCCESS) {
		if (netbuf) {
			qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
					      QDF_DMA_FROM_DEVICE);
			qdf_nbuf_free(netbuf);
		}
	}
}

static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	struct HIF_CE_state *hif_state;
	struct hif_softc *scn;
	qdf_nbuf_t netbuf;
	void *per_CE_context;
	qdf_dma_addr_t CE_data;
	unsigned int nbytes;
	unsigned int id;
	uint32_t buf_sz;
	uint32_t toeplitz_hash_result;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started)
		return;

	scn = HIF_GET_SOFTC(hif_state);

	ce_hdl = pipe_info->ce_hdl;

	while (ce_cancel_send_next
		       (ce_hdl, &per_CE_context,
			(void **)&netbuf, &CE_data, &nbytes,
			&id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
			/*
			 * Packets enqueued by htt_h2t_ver_req_msg() and
			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
			 * freed in htt_htc_misc_pkt_pool_free() in
			 * wlantl_close(), so do not free them here again
			 * by checking whether it's the endpoint
			 * which they are queued in.
			 */
			if (id == scn->htc_htt_tx_endpoint)
				return;
			/* Indicate the completion to higher
			 * layer to free the buffer
			 */
			if (pipe_info->pipe_callbacks.txCompletionHandler)
				pipe_info->pipe_callbacks.
				txCompletionHandler(pipe_info->
					pipe_callbacks.Context,
					netbuf, id, toeplitz_hash_result);
		}
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
{
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	struct CE_state *ce_state;

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		ce_state = scn->ce_id_to_state[pipe_num];
		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
		    ((ce_state->htt_tx_data) ||
		     (ce_state->htt_rx_data))) {
			continue;
		}

		pipe_info = &hif_state->pipe_info[pipe_num];
		hif_recv_buffer_cleanup_on_pipe(pipe_info);
		hif_send_buffer_cleanup_on_pipe(pipe_info);
	}
}

void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_buffer_cleanup(hif_state);
}

static void hif_destroy_oom_work(struct hif_softc *scn)
{
	struct CE_state *ce_state;
	int ce_id;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		ce_state = scn->ce_id_to_state[ce_id];
		if (ce_state)
			qdf_destroy_work(scn->qdf_dev,
					 &ce_state->oom_allocation_work);
	}
}

void hif_ce_stop(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num;

	/*
	 * before cleaning up any memory, ensure irq &
	 * bottom half contexts will not be re-entered
	 */
	hif_disable_isr(&scn->osc);
	hif_destroy_oom_work(scn);
	scn->hif_init_done = false;

	/*
	 * At this point, asynchronous threads are stopped,
	 * The Target should not DMA nor interrupt, Host code may
	 * not initiate anything more. So we just need to clean
	 * up Host-side state.
	 */

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	hif_buffer_cleanup(hif_state);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;
		struct CE_attr attr;
		struct CE_handle *ce_diag = hif_state->ce_diag;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			if (pipe_info->ce_hdl != ce_diag) {
				attr = hif_state->host_ce_config[pipe_num];
				if (attr.src_nentries)
					qdf_spinlock_destroy(&pipe_info->
							completion_freeq_lock);
			}
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
		}
	}

	if (hif_state->sleep_timer_init) {
		qdf_timer_stop(&hif_state->sleep_timer);
		qdf_timer_free(&hif_state->sleep_timer);
		hif_state->sleep_timer_init = false;
	}

	hif_state->started = false;
}

/**
 * hif_get_target_ce_config() - get copy engine configuration
 * @scn: hif context
 * @target_ce_config_ret: basic copy engine configuration
 * @target_ce_config_sz_ret: size of the basic configuration in bytes
 * @target_service_to_ce_map_ret: service mapping for the copy engines
 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
 * @target_shadow_reg_cfg_ret: shadow register configuration
 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
 *
 * Provides an accessor to these values outside of this file.
 * Currently these are stored in static pointers to const sections.
 * There are multiple configurations that are selected from at compile time.
 * Runtime selection would need to consider mode, target type and bus type.
 *
 * Return: return by parameter.
 */
void hif_get_target_ce_config(struct hif_softc *scn,
		struct CE_pipe_config **target_ce_config_ret,
		uint32_t *target_ce_config_sz_ret,
		struct service_to_pipe **target_service_to_ce_map_ret,
		uint32_t *target_service_to_ce_map_sz_ret,
		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
		uint32_t *shadow_cfg_sz_ret)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	*target_ce_config_ret = hif_state->target_ce_config;
	*target_ce_config_sz_ret = hif_state->target_ce_config_sz;

	hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
				       target_service_to_ce_map_sz_ret);

	if (target_shadow_reg_cfg_ret)
		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;

	if (shadow_cfg_sz_ret)
		*shadow_cfg_sz_ret = shadow_cfg_sz;
}
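/*
 * Usage sketch (illustrative): hif_wlan_enable() below is the canonical
 * caller; the returned sizes are in bytes and still need the
 * size-to-count translation shown there:
 *
 *	struct CE_pipe_config *ce_cfg;
 *	struct service_to_pipe *svc_map;
 *	struct shadow_reg_cfg *shadow;
 *	uint32_t ce_cfg_sz, svc_map_sz, shadow_sz;
 *
 *	hif_get_target_ce_config(scn, &ce_cfg, &ce_cfg_sz,
 *				 &svc_map, &svc_map_sz,
 *				 &shadow, &shadow_sz);
 */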

#ifdef CONFIG_SHADOW_V2
static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
{
	int i;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "%s: num_config %d\n", __func__, cfg->num_shadow_reg_v2_cfg);

	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  "%s: i %d, val %x\n", __func__, i,
			  cfg->shadow_reg_v2_cfg[i].addr);
	}
}

#else
static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
{
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "%s: CONFIG_SHADOW_V2 not defined\n", __func__);
}
#endif

/**
 * hif_wlan_enable(): call the platform driver to enable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode and CE configuration to
 * platform driver to enable wlan.
 *
 * Return: linux error code
 */
int hif_wlan_enable(struct hif_softc *scn)
{
	struct pld_wlan_enable_cfg cfg;
	enum pld_driver_mode mode;
	uint32_t con_mode = hif_get_conparam(scn);

	hif_get_target_ce_config(scn,
			(struct CE_pipe_config **)&cfg.ce_tgt_cfg,
			&cfg.num_ce_tgt_cfg,
			(struct service_to_pipe **)&cfg.ce_svc_cfg,
			&cfg.num_ce_svc_pipe_cfg,
			(struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
			&cfg.num_shadow_reg_cfg);

	/* translate from structure size to array size */
	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);

	hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
					    &cfg.num_shadow_reg_v2_cfg);

	hif_print_hal_shadow_register_cfg(&cfg);

	if (QDF_GLOBAL_FTM_MODE == con_mode)
		mode = PLD_FTM;
	else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
		mode = PLD_COLDBOOT_CALIBRATION;
	else if (QDF_IS_EPPING_ENABLED(con_mode))
		mode = PLD_EPPING;
	else
		mode = PLD_MISSION;

	if (BYPASS_QMI)
		return 0;
	else
		return pld_wlan_enable(scn->qdf_dev->dev, &cfg,
				       mode, QWLAN_VERSIONSTR);
}
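/*
 * The "translate from structure size to array size" step above converts
 * the byte sizes reported by hif_get_target_ce_config() into element
 * counts, since pld expects counts, e.g. (illustrative):
 *
 *	// num_ce_tgt_cfg arrives as sizeof(target_ce_config_wlan)
 *	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
 *	// now num_ce_tgt_cfg == number of CE_pipe_config entries
 */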

#define CE_EPPING_USES_IRQ true

/**
 * hif_ce_prepare_config() - load the correct static tables.
 * @scn: hif context
 *
 * Epping uses different static attribute tables than mission mode.
 */
void hif_ce_prepare_config(struct hif_softc *scn)
{
	uint32_t mode = hif_get_conparam(scn);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_state->ce_services = ce_services_attach(scn);

	scn->ce_count = HOST_CE_COUNT;
	/* if epping is enabled we need to use the epping configuration. */
	if (QDF_IS_EPPING_ENABLED(mode)) {
		if (CE_EPPING_USES_IRQ)
			hif_state->host_ce_config =
				host_ce_config_wlan_epping_irq;
		else
			hif_state->host_ce_config =
				host_ce_config_wlan_epping_poll;
		hif_state->target_ce_config = target_ce_config_wlan_epping;
		hif_state->target_ce_config_sz =
			sizeof(target_ce_config_wlan_epping);
		target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
		shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
	}

	switch (tgt_info->target_type) {
	default:
		hif_state->host_ce_config = host_ce_config_wlan;
		hif_state->target_ce_config = target_ce_config_wlan;
		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
		break;
	case TARGET_TYPE_AR900B:
	case TARGET_TYPE_QCA9984:
	case TARGET_TYPE_IPQ4019:
	case TARGET_TYPE_QCA9888:
		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
			hif_state->host_ce_config =
				host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
		} else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
			hif_state->host_ce_config =
				host_lowdesc_ce_cfg_wlan_ar900b;
		} else {
			hif_state->host_ce_config = host_ce_config_wlan_ar900b;
		}

		hif_state->target_ce_config = target_ce_config_wlan_ar900b;
		hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_ar900b);

		break;

	case TARGET_TYPE_AR9888:
	case TARGET_TYPE_AR9888V2:
		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG))
			hif_state->host_ce_config =
				host_lowdesc_ce_cfg_wlan_ar9888;
		else
			hif_state->host_ce_config = host_ce_config_wlan_ar9888;

		hif_state->target_ce_config = target_ce_config_wlan_ar9888;
		hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_ar9888);

		break;

	case TARGET_TYPE_QCA8074:
		if (scn->bus_type == QDF_BUS_TYPE_PCI) {
			hif_state->host_ce_config =
				host_ce_config_wlan_qca8074_pci;
			hif_state->target_ce_config =
				target_ce_config_wlan_qca8074_pci;
			hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_qca8074_pci);
		} else {
			hif_state->host_ce_config = host_ce_config_wlan_qca8074;
			hif_state->target_ce_config =
				target_ce_config_wlan_qca8074;
			hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_qca8074);
		}
		break;
	case TARGET_TYPE_QCA6290:
		hif_state->host_ce_config = host_ce_config_wlan_qca6290;
		hif_state->target_ce_config = target_ce_config_wlan_qca6290;
		hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_qca6290);

		scn->ce_count = QCA_6290_CE_COUNT;
		break;
	}
}

/**
 * hif_ce_open() - do ce specific allocations
 * @hif_sc: pointer to hif context
 *
 * Return: 0 for success or QDF_STATUS_E_NOMEM
 */
QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	qdf_spinlock_create(&hif_state->irq_reg_lock);
	qdf_spinlock_create(&hif_state->keep_awake_lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * hif_ce_close() - do ce specific free
 * @hif_sc: pointer to hif context
 */
void hif_ce_close(struct hif_softc *hif_sc)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
	qdf_spinlock_destroy(&hif_state->keep_awake_lock);
}

/**
 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
 * @hif_sc: hif context
 *
 * Uses state variables to support cleaning up when hif_config_ce fails.
 */
void hif_unconfig_ce(struct hif_softc *hif_sc)
{
	int pipe_num;
	struct HIF_CE_pipe_info *pipe_info;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ce_unregister_irq(hif_state, (1 << pipe_num));
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
		}
	}
	if (hif_sc->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		hif_sc->athdiag_procfs_inited = false;
	}
}

#ifdef CONFIG_BYPASS_QMI
#define FW_SHARED_MEM (2 * 1024 * 1024)

/**
 * hif_post_static_buf_to_target() - post static buffer to WLAN FW
 * @scn: pointer to HIF structure
 *
 * WLAN FW needs 2MB memory from DDR when QMI is disabled.
 *
 * Return: void
 */
static void hif_post_static_buf_to_target(struct hif_softc *scn)
{
	void *target_va;
	phys_addr_t target_pa;

	target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					     FW_SHARED_MEM, &target_pa);
	if (NULL == target_va) {
		HIF_TRACE("Memory allocation failed, could not post target buffer");
		return;
	}
	hif_write32_mb(scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
	HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
}
#else
static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
{
}
#endif

static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
				       bool wait_for_it)
{
	/* todo */
	return 0;
}

/**
 * hif_config_ce() - configure copy engines
 * @scn: hif context
 *
 * Prepares fw, copy engine hardware and host sw according
 * to the attributes selected by hif_ce_prepare_config.
 *
 * also calls athdiag_procfs_init
 *
 * return: 0 for success nonzero for failure.
 */
int hif_config_ce(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct HIF_CE_pipe_info *pipe_info;
	int pipe_num;
	struct CE_state *ce_state;
#ifdef ADRASTEA_SHADOW_REGISTERS
	int i;
#endif
	QDF_STATUS rv = QDF_STATUS_SUCCESS;

	scn->notice_send = true;

	hif_post_static_buf_to_target(scn);

	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;

	hif_config_rri_on_ddr(scn);

	if (ce_srng_based(scn))
		scn->bus_ops.hif_target_sleep_state_adjust =
			&hif_srng_sleep_state_adjust;

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr *attr;

		pipe_info = &hif_state->pipe_info[pipe_num];
		pipe_info->pipe_num = pipe_num;
		pipe_info->HIF_CE_state = hif_state;
		attr = &hif_state->host_ce_config[pipe_num];

		pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
		ce_state = scn->ce_id_to_state[pipe_num];
		qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
		QDF_ASSERT(pipe_info->ce_hdl != NULL);
		if (pipe_info->ce_hdl == NULL) {
			rv = QDF_STATUS_E_FAILURE;
			A_TARGET_ACCESS_UNLIKELY(scn);
			goto err;
		}

		ce_state->lro_data = qdf_lro_init();

		if (attr->flags & CE_ATTR_DIAG) {
			/* Reserve the ultimate CE for
			 * Diagnostic Window support
			 */
			hif_state->ce_diag = pipe_info->ce_hdl;
			continue;
		}

		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
		    (ce_state->htt_rx_data))
			continue;

		pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
		if (attr->dest_nentries > 0) {
			atomic_set(&pipe_info->recv_bufs_needed,
				   init_buffer_count(attr->dest_nentries - 1));
			/* SRNG based CE has one entry less */
			if (ce_srng_based(scn))
				atomic_dec(&pipe_info->recv_bufs_needed);
		} else {
			atomic_set(&pipe_info->recv_bufs_needed, 0);
		}
		ce_tasklet_init(hif_state, (1 << pipe_num));
		ce_register_irq(hif_state, (1 << pipe_num));
	}

	if (athdiag_procfs_init(scn) != 0) {
		A_TARGET_ACCESS_UNLIKELY(scn);
		goto err;
	}
	scn->athdiag_procfs_inited = true;

	HIF_DBG("%s: ce_init done", __func__);

	init_tasklet_workers(hif_hdl);

	HIF_DBG("%s: X, ret = %d", __func__, rv);

#ifdef ADRASTEA_SHADOW_REGISTERS
	HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__);
	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
		HIF_DBG("%s Shadow Register%d is mapped to address %x",
			__func__, i,
			(A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
	}
#endif

	return rv != QDF_STATUS_SUCCESS;

err:
	/* Failure, so clean up */
	hif_unconfig_ce(scn);
	HIF_TRACE("%s: X, ret = %d", __func__, rv);
	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
}
2557
#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
 * @hif_ctx: HIF opaque context
 * @handler: Callback function
 * @context: handle passed back to the callback function
 *
 * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
 */
int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
				fastpath_msg_handler handler,
				void *context)
{
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	int i;

	if (!scn) {
		HIF_ERROR("%s: scn is NULL", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	if (!scn->fastpath_mode_on) {
		HIF_WARN("%s: Fastpath mode disabled", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	/* install the handler on every CE that carries HTT rx data */
	for (i = 0; i < scn->ce_count; i++) {
		ce_state = scn->ce_id_to_state[i];
		if (ce_state->htt_rx_data) {
			ce_state->fastpath_handler = handler;
			ce_state->context = context;
		}
	}

	return QDF_STATUS_SUCCESS;
}
#endif
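
/*
 * Usage sketch (illustrative only, not part of this file): a caller such
 * as the HTT layer might register its fast-receive handler as below. The
 * handler name and its (context, msdu list, count) signature are
 * assumptions based on the fastpath_msg_handler typedef in hif.h.
 */
#if 0
static void example_htt_fastpath_cb(void *context, qdf_nbuf_t *cmpl_msdus,
				    uint32_t num_cmpls)
{
	/* consume the completed MSDUs without the regular HTC path */
}

static void example_register_fastpath(struct hif_opaque_softc *hif_ctx,
				      void *htt_pdev)
{
	if (hif_ce_fastpath_cb_register(hif_ctx, example_htt_fastpath_cb,
					htt_pdev) != QDF_STATUS_SUCCESS)
		HIF_ERROR("fastpath callback registration failed");
}
#endif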

#ifdef IPA_OFFLOAD
/**
 * hif_ce_ipa_get_ce_resource() - get uc resource on hif
 * @scn: bus context
 * @ce_sr_base_paddr: copyengine source ring base physical address
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * When the IPA micro controller data path offload feature is enabled,
 * HIF releases the copy engine resource information to the IPA micro
 * controller (IPA UC). The IPA UC then accesses the hardware resources
 * directly using this information.
 *
 * Return: None
 */
void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
				qdf_dma_addr_t *ce_sr_base_paddr,
				uint32_t *ce_sr_ring_size,
				qdf_dma_addr_t *ce_reg_paddr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;

	ce_ipa_get_resource(ce_hdl, ce_sr_base_paddr, ce_sr_ring_size,
			    ce_reg_paddr);
}
#endif /* IPA_OFFLOAD */
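
/*
 * Caller's view (hypothetical sketch): the IPA glue code would fetch the
 * CE resources roughly like this before handing them to the IPA micro
 * controller.
 */
#if 0
	qdf_dma_addr_t ce_sr_base_paddr, ce_reg_paddr;
	uint32_t ce_sr_ring_size;

	hif_ce_ipa_get_ce_resource(scn, &ce_sr_base_paddr,
				   &ce_sr_ring_size, &ce_reg_paddr);
	/* pass ce_sr_base_paddr/ce_sr_ring_size/ce_reg_paddr to the IPA UC */
#endif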

#ifdef ADRASTEA_SHADOW_REGISTERS

/*
 * Current shadow register config
 *
 * -----------------------------------------------------------
 * Shadow Register      |     CE   |    src/dst write index
 * -----------------------------------------------------------
 *         0            |     0    |           src
 *         1     No Config - Doesn't point to anything
 *         2     No Config - Doesn't point to anything
 *         3            |     3    |           src
 *         4            |     4    |           src
 *         5            |     5    |           src
 *         6     No Config - Doesn't point to anything
 *         7            |     7    |           src
 *         8     No Config - Doesn't point to anything
 *         9     No Config - Doesn't point to anything
 *         10    No Config - Doesn't point to anything
 *         11    No Config - Doesn't point to anything
 * -----------------------------------------------------------
 *         12    No Config - Doesn't point to anything
 *         13           |     1    |           dst
 *         14           |     2    |           dst
 *         15    No Config - Doesn't point to anything
 *         16    No Config - Doesn't point to anything
 *         17    No Config - Doesn't point to anything
 *         18    No Config - Doesn't point to anything
 *         19           |     7    |           dst
 *         20           |     8    |           dst
 *         21    No Config - Doesn't point to anything
 *         22    No Config - Doesn't point to anything
 *         23    No Config - Doesn't point to anything
 * -----------------------------------------------------------
 *
 *
 * ToDo - Move shadow register config to following in the future
 * This helps free up a block of shadow registers towards the end.
 * Can be used for other purposes
 *
 * -----------------------------------------------------------
 * Shadow Register      |     CE   |    src/dst write index
 * -----------------------------------------------------------
 *         0            |     0    |           src
 *         1            |     3    |           src
 *         2            |     4    |           src
 *         3            |     5    |           src
 *         4            |     7    |           src
 * -----------------------------------------------------------
 *         5            |     1    |           dst
 *         6            |     2    |           dst
 *         7            |     7    |           dst
 *         8            |     8    |           dst
 * -----------------------------------------------------------
 *         9     No Config - Doesn't point to anything
 *         12    No Config - Doesn't point to anything
 *         13    No Config - Doesn't point to anything
 *         14    No Config - Doesn't point to anything
 *         15    No Config - Doesn't point to anything
 *         16    No Config - Doesn't point to anything
 *         17    No Config - Doesn't point to anything
 *         18    No Config - Doesn't point to anything
 *         19    No Config - Doesn't point to anything
 *         20    No Config - Doesn't point to anything
 *         21    No Config - Doesn't point to anything
 *         22    No Config - Doesn't point to anything
 *         23    No Config - Doesn't point to anything
 * -----------------------------------------------------------
 */

u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 0:
		addr = SHADOW_VALUE0;
		break;
	case 3:
		addr = SHADOW_VALUE3;
		break;
	case 4:
		addr = SHADOW_VALUE4;
		break;
	case 5:
		addr = SHADOW_VALUE5;
		break;
	case 7:
		addr = SHADOW_VALUE7;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}
	return addr;
}

u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 1:
		addr = SHADOW_VALUE13;
		break;
	case 2:
		addr = SHADOW_VALUE14;
		break;
	case 5:
		addr = SHADOW_VALUE17;
		break;
	case 7:
		addr = SHADOW_VALUE19;
		break;
	case 8:
		addr = SHADOW_VALUE20;
		break;
	case 9:
		addr = SHADOW_VALUE21;
		break;
	case 10:
		addr = SHADOW_VALUE22;
		break;
	case 11:
		addr = SHADOW_VALUE23;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}

	return addr;
}
#endif
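
/*
 * Worked example of the mapping implemented above (hypothetical caller):
 * for CE 2's destination ring, COPY_ENGINE_ID() yields 2, so
 * shadow_dst_wr_ind_addr() returns SHADOW_VALUE14, i.e. shadow register 14
 * in the "Current shadow register config" table. Note the switch also
 * covers dst CEs 5, 9, 10 and 11, which that table does not list.
 */
#if 0
	u32 addr = shadow_dst_wr_ind_addr(scn, CE_BASE_ADDRESS(2));
	/* addr == SHADOW_VALUE14 */
#endif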

#if defined(FEATURE_LRO)
void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
{
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	ce_state = scn->ce_id_to_state[ctx_id];

	return ce_state->lro_data;
}
#endif

/**
 * hif_map_service_to_pipe() - returns the ce ids pertaining to
 * this service
 * @hif_hdl: hif_opaque_softc pointer.
 * @svc_id: Service ID for which the mapping is needed.
 * @ul_pipe: address of the container in which ul pipe is returned.
 * @dl_pipe: address of the container in which dl pipe is returned.
 * @ul_is_polled: address of the container in which a bool
 *			indicating if the UL CE for this service
 *			is polled is returned.
 * @dl_is_polled: address of the container in which a bool
 *			indicating if the DL CE for this service
 *			is polled is returned.
 *
 * Return: Indicates whether the service has been found in the table.
 *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
 *         There will be warning logs if either leg has not been updated
 *         because it missed the entry in the table (but this is not an
 *         error).
 */
int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
			    uint8_t *ul_pipe, uint8_t *dl_pipe,
			    int *ul_is_polled, int *dl_is_polled)
{
	int status = QDF_STATUS_E_INVAL;
	unsigned int i;
	struct service_to_pipe element;
	struct service_to_pipe *tgt_svc_map_to_use;
	uint32_t sz_tgt_svc_map_to_use;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	bool dl_updated = false;
	bool ul_updated = false;

	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
				       &sz_tgt_svc_map_to_use);

	*dl_is_polled = 0;  /* polling for received messages not supported */

	for (i = 0; i < (sz_tgt_svc_map_to_use / sizeof(element)); i++) {

		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
		if (element.service_id == svc_id) {
			if (element.pipedir == PIPEDIR_OUT) {
				*ul_pipe = element.pipenum;
				*ul_is_polled =
					(hif_state->host_ce_config[*ul_pipe].flags &
					 CE_ATTR_DISABLE_INTR) != 0;
				ul_updated = true;
			} else if (element.pipedir == PIPEDIR_IN) {
				*dl_pipe = element.pipenum;
				dl_updated = true;
			}
			status = QDF_STATUS_SUCCESS;
		}
	}
	if (!ul_updated)
		HIF_INFO("%s: ul pipe is NOT updated for service %d",
			 __func__, svc_id);
	if (!dl_updated)
		HIF_INFO("%s: dl pipe is NOT updated for service %d",
			 __func__, svc_id);

	return status;
}
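
/*
 * Usage sketch (illustrative): an HTC-layer caller resolving the CE pipe
 * pair for the WMI control service might look like this, assuming the
 * WMI_CONTROL_SVC service id from htc_services.h; see hif_get_wake_ce_id()
 * below for an in-tree example using HTC_CTRL_RSVD_SVC.
 */
#if 0
	uint8_t ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;

	if (hif_map_service_to_pipe(hif_hdl, WMI_CONTROL_SVC,
				    &ul_pipe, &dl_pipe,
				    &ul_is_polled, &dl_is_polled) ==
	    QDF_STATUS_SUCCESS)
		HIF_DBG("WMI maps to ul pipe %u, dl pipe %u",
			ul_pipe, dl_pipe);
#endif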

#ifdef SHADOW_REG_DEBUG
inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, srri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);

	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != srri_from_ddr) {
		HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
			  __func__, srri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return srri_from_ddr;
}

inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, drri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);

	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != drri_from_ddr) {
		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
			  drri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return drri_from_ddr;
}
#endif

#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_get_src_ring_read_index(): Called to get the SRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the SRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR-based SRRI.
 *
 * Return: SRRI
 */
inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
	if (attr.flags & CE_ATTR_DISABLE_INTR) {
		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	} else {
		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
			return A_TARGET_READ(scn,
					(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
		else
			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
					CE_ctrl_addr);
	}
}

/**
 * hif_get_dst_ring_read_index(): Called to get the DRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the DRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR-based DRRI.
 *
 * Return: DRRI
 */
inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];

	if (attr.flags & CE_ATTR_DISABLE_INTR) {
		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	} else {
		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
			return A_TARGET_READ(scn,
					(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
		else
			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
					CE_ctrl_addr);
	}
}

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This function allocates non-cached memory on DDR and sends
 * the physical address of this memory to the CE hardware. The
 * hardware updates the RRI at this particular location.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
	unsigned int i;
	qdf_dma_addr_t paddr_rri_on_ddr;
	uint32_t high_paddr, low_paddr;

	scn->vaddr_rri_on_ddr =
		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
		scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
		&paddr_rri_on_ddr);

	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
	low_paddr  = BITS0_TO_31(paddr_rri_on_ddr);
	high_paddr = BITS32_TO_35(paddr_rri_on_ddr);

	HIF_DBG("%s using srri and drri from DDR", __func__);

	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);

	for (i = 0; i < CE_COUNT; i++)
		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));

	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));
}
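
/*
 * Layout note (assumption, inferred from the SRRI_FROM_DDR_ADDR /
 * DRRI_FROM_DDR_ADDR accessors used above): each CE appears to get one
 * uint32_t slot in vaddr_rri_on_ddr, with the hardware maintaining the
 * source ring read index in the low half-word and the destination ring
 * read index in the high half-word, roughly:
 *
 *	rri  = scn->vaddr_rri_on_ddr[COPY_ENGINE_ID(ctrl_addr)];
 *	srri = rri & 0xffff;
 *	drri = (rri >> 16) & 0xffff;
 */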
#else

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This is a dummy implementation for platforms that don't
 * support this functionality.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
}
#endif

/**
 * hif_dump_ce_registers() - dump ce registers
 * @scn: hif_softc pointer.
 *
 * Output the copy engine registers
 *
 * Return: 0 for success or error code
 */
int hif_dump_ce_registers(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
	uint16_t i;
	QDF_STATUS status;

	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
		if (scn->ce_id_to_state[i] == NULL) {
			HIF_DBG("CE%d not used.", i);
			continue;
		}

		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
					   (uint8_t *) &ce_reg_values[0],
					   ce_reg_word_size * sizeof(uint32_t));

		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("Dumping CE registers failed!");
			return -EACCES;
		}
		HIF_ERROR("CE%d=>\n", i);
		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
				   (uint8_t *) &ce_reg_values[0],
				   ce_reg_word_size * sizeof(uint32_t));
		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d\n", (ce_reg_address
			  + SR_WR_INDEX_ADDRESS),
			  ce_reg_values[SR_WR_INDEX_ADDRESS / 4]);
		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d\n", (ce_reg_address
			  + CURRENT_SRRI_ADDRESS),
			  ce_reg_values[CURRENT_SRRI_ADDRESS / 4]);
		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d\n", (ce_reg_address
			  + DST_WR_INDEX_ADDRESS),
			  ce_reg_values[DST_WR_INDEX_ADDRESS / 4]);
		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d\n", (ce_reg_address
			  + CURRENT_DRRI_ADDRESS),
			  ce_reg_values[CURRENT_DRRI_ADDRESS / 4]);
		qdf_print("---\n");
	}
	return 0;
}
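
/*
 * Usage sketch (illustrative): typically invoked from an error-recovery or
 * debug path once diagnostic access to the target is available, e.g.:
 *
 *	if (hif_dump_ce_registers(scn))
 *		HIF_ERROR("%s: CE register dump failed", __func__);
 */
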
#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct CE_ring_state *dest_ring = ce_state->dest_ring;

	if (src_ring) {
		hif_info->ul_pipe.nentries = src_ring->nentries;
		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
		hif_info->ul_pipe.sw_index = src_ring->sw_index;
		hif_info->ul_pipe.write_index = src_ring->write_index;
		hif_info->ul_pipe.hw_index = src_ring->hw_index;
		hif_info->ul_pipe.base_addr_CE_space =
			src_ring->base_addr_CE_space;
		hif_info->ul_pipe.base_addr_owner_space =
			src_ring->base_addr_owner_space;
	}

	if (dest_ring) {
		hif_info->dl_pipe.nentries = dest_ring->nentries;
		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
		hif_info->dl_pipe.write_index = dest_ring->write_index;
		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
		hif_info->dl_pipe.base_addr_CE_space =
			dest_ring->base_addr_CE_space;
		hif_info->dl_pipe.base_addr_owner_space =
			dest_ring->base_addr_owner_space;
	}

	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
	hif_info->ctrl_addr = ce_state->ctrl_addr;

	return hif_info;
}

uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->nss_wifi_ol_mode = mode;
	return 0;
}

#endif

void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->hif_attribute = hif_attrib;
}

void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
	uint32_t ctrl_addr = CE_state->ctrl_addr;

	Q_TARGET_ACCESS_BEGIN(scn);
	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
	Q_TARGET_ACCESS_END(scn);
}

/**
 * hif_fw_event_handler() - hif fw event handler
 * @hif_state: pointer to hif ce state structure
 *
 * Process fw events and raise HTC callback to process fw events.
 *
 * Return: none
 */
static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
{
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	if (!msg_callbacks->fwEventHandler)
		return;

	msg_callbacks->fwEventHandler(msg_callbacks->Context,
				      QDF_STATUS_E_FAILURE);
}

#ifndef QCA_WIFI_3_0
/**
 * hif_fw_interrupt_handler() - FW interrupt handler
 * @irq: irq number
 * @arg: the user pointer
 *
 * Called from the PCI interrupt handler when the firmware raises
 * an interrupt to the Host.
 *
 * Return: status of handled irq
 */
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	struct hif_softc *scn = arg;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	uint32_t fw_indicator_address, fw_indicator;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return ATH_ISR_NOSCHED;

	fw_indicator_address = hif_state->fw_indicator_address;
	/* For sudden unplug this will return ~0 */
	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);

	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
		/* ACK: clear Target-side pending event */
		A_TARGET_WRITE(scn, fw_indicator_address,
			       fw_indicator & ~FW_IND_EVENT_PENDING);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;

		if (hif_state->started) {
			hif_fw_event_handler(hif_state);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 */
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
					("%s: Early firmware event indicated\n",
					 __func__));
		}
	} else {
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;
	}

	return ATH_ISR_SCHED;
}
#else
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	return ATH_ISR_SCHED;
}
#endif /* #ifndef QCA_WIFI_3_0 */
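
/*
 * Dispatch sketch (illustrative): hif_fw_interrupt_handler() is not
 * registered with request_irq() by this layer; the bus-specific ISR is
 * assumed to forward firmware interrupts to it, roughly as below. The
 * fw_interrupt_pending flag is a placeholder for the bus's own cause check.
 */
#if 0
	/* inside the bus-level (e.g. PCI legacy) interrupt handler */
	if (fw_interrupt_pending)
		ret = hif_fw_interrupt_handler(irq, scn);
#endif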
3196
3197
3198/**
3199 * hif_wlan_disable(): call the platform driver to disable wlan
3200 * @scn: HIF Context
3201 *
3202 * This function passes the con_mode to platform driver to disable
3203 * wlan.
3204 *
3205 * Return: void
3206 */
3207void hif_wlan_disable(struct hif_softc *scn)
3208{
Yuanyuan Liufd594c22016-04-25 13:59:19 -07003209 enum pld_driver_mode mode;
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05303210 uint32_t con_mode = hif_get_conparam(scn);
3211
3212 if (QDF_GLOBAL_FTM_MODE == con_mode)
Yuanyuan Liufd594c22016-04-25 13:59:19 -07003213 mode = PLD_FTM;
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05303214 else if (QDF_IS_EPPING_ENABLED(con_mode))
Yuanyuan Liufd594c22016-04-25 13:59:19 -07003215 mode = PLD_EPPING;
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05303216 else
Yuanyuan Liufd594c22016-04-25 13:59:19 -07003217 mode = PLD_MISSION;
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05303218
Yuanyuan Liufd594c22016-04-25 13:59:19 -07003219 pld_wlan_disable(scn->qdf_dev->dev, mode);
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05303220}

int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
{
	QDF_STATUS status;
	uint8_t ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;

	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
					 HTC_CTRL_RSVD_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
		return qdf_status_to_os_return(status);
	}

	*ce_id = dl_pipe;

	return 0;
}
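
/*
 * Usage sketch (illustrative): a bus layer arming wake interrupts could
 * resolve the wake CE like this.
 */
#if 0
	uint8_t wake_ce_id;

	if (hif_get_wake_ce_id(scn, &wake_ce_id) == 0)
		HIF_DBG("%s: wake CE is %u", __func__, wake_ce_id);
#endif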