blob: dd3b38c0143de044ce29f51ebabfa9e88c87330c [file] [log] [blame]
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001/*
yeshwanth sriram guntuka78ee68f2016-10-25 11:57:58 +05302 * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080027#include "targcfg.h"
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +053028#include "qdf_lock.h"
29#include "qdf_status.h"
30#include "qdf_status.h"
31#include <qdf_atomic.h> /* qdf_atomic_read */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080032#include <targaddrs.h>
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080033#include "hif_io32.h"
34#include <hif.h>
35#include "regtable.h"
36#define ATH_MODULE_NAME hif
37#include <a_debug.h>
38#include "hif_main.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080039#include "ce_api.h"
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +053040#include "qdf_trace.h"
Yuanyuan Liufd594c22016-04-25 13:59:19 -070041#include "pld_common.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080042#include "hif_debug.h"
43#include "ce_internal.h"
44#include "ce_reg.h"
45#include "ce_assignment.h"
46#include "ce_tasklet.h"
Houston Hoffman56e0d702016-05-05 17:48:06 -070047#ifndef CONFIG_WIN
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080048#include "qwlan_version.h"
Houston Hoffman56e0d702016-05-05 17:48:06 -070049#endif
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080050
51#define CE_POLL_TIMEOUT 10 /* ms */
52
Poddar, Siddarthe41943f2016-04-27 15:33:48 +053053#define AGC_DUMP 1
54#define CHANINFO_DUMP 2
55#define BB_WATCHDOG_DUMP 3
56#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
57#define PCIE_ACCESS_DUMP 4
58#endif
59#include "mp_dev.h"
60
Houston Hoffman5141f9d2017-01-05 10:49:17 -080061#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290)) && \
62 !defined(QCA_WIFI_SUPPORT_SRNG)
63#define QCA_WIFI_SUPPORT_SRNG
64#endif
65
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080066/* Forward references */
67static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);
68
69/*
70 * Fix EV118783, poll to check whether a BMI response comes
71 * other than waiting for the interruption which may be lost.
72 */
73/* #define BMI_RSP_POLLING */
74#define BMI_RSP_TO_MILLISEC 1000
75
Yuanyuan Liua7a282f2016-04-15 12:55:04 -070076#ifdef CONFIG_BYPASS_QMI
77#define BYPASS_QMI 1
78#else
79#define BYPASS_QMI 0
80#endif
81
Houston Hoffmanabd00772016-05-06 17:02:48 -070082#ifdef CONFIG_WIN
Pratik Gandhi424c62e2016-08-23 19:47:09 +053083#if ENABLE_10_4_FW_HDR
Houston Hoffmanabd00772016-05-06 17:02:48 -070084#define WDI_IPA_SERVICE_GROUP 5
85#define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
86#define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
87#define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
Pratik Gandhi424c62e2016-08-23 19:47:09 +053088#endif /* ENABLE_10_4_FW_HDR */
Houston Hoffmanabd00772016-05-06 17:02:48 -070089#endif
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080090
Komal Seelam644263d2016-02-22 20:45:49 +053091static int hif_post_recv_buffers(struct hif_softc *scn);
92static void hif_config_rri_on_ddr(struct hif_softc *scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080093
Poddar, Siddarthe41943f2016-04-27 15:33:48 +053094/**
95 * hif_target_access_log_dump() - dump access log
96 *
97 * dump access log
98 *
99 * Return: n/a
100 */
101#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
102static void hif_target_access_log_dump(void)
103{
104 hif_target_dump_access_log();
105}
106#endif
107
108
109void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
110 uint8_t cmd_id, bool start)
111{
112 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
113
114 switch (cmd_id) {
115 case AGC_DUMP:
116 if (start)
117 priv_start_agc(scn);
118 else
119 priv_dump_agc(scn);
120 break;
121 case CHANINFO_DUMP:
122 if (start)
123 priv_start_cap_chaninfo(scn);
124 else
125 priv_dump_chaninfo(scn);
126 break;
127 case BB_WATCHDOG_DUMP:
128 priv_dump_bbwatchdog(scn);
129 break;
130#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
131 case PCIE_ACCESS_DUMP:
132 hif_target_access_log_dump();
133 break;
134#endif
135 default:
136 HIF_ERROR("%s: Invalid htc dump command", __func__);
137 break;
138 }
139}
140
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800141static void ce_poll_timeout(void *arg)
142{
143 struct CE_state *CE_state = (struct CE_state *)arg;
144 if (CE_state->timer_inited) {
145 ce_per_engine_service(CE_state->scn, CE_state->id);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530146 qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800147 }
148}
149
150static unsigned int roundup_pwr2(unsigned int n)
151{
152 int i;
153 unsigned int test_pwr2;
154
155 if (!(n & (n - 1)))
156 return n; /* already a power of 2 */
157
158 test_pwr2 = 4;
159 for (i = 0; i < 29; i++) {
160 if (test_pwr2 > n)
161 return test_pwr2;
162 test_pwr2 = test_pwr2 << 1;
163 }
164
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530165 QDF_ASSERT(0); /* n too large */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800166 return 0;
167}
168
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700169#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
170#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40
171
172static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
173 { 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
174 { 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
175 { 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
176 { 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
177 { 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
178 { 1, ADRASTEA_DST_WR_INDEX_OFFSET},
179 { 2, ADRASTEA_DST_WR_INDEX_OFFSET},
180 { 7, ADRASTEA_DST_WR_INDEX_OFFSET},
181 { 8, ADRASTEA_DST_WR_INDEX_OFFSET},
Houston Hoffmane6330442016-02-26 12:19:11 -0800182#ifdef QCA_WIFI_3_0_ADRASTEA
183 { 9, ADRASTEA_DST_WR_INDEX_OFFSET},
184 { 10, ADRASTEA_DST_WR_INDEX_OFFSET},
Nirav Shah75cc5c82016-05-25 10:52:38 +0530185 { 11, ADRASTEA_DST_WR_INDEX_OFFSET},
Houston Hoffmane6330442016-02-26 12:19:11 -0800186#endif
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700187};
188
Vishwajith Upendra70efc752016-04-18 11:23:49 -0700189static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
190 { 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
191 { 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
192 { 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
193 { 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
194 { 1, ADRASTEA_DST_WR_INDEX_OFFSET},
195 { 2, ADRASTEA_DST_WR_INDEX_OFFSET},
196 { 5, ADRASTEA_DST_WR_INDEX_OFFSET},
197 { 7, ADRASTEA_DST_WR_INDEX_OFFSET},
198 { 8, ADRASTEA_DST_WR_INDEX_OFFSET},
199};
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700200
201/* CE_PCI TABLE */
202/*
203 * NOTE: the table below is out of date, though still a useful reference.
204 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
205 * mapping of HTC services to HIF pipes.
206 */
207/*
208 * This authoritative table defines Copy Engine configuration and the mapping
209 * of services/endpoints to CEs. A subset of this information is passed to
210 * the Target during startup as a prerequisite to entering BMI phase.
211 * See:
212 * target_service_to_ce_map - Target-side mapping
213 * hif_map_service_to_pipe - Host-side mapping
214 * target_ce_config - Target-side configuration
215 * host_ce_config - Host-side configuration
216 ============================================================================
217 Purpose | Service / Endpoint | CE | Dire | Xfer | Xfer
218 | | | ctio | Size | Frequency
219 | | | n | |
220 ============================================================================
221 tx | HTT_DATA (downlink) | CE 0 | h->t | medium - | very frequent
222 descriptor | | | | O(100B) | and regular
223 download | | | | |
224 ----------------------------------------------------------------------------
225 rx | HTT_DATA (uplink) | CE 1 | t->h | small - | frequent and
226 indication | | | | O(10B) | regular
227 upload | | | | |
228 ----------------------------------------------------------------------------
229 MSDU | DATA_BK (uplink) | CE 2 | t->h | large - | rare
230 upload | | | | O(1000B) | (frequent
231 e.g. noise | | | | | during IP1.0
232 packets | | | | | testing)
233 ----------------------------------------------------------------------------
234 MSDU | DATA_BK (downlink) | CE 3 | h->t | large - | very rare
235 download | | | | O(1000B) | (frequent
236 e.g. | | | | | during IP1.0
237 misdirecte | | | | | testing)
238 d EAPOL | | | | |
239 packets | | | | |
240 ----------------------------------------------------------------------------
241 n/a | DATA_BE, DATA_VI | CE 2 | t->h | | never(?)
242 | DATA_VO (uplink) | | | |
243 ----------------------------------------------------------------------------
244 n/a | DATA_BE, DATA_VI | CE 3 | h->t | | never(?)
245 | DATA_VO (downlink) | | | |
246 ----------------------------------------------------------------------------
247 WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
248 | | | | O(100B) |
249 ----------------------------------------------------------------------------
250 WMI | WMI_CONTROL | CE 5 | h->t | medium - | infrequent
251 messages | (downlink) | | | O(100B) |
252 | | | | |
253 ----------------------------------------------------------------------------
254 n/a | HTC_CTRL_RSVD, | CE 1 | t->h | | never(?)
255 | HTC_RAW_STREAMS | | | |
256 | (uplink) | | | |
257 ----------------------------------------------------------------------------
258 n/a | HTC_CTRL_RSVD, | CE 0 | h->t | | never(?)
259 | HTC_RAW_STREAMS | | | |
260 | (downlink) | | | |
261 ----------------------------------------------------------------------------
262 diag | none (raw CE) | CE 7 | t<>h | 4 | Diag Window
263 | | | | | infrequent
264 ============================================================================
265 */
266
267/*
268 * Map from service/endpoint to Copy Engine.
269 * This table is derived from the CE_PCI TABLE, above.
270 * It is passed to the Target at startup for use by firmware.
271 */
272static struct service_to_pipe target_service_to_ce_map_wlan[] = {
273 {
274 WMI_DATA_VO_SVC,
275 PIPEDIR_OUT, /* out = UL = host -> target */
276 3,
277 },
278 {
279 WMI_DATA_VO_SVC,
280 PIPEDIR_IN, /* in = DL = target -> host */
281 2,
282 },
283 {
284 WMI_DATA_BK_SVC,
285 PIPEDIR_OUT, /* out = UL = host -> target */
286 3,
287 },
288 {
289 WMI_DATA_BK_SVC,
290 PIPEDIR_IN, /* in = DL = target -> host */
291 2,
292 },
293 {
294 WMI_DATA_BE_SVC,
295 PIPEDIR_OUT, /* out = UL = host -> target */
296 3,
297 },
298 {
299 WMI_DATA_BE_SVC,
300 PIPEDIR_IN, /* in = DL = target -> host */
301 2,
302 },
303 {
304 WMI_DATA_VI_SVC,
305 PIPEDIR_OUT, /* out = UL = host -> target */
306 3,
307 },
308 {
309 WMI_DATA_VI_SVC,
310 PIPEDIR_IN, /* in = DL = target -> host */
311 2,
312 },
313 {
314 WMI_CONTROL_SVC,
315 PIPEDIR_OUT, /* out = UL = host -> target */
316 3,
317 },
318 {
319 WMI_CONTROL_SVC,
320 PIPEDIR_IN, /* in = DL = target -> host */
321 2,
322 },
323 {
Kiran Venkatappae17e3b62017-02-10 16:31:49 +0530324 WMI_CONTROL_SVC_WMAC1,
325 PIPEDIR_OUT, /* out = UL = host -> target */
326 7,
327 },
328 {
329 WMI_CONTROL_SVC_WMAC1,
330 PIPEDIR_IN, /* in = DL = target -> host */
331 2,
332 },
333 {
334 WMI_CONTROL_SVC_WMAC2,
335 PIPEDIR_OUT, /* out = UL = host -> target */
336 9,
337 },
338 {
339 WMI_CONTROL_SVC_WMAC2,
340 PIPEDIR_IN, /* in = DL = target -> host */
341 2,
342 },
343 {
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700344 HTC_CTRL_RSVD_SVC,
345 PIPEDIR_OUT, /* out = UL = host -> target */
346 0, /* could be moved to 3 (share with WMI) */
347 },
348 {
349 HTC_CTRL_RSVD_SVC,
350 PIPEDIR_IN, /* in = DL = target -> host */
351 2,
352 },
353 {
354 HTC_RAW_STREAMS_SVC, /* not currently used */
355 PIPEDIR_OUT, /* out = UL = host -> target */
356 0,
357 },
358 {
359 HTC_RAW_STREAMS_SVC, /* not currently used */
360 PIPEDIR_IN, /* in = DL = target -> host */
361 2,
362 },
363 {
364 HTT_DATA_MSG_SVC,
365 PIPEDIR_OUT, /* out = UL = host -> target */
366 4,
367 },
368 {
369 HTT_DATA_MSG_SVC,
370 PIPEDIR_IN, /* in = DL = target -> host */
371 1,
372 },
373 {
374 WDI_IPA_TX_SVC,
375 PIPEDIR_OUT, /* in = DL = target -> host */
376 5,
377 },
Houston Hoffmane6330442016-02-26 12:19:11 -0800378#if defined(QCA_WIFI_3_0_ADRASTEA)
379 {
380 HTT_DATA2_MSG_SVC,
381 PIPEDIR_IN, /* in = DL = target -> host */
382 9,
383 },
384 {
385 HTT_DATA3_MSG_SVC,
386 PIPEDIR_IN, /* in = DL = target -> host */
387 10,
388 },
Nirav Shah75cc5c82016-05-25 10:52:38 +0530389 {
390 PACKET_LOG_SVC,
391 PIPEDIR_IN, /* in = DL = target -> host */
392 11,
393 },
Houston Hoffmane6330442016-02-26 12:19:11 -0800394#endif
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700395 /* (Additions here) */
396
397 { /* Must be last */
398 0,
399 0,
400 0,
401 },
402};
403
Houston Hoffman88c896f2016-12-14 09:56:35 -0800404/* PIPEDIR_OUT = HOST to Target */
405/* PIPEDIR_IN = TARGET to HOST */
406static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
407 { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
408 { WMI_DATA_VO_SVC, PIPEDIR_IN , 2, },
409 { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
410 { WMI_DATA_BK_SVC, PIPEDIR_IN , 2, },
411 { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
412 { WMI_DATA_BE_SVC, PIPEDIR_IN , 2, },
413 { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
414 { WMI_DATA_VI_SVC, PIPEDIR_IN , 2, },
415 { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
416 { WMI_CONTROL_SVC, PIPEDIR_IN , 2, },
417 { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
418 { HTC_CTRL_RSVD_SVC, PIPEDIR_IN , 2, },
419 { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
420 { HTT_DATA_MSG_SVC, PIPEDIR_IN , 1, },
421 { PACKET_LOG_SVC, PIPEDIR_IN , 5, },
422 /* (Additions here) */
423 { 0, 0, 0, },
424};
425
Houston Hoffmanfb698ef2016-05-05 19:50:44 -0700426static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
427 {
428 WMI_DATA_VO_SVC,
429 PIPEDIR_OUT, /* out = UL = host -> target */
430 3,
431 },
432 {
433 WMI_DATA_VO_SVC,
434 PIPEDIR_IN, /* in = DL = target -> host */
435 2,
436 },
437 {
438 WMI_DATA_BK_SVC,
439 PIPEDIR_OUT, /* out = UL = host -> target */
440 3,
441 },
442 {
443 WMI_DATA_BK_SVC,
444 PIPEDIR_IN, /* in = DL = target -> host */
445 2,
446 },
447 {
448 WMI_DATA_BE_SVC,
449 PIPEDIR_OUT, /* out = UL = host -> target */
450 3,
451 },
452 {
453 WMI_DATA_BE_SVC,
454 PIPEDIR_IN, /* in = DL = target -> host */
455 2,
456 },
457 {
458 WMI_DATA_VI_SVC,
459 PIPEDIR_OUT, /* out = UL = host -> target */
460 3,
461 },
462 {
463 WMI_DATA_VI_SVC,
464 PIPEDIR_IN, /* in = DL = target -> host */
465 2,
466 },
467 {
468 WMI_CONTROL_SVC,
469 PIPEDIR_OUT, /* out = UL = host -> target */
470 3,
471 },
472 {
473 WMI_CONTROL_SVC,
474 PIPEDIR_IN, /* in = DL = target -> host */
475 2,
476 },
477 {
478 HTC_CTRL_RSVD_SVC,
479 PIPEDIR_OUT, /* out = UL = host -> target */
480 0, /* could be moved to 3 (share with WMI) */
481 },
482 {
483 HTC_CTRL_RSVD_SVC,
484 PIPEDIR_IN, /* in = DL = target -> host */
485 1,
486 },
487 {
488 HTC_RAW_STREAMS_SVC, /* not currently used */
489 PIPEDIR_OUT, /* out = UL = host -> target */
490 0,
491 },
492 {
493 HTC_RAW_STREAMS_SVC, /* not currently used */
494 PIPEDIR_IN, /* in = DL = target -> host */
495 1,
496 },
497 {
498 HTT_DATA_MSG_SVC,
499 PIPEDIR_OUT, /* out = UL = host -> target */
500 4,
501 },
502#if WLAN_FEATURE_FASTPATH
503 {
504 HTT_DATA_MSG_SVC,
505 PIPEDIR_IN, /* in = DL = target -> host */
506 5,
507 },
508#else /* WLAN_FEATURE_FASTPATH */
509 {
510 HTT_DATA_MSG_SVC,
511 PIPEDIR_IN, /* in = DL = target -> host */
512 1,
513 },
514#endif /* WLAN_FEATURE_FASTPATH */
515
516 /* (Additions here) */
517
518 { /* Must be last */
519 0,
520 0,
521 0,
522 },
523};
524
525
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700526static struct service_to_pipe *target_service_to_ce_map =
527 target_service_to_ce_map_wlan;
528static int target_service_to_ce_map_sz = sizeof(target_service_to_ce_map_wlan);
529
530static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
531static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);
532
533static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
534 {WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */
535 {WMI_DATA_VO_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
536 {WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,}, /* out = UL = host -> target */
537 {WMI_DATA_BK_SVC, PIPEDIR_IN, 1,}, /* in = DL = target -> host */
538 {WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */
539 {WMI_DATA_BE_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
540 {WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */
541 {WMI_DATA_VI_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
542 {WMI_CONTROL_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */
543 {WMI_CONTROL_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
544 {HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
545 {HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
546 {HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
547 {HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
548 {HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,}, /* out = UL = host -> target */
549 {HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,}, /* in = DL = target -> host */
550 {0, 0, 0,}, /* Must be last */
551};
552
553/**
554 * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
555 * @ce_state : pointer to the state context of the CE
556 *
557 * Description:
558 * Sets htt_rx_data attribute of the state structure if the
559 * CE serves one of the HTT DATA services.
560 *
561 * Return:
562 * false (attribute set to false)
563 * true (attribute set to true);
564 */
Jeff Johnson6950fdb2016-10-07 13:00:59 -0700565static bool ce_mark_datapath(struct CE_state *ce_state)
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700566{
567 struct service_to_pipe *svc_map;
568 size_t map_sz;
569 int i;
570 bool rc = false;
Houston Hoffman55fcf5a2016-09-27 23:21:51 -0700571 struct hif_target_info *tgt_info;
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700572
573 if (ce_state != NULL) {
Houston Hoffman55fcf5a2016-09-27 23:21:51 -0700574 tgt_info = &ce_state->scn->target_info;
575
Houston Hoffman75ef5a52016-04-14 17:15:49 -0700576 if (QDF_IS_EPPING_ENABLED(hif_get_conparam(ce_state->scn))) {
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700577 svc_map = target_service_to_ce_map_wlan_epping;
578 map_sz = sizeof(target_service_to_ce_map_wlan_epping) /
579 sizeof(struct service_to_pipe);
580 } else {
Houston Hoffmanfb698ef2016-05-05 19:50:44 -0700581 switch (tgt_info->target_type) {
582 default:
583 svc_map = target_service_to_ce_map_wlan;
584 map_sz =
585 sizeof(target_service_to_ce_map_wlan) /
586 sizeof(struct service_to_pipe);
587 break;
588 case TARGET_TYPE_AR900B:
589 case TARGET_TYPE_QCA9984:
590 case TARGET_TYPE_IPQ4019:
591 case TARGET_TYPE_QCA9888:
592 case TARGET_TYPE_AR9888:
593 case TARGET_TYPE_AR9888V2:
594 svc_map = target_service_to_ce_map_ar900b;
595 map_sz =
596 sizeof(target_service_to_ce_map_ar900b)
597 / sizeof(struct service_to_pipe);
598 break;
599 }
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700600 }
601 for (i = 0; i < map_sz; i++) {
602 if ((svc_map[i].pipenum == ce_state->id) &&
603 ((svc_map[i].service_id == HTT_DATA_MSG_SVC) ||
604 (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
605 (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
606 /* HTT CEs are unidirectional */
607 if (svc_map[i].pipedir == PIPEDIR_IN)
608 ce_state->htt_rx_data = true;
609 else
610 ce_state->htt_tx_data = true;
611 rc = true;
612 }
613 }
614 }
615 return rc;
616}
617
Houston Hoffman47808172016-05-06 10:04:21 -0700618/**
619 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
620 * @ce_id: ce in question
621 * @ring: ring state being examined
622 * @type: "src_ring" or "dest_ring" string for identifying the ring
623 *
624 * Warns on non-zero index values.
625 * Causes a kernel panic if the ring is not empty durring initialization.
626 */
627static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
628 char *type)
629{
630 if (ring->write_index != 0 || ring->sw_index != 0)
631 HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
632 ce_id, type, ring->sw_index, ring->write_index);
633 if (ring->write_index != ring->sw_index)
634 QDF_BUG(0);
635}
636
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530637/**
638 * ce_srng_based() - Does this target use srng
639 * @ce_state : pointer to the state context of the CE
640 *
641 * Description:
642 * returns true if the target is SRNG based
643 *
644 * Return:
645 * false (attribute set to false)
646 * true (attribute set to true);
647 */
648bool ce_srng_based(struct hif_softc *scn)
649{
650 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
651 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
652
653 switch (tgt_info->target_type) {
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530654 case TARGET_TYPE_QCA8074:
Houston Hoffman31b25ec2016-09-19 13:12:30 -0700655 case TARGET_TYPE_QCA6290:
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530656 return true;
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530657 default:
658 return false;
659 }
660 return false;
661}
662
Houston Hoffman5141f9d2017-01-05 10:49:17 -0800663#ifdef QCA_WIFI_SUPPORT_SRNG
Jeff Johnson6950fdb2016-10-07 13:00:59 -0700664static struct ce_ops *ce_services_attach(struct hif_softc *scn)
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530665{
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530666 if (ce_srng_based(scn))
667 return ce_services_srng();
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530668
669 return ce_services_legacy();
670}
Houston Hoffman5141f9d2017-01-05 10:49:17 -0800671
Houston Hoffman5141f9d2017-01-05 10:49:17 -0800672
Venkata Sharath Chandra Manchala837d3232017-01-18 15:11:56 -0800673#else /* QCA_LITHIUM */
674static struct ce_ops *ce_services_attach(struct hif_softc *scn)
675{
676 return ce_services_legacy();
677}
678#endif /* QCA_LITHIUM */
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530679
Houston Hoffman403c2df2017-01-27 12:51:15 -0800680static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
Houston Hoffman10fedfc2017-01-23 15:23:09 -0800681 struct pld_shadow_reg_v2_cfg **shadow_config,
682 int *num_shadow_registers_configured) {
683 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
684
685 return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
686 scn, shadow_config, num_shadow_registers_configured);
687}
688
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530689static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
690 uint8_t ring_type)
691{
692 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
693
694 return hif_state->ce_services->ce_get_desc_size(ring_type);
695}
696
697
Jeff Johnson6950fdb2016-10-07 13:00:59 -0700698static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530699 uint8_t ring_type, uint32_t nentries)
700{
701 uint32_t ce_nbytes;
702 char *ptr;
703 qdf_dma_addr_t base_addr;
704 struct CE_ring_state *ce_ring;
705 uint32_t desc_size;
706 struct hif_softc *scn = CE_state->scn;
707
708 ce_nbytes = sizeof(struct CE_ring_state)
709 + (nentries * sizeof(void *));
710 ptr = qdf_mem_malloc(ce_nbytes);
711 if (!ptr)
712 return NULL;
713
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530714 ce_ring = (struct CE_ring_state *)ptr;
715 ptr += sizeof(struct CE_ring_state);
716 ce_ring->nentries = nentries;
717 ce_ring->nentries_mask = nentries - 1;
718
719 ce_ring->low_water_mark_nentries = 0;
720 ce_ring->high_water_mark_nentries = nentries;
721 ce_ring->per_transfer_context = (void **)ptr;
722
723 desc_size = ce_get_desc_size(scn, ring_type);
724
725 /* Legacy platforms that do not support cache
726 * coherent DMA are unsupported
727 */
728 ce_ring->base_addr_owner_space_unaligned =
729 qdf_mem_alloc_consistent(scn->qdf_dev,
730 scn->qdf_dev->dev,
731 (nentries *
732 desc_size +
733 CE_DESC_RING_ALIGN),
734 &base_addr);
735 if (ce_ring->base_addr_owner_space_unaligned
736 == NULL) {
737 HIF_ERROR("%s: ring has no DMA mem",
738 __func__);
739 qdf_mem_free(ptr);
740 return NULL;
741 }
742 ce_ring->base_addr_CE_space_unaligned = base_addr;
743
744 /* Correctly initialize memory to 0 to
745 * prevent garbage data crashing system
746 * when download firmware
747 */
748 qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
749 nentries * desc_size +
750 CE_DESC_RING_ALIGN);
751
752 if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {
753
754 ce_ring->base_addr_CE_space =
755 (ce_ring->base_addr_CE_space_unaligned +
756 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);
757
758 ce_ring->base_addr_owner_space = (void *)
759 (((size_t) ce_ring->base_addr_owner_space_unaligned +
760 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
761 } else {
762 ce_ring->base_addr_CE_space =
763 ce_ring->base_addr_CE_space_unaligned;
764 ce_ring->base_addr_owner_space =
765 ce_ring->base_addr_owner_space_unaligned;
766 }
767
768 return ce_ring;
769}
770
771static void ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
772 uint32_t ce_id, struct CE_ring_state *ring,
773 struct CE_attr *attr)
774{
775 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
776
777 hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id, ring, attr);
778}
779
Houston Hoffmancbcd8392017-02-08 17:43:13 -0800780int hif_ce_bus_early_suspend(struct hif_softc *scn)
781{
782 uint8_t ul_pipe, dl_pipe;
783 int ce_id, status, ul_is_polled, dl_is_polled;
784 struct CE_state *ce_state;
785 status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
786 &ul_pipe, &dl_pipe,
787 &ul_is_polled, &dl_is_polled);
788 if (status) {
789 HIF_ERROR("%s: pipe_mapping failure", __func__);
790 return status;
791 }
792
793 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
794 if (ce_id == ul_pipe)
795 continue;
796 if (ce_id == dl_pipe)
797 continue;
798
799 ce_state = scn->ce_id_to_state[ce_id];
800 qdf_spin_lock_bh(&ce_state->ce_index_lock);
801 if (ce_state->state == CE_RUNNING)
802 ce_state->state = CE_PAUSED;
803 qdf_spin_unlock_bh(&ce_state->ce_index_lock);
804 }
805
806 return status;
807}
808
809int hif_ce_bus_late_resume(struct hif_softc *scn)
810{
811 int ce_id;
812 struct CE_state *ce_state;
813 int write_index;
814 bool index_updated;
815
816 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
817 ce_state = scn->ce_id_to_state[ce_id];
818 qdf_spin_lock_bh(&ce_state->ce_index_lock);
819 if (ce_state->state == CE_PENDING) {
820 write_index = ce_state->src_ring->write_index;
821 CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
822 write_index);
823 ce_state->state = CE_RUNNING;
824 index_updated = true;
825 } else {
826 index_updated = false;
827 }
828
829 if (ce_state->state == CE_PAUSED)
830 ce_state->state = CE_RUNNING;
831 qdf_spin_unlock_bh(&ce_state->ce_index_lock);
832
833 if (index_updated)
834 hif_record_ce_desc_event(scn, ce_id,
835 RESUME_WRITE_INDEX_UPDATE,
836 NULL, NULL, write_index);
837 }
838
839 return 0;
840}
841
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800842/*
843 * Initialize a Copy Engine based on caller-supplied attributes.
844 * This may be called once to initialize both source and destination
845 * rings or it may be called twice for separate source and destination
846 * initialization. It may be that only one side or the other is
847 * initialized by software/firmware.
Houston Hoffman233e9092015-09-02 13:37:21 -0700848 *
849 * This should be called durring the initialization sequence before
850 * interupts are enabled, so we don't have to worry about thread safety.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800851 */
Komal Seelam644263d2016-02-22 20:45:49 +0530852struct CE_handle *ce_init(struct hif_softc *scn,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800853 unsigned int CE_id, struct CE_attr *attr)
854{
855 struct CE_state *CE_state;
856 uint32_t ctrl_addr;
857 unsigned int nentries;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800858 bool malloc_CE_state = false;
859 bool malloc_src_ring = false;
860
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530861 QDF_ASSERT(CE_id < scn->ce_count);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800862 ctrl_addr = CE_BASE_ADDRESS(CE_id);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800863 CE_state = scn->ce_id_to_state[CE_id];
864
865 if (!CE_state) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800866 CE_state =
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530867 (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800868 if (!CE_state) {
869 HIF_ERROR("%s: CE_state has no mem", __func__);
870 return NULL;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800871 }
Houston Hoffman233e9092015-09-02 13:37:21 -0700872 malloc_CE_state = true;
Houston Hoffman233e9092015-09-02 13:37:21 -0700873 scn->ce_id_to_state[CE_id] = CE_state;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530874 qdf_spinlock_create(&CE_state->ce_index_lock);
Houston Hoffman233e9092015-09-02 13:37:21 -0700875
876 CE_state->id = CE_id;
877 CE_state->ctrl_addr = ctrl_addr;
878 CE_state->state = CE_RUNNING;
879 CE_state->attr_flags = attr->flags;
Manjunathappa Prakash2146da32016-10-13 14:47:47 -0700880 qdf_spinlock_create(&CE_state->lro_unloading_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800881 }
882 CE_state->scn = scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800883
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530884 qdf_atomic_init(&CE_state->rx_pending);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800885 if (attr == NULL) {
886 /* Already initialized; caller wants the handle */
887 return (struct CE_handle *)CE_state;
888 }
889
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800890 if (CE_state->src_sz_max)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530891 QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800892 else
893 CE_state->src_sz_max = attr->src_sz_max;
894
Houston Hoffman68e837e2015-12-04 12:57:24 -0800895 ce_init_ce_desc_event_log(CE_id,
896 attr->src_nentries + attr->dest_nentries);
897
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800898 /* source ring setup */
899 nentries = attr->src_nentries;
900 if (nentries) {
901 struct CE_ring_state *src_ring;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800902 nentries = roundup_pwr2(nentries);
903 if (CE_state->src_ring) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530904 QDF_ASSERT(CE_state->src_ring->nentries == nentries);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800905 } else {
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530906 src_ring = CE_state->src_ring =
907 ce_alloc_ring_state(CE_state,
908 CE_RING_SRC,
909 nentries);
910 if (!src_ring) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800911 /* cannot allocate src ring. If the
912 * CE_state is allocated locally free
913 * CE_State and return error.
914 */
915 HIF_ERROR("%s: src ring has no mem", __func__);
916 if (malloc_CE_state) {
917 /* allocated CE_state locally */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800918 scn->ce_id_to_state[CE_id] = NULL;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530919 qdf_mem_free(CE_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800920 malloc_CE_state = false;
921 }
922 return NULL;
923 } else {
924 /* we can allocate src ring.
925 * Mark that the src ring is
926 * allocated locally
927 */
928 malloc_src_ring = true;
929 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800930 /*
931 * Also allocate a shadow src ring in
932 * regular mem to use for faster access.
933 */
934 src_ring->shadow_base_unaligned =
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530935 qdf_mem_malloc(nentries *
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800936 sizeof(struct CE_src_desc) +
937 CE_DESC_RING_ALIGN);
938 if (src_ring->shadow_base_unaligned == NULL) {
939 HIF_ERROR("%s: src ring no shadow_base mem",
940 __func__);
941 goto error_no_dma_mem;
942 }
943 src_ring->shadow_base = (struct CE_src_desc *)
944 (((size_t) src_ring->shadow_base_unaligned +
945 CE_DESC_RING_ALIGN - 1) &
946 ~(CE_DESC_RING_ALIGN - 1));
947
Houston Hoffman4411ad42016-03-14 21:12:04 -0700948 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
949 goto error_target_access;
Houston Hoffmanf789c662016-04-12 15:39:04 -0700950
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530951 ce_ring_setup(scn, CE_RING_SRC, CE_id, src_ring, attr);
952
Houston Hoffman4411ad42016-03-14 21:12:04 -0700953 if (Q_TARGET_ACCESS_END(scn) < 0)
954 goto error_target_access;
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530955 ce_ring_test_initial_indexes(CE_id, src_ring,
956 "src_ring");
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800957 }
958 }
959
960 /* destination ring setup */
961 nentries = attr->dest_nentries;
962 if (nentries) {
963 struct CE_ring_state *dest_ring;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800964
965 nentries = roundup_pwr2(nentries);
966 if (CE_state->dest_ring) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530967 QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800968 } else {
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530969 dest_ring = CE_state->dest_ring =
970 ce_alloc_ring_state(CE_state,
971 CE_RING_DEST,
972 nentries);
973 if (!dest_ring) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800974 /* cannot allocate dst ring. If the CE_state
975 * or src ring is allocated locally free
976 * CE_State and src ring and return error.
977 */
978 HIF_ERROR("%s: dest ring has no mem",
979 __func__);
980 if (malloc_src_ring) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530981 qdf_mem_free(CE_state->src_ring);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800982 CE_state->src_ring = NULL;
983 malloc_src_ring = false;
984 }
985 if (malloc_CE_state) {
986 /* allocated CE_state locally */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800987 scn->ce_id_to_state[CE_id] = NULL;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530988 qdf_mem_free(CE_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800989 malloc_CE_state = false;
990 }
991 return NULL;
992 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800993
Houston Hoffman4411ad42016-03-14 21:12:04 -0700994 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
995 goto error_target_access;
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530996
997 ce_ring_setup(scn, CE_RING_DEST, CE_id, dest_ring, attr);
998
999 if (Q_TARGET_ACCESS_END(scn) < 0)
1000 goto error_target_access;
Houston Hoffman47808172016-05-06 10:04:21 -07001001
1002 ce_ring_test_initial_indexes(CE_id, dest_ring,
1003 "dest_ring");
1004
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301005 /* For srng based target, init status ring here */
1006 if (ce_srng_based(CE_state->scn)) {
1007 CE_state->status_ring =
1008 ce_alloc_ring_state(CE_state,
1009 CE_RING_STATUS,
1010 nentries);
1011 if (CE_state->status_ring == NULL) {
1012 /*Allocation failed. Cleanup*/
1013 qdf_mem_free(CE_state->dest_ring);
1014 if (malloc_src_ring) {
1015 qdf_mem_free
1016 (CE_state->src_ring);
1017 CE_state->src_ring = NULL;
1018 malloc_src_ring = false;
1019 }
1020 if (malloc_CE_state) {
1021 /* allocated CE_state locally */
1022 scn->ce_id_to_state[CE_id] =
1023 NULL;
1024 qdf_mem_free(CE_state);
1025 malloc_CE_state = false;
1026 }
Houston Hoffman4411ad42016-03-14 21:12:04 -07001027
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301028 return NULL;
1029 }
1030 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1031 goto error_target_access;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001032
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301033 ce_ring_setup(scn, CE_RING_STATUS, CE_id,
1034 CE_state->status_ring, attr);
1035
1036 if (Q_TARGET_ACCESS_END(scn) < 0)
1037 goto error_target_access;
1038
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001039 }
Houston Hoffman31b25ec2016-09-19 13:12:30 -07001040
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001041 /* epping */
1042 /* poll timer */
1043 if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL)) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301044 qdf_timer_init(scn->qdf_dev,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001045 &CE_state->poll_timer,
1046 ce_poll_timeout,
1047 CE_state,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301048 QDF_TIMER_TYPE_SW);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001049 CE_state->timer_inited = true;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301050 qdf_timer_mod(&CE_state->poll_timer,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001051 CE_POLL_TIMEOUT);
1052 }
1053 }
1054 }
1055
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301056 if (!ce_srng_based(scn)) {
1057 /* Enable CE error interrupts */
1058 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1059 goto error_target_access;
1060 CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
1061 if (Q_TARGET_ACCESS_END(scn) < 0)
1062 goto error_target_access;
1063 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001064
Houston Hoffmanc7d54292016-04-13 18:55:37 -07001065 /* update the htt_data attribute */
1066 ce_mark_datapath(CE_state);
1067
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001068 return (struct CE_handle *)CE_state;
1069
Houston Hoffman4411ad42016-03-14 21:12:04 -07001070error_target_access:
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001071error_no_dma_mem:
1072 ce_fini((struct CE_handle *)CE_state);
1073 return NULL;
1074}
1075
1076#ifdef WLAN_FEATURE_FASTPATH
1077/**
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001078 * hif_enable_fastpath() Update that we have enabled fastpath mode
1079 * @hif_ctx: HIF context
1080 *
1081 * For use in data path
1082 *
1083 * Retrun: void
1084 */
1085void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
1086{
1087 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1088
Houston Hoffmand63cd742016-12-05 11:59:56 -08001089 if (ce_srng_based(scn)) {
1090 HIF_INFO("%s, srng rings do not support fastpath", __func__);
1091 return;
1092 }
Houston Hoffmanc50572b2016-06-08 19:49:46 -07001093 HIF_INFO("%s, Enabling fastpath mode", __func__);
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001094 scn->fastpath_mode_on = true;
1095}
1096
1097/**
1098 * hif_is_fastpath_mode_enabled - API to query if fasthpath mode is enabled
1099 * @hif_ctx: HIF Context
1100 *
1101 * For use in data path to skip HTC
1102 *
1103 * Return: bool
1104 */
1105bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
1106{
1107 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1108
1109 return scn->fastpath_mode_on;
1110}
1111
1112/**
1113 * hif_get_ce_handle - API to get CE handle for FastPath mode
1114 * @hif_ctx: HIF Context
1115 * @id: CopyEngine Id
1116 *
1117 * API to return CE handle for fastpath mode
1118 *
1119 * Return: void
1120 */
1121void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
1122{
1123 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1124
1125 return scn->ce_id_to_state[id];
1126}
1127
1128/**
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001129 * ce_h2t_tx_ce_cleanup() Place holder function for H2T CE cleanup.
1130 * No processing is required inside this function.
1131 * @ce_hdl: Cope engine handle
1132 * Using an assert, this function makes sure that,
1133 * the TX CE has been processed completely.
Houston Hoffman9a831ef2015-09-03 14:42:40 -07001134 *
1135 * This is called while dismantling CE structures. No other thread
1136 * should be using these structures while dismantling is occuring
1137 * therfore no locking is needed.
1138 *
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001139 * Return: none
1140 */
1141void
1142ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
1143{
1144 struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1145 struct CE_ring_state *src_ring = ce_state->src_ring;
Komal Seelam644263d2016-02-22 20:45:49 +05301146 struct hif_softc *sc = ce_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001147 uint32_t sw_index, write_index;
Houston Hoffman85925072016-05-06 17:02:18 -07001148 if (hif_is_nss_wifi_enabled(sc))
1149 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001150
Houston Hoffmanc7d54292016-04-13 18:55:37 -07001151 if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
Houston Hoffman85925072016-05-06 17:02:18 -07001152 HIF_INFO("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
1153 __func__, __LINE__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001154 sw_index = src_ring->sw_index;
1155 write_index = src_ring->sw_index;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001156
1157 /* At this point Tx CE should be clean */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301158 qdf_assert_always(sw_index == write_index);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001159 }
1160}
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001161
1162/**
1163 * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
1164 * @ce_hdl: Handle to CE
1165 *
1166 * These buffers are never allocated on the fly, but
1167 * are allocated only once during HIF start and freed
1168 * only once during HIF stop.
1169 * NOTE:
1170 * The assumption here is there is no in-flight DMA in progress
1171 * currently, so that buffers can be freed up safely.
1172 *
1173 * Return: NONE
1174 */
1175void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
1176{
1177 struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1178 struct CE_ring_state *dst_ring = ce_state->dest_ring;
1179 qdf_nbuf_t nbuf;
1180 int i;
1181
Houston Hoffman7fe51b12016-11-14 18:01:05 -08001182 if (ce_state->scn->fastpath_mode_on == false)
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001183 return;
Houston Hoffman7fe51b12016-11-14 18:01:05 -08001184
1185 if (!ce_state->htt_rx_data)
1186 return;
1187
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001188 /*
1189 * when fastpath_mode is on and for datapath CEs. Unlike other CE's,
1190 * this CE is completely full: does not leave one blank space, to
1191 * distinguish between empty queue & full queue. So free all the
1192 * entries.
1193 */
1194 for (i = 0; i < dst_ring->nentries; i++) {
1195 nbuf = dst_ring->per_transfer_context[i];
1196
1197 /*
1198 * The reasons for doing this check are:
1199 * 1) Protect against calling cleanup before allocating buffers
1200 * 2) In a corner case, FASTPATH_mode_on may be set, but we
1201 * could have a partially filled ring, because of a memory
1202 * allocation failure in the middle of allocating ring.
1203 * This check accounts for that case, checking
1204 * fastpath_mode_on flag or started flag would not have
1205 * covered that case. This is not in performance path,
1206 * so OK to do this.
1207 */
1208 if (nbuf)
1209 qdf_nbuf_free(nbuf);
1210 }
1211}
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001212
1213/**
1214 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
1215 * @scn: HIF handle
1216 *
1217 * Datapath Rx CEs are special case, where we reuse all the message buffers.
1218 * Hence we have to post all the entries in the pipe, even, in the beginning
1219 * unlike for other CE pipes where one less than dest_nentries are filled in
1220 * the beginning.
1221 *
1222 * Return: None
1223 */
1224static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1225{
1226 int pipe_num;
1227 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1228
1229 if (scn->fastpath_mode_on == false)
1230 return;
1231
1232 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1233 struct HIF_CE_pipe_info *pipe_info =
1234 &hif_state->pipe_info[pipe_num];
1235 struct CE_state *ce_state =
1236 scn->ce_id_to_state[pipe_info->pipe_num];
1237
1238 if (ce_state->htt_rx_data)
1239 atomic_inc(&pipe_info->recv_bufs_needed);
1240 }
1241}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001242#else
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001243static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001244{
1245}
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001246
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001247static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001248{
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001249 return false;
1250}
1251
1252static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
1253{
1254 return false;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001255}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001256#endif /* WLAN_FEATURE_FASTPATH */
1257
1258void ce_fini(struct CE_handle *copyeng)
1259{
1260 struct CE_state *CE_state = (struct CE_state *)copyeng;
1261 unsigned int CE_id = CE_state->id;
Komal Seelam644263d2016-02-22 20:45:49 +05301262 struct hif_softc *scn = CE_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001263
1264 CE_state->state = CE_UNUSED;
1265 scn->ce_id_to_state[CE_id] = NULL;
Houston Hoffman03f46572016-12-12 12:53:56 -08001266
1267 qdf_spinlock_destroy(&CE_state->lro_unloading_lock);
1268
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001269 if (CE_state->src_ring) {
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001270 /* Cleanup the datapath Tx ring */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001271 ce_h2t_tx_ce_cleanup(copyeng);
1272
1273 if (CE_state->src_ring->shadow_base_unaligned)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301274 qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001275 if (CE_state->src_ring->base_addr_owner_space_unaligned)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301276 qdf_mem_free_consistent(scn->qdf_dev,
1277 scn->qdf_dev->dev,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001278 (CE_state->src_ring->nentries *
1279 sizeof(struct CE_src_desc) +
1280 CE_DESC_RING_ALIGN),
1281 CE_state->src_ring->
1282 base_addr_owner_space_unaligned,
1283 CE_state->src_ring->
1284 base_addr_CE_space, 0);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301285 qdf_mem_free(CE_state->src_ring);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001286 }
1287 if (CE_state->dest_ring) {
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001288 /* Cleanup the datapath Rx ring */
1289 ce_t2h_msg_ce_cleanup(copyeng);
1290
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001291 if (CE_state->dest_ring->base_addr_owner_space_unaligned)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301292 qdf_mem_free_consistent(scn->qdf_dev,
1293 scn->qdf_dev->dev,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001294 (CE_state->dest_ring->nentries *
1295 sizeof(struct CE_dest_desc) +
1296 CE_DESC_RING_ALIGN),
1297 CE_state->dest_ring->
1298 base_addr_owner_space_unaligned,
1299 CE_state->dest_ring->
1300 base_addr_CE_space, 0);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301301 qdf_mem_free(CE_state->dest_ring);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001302
1303 /* epping */
1304 if (CE_state->timer_inited) {
1305 CE_state->timer_inited = false;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301306 qdf_timer_free(&CE_state->poll_timer);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001307 }
1308 }
Houston Hoffman31b25ec2016-09-19 13:12:30 -07001309 if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301310 /* Cleanup the datapath Tx ring */
1311 ce_h2t_tx_ce_cleanup(copyeng);
1312
1313 if (CE_state->status_ring->shadow_base_unaligned)
1314 qdf_mem_free(
1315 CE_state->status_ring->shadow_base_unaligned);
1316
1317 if (CE_state->status_ring->base_addr_owner_space_unaligned)
1318 qdf_mem_free_consistent(scn->qdf_dev,
1319 scn->qdf_dev->dev,
1320 (CE_state->status_ring->nentries *
1321 sizeof(struct CE_src_desc) +
1322 CE_DESC_RING_ALIGN),
1323 CE_state->status_ring->
1324 base_addr_owner_space_unaligned,
1325 CE_state->status_ring->
1326 base_addr_CE_space, 0);
1327 qdf_mem_free(CE_state->status_ring);
1328 }
Houston Hoffman03f46572016-12-12 12:53:56 -08001329
1330 qdf_spinlock_destroy(&CE_state->ce_index_lock);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301331 qdf_mem_free(CE_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001332}
1333
Komal Seelam5584a7c2016-02-24 19:22:48 +05301334void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001335{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301336 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001337
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301338 qdf_mem_zero(&hif_state->msg_callbacks_pending,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001339 sizeof(hif_state->msg_callbacks_pending));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301340 qdf_mem_zero(&hif_state->msg_callbacks_current,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001341 sizeof(hif_state->msg_callbacks_current));
1342}
1343
1344/* Send the first nbytes bytes of the buffer */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301345QDF_STATUS
Komal Seelam5584a7c2016-02-24 19:22:48 +05301346hif_send_head(struct hif_opaque_softc *hif_ctx,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001347 uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301348 qdf_nbuf_t nbuf, unsigned int data_attr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001349{
Komal Seelam644263d2016-02-22 20:45:49 +05301350 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05301351 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001352 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1353 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
1354 int bytes = nbytes, nfrags = 0;
1355 struct ce_sendlist sendlist;
1356 int status, i = 0;
1357 unsigned int mux_id = 0;
1358
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301359 QDF_ASSERT(nbytes <= qdf_nbuf_len(nbuf));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001360
1361 transfer_id =
1362 (mux_id & MUX_ID_MASK) |
1363 (transfer_id & TRANSACTION_ID_MASK);
1364 data_attr &= DESC_DATA_FLAG_MASK;
1365 /*
1366 * The common case involves sending multiple fragments within a
1367 * single download (the tx descriptor and the tx frame header).
1368 * So, optimize for the case of multiple fragments by not even
1369 * checking whether it's necessary to use a sendlist.
1370 * The overhead of using a sendlist for a single buffer download
1371 * is not a big deal, since it happens rarely (for WMI messages).
1372 */
1373 ce_sendlist_init(&sendlist);
1374 do {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301375 qdf_dma_addr_t frag_paddr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001376 int frag_bytes;
1377
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301378 frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
1379 frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001380 /*
1381 * Clear the packet offset for all but the first CE desc.
1382 */
1383 if (i++ > 0)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301384 data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001385
1386 status = ce_sendlist_buf_add(&sendlist, frag_paddr,
1387 frag_bytes >
1388 bytes ? bytes : frag_bytes,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301389 qdf_nbuf_get_frag_is_wordstream
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001390 (nbuf,
1391 nfrags) ? 0 :
1392 CE_SEND_FLAG_SWAP_DISABLE,
1393 data_attr);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301394 if (status != QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001395 HIF_ERROR("%s: error, frag_num %d larger than limit",
1396 __func__, nfrags);
1397 return status;
1398 }
1399 bytes -= frag_bytes;
1400 nfrags++;
1401 } while (bytes > 0);
1402
1403 /* Make sure we have resources to handle this request */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301404 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001405 if (pipe_info->num_sends_allowed < nfrags) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301406 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001407 ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301408 return QDF_STATUS_E_RESOURCES;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001409 }
1410 pipe_info->num_sends_allowed -= nfrags;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301411 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001412
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301413 if (qdf_unlikely(ce_hdl == NULL)) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001414 HIF_ERROR("%s: error CE handle is null", __func__);
1415 return A_ERROR;
1416 }
1417
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301418 QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301419 DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
Nirav Shaheaa20d82016-04-25 18:01:05 +05301420 qdf_nbuf_data_addr(nbuf),
Nirav Shah29beae02016-04-26 22:58:54 +05301421 sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001422 status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301423 QDF_ASSERT(status == QDF_STATUS_SUCCESS);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001424
1425 return status;
1426}
1427
Komal Seelam5584a7c2016-02-24 19:22:48 +05301428void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
1429 int force)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001430{
Komal Seelam644263d2016-02-22 20:45:49 +05301431 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05301432 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Komal Seelam644263d2016-02-22 20:45:49 +05301433
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001434 if (!force) {
1435 int resources;
1436 /*
1437 * Decide whether to actually poll for completions, or just
1438 * wait for a later chance. If there seem to be plenty of
1439 * resources left, then just wait, since checking involves
1440 * reading a CE register, which is a relatively expensive
1441 * operation.
1442 */
Komal Seelam644263d2016-02-22 20:45:49 +05301443 resources = hif_get_free_queue_number(hif_ctx, pipe);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001444 /*
1445 * If at least 50% of the total resources are still available,
1446 * don't bother checking again yet.
1447 */
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05301448 if (resources > (hif_state->host_ce_config[pipe].src_nentries >> 1)) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001449 return;
1450 }
1451 }
Houston Hoffman56e0d702016-05-05 17:48:06 -07001452#if ATH_11AC_TXCOMPACT
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001453 ce_per_engine_servicereap(scn, pipe);
1454#else
1455 ce_per_engine_service(scn, pipe);
1456#endif
1457}
1458
Komal Seelam5584a7c2016-02-24 19:22:48 +05301459uint16_t
1460hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001461{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301462 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001463 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1464 uint16_t rv;
1465
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301466 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001467 rv = pipe_info->num_sends_allowed;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301468 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001469 return rv;
1470}
1471
1472/* Called by lower (CE) layer when a send to Target completes. */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001473static void
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001474hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301475 void *transfer_context, qdf_dma_addr_t CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001476 unsigned int nbytes, unsigned int transfer_id,
1477 unsigned int sw_index, unsigned int hw_index,
1478 unsigned int toeplitz_hash_result)
1479{
1480 struct HIF_CE_pipe_info *pipe_info =
1481 (struct HIF_CE_pipe_info *)ce_context;
1482 struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
Komal Seelam644263d2016-02-22 20:45:49 +05301483 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001484 unsigned int sw_idx = sw_index, hw_idx = hw_index;
Houston Hoffman85118512015-09-28 14:17:11 -07001485 struct hif_msg_callbacks *msg_callbacks =
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05301486 &pipe_info->pipe_callbacks;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001487
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001488 do {
1489 /*
Houston Hoffman85118512015-09-28 14:17:11 -07001490 * The upper layer callback will be triggered
 1491		 * when the last fragment is completed.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001492 */
Houston Hoffman85118512015-09-28 14:17:11 -07001493 if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
Komal Seelam6ee55902016-04-11 17:11:07 +05301494 if (scn->target_status == TARGET_STATUS_RESET)
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301495 qdf_nbuf_free(transfer_context);
Houston Hoffman49794a32015-12-21 12:14:56 -08001496 else
1497 msg_callbacks->txCompletionHandler(
Houston Hoffman85118512015-09-28 14:17:11 -07001498 msg_callbacks->Context,
1499 transfer_context, transfer_id,
1500 toeplitz_hash_result);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001501 }
1502
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301503 qdf_spin_lock(&pipe_info->completion_freeq_lock);
Houston Hoffman85118512015-09-28 14:17:11 -07001504 pipe_info->num_sends_allowed++;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301505 qdf_spin_unlock(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001506 } while (ce_completed_send_next(copyeng,
1507 &ce_context, &transfer_context,
1508 &CE_data, &nbytes, &transfer_id,
1509 &sw_idx, &hw_idx,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301510 &toeplitz_hash_result) == QDF_STATUS_SUCCESS);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001511}
1512
Houston Hoffman910c6262015-09-28 12:56:25 -07001513/**
1514 * hif_ce_do_recv(): send message from copy engine to upper layers
1515 * @msg_callbacks: structure containing callback and callback context
 1516 * @netbuf: skb containing the message
 1517 * @nbytes: number of bytes in the message
 1518 * @pipe_info: used for the pipe_number info
 1519 *
 1520 * Checks the packet length, sets the length in the netbuf,
 1521 * and calls the upper layer callback.
 1522 *
 1523 * Return: None
1524 */
1525static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301526 qdf_nbuf_t netbuf, int nbytes,
Houston Hoffman910c6262015-09-28 12:56:25 -07001527 struct HIF_CE_pipe_info *pipe_info) {
1528 if (nbytes <= pipe_info->buf_sz) {
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301529 qdf_nbuf_set_pktlen(netbuf, nbytes);
Houston Hoffman910c6262015-09-28 12:56:25 -07001530 msg_callbacks->
1531 rxCompletionHandler(msg_callbacks->Context,
1532 netbuf, pipe_info->pipe_num);
1533 } else {
1534 HIF_ERROR("%s: Invalid Rx msg buf:%p nbytes:%d",
1535 __func__, netbuf, nbytes);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301536 qdf_nbuf_free(netbuf);
Houston Hoffman910c6262015-09-28 12:56:25 -07001537 }
1538}
1539
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001540/* Called by lower (CE) layer when data is received from the Target. */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001541static void
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001542hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301543 void *transfer_context, qdf_dma_addr_t CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001544 unsigned int nbytes, unsigned int transfer_id,
1545 unsigned int flags)
1546{
1547 struct HIF_CE_pipe_info *pipe_info =
1548 (struct HIF_CE_pipe_info *)ce_context;
1549 struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
Houston Hoffman18c7fc52015-09-02 11:44:42 -07001550 struct CE_state *ce_state = (struct CE_state *) copyeng;
Komal Seelam644263d2016-02-22 20:45:49 +05301551 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffmane02e12d2016-03-14 21:11:36 -07001552#ifdef HIF_PCI
1553 struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
1554#endif
Houston Hoffman910c6262015-09-28 12:56:25 -07001555 struct hif_msg_callbacks *msg_callbacks =
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05301556 &pipe_info->pipe_callbacks;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001557
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001558 do {
Houston Hoffmane02e12d2016-03-14 21:11:36 -07001559#ifdef HIF_PCI
1560 hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
1561#endif
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301562 qdf_nbuf_unmap_single(scn->qdf_dev,
1563 (qdf_nbuf_t) transfer_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301564 QDF_DMA_FROM_DEVICE);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001565
Houston Hoffman910c6262015-09-28 12:56:25 -07001566 atomic_inc(&pipe_info->recv_bufs_needed);
1567 hif_post_recv_buffers_for_pipe(pipe_info);
Komal Seelam6ee55902016-04-11 17:11:07 +05301568 if (scn->target_status == TARGET_STATUS_RESET)
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301569 qdf_nbuf_free(transfer_context);
Houston Hoffman49794a32015-12-21 12:14:56 -08001570 else
1571 hif_ce_do_recv(msg_callbacks, transfer_context,
Houston Hoffman9c0f80a2015-09-28 18:36:36 -07001572 nbytes, pipe_info);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001573
 1574		/* Set up force_break flag if num of receives reaches
 1575		 * MAX_NUM_OF_RECEIVES */
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001576 ce_state->receive_count++;
Houston Hoffman05652722016-04-29 16:58:59 -07001577 if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
Houston Hoffman18c7fc52015-09-02 11:44:42 -07001578 ce_state->force_break = 1;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001579 break;
1580 }
1581 } while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
1582 &CE_data, &nbytes, &transfer_id,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301583 &flags) == QDF_STATUS_SUCCESS);
Houston Hoffmanf4607852015-12-17 17:14:40 -08001584
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001585}
1586
1587/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
1588
1589void
Komal Seelam5584a7c2016-02-24 19:22:48 +05301590hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001591 struct hif_msg_callbacks *callbacks)
1592{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301593 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001594
1595#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
1596 spin_lock_init(&pcie_access_log_lock);
1597#endif
1598 /* Save callbacks for later installation */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301599 qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001600 sizeof(hif_state->msg_callbacks_pending));
1601
1602}
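/*
 * Illustrative usage (a sketch, not part of this driver): an upper layer such
 * as HTC fills a struct hif_msg_callbacks and hands it to hif_post_init()
 * before hif_start().  The handler names are hypothetical; the prototypes
 * follow the struct hif_msg_callbacks definition in hif.h.
 *
 *	static struct hif_msg_callbacks my_cbs = {
 *		.Context = &my_ctx,
 *		.txCompletionHandler = my_tx_done,
 *		.rxCompletionHandler = my_rx_done,
 *	};
 *
 *	hif_post_init(hif_ctx, NULL, &my_cbs);
 */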
1603
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001604static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001605{
1606 struct CE_handle *ce_diag = hif_state->ce_diag;
1607 int pipe_num;
Komal Seelam644263d2016-02-22 20:45:49 +05301608 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07001609 struct hif_msg_callbacks *hif_msg_callbacks =
1610 &hif_state->msg_callbacks_current;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001611
1612 /* daemonize("hif_compl_thread"); */
1613
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001614 if (scn->ce_count == 0) {
Houston Hoffmanc50572b2016-06-08 19:49:46 -07001615 HIF_ERROR("%s: Invalid ce_count", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001616 return -EINVAL;
1617 }
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07001618
1619 if (!hif_msg_callbacks ||
1620 !hif_msg_callbacks->rxCompletionHandler ||
1621 !hif_msg_callbacks->txCompletionHandler) {
1622 HIF_ERROR("%s: no completion handler registered", __func__);
1623 return -EFAULT;
1624 }
1625
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001626 A_TARGET_ACCESS_LIKELY(scn);
1627 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1628 struct CE_attr attr;
1629 struct HIF_CE_pipe_info *pipe_info;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001630
1631 pipe_info = &hif_state->pipe_info[pipe_num];
1632 if (pipe_info->ce_hdl == ce_diag) {
1633 continue; /* Handle Diagnostic CE specially */
1634 }
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05301635 attr = hif_state->host_ce_config[pipe_num];
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001636 if (attr.src_nentries) {
1637 /* pipe used to send to target */
1638 HIF_INFO_MED("%s: pipe_num:%d pipe_info:0x%p",
1639 __func__, pipe_num, pipe_info);
1640 ce_send_cb_register(pipe_info->ce_hdl,
1641 hif_pci_ce_send_done, pipe_info,
1642 attr.flags & CE_ATTR_DISABLE_INTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001643 pipe_info->num_sends_allowed = attr.src_nentries - 1;
1644 }
1645 if (attr.dest_nentries) {
1646 /* pipe used to receive from target */
1647 ce_recv_cb_register(pipe_info->ce_hdl,
1648 hif_pci_ce_recv_data, pipe_info,
1649 attr.flags & CE_ATTR_DISABLE_INTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001650 }
Houston Hoffman6666df72015-11-30 16:48:35 -08001651
1652 if (attr.src_nentries)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301653 qdf_spinlock_create(&pipe_info->completion_freeq_lock);
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05301654
1655 qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
1656 sizeof(pipe_info->pipe_callbacks));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001657 }
Houston Hoffman6666df72015-11-30 16:48:35 -08001658
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001659 A_TARGET_ACCESS_UNLIKELY(scn);
1660 return 0;
1661}
1662
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001663/*
1664 * Install pending msg callbacks.
1665 *
1666 * TBDXXX: This hack is needed because upper layers install msg callbacks
1667 * for use with HTC before BMI is done; yet this HIF implementation
1668 * needs to continue to use BMI msg callbacks. Really, upper layers
1669 * should not register HTC callbacks until AFTER BMI phase.
1670 */
Komal Seelam644263d2016-02-22 20:45:49 +05301671static void hif_msg_callbacks_install(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001672{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301673 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001674
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301675 qdf_mem_copy(&hif_state->msg_callbacks_current,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001676 &hif_state->msg_callbacks_pending,
1677 sizeof(hif_state->msg_callbacks_pending));
1678}
1679
Komal Seelam5584a7c2016-02-24 19:22:48 +05301680void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
1681 uint8_t *DLPipe)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001682{
1683 int ul_is_polled, dl_is_polled;
1684
Komal Seelam644263d2016-02-22 20:45:49 +05301685 (void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001686 ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
1687}
1688
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001689/**
1690 * hif_dump_pipe_debug_count() - Log error count
Komal Seelam644263d2016-02-22 20:45:49 +05301691 * @scn: hif_softc pointer.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001692 *
1693 * Output the pipe error counts of each pipe to log file
1694 *
1695 * Return: N/A
1696 */
Komal Seelam644263d2016-02-22 20:45:49 +05301697void hif_dump_pipe_debug_count(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001698{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301699 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001700 int pipe_num;
1701
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001702 if (hif_state == NULL) {
1703 HIF_ERROR("%s hif_state is NULL", __func__);
1704 return;
1705 }
1706 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1707 struct HIF_CE_pipe_info *pipe_info;
1708
1709 pipe_info = &hif_state->pipe_info[pipe_num];
1710
1711 if (pipe_info->nbuf_alloc_err_count > 0 ||
1712 pipe_info->nbuf_dma_err_count > 0 ||
1713 pipe_info->nbuf_ce_enqueue_err_count)
1714 HIF_ERROR(
1715 "%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
1716 __func__, pipe_info->pipe_num,
1717 atomic_read(&pipe_info->recv_bufs_needed),
1718 pipe_info->nbuf_alloc_err_count,
1719 pipe_info->nbuf_dma_err_count,
1720 pipe_info->nbuf_ce_enqueue_err_count);
1721 }
1722}
1723
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08001724static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
1725 void *nbuf, uint32_t *error_cnt,
1726 enum hif_ce_event_type failure_type,
1727 const char *failure_type_string)
1728{
1729 int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
1730 struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
1731 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
1732 int ce_id = CE_state->id;
1733 uint32_t error_cnt_tmp;
1734
1735 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
1736 error_cnt_tmp = ++(*error_cnt);
1737 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
1738 HIF_ERROR("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
1739 __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
1740 failure_type_string);
1741 hif_record_ce_desc_event(scn, ce_id, failure_type,
1742 NULL, nbuf, bufs_needed_tmp);
1743 /* if we fail to allocate the last buffer for an rx pipe,
1744 * there is no trigger to refill the ce and we will
1745 * eventually crash
1746 */
1747 if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
1748 QDF_ASSERT(0);
1749}
1750
1751
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001752static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
1753{
1754 struct CE_handle *ce_hdl;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301755 qdf_size_t buf_sz;
Komal Seelam644263d2016-02-22 20:45:49 +05301756 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301757 QDF_STATUS ret;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001758 uint32_t bufs_posted = 0;
1759
1760 buf_sz = pipe_info->buf_sz;
1761 if (buf_sz == 0) {
1762 /* Unused Copy Engine */
1763 return 0;
1764 }
1765
1766 ce_hdl = pipe_info->ce_hdl;
1767
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301768 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001769 while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301770 qdf_dma_addr_t CE_data; /* CE space buffer address */
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301771 qdf_nbuf_t nbuf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001772 int status;
1773
1774 atomic_dec(&pipe_info->recv_bufs_needed);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301775 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001776
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301777 nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001778 if (!nbuf) {
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08001779 hif_post_recv_buffers_failure(pipe_info, nbuf,
1780 &pipe_info->nbuf_alloc_err_count,
1781 HIF_RX_NBUF_ALLOC_FAILURE,
1782 "HIF_RX_NBUF_ALLOC_FAILURE");
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001783 return 1;
1784 }
1785
1786 /*
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301787 * qdf_nbuf_peek_header(nbuf, &data, &unused);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001788		 * CE_data = dma_map_single(dev, data, buf_sz,
 1789		 *			    DMA_FROM_DEVICE);
1790 */
1791 ret =
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301792 qdf_nbuf_map_single(scn->qdf_dev, nbuf,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301793 QDF_DMA_FROM_DEVICE);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001794
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301795 if (unlikely(ret != QDF_STATUS_SUCCESS)) {
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08001796 hif_post_recv_buffers_failure(pipe_info, nbuf,
1797 &pipe_info->nbuf_dma_err_count,
1798 HIF_RX_NBUF_MAP_FAILURE,
1799 "HIF_RX_NBUF_MAP_FAILURE");
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301800 qdf_nbuf_free(nbuf);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001801 return 1;
1802 }
1803
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301804 CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001805
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301806 qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001807 buf_sz, DMA_FROM_DEVICE);
1808 status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301809 QDF_ASSERT(status == QDF_STATUS_SUCCESS);
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08001810 if (unlikely(status != EOK)) {
1811 hif_post_recv_buffers_failure(pipe_info, nbuf,
1812 &pipe_info->nbuf_ce_enqueue_err_count,
1813 HIF_RX_NBUF_ENQUEUE_FAILURE,
1814 "HIF_RX_NBUF_ENQUEUE_FAILURE");
1815
Govind Singh4fcafd42016-08-08 12:37:31 +05301816 qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
1817 QDF_DMA_FROM_DEVICE);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301818 qdf_nbuf_free(nbuf);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001819 return 1;
1820 }
1821
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301822 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001823 bufs_posted++;
1824 }
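	/*
	 * Clamp the error counters instead of letting them underflow.
	 * Worked example (illustrative): with nbuf_alloc_err_count == 3
	 * and bufs_posted == 5, the counter resets to 0 rather than
	 * wrapping around.
	 */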
1825 pipe_info->nbuf_alloc_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07001826 (pipe_info->nbuf_alloc_err_count > bufs_posted) ?
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001827 pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
1828 pipe_info->nbuf_dma_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07001829 (pipe_info->nbuf_dma_err_count > bufs_posted) ?
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001830 pipe_info->nbuf_dma_err_count - bufs_posted : 0;
1831 pipe_info->nbuf_ce_enqueue_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07001832 (pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08001833 pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001834
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301835 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001836
1837 return 0;
1838}
1839
1840/*
1841 * Try to post all desired receive buffers for all pipes.
1842 * Returns 0 if all desired buffers are posted,
 1843 * non-zero if we were unable to completely
1844 * replenish receive buffers.
1845 */
Komal Seelam644263d2016-02-22 20:45:49 +05301846static int hif_post_recv_buffers(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001847{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301848 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001849 int pipe_num, rv = 0;
Houston Hoffman85925072016-05-06 17:02:18 -07001850 struct CE_state *ce_state;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001851
1852 A_TARGET_ACCESS_LIKELY(scn);
1853 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1854 struct HIF_CE_pipe_info *pipe_info;
Houston Hoffman85925072016-05-06 17:02:18 -07001855 ce_state = scn->ce_id_to_state[pipe_num];
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001856 pipe_info = &hif_state->pipe_info[pipe_num];
Houston Hoffman85925072016-05-06 17:02:18 -07001857
1858 if (hif_is_nss_wifi_enabled(scn) &&
1859 ce_state && (ce_state->htt_rx_data)) {
1860 continue;
1861 }
1862
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001863 if (hif_post_recv_buffers_for_pipe(pipe_info)) {
1864 rv = 1;
1865 goto done;
1866 }
1867 }
1868
1869done:
1870 A_TARGET_ACCESS_UNLIKELY(scn);
1871
1872 return rv;
1873}
1874
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301875QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001876{
Komal Seelam644263d2016-02-22 20:45:49 +05301877 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05301878 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001879
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001880 hif_update_fastpath_recv_bufs_cnt(scn);
1881
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07001882 hif_msg_callbacks_install(scn);
1883
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001884 if (hif_completion_thread_startup(hif_state))
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301885 return QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001886
Houston Hoffman271951f2016-11-12 15:24:27 -08001887 /* enable buffer cleanup */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001888 hif_state->started = true;
1889
Houston Hoffman271951f2016-11-12 15:24:27 -08001890 /* Post buffers once to start things off. */
1891 if (hif_post_recv_buffers(scn)) {
1892 /* cleanup is done in hif_ce_disable */
1893 HIF_ERROR("%s:failed to post buffers", __func__);
1894 return QDF_STATUS_E_FAILURE;
1895 }
1896
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301897 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001898}
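/*
 * Illustrative caller sequence (a sketch; the error handling shown is an
 * assumption, not lifted from a real bus layer):
 *
 *	status = hif_start(hif_ctx);
 *	if (status != QDF_STATUS_SUCCESS)
 *		return status;
 *
 * On the failure path, any rx buffers already posted are reclaimed on the
 * hif_ce_disable path, per the comment in hif_start() above.
 */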
1899
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001900static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001901{
Komal Seelam644263d2016-02-22 20:45:49 +05301902 struct hif_softc *scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001903 struct CE_handle *ce_hdl;
1904 uint32_t buf_sz;
1905 struct HIF_CE_state *hif_state;
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301906 qdf_nbuf_t netbuf;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301907 qdf_dma_addr_t CE_data;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001908 void *per_CE_context;
1909
1910 buf_sz = pipe_info->buf_sz;
1911 if (buf_sz == 0) {
1912 /* Unused Copy Engine */
1913 return;
1914 }
1915
1916 hif_state = pipe_info->HIF_CE_state;
1917 if (!hif_state->started) {
1918 return;
1919 }
1920
Komal Seelam02cf2f82016-02-22 20:44:25 +05301921 scn = HIF_GET_SOFTC(hif_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001922 ce_hdl = pipe_info->ce_hdl;
1923
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301924 if (scn->qdf_dev == NULL) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001925 return;
1926 }
1927 while (ce_revoke_recv_next
1928 (ce_hdl, &per_CE_context, (void **)&netbuf,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301929 &CE_data) == QDF_STATUS_SUCCESS) {
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301930 qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301931 QDF_DMA_FROM_DEVICE);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301932 qdf_nbuf_free(netbuf);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001933 }
1934}
1935
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001936static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001937{
1938 struct CE_handle *ce_hdl;
1939 struct HIF_CE_state *hif_state;
Komal Seelam644263d2016-02-22 20:45:49 +05301940 struct hif_softc *scn;
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301941 qdf_nbuf_t netbuf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001942 void *per_CE_context;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301943 qdf_dma_addr_t CE_data;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001944 unsigned int nbytes;
1945 unsigned int id;
1946 uint32_t buf_sz;
1947 uint32_t toeplitz_hash_result;
1948
1949 buf_sz = pipe_info->buf_sz;
1950 if (buf_sz == 0) {
1951 /* Unused Copy Engine */
1952 return;
1953 }
1954
1955 hif_state = pipe_info->HIF_CE_state;
1956 if (!hif_state->started) {
1957 return;
1958 }
1959
Komal Seelam02cf2f82016-02-22 20:44:25 +05301960 scn = HIF_GET_SOFTC(hif_state);
1961
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001962 ce_hdl = pipe_info->ce_hdl;
1963
1964 while (ce_cancel_send_next
1965 (ce_hdl, &per_CE_context,
1966 (void **)&netbuf, &CE_data, &nbytes,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301967 &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001968 if (netbuf != CE_SENDLIST_ITEM_CTXT) {
1969 /*
1970 * Packets enqueued by htt_h2t_ver_req_msg() and
1971 * htt_h2t_rx_ring_cfg_msg_ll() have already been
1972 * freed in htt_htc_misc_pkt_pool_free() in
 1973			 * wlantl_close(), so do not free them here
Houston Hoffman29573d92015-10-20 17:49:44 -07001974			 * again; the endpoint they were queued on
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001975			 * identifies them.
1976 */
Nirav Shahd7f91592016-04-21 14:18:43 +05301977 if (id == scn->htc_htt_tx_endpoint)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001978 return;
Nirav Shahd7f91592016-04-21 14:18:43 +05301979			/* Indicate the completion to the higher
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001980			 * layer so it can free the buffer */
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05301981 if (pipe_info->pipe_callbacks.
Himanshu Agarwal8d0cdea2016-09-02 21:05:01 +05301982 txCompletionHandler)
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05301983 pipe_info->pipe_callbacks.
1984 txCompletionHandler(pipe_info->
1985 pipe_callbacks.Context,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001986 netbuf, id, toeplitz_hash_result);
1987 }
1988 }
1989}
1990
1991/*
1992 * Cleanup residual buffers for device shutdown:
1993 * buffers that were enqueued for receive
1994 * buffers that were to be sent
1995 * Note: Buffers that had completed but which were
1996 * not yet processed are on a completion queue. They
1997 * are handled when the completion thread shuts down.
1998 */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001999static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002000{
2001 int pipe_num;
Komal Seelam644263d2016-02-22 20:45:49 +05302002 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffman85925072016-05-06 17:02:18 -07002003 struct CE_state *ce_state;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002004
Komal Seelam02cf2f82016-02-22 20:44:25 +05302005 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002006 struct HIF_CE_pipe_info *pipe_info;
2007
Houston Hoffman85925072016-05-06 17:02:18 -07002008 ce_state = scn->ce_id_to_state[pipe_num];
2009 if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2010 ((ce_state->htt_tx_data) ||
2011 (ce_state->htt_rx_data))) {
2012 continue;
2013 }
2014
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002015 pipe_info = &hif_state->pipe_info[pipe_num];
2016 hif_recv_buffer_cleanup_on_pipe(pipe_info);
2017 hif_send_buffer_cleanup_on_pipe(pipe_info);
2018 }
2019}
2020
Komal Seelam5584a7c2016-02-24 19:22:48 +05302021void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002022{
Komal Seelam644263d2016-02-22 20:45:49 +05302023 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05302024 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Komal Seelam644263d2016-02-22 20:45:49 +05302025
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002026 hif_buffer_cleanup(hif_state);
2027}
2028
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05302029void hif_ce_stop(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002030{
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05302031 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002032 int pipe_num;
2033
Houston Hoffmana69581e2016-11-14 18:03:19 -08002034 /*
2035 * before cleaning up any memory, ensure irq &
2036 * bottom half contexts will not be re-entered
2037 */
2038 hif_nointrs(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002039 scn->hif_init_done = false;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002040
2041 /*
2042 * At this point, asynchronous threads are stopped,
2043 * The Target should not DMA nor interrupt, Host code may
2044 * not initiate anything more. So we just need to clean
2045 * up Host-side state.
2046 */
2047
2048 if (scn->athdiag_procfs_inited) {
2049 athdiag_procfs_remove();
2050 scn->athdiag_procfs_inited = false;
2051 }
2052
2053 hif_buffer_cleanup(hif_state);
2054
2055 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2056 struct HIF_CE_pipe_info *pipe_info;
2057
2058 pipe_info = &hif_state->pipe_info[pipe_num];
2059 if (pipe_info->ce_hdl) {
2060 ce_fini(pipe_info->ce_hdl);
2061 pipe_info->ce_hdl = NULL;
2062 pipe_info->buf_sz = 0;
2063 }
2064 }
2065
2066 if (hif_state->sleep_timer_init) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302067 qdf_timer_stop(&hif_state->sleep_timer);
2068 qdf_timer_free(&hif_state->sleep_timer);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002069 hif_state->sleep_timer_init = false;
2070 }
2071
2072 hif_state->started = false;
2073}
2074
Houston Hoffman854e67f2016-03-14 21:11:39 -07002075/**
2076 * hif_get_target_ce_config() - get copy engine configuration
2077 * @target_ce_config_ret: basic copy engine configuration
2078 * @target_ce_config_sz_ret: size of the basic configuration in bytes
2079 * @target_service_to_ce_map_ret: service mapping for the copy engines
2080 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
2081 * @target_shadow_reg_cfg_ret: shadow register configuration
2082 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
2083 *
 2084 * Provides accessors to these values outside of this file.
 2085 * Currently these are stored in static pointers to const sections.
 2086 * There are multiple configurations, selected at compile time.
 2087 * Runtime selection would need to consider mode, target type and bus type.
 2088 *
 2089 * Return: none; values are returned through the out parameters.
2090 */
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302091void hif_get_target_ce_config(struct hif_softc *scn,
2092 struct CE_pipe_config **target_ce_config_ret,
Houston Hoffman854e67f2016-03-14 21:11:39 -07002093 int *target_ce_config_sz_ret,
2094 struct service_to_pipe **target_service_to_ce_map_ret,
2095 int *target_service_to_ce_map_sz_ret,
2096 struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
2097 int *shadow_cfg_sz_ret)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002098{
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302099 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2100
2101 *target_ce_config_ret = hif_state->target_ce_config;
2102 *target_ce_config_sz_ret = hif_state->target_ce_config_sz;
Houston Hoffman854e67f2016-03-14 21:11:39 -07002103 *target_service_to_ce_map_ret = target_service_to_ce_map;
2104 *target_service_to_ce_map_sz_ret = target_service_to_ce_map_sz;
2105
2106 if (target_shadow_reg_cfg_ret)
2107 *target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
2108
2109 if (shadow_cfg_sz_ret)
2110 *shadow_cfg_sz_ret = shadow_cfg_sz;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002111}
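/*
 * Illustrative usage (a sketch): hif_wlan_enable() below is the in-file
 * caller; an external caller would look much the same.  Note the sizes come
 * back in bytes, not entry counts, and the shadow out-parameters may be NULL.
 *
 *	struct CE_pipe_config *tgt_cfg;
 *	struct service_to_pipe *svc_map;
 *	int tgt_cfg_sz, svc_map_sz;
 *
 *	hif_get_target_ce_config(scn, &tgt_cfg, &tgt_cfg_sz,
 *				 &svc_map, &svc_map_sz, NULL, NULL);
 */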
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002112
Houston Hoffmanf60a3482017-01-31 10:45:07 -08002113#ifdef CONFIG_SHADOW_V2
Houston Hoffman403c2df2017-01-27 12:51:15 -08002114static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
Houston Hoffman5141f9d2017-01-05 10:49:17 -08002115{
2116 int i;
2117 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2118 "%s: num_config %d\n", __func__, cfg->num_shadow_reg_v2_cfg);
2119
2120 for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
2121 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2122 "%s: i %d, val %x\n", __func__, i,
2123 cfg->shadow_reg_v2_cfg[i].addr);
2124 }
2125}
2126
Houston Hoffmanf60a3482017-01-31 10:45:07 -08002127#else
2128static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2129{
2130 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2131 "%s: CONFIG_SHADOW_V2 not defined\n", __func__);
2132}
2133#endif
2134
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002135/**
2136 * hif_wlan_enable(): call the platform driver to enable wlan
Komal Seelambd7c51d2016-02-24 10:27:30 +05302137 * @scn: HIF Context
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002138 *
2139 * This function passes the con_mode and CE configuration to
 2140 * the platform driver to enable wlan.
2141 *
Houston Hoffman108da402016-03-14 21:11:24 -07002142 * Return: linux error code
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002143 */
Houston Hoffman108da402016-03-14 21:11:24 -07002144int hif_wlan_enable(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002145{
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002146 struct pld_wlan_enable_cfg cfg;
2147 enum pld_driver_mode mode;
Komal Seelambd7c51d2016-02-24 10:27:30 +05302148 uint32_t con_mode = hif_get_conparam(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002149
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302150 hif_get_target_ce_config(scn,
2151 (struct CE_pipe_config **)&cfg.ce_tgt_cfg,
Houston Hoffman854e67f2016-03-14 21:11:39 -07002152 &cfg.num_ce_tgt_cfg,
2153 (struct service_to_pipe **)&cfg.ce_svc_cfg,
2154 &cfg.num_ce_svc_pipe_cfg,
2155 (struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
2156 &cfg.num_shadow_reg_cfg);
2157
2158 /* translate from structure size to array size */
2159 cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
2160 cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
2161 cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
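	/*
	 * Worked example (illustrative): if the target CE config spans
	 * 12 * sizeof(struct CE_pipe_config) bytes, num_ce_tgt_cfg now
	 * holds the entry count 12.
	 */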
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002162
Houston Hoffman5141f9d2017-01-05 10:49:17 -08002163 hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
2164 &cfg.num_shadow_reg_v2_cfg);
2165
2166 hif_print_hal_shadow_register_cfg(&cfg);
2167
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302168 if (QDF_GLOBAL_FTM_MODE == con_mode)
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002169 mode = PLD_FTM;
Houston Hoffman75ef5a52016-04-14 17:15:49 -07002170 else if (QDF_IS_EPPING_ENABLED(con_mode))
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002171 mode = PLD_EPPING;
Peng Xu7b962532015-10-02 17:17:03 -07002172 else
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002173 mode = PLD_MISSION;
Peng Xu7b962532015-10-02 17:17:03 -07002174
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07002175 if (BYPASS_QMI)
2176 return 0;
2177 else
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002178 return pld_wlan_enable(scn->qdf_dev->dev, &cfg,
2179 mode, QWLAN_VERSIONSTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002180}
2181
Houston Hoffman75ef5a52016-04-14 17:15:49 -07002182#define CE_EPPING_USES_IRQ true
2183
Houston Hoffman108da402016-03-14 21:11:24 -07002184/**
2185 * hif_ce_prepare_config() - load the correct static tables.
2186 * @scn: hif context
2187 *
2188 * Epping uses different static attribute tables than mission mode.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002189 */
Houston Hoffman108da402016-03-14 21:11:24 -07002190void hif_ce_prepare_config(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002191{
Komal Seelambd7c51d2016-02-24 10:27:30 +05302192 uint32_t mode = hif_get_conparam(scn);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002193 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2194 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302195 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002196
Houston Hoffman10fedfc2017-01-23 15:23:09 -08002197 hif_state->ce_services = ce_services_attach(scn);
2198
Houston Hoffman710af5a2016-11-22 21:59:03 -08002199 scn->ce_count = HOST_CE_COUNT;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002200 /* if epping is enabled we need to use the epping configuration. */
Houston Hoffman75ef5a52016-04-14 17:15:49 -07002201 if (QDF_IS_EPPING_ENABLED(mode)) {
2202 if (CE_EPPING_USES_IRQ)
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302203 hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002204 else
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302205 hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
2206 hif_state->target_ce_config = target_ce_config_wlan_epping;
2207 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002208 target_service_to_ce_map =
2209 target_service_to_ce_map_wlan_epping;
2210 target_service_to_ce_map_sz =
2211 sizeof(target_service_to_ce_map_wlan_epping);
Vishwajith Upendra70efc752016-04-18 11:23:49 -07002212 target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
2213 shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002214 }
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002215
2216 switch (tgt_info->target_type) {
2217 default:
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302218 hif_state->host_ce_config = host_ce_config_wlan;
2219 hif_state->target_ce_config = target_ce_config_wlan;
2220 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002221 break;
2222 case TARGET_TYPE_AR900B:
2223 case TARGET_TYPE_QCA9984:
2224 case TARGET_TYPE_IPQ4019:
2225 case TARGET_TYPE_QCA9888:
Venkateswara Swamy Bandaru5432c1b2016-10-12 19:00:40 +05302226 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
2227 hif_state->host_ce_config =
2228 host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
2229 } else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
2230 hif_state->host_ce_config =
2231 host_lowdesc_ce_cfg_wlan_ar900b;
2232 } else {
2233 hif_state->host_ce_config = host_ce_config_wlan_ar900b;
2234 }
2235
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302236 hif_state->target_ce_config = target_ce_config_wlan_ar900b;
2237 hif_state->target_ce_config_sz =
2238 sizeof(target_ce_config_wlan_ar900b);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002239
2240 target_service_to_ce_map = target_service_to_ce_map_ar900b;
2241 target_service_to_ce_map_sz =
2242 sizeof(target_service_to_ce_map_ar900b);
2243 break;
2244
2245 case TARGET_TYPE_AR9888:
2246 case TARGET_TYPE_AR9888V2:
Venkateswara Swamy Bandaru5432c1b2016-10-12 19:00:40 +05302247 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
2248 hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888;
2249 } else {
2250 hif_state->host_ce_config = host_ce_config_wlan_ar9888;
2251 }
2252
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302253 hif_state->target_ce_config = target_ce_config_wlan_ar9888;
2254 hif_state->target_ce_config_sz =
2255 sizeof(target_ce_config_wlan_ar9888);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002256
2257 target_service_to_ce_map = target_service_to_ce_map_ar900b;
2258 target_service_to_ce_map_sz =
2259 sizeof(target_service_to_ce_map_ar900b);
2260 break;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07002261
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05302262 case TARGET_TYPE_QCA8074:
Karunakar Dasinenif61cb072016-09-29 11:50:45 -07002263 if (scn->bus_type == QDF_BUS_TYPE_PCI) {
2264 hif_state->host_ce_config =
2265 host_ce_config_wlan_qca8074_pci;
2266 hif_state->target_ce_config =
2267 target_ce_config_wlan_qca8074_pci;
2268 hif_state->target_ce_config_sz =
2269 sizeof(target_ce_config_wlan_qca8074_pci);
2270 } else {
2271 hif_state->host_ce_config = host_ce_config_wlan_qca8074;
2272 hif_state->target_ce_config =
2273 target_ce_config_wlan_qca8074;
2274 hif_state->target_ce_config_sz =
2275 sizeof(target_ce_config_wlan_qca8074);
2276 }
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05302277 break;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07002278 case TARGET_TYPE_QCA6290:
2279 hif_state->host_ce_config = host_ce_config_wlan_qca6290;
2280 hif_state->target_ce_config = target_ce_config_wlan_qca6290;
2281 hif_state->target_ce_config_sz =
2282 sizeof(target_ce_config_wlan_qca6290);
Houston Hoffman710af5a2016-11-22 21:59:03 -08002283 scn->ce_count = QCA_6290_CE_COUNT;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07002284 break;
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002285 }
Houston Hoffman108da402016-03-14 21:11:24 -07002286}
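/*
 * Example of the selection above (illustrative restatement of the switch):
 * a TARGET_TYPE_QCA6290 device gets host_ce_config_wlan_qca6290 and
 * target_ce_config_wlan_qca6290, with ce_count reduced from HOST_CE_COUNT
 * to QCA_6290_CE_COUNT; any target type not listed falls through to the
 * default wlan tables.
 */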
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002287
Houston Hoffman108da402016-03-14 21:11:24 -07002288/**
2289 * hif_ce_open() - do ce specific allocations
2290 * @hif_sc: pointer to hif context
2291 *
 2292 * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_NOMEM
2293 */
2294QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
2295{
2296 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002297
Venkateswara Swamy Bandaru9fd9af02016-09-20 20:27:31 +05302298 qdf_spinlock_create(&hif_state->irq_reg_lock);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302299 qdf_spinlock_create(&hif_state->keep_awake_lock);
Houston Hoffman108da402016-03-14 21:11:24 -07002300 return QDF_STATUS_SUCCESS;
2301}
2302
2303/**
2304 * hif_ce_close() - do ce specific free
2305 * @hif_sc: pointer to hif context
2306 */
2307void hif_ce_close(struct hif_softc *hif_sc)
2308{
Venkateswara Swamy Bandaru9fd9af02016-09-20 20:27:31 +05302309 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2310
2311 qdf_spinlock_destroy(&hif_state->irq_reg_lock);
Houston Hoffman108da402016-03-14 21:11:24 -07002312}
2313
2314/**
2315 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
2316 * @hif_sc: hif context
2317 *
2318 * uses state variables to support cleaning up when hif_config_ce fails.
2319 */
2320void hif_unconfig_ce(struct hif_softc *hif_sc)
2321{
2322 int pipe_num;
2323 struct HIF_CE_pipe_info *pipe_info;
2324 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2325
2326 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
2327 pipe_info = &hif_state->pipe_info[pipe_num];
2328 if (pipe_info->ce_hdl) {
2329 ce_unregister_irq(hif_state, (1 << pipe_num));
Houston Hoffman108da402016-03-14 21:11:24 -07002330 ce_fini(pipe_info->ce_hdl);
2331 pipe_info->ce_hdl = NULL;
2332 pipe_info->buf_sz = 0;
Houston Hoffman03f46572016-12-12 12:53:56 -08002333 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
Houston Hoffman108da402016-03-14 21:11:24 -07002334 }
2335 }
Houston Hoffman108da402016-03-14 21:11:24 -07002336 if (hif_sc->athdiag_procfs_inited) {
2337 athdiag_procfs_remove();
2338 hif_sc->athdiag_procfs_inited = false;
2339 }
2340}
2341
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07002342#ifdef CONFIG_BYPASS_QMI
2343#define FW_SHARED_MEM (2 * 1024 * 1024)
2344
2345/**
2346 * hif_post_static_buf_to_target() - post static buffer to WLAN FW
2347 * @scn: pointer to HIF structure
2348 *
2349 * WLAN FW needs 2MB memory from DDR when QMI is disabled.
2350 *
2351 * Return: void
2352 */
2353static void hif_post_static_buf_to_target(struct hif_softc *scn)
2354{
Hardik Kantilal Patelc5dc5f22016-04-21 14:11:33 -07002355 void *target_va;
2356 phys_addr_t target_pa;
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07002357
Hardik Kantilal Patelc5dc5f22016-04-21 14:11:33 -07002358 target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
2359 FW_SHARED_MEM, &target_pa);
2360 if (NULL == target_va) {
 2361		HIF_TRACE("Memory allocation failed, could not post target buf");
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07002362 return;
2363 }
Hardik Kantilal Patelc5dc5f22016-04-21 14:11:33 -07002364 hif_write32_mb(scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
2365 HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07002366}
2367#else
2368static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
2369{
2370 return;
2371}
2372#endif
2373
Dustin Brown6bdbda52016-09-27 15:52:30 -07002374#ifdef WLAN_SUSPEND_RESUME_TEST
2375static void hif_fake_apps_init_ctx(struct hif_softc *scn)
2376{
2377 INIT_WORK(&scn->fake_apps_ctx.resume_work,
2378 hif_fake_apps_resume_work);
2379}
2380#else
2381static inline void hif_fake_apps_init_ctx(struct hif_softc *scn) {}
2382#endif
2383
Houston Hoffman108da402016-03-14 21:11:24 -07002384/**
2385 * hif_config_ce() - configure copy engines
2386 * @scn: hif context
2387 *
2388 * Prepares fw, copy engine hardware and host sw according
2389 * to the attributes selected by hif_ce_prepare_config.
2390 *
2391 * also calls athdiag_procfs_init
2392 *
 2393 * Return: 0 for success, nonzero for failure.
2394 */
2395int hif_config_ce(struct hif_softc *scn)
2396{
2397 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2398 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2399 struct HIF_CE_pipe_info *pipe_info;
2400 int pipe_num;
Houston Hoffman85925072016-05-06 17:02:18 -07002401 struct CE_state *ce_state;
Houston Hoffman108da402016-03-14 21:11:24 -07002402#ifdef ADRASTEA_SHADOW_REGISTERS
2403 int i;
2404#endif
2405 QDF_STATUS rv = QDF_STATUS_SUCCESS;
2406
2407 scn->notice_send = true;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002408
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07002409 hif_post_static_buf_to_target(scn);
2410
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002411 hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
Houston Hoffman108da402016-03-14 21:11:24 -07002412
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002413 hif_config_rri_on_ddr(scn);
2414
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002415 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2416 struct CE_attr *attr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002417 pipe_info = &hif_state->pipe_info[pipe_num];
2418 pipe_info->pipe_num = pipe_num;
2419 pipe_info->HIF_CE_state = hif_state;
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302420 attr = &hif_state->host_ce_config[pipe_num];
Karunakar Dasinenif61cb072016-09-29 11:50:45 -07002421
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002422 pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
Houston Hoffman85925072016-05-06 17:02:18 -07002423 ce_state = scn->ce_id_to_state[pipe_num];
Houston Hoffman03f46572016-12-12 12:53:56 -08002424 qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302425 QDF_ASSERT(pipe_info->ce_hdl != NULL);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002426 if (pipe_info->ce_hdl == NULL) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302427 rv = QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002428 A_TARGET_ACCESS_UNLIKELY(scn);
2429 goto err;
2430 }
2431
Kiran Venkatappae17e3b62017-02-10 16:31:49 +05302432 if (attr->flags & CE_ATTR_DIAG) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002433			/* Reserve the last CE for
 2434			 * Diagnostic Window support */
Houston Hoffmanc1d9a412016-03-30 21:07:57 -07002435 hif_state->ce_diag = pipe_info->ce_hdl;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002436 continue;
2437 }
2438
Houston Hoffman85925072016-05-06 17:02:18 -07002439 if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2440 (ce_state->htt_rx_data))
2441 continue;
2442
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302443 pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002444 if (attr->dest_nentries > 0) {
2445 atomic_set(&pipe_info->recv_bufs_needed,
2446 init_buffer_count(attr->dest_nentries - 1));
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05302447			/* SRNG based CE has one entry less */
2448 if (ce_srng_based(scn))
2449 atomic_dec(&pipe_info->recv_bufs_needed);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002450 } else {
2451 atomic_set(&pipe_info->recv_bufs_needed, 0);
2452 }
2453 ce_tasklet_init(hif_state, (1 << pipe_num));
2454 ce_register_irq(hif_state, (1 << pipe_num));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002455 }
2456
2457 if (athdiag_procfs_init(scn) != 0) {
2458 A_TARGET_ACCESS_UNLIKELY(scn);
2459 goto err;
2460 }
2461 scn->athdiag_procfs_inited = true;
2462
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002463 HIF_INFO_MED("%s: ce_init done", __func__);
2464
Houston Hoffman108da402016-03-14 21:11:24 -07002465 init_tasklet_workers(hif_hdl);
Dustin Brown6bdbda52016-09-27 15:52:30 -07002466 hif_fake_apps_init_ctx(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002467
Houston Hoffmanc50572b2016-06-08 19:49:46 -07002468 HIF_TRACE("%s: X, ret = %d", __func__, rv);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002469
2470#ifdef ADRASTEA_SHADOW_REGISTERS
Houston Hoffmanc50572b2016-06-08 19:49:46 -07002471 HIF_INFO("%s, Using Shadow Registers instead of CE Registers", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002472 for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
Houston Hoffmanc50572b2016-06-08 19:49:46 -07002473 HIF_INFO("%s Shadow Register%d is mapped to address %x",
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002474 __func__, i,
2475 (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
2476 }
2477#endif
2478
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302479 return rv != QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002480
2481err:
2482 /* Failure, so clean up */
Houston Hoffman108da402016-03-14 21:11:24 -07002483 hif_unconfig_ce(scn);
Houston Hoffmanc50572b2016-06-08 19:49:46 -07002484 HIF_TRACE("%s: X, ret = %d", __func__, rv);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302485 return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002486}
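/*
 * Illustrative lifecycle (a sketch, not a verbatim caller): a bus layer is
 * expected to pair these entry points roughly as follows.  hif_config_ce()
 * already invokes hif_unconfig_ce() on its own error path.
 *
 *	hif_ce_open(scn);
 *	hif_ce_prepare_config(scn);
 *	if (hif_config_ce(scn))
 *		goto fail;
 */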
2487
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07002488#ifdef WLAN_FEATURE_FASTPATH
2489/**
2490 * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
 2491 * @handler: Callback function
2492 * @context: handle for callback function
2493 *
2494 * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
2495 */
Houston Hoffman127467f2016-04-26 22:37:14 -07002496int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
2497 fastpath_msg_handler handler,
2498 void *context)
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07002499{
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07002500 struct CE_state *ce_state;
Houston Hoffman127467f2016-04-26 22:37:14 -07002501 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07002502 int i;
2503
Himanshu Agarwal2a924592016-06-30 18:04:14 +05302504 if (!scn) {
2505 HIF_ERROR("%s: scn is NULL", __func__);
2506 QDF_ASSERT(0);
2507 return QDF_STATUS_E_FAILURE;
2508 }
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07002509
2510 if (!scn->fastpath_mode_on) {
Houston Hoffmanc50572b2016-06-08 19:49:46 -07002511 HIF_WARN("%s: Fastpath mode disabled", __func__);
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07002512 return QDF_STATUS_E_FAILURE;
2513 }
2514
Houston Hoffmand6f946c2016-04-06 15:16:00 -07002515 for (i = 0; i < scn->ce_count; i++) {
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07002516 ce_state = scn->ce_id_to_state[i];
2517 if (ce_state->htt_rx_data) {
2518 ce_state->fastpath_handler = handler;
2519 ce_state->context = context;
2520 }
2521 }
2522
2523 return QDF_STATUS_SUCCESS;
2524}
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07002525#endif
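/*
 * Illustrative registration (a sketch): a datapath client running with
 * fastpath mode enabled would register roughly as below.  my_htt_handler and
 * my_pdev are hypothetical; the handler must match the fastpath_msg_handler
 * typedef from ce_api.h.
 *
 *	if (hif_ce_fastpath_cb_register(hif_ctx, my_htt_handler, my_pdev))
 *		HIF_ERROR("fastpath callback registration failed");
 */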
2526
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002527#ifdef IPA_OFFLOAD
Leo Changd85f78d2015-11-13 10:55:34 -08002528/**
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05302529 * hif_ce_ipa_get_ce_resource() - get uc resource on hif
Leo Changd85f78d2015-11-13 10:55:34 -08002530 * @scn: bus context
2531 * @ce_sr_base_paddr: copyengine source ring base physical address
2532 * @ce_sr_ring_size: copyengine source ring size
2533 * @ce_reg_paddr: copyengine register physical address
2534 *
2535 * IPA micro controller data path offload feature enabled,
 2536 * HIF should release copy engine related resource information to the IPA UC;
 2537 * the IPA UC will access the hardware resources using the released information.
2538 *
2539 * Return: None
2540 */
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05302541void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302542 qdf_dma_addr_t *ce_sr_base_paddr,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002543 uint32_t *ce_sr_ring_size,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302544 qdf_dma_addr_t *ce_reg_paddr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002545{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302546 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002547 struct HIF_CE_pipe_info *pipe_info =
2548 &(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
2549 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
2550
2551 ce_ipa_get_resource(ce_hdl, ce_sr_base_paddr, ce_sr_ring_size,
2552 ce_reg_paddr);
2553 return;
2554}
2555#endif /* IPA_OFFLOAD */
2556
2557
2558#ifdef ADRASTEA_SHADOW_REGISTERS
2559
2560/*
2561 Current shadow register config
2562
2563 -----------------------------------------------------------
2564 Shadow Register | CE | src/dst write index
2565 -----------------------------------------------------------
2566 0 | 0 | src
2567 1 No Config - Doesn't point to anything
2568 2 No Config - Doesn't point to anything
2569 3 | 3 | src
2570 4 | 4 | src
2571 5 | 5 | src
2572 6 No Config - Doesn't point to anything
2573 7 | 7 | src
2574 8 No Config - Doesn't point to anything
2575 9 No Config - Doesn't point to anything
2576 10 No Config - Doesn't point to anything
2577 11 No Config - Doesn't point to anything
2578 -----------------------------------------------------------
2579 12 No Config - Doesn't point to anything
2580 13 | 1 | dst
2581 14 | 2 | dst
2582 15 No Config - Doesn't point to anything
2583 16 No Config - Doesn't point to anything
2584 17 No Config - Doesn't point to anything
2585 18 No Config - Doesn't point to anything
2586 19 | 7 | dst
2587 20 | 8 | dst
2588 21 No Config - Doesn't point to anything
2589 22 No Config - Doesn't point to anything
2590 23 No Config - Doesn't point to anything
2591 -----------------------------------------------------------
2592
2593
2594 ToDo - Move shadow register config to following in the future
2595 This helps free up a block of shadow registers towards the end.
2596 Can be used for other purposes
2597
2598 -----------------------------------------------------------
2599 Shadow Register | CE | src/dst write index
2600 -----------------------------------------------------------
2601 0 | 0 | src
2602 1 | 3 | src
2603 2 | 4 | src
2604 3 | 5 | src
2605 4 | 7 | src
2606 -----------------------------------------------------------
2607 5 | 1 | dst
2608 6 | 2 | dst
2609 7 | 7 | dst
2610 8 | 8 | dst
2611 -----------------------------------------------------------
2612 9 No Config - Doesn't point to anything
2613 12 No Config - Doesn't point to anything
2614 13 No Config - Doesn't point to anything
2615 14 No Config - Doesn't point to anything
2616 15 No Config - Doesn't point to anything
2617 16 No Config - Doesn't point to anything
2618 17 No Config - Doesn't point to anything
2619 18 No Config - Doesn't point to anything
2620 19 No Config - Doesn't point to anything
2621 20 No Config - Doesn't point to anything
2622 21 No Config - Doesn't point to anything
2623 22 No Config - Doesn't point to anything
2624 23 No Config - Doesn't point to anything
2625 -----------------------------------------------------------
2626*/
2627
Komal Seelam644263d2016-02-22 20:45:49 +05302628u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002629{
2630 u32 addr = 0;
Houston Hoffmane6330442016-02-26 12:19:11 -08002631 u32 ce = COPY_ENGINE_ID(ctrl_addr);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002632
Houston Hoffmane6330442016-02-26 12:19:11 -08002633 switch (ce) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002634 case 0:
2635 addr = SHADOW_VALUE0;
2636 break;
2637 case 3:
2638 addr = SHADOW_VALUE3;
2639 break;
2640 case 4:
2641 addr = SHADOW_VALUE4;
2642 break;
2643 case 5:
2644 addr = SHADOW_VALUE5;
2645 break;
2646 case 7:
2647 addr = SHADOW_VALUE7;
2648 break;
2649 default:
Houston Hoffmane6330442016-02-26 12:19:11 -08002650 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302651 QDF_ASSERT(0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002652 }
2653 return addr;
2654
2655}
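/*
 * Example (illustrative): a host update of the source-ring write index for
 * CE 4 resolves through shadow_sr_wr_ind_addr() to SHADOW_VALUE4 per the
 * table above, so the write lands in the shadow register rather than the
 * CE register itself.
 */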
2656
u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 1:
		addr = SHADOW_VALUE13;
		break;
	case 2:
		addr = SHADOW_VALUE14;
		break;
	case 5:
		addr = SHADOW_VALUE17;
		break;
	case 7:
		addr = SHADOW_VALUE19;
		break;
	case 8:
		addr = SHADOW_VALUE20;
		break;
	case 9:
		addr = SHADOW_VALUE21;
		break;
	case 10:
		addr = SHADOW_VALUE22;
		break;
	case 11:
		addr = SHADOW_VALUE23;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}

	return addr;
}
#endif

#if defined(FEATURE_LRO)
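/**
 * hif_ce_get_lro_ctx() - retrieve the LRO context for a copy engine
 * @hif_hdl: hif opaque handle
 * @ctx_id: copy engine id
 *
 * Return: LRO context previously stored against this copy engine
 */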
void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
{
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	ce_state = scn->ce_id_to_state[ctx_id];

	return ce_state->lro_data;
}

/**
 * ce_lro_flush_cb_register() - register the LRO flush callback
 * @hif_hdl: hif opaque handle
 * @handler: callback function that flushes aggregated LRO data
 * @lro_init_handler: callback that allocates the per-CE LRO context
 *
 * Store the LRO flush callback and an LRO context for every copy
 * engine that carries htt rx data.
 *
 * Return: Number of instances the callback is registered for
 */
int ce_lro_flush_cb_register(struct hif_opaque_softc *hif_hdl,
			     void (handler)(void *),
			     void *(lro_init_handler)(void))
{
	int rc = 0;
	int i;
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	void *data = NULL;

	QDF_ASSERT(scn != NULL);

	if (scn != NULL) {
		for (i = 0; i < scn->ce_count; i++) {
			ce_state = scn->ce_id_to_state[i];
			if ((ce_state != NULL) && (ce_state->htt_rx_data)) {
				data = lro_init_handler();
				if (data == NULL) {
					HIF_ERROR("%s: Failed to init LRO for CE %d",
						  __func__, i);
					continue;
				}
				ce_state->lro_flush_cb = handler;
				ce_state->lro_data = data;
				rc++;
			}
		}
	} else {
		HIF_ERROR("%s: hif_state NULL!", __func__);
	}
	return rc;
}

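/*
 * Illustrative registration sketch (not part of this file): a network
 * layer supplies its own flush and init handlers. The handler names
 * below are hypothetical.
 *
 *	void my_lro_flush(void *lro_data);
 *	void *my_lro_init(void);
 *
 *	int n = ce_lro_flush_cb_register(hif_hdl, my_lro_flush,
 *					 my_lro_init);
 *	HIF_INFO("LRO flush cb registered on %d copy engines", n);
 */
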
/**
 * ce_lro_flush_cb_deregister() - deregister the LRO flush callback
 * @hif_hdl: hif opaque handle
 * @lro_deinit_cb: callback that frees the per-CE LRO context
 *
 * Remove the LRO flush callback from every copy engine that carries
 * htt rx data and free the associated LRO context.
 *
 * Return: Number of instances the callback is de-registered for
 */
int ce_lro_flush_cb_deregister(struct hif_opaque_softc *hif_hdl,
			       void (lro_deinit_cb)(void *))
{
	int rc = 0;
	int i;
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	QDF_ASSERT(scn != NULL);
	if (scn != NULL) {
		for (i = 0; i < scn->ce_count; i++) {
			ce_state = scn->ce_id_to_state[i];
			if ((ce_state != NULL) && (ce_state->htt_rx_data)) {
				qdf_spin_lock_bh(
					&ce_state->lro_unloading_lock);
				ce_state->lro_flush_cb = NULL;
				lro_deinit_cb(ce_state->lro_data);
				ce_state->lro_data = NULL;
				qdf_spin_unlock_bh(
					&ce_state->lro_unloading_lock);
				qdf_spinlock_destroy(
					&ce_state->lro_unloading_lock);
				rc++;
			}
		}
	} else {
		HIF_ERROR("%s: hif_state NULL!", __func__);
	}
	return rc;
}
#endif

/**
 * hif_map_service_to_pipe() - returns the ce ids pertaining to
 * this service
 * @hif_hdl: hif_opaque_softc pointer
 * @svc_id: Service ID for which the mapping is needed.
 * @ul_pipe: address of the container in which ul pipe is returned.
 * @dl_pipe: address of the container in which dl pipe is returned.
 * @ul_is_polled: address of the container in which a bool
 *			indicating if the UL CE for this service
 *			is polled is returned.
 * @dl_is_polled: address of the container in which a bool
 *			indicating if the DL CE for this service
 *			is polled is returned.
 *
 * Return: Indicates whether the service has been found in the table.
 *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
 *         There will be warning logs if either leg has not been updated
 *         because it missed the entry in the table (but this is not an
 *         error).
 */
int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
			    uint8_t *ul_pipe, uint8_t *dl_pipe,
			    int *ul_is_polled, int *dl_is_polled)
{
	int status = QDF_STATUS_E_INVAL;
	unsigned int i;
	struct service_to_pipe element;
	struct service_to_pipe *tgt_svc_map_to_use;
	size_t sz_tgt_svc_map_to_use;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	uint32_t mode = hif_get_conparam(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
	bool dl_updated = false;
	bool ul_updated = false;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (QDF_IS_EPPING_ENABLED(mode)) {
		tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
		sz_tgt_svc_map_to_use =
			sizeof(target_service_to_ce_map_wlan_epping);
	} else {
		switch (tgt_info->target_type) {
		default:
			tgt_svc_map_to_use = target_service_to_ce_map_wlan;
			sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_wlan);
			break;
		case TARGET_TYPE_AR900B:
		case TARGET_TYPE_QCA9984:
		case TARGET_TYPE_IPQ4019:
		case TARGET_TYPE_QCA9888:
		case TARGET_TYPE_AR9888:
		case TARGET_TYPE_AR9888V2:
			tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
			sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_ar900b);
			break;
		case TARGET_TYPE_QCA6290:
			tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
			sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6290);
			break;
		}
	}

	*dl_is_polled = 0;  /* polling for received messages not supported */

	for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {

		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
		if (element.service_id == svc_id) {
			if (element.pipedir == PIPEDIR_OUT) {
				*ul_pipe = element.pipenum;
				*ul_is_polled =
					(hif_state->host_ce_config[*ul_pipe].flags &
					 CE_ATTR_DISABLE_INTR) != 0;
				ul_updated = true;
			} else if (element.pipedir == PIPEDIR_IN) {
				*dl_pipe = element.pipenum;
				dl_updated = true;
			}
			status = QDF_STATUS_SUCCESS;
		}
	}
	if (ul_updated == false)
		HIF_INFO("%s: ul pipe is NOT updated for service %d",
			 __func__, svc_id);
	if (dl_updated == false)
		HIF_INFO("%s: dl pipe is NOT updated for service %d",
			 __func__, svc_id);

	return status;
}

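/*
 * Illustrative lookup sketch (hypothetical caller code): resolving the
 * pipes for the HTC control service, much as hif_get_wake_ce_id()
 * further below does for the wake CE.
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	if (hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
 *				    &ul_pipe, &dl_pipe,
 *				    &ul_polled, &dl_polled) ==
 *	    QDF_STATUS_SUCCESS)
 *		HIF_INFO("ctrl svc: ul pipe %u dl pipe %u",
 *			 ul_pipe, dl_pipe);
 */
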
#ifdef SHADOW_REG_DEBUG
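/**
 * DEBUG_CE_SRC_RING_READ_IDX_GET() - cross-check the DDR based SRRI
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * Read the source ring read index from both the hardware register
 * and its DDR shadow, and assert if the two disagree.
 *
 * Return: source ring read index from DDR
 */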
inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
					       uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, srri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);

	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != srri_from_ddr) {
		HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
			  __func__, srri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return srri_from_ddr;
}

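/**
 * DEBUG_CE_DEST_RING_READ_IDX_GET() - cross-check the DDR based DRRI
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * Read the destination ring read index from both the hardware register
 * and its DDR shadow, and assert if the two disagree.
 *
 * Return: destination ring read index from DDR
 */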
inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
						uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, drri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);

	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != drri_from_ddr) {
		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
			  drri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return drri_from_ddr;
}

#endif /* SHADOW_REG_DEBUG */

#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_get_src_ring_read_index(): Called to get the SRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the SRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based SRRI.
 *
 * Return: SRRI
 */
inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
						uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
	if (attr.flags & CE_ATTR_DISABLE_INTR)
		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	else
		return A_TARGET_READ(scn,
				(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
}

/**
 * hif_get_dst_ring_read_index(): Called to get the DRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the DRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based DRRI.
 *
 * Return: DRRI
 */
inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
						uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];

	if (attr.flags & CE_ATTR_DISABLE_INTR)
		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	else
		return A_TARGET_READ(scn,
				(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
}

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This function allocates non-cached memory on DDR and sends
 * the physical address of this memory to the CE hardware. The
 * hardware then updates the RRI at this location.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
	unsigned int i;
	qdf_dma_addr_t paddr_rri_on_ddr;
	uint32_t high_paddr, low_paddr;

	scn->vaddr_rri_on_ddr =
		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
		scn->qdf_dev->dev, (CE_COUNT*sizeof(uint32_t)),
		&paddr_rri_on_ddr);

	low_paddr  = BITS0_TO_31(paddr_rri_on_ddr);
	high_paddr = BITS32_TO_35(paddr_rri_on_ddr);

	HIF_INFO("%s using srri and drri from DDR", __func__);

	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);

	for (i = 0; i < CE_COUNT; i++)
		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));

	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT*sizeof(uint32_t));
}
#else

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This is a dummy implementation for platforms that don't
 * support this functionality.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
}
#endif /* ADRASTEA_RRI_ON_DDR */

/**
 * hif_dump_ce_registers() - dump ce registers
 * @scn: hif_softc pointer.
 *
 * Output the copy engine registers
 *
 * Return: 0 for success or error code
 */
int hif_dump_ce_registers(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
	uint16_t i;
	QDF_STATUS status;

	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
		if (scn->ce_id_to_state[i] == NULL) {
			HIF_DBG("CE%d not used.", i);
			continue;
		}

		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
					   (uint8_t *) &ce_reg_values[0],
					   ce_reg_word_size * sizeof(uint32_t));

		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("Dumping CE register failed!");
			return -EACCES;
		}
		HIF_ERROR("CE%d=>\n", i);
		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
				   (uint8_t *) &ce_reg_values[0],
				   ce_reg_word_size * sizeof(uint32_t));
		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d\n", (ce_reg_address
			  + SR_WR_INDEX_ADDRESS),
			  ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d\n", (ce_reg_address
			  + CURRENT_SRRI_ADDRESS),
			  ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d\n", (ce_reg_address
			  + DST_WR_INDEX_ADDRESS),
			  ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d\n", (ce_reg_address
			  + CURRENT_DRRI_ADDRESS),
			  ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
		qdf_print("---\n");
	}
	return 0;
}
#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
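/**
 * hif_get_addl_pipe_info() - collect additional ring state for a pipe
 * @osc: hif opaque handle
 * @hif_info: container to be filled with the ring state
 * @pipe: pipe number whose copy engine rings are queried
 *
 * Copy the source and destination ring state of the copy engine
 * backing this pipe into @hif_info, along with the PCI memory base
 * and the CE control address.
 *
 * Return: @hif_info, filled in
 */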
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct CE_ring_state *dest_ring = ce_state->dest_ring;

	if (src_ring) {
		hif_info->ul_pipe.nentries = src_ring->nentries;
		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
		hif_info->ul_pipe.sw_index = src_ring->sw_index;
		hif_info->ul_pipe.write_index = src_ring->write_index;
		hif_info->ul_pipe.hw_index = src_ring->hw_index;
		hif_info->ul_pipe.base_addr_CE_space =
			src_ring->base_addr_CE_space;
		hif_info->ul_pipe.base_addr_owner_space =
			src_ring->base_addr_owner_space;
	}

	if (dest_ring) {
		hif_info->dl_pipe.nentries = dest_ring->nentries;
		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
		hif_info->dl_pipe.write_index = dest_ring->write_index;
		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
		hif_info->dl_pipe.base_addr_CE_space =
			dest_ring->base_addr_CE_space;
		hif_info->dl_pipe.base_addr_owner_space =
			dest_ring->base_addr_owner_space;
	}

	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
	hif_info->ctrl_addr = ce_state->ctrl_addr;

	return hif_info;
}

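/**
 * hif_set_nss_wifiol_mode() - set the NSS wifi offload mode
 * @osc: hif opaque handle
 * @mode: NSS wifi offload mode to store in the HIF context
 *
 * Return: 0
 */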
uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->nss_wifi_ol_mode = mode;
	return 0;
}

#endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */

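/**
 * hif_set_attribute() - store target-specific attribute flags
 * @osc: hif opaque handle
 * @hif_attrib: attribute flags to store in the HIF context
 *
 * Return: None
 */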
void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->hif_attribute = hif_attrib;
}

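/**
 * hif_disable_interrupt() - disable copy-complete interrupts for a pipe
 * @osc: hif opaque handle
 * @pipe_num: pipe whose copy engine interrupt is to be disabled
 *
 * Return: None
 */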
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
	uint32_t ctrl_addr = CE_state->ctrl_addr;

	Q_TARGET_ACCESS_BEGIN(scn);
	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
	Q_TARGET_ACCESS_END(scn);
}

/**
 * hif_fw_event_handler() - hif fw event handler
 * @hif_state: pointer to hif ce state structure
 *
 * Process fw events and raise HTC callback to process fw events.
 *
 * Return: none
 */
static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
{
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	if (!msg_callbacks->fwEventHandler)
		return;

	msg_callbacks->fwEventHandler(msg_callbacks->Context,
				      QDF_STATUS_E_FAILURE);
}

#ifndef QCA_WIFI_3_0
/**
 * hif_fw_interrupt_handler() - FW interrupt handler
 * @irq: irq number
 * @arg: the user pointer
 *
 * Called from the PCI interrupt handler when the target raises a
 * firmware-generated interrupt to the host.
 *
 * Return: status of handled irq
 */
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	struct hif_softc *scn = arg;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	uint32_t fw_indicator_address, fw_indicator;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return ATH_ISR_NOSCHED;

	fw_indicator_address = hif_state->fw_indicator_address;
	/* For sudden unplug this will return ~0 */
	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);

	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
		/* ACK: clear Target-side pending event */
		A_TARGET_WRITE(scn, fw_indicator_address,
			       fw_indicator & ~FW_IND_EVENT_PENDING);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;

		if (hif_state->started) {
			hif_fw_event_handler(hif_state);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 */
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
				("%s: Early firmware event indicated\n",
				 __func__));
		}
	} else {
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;
	}

	return ATH_ISR_SCHED;
}
#else
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	return ATH_ISR_SCHED;
}
#endif /* #ifndef QCA_WIFI_3_0 */

/**
 * hif_wlan_disable(): call the platform driver to disable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode to the platform driver to
 * disable wlan.
 *
 * Return: void
 */
void hif_wlan_disable(struct hif_softc *scn)
{
	enum pld_driver_mode mode;
	uint32_t con_mode = hif_get_conparam(scn);

	if (QDF_GLOBAL_FTM_MODE == con_mode)
		mode = PLD_FTM;
	else if (QDF_IS_EPPING_ENABLED(con_mode))
		mode = PLD_EPPING;
	else
		mode = PLD_MISSION;

	pld_wlan_disable(scn->qdf_dev->dev, mode);
}

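/**
 * hif_get_wake_ce_id() - find the copy engine that backs the wake pipe
 * @scn: hif_softc pointer
 * @ce_id: container into which the wake CE id is returned
 *
 * Return: 0 on success, negative errno if the service map lookup fails
 */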
int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
{
	QDF_STATUS status;
	uint8_t ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;

	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
					 HTC_CTRL_RSVD_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
		return qdf_status_to_os_return(status);
	}

	*ce_id = dl_pipe;

	return 0;
}