/*
 * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */
#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#ifndef CONFIG_WIN
#include "qwlan_version.h"
#endif

#define CE_POLL_TIMEOUT 10      /* ms */

#define AGC_DUMP         1
#define CHANINFO_DUMP    2
#define BB_WATCHDOG_DUMP 3
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
#define PCIE_ACCESS_DUMP 4
#endif
#include "mp_dev.h"

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290)) && \
	!defined(QCA_WIFI_SUPPORT_SRNG)
#define QCA_WIFI_SUPPORT_SRNG
#endif
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080066/* Forward references */
67static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);
68
69/*
70 * Fix EV118783, poll to check whether a BMI response comes
71 * other than waiting for the interruption which may be lost.
72 */
73/* #define BMI_RSP_POLLING */
74#define BMI_RSP_TO_MILLISEC 1000
75

#ifdef CONFIG_BYPASS_QMI
#define BYPASS_QMI 1
#else
#define BYPASS_QMI 0
#endif

#ifdef CONFIG_WIN
#if ENABLE_10_4_FW_HDR
#define WDI_IPA_SERVICE_GROUP 5
#define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
#define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
#define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
#endif /* ENABLE_10_4_FW_HDR */
#endif

static int hif_post_recv_buffers(struct hif_softc *scn);
static void hif_config_rri_on_ddr(struct hif_softc *scn);

/**
 * hif_target_access_log_dump() - dump the target access log
 *
 * Return: n/a
 */
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
static void hif_target_access_log_dump(void)
{
	hif_target_dump_access_log();
}
#endif

void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
		      uint8_t cmd_id, bool start)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	switch (cmd_id) {
	case AGC_DUMP:
		if (start)
			priv_start_agc(scn);
		else
			priv_dump_agc(scn);
		break;
	case CHANINFO_DUMP:
		if (start)
			priv_start_cap_chaninfo(scn);
		else
			priv_dump_chaninfo(scn);
		break;
	case BB_WATCHDOG_DUMP:
		priv_dump_bbwatchdog(scn);
		break;
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	case PCIE_ACCESS_DUMP:
		hif_target_access_log_dump();
		break;
#endif
	default:
		HIF_ERROR("%s: Invalid htc dump command", __func__);
		break;
	}
}
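
/*
 * Usage sketch (illustrative only, not called anywhere in this file):
 * a debug caller drives a dump by issuing the same command id twice,
 * first with start = true to arm the capture, then with start = false
 * to dump it. The hif_ctx below stands for whatever opaque HIF handle
 * the caller already holds.
 *
 *	hif_trigger_dump(hif_ctx, AGC_DUMP, true);    start AGC capture
 *	hif_trigger_dump(hif_ctx, AGC_DUMP, false);   dump captured data
 *
 * BB_WATCHDOG_DUMP ignores the start flag and dumps immediately.
 */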

static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;

	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}

static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	QDF_ASSERT(0); /* n too large */
	return 0;
}
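
/*
 * Worked example for roundup_pwr2() (illustrative): 512 is returned
 * unchanged because 512 & 511 == 0, while 600 walks test_pwr2 through
 * 4, 8, ..., 512, 1024 and returns 1024, the next power of 2. Ring
 * sizes are rounded up this way so that nentries_mask (nentries - 1)
 * can serve as a cheap wrap-around mask for ring indexes.
 */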

#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40

static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
#ifdef QCA_WIFI_3_0_ADRASTEA
	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
#endif
};

static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};

/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
 * mapping of HTC services to HIF pipes.
 */
/*
 * This authoritative table defines Copy Engine configuration and the mapping
 * of services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
 ============================================================================
 Purpose    | Service / Endpoint   | CE   | Dir  | Xfer     | Xfer
            |                      |      |      | Size     | Frequency
 ============================================================================
 tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
 descriptor |                      |      |      | O(100B)  | and regular
 download   |                      |      |      |          |
 ----------------------------------------------------------------------------
 rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
 indication |                      |      |      | O(10B)   | regular
 upload     |                      |      |      |          |
 ----------------------------------------------------------------------------
 MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
 upload     |                      |      |      | O(1000B) | (frequent
 e.g. noise |                      |      |      |          | during IP1.0
 packets    |                      |      |      |          | testing)
 ----------------------------------------------------------------------------
 MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
 download   |                      |      |      | O(1000B) | (frequent
 e.g.       |                      |      |      |          | during IP1.0
 misdirected|                      |      |      |          | testing)
 EAPOL      |                      |      |      |          |
 packets    |                      |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
            | DATA_VO (uplink)     |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
            | DATA_VO (downlink)   |      |      |          |
 ----------------------------------------------------------------------------
 WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
            |                      |      |      | O(100B)  |
 ----------------------------------------------------------------------------
 WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
 messages   | (downlink)           |      |      | O(100B)  |
 ----------------------------------------------------------------------------
 n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
            | HTC_RAW_STREAMS      |      |      |          |
            | (uplink)             |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
            | HTC_RAW_STREAMS      |      |      |          |
            | (downlink)           |      |      |          |
 ----------------------------------------------------------------------------
 diag       | none (raw CE)        | CE 7 | t<>h | 4        | Diag Window
            |                      |      |      |          | infrequent
 ============================================================================
 */
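
/*
 * Lookup sketch (illustrative): resolving the pipes that carry a given
 * HTC service, as hif_ce_bus_early_suspend() further below does for
 * WMI_CONTROL_SVC. ul_pipe receives the host->target CE id and dl_pipe
 * the target->host CE id:
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_is_polled, dl_is_polled;
 *	int status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
 *					     &ul_pipe, &dl_pipe,
 *					     &ul_is_polled, &dl_is_polled);
 *
 * With target_service_to_ce_map_wlan below, a successful call yields
 * ul_pipe == 3 and dl_pipe == 2.
 */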

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC_WMAC1,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		7,
	},
	{
		WMI_CONTROL_SVC_WMAC1,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC_WMAC2,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		9,
	},
	{
		WMI_CONTROL_SVC_WMAC2,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		0,		/* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,	/* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		5,
	},
#if defined(QCA_WIFI_3_0_ADRASTEA)
	{
		HTT_DATA2_MSG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		9,
	},
	{
		HTT_DATA3_MSG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		10,
	},
	{
		PACKET_LOG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		11,
	},
#endif
	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN  = TARGET to HOST */
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	/* (Additions here) */
	{ 0, 0, 0, },
};

static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		0,		/* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		1,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,	/* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,	/* in = DL = target -> host */
		1,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		4,
	},
#if WLAN_FEATURE_FASTPATH
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		5,
	},
#else /* WLAN_FEATURE_FASTPATH */
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		1,
	},
#endif /* WLAN_FEATURE_FASTPATH */

	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},	/* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},	/* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},	/* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},	/* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},	/* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},	/* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},	/* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},	/* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},	/* in = DL = target -> host */
	{0, 0, 0,},		/* Must be last */
};

static void hif_select_service_to_pipe_map(struct hif_softc *scn,
		struct service_to_pipe **tgt_svc_map_to_use,
		uint32_t *sz_tgt_svc_map_to_use)
{
	uint32_t mode = hif_get_conparam(scn);
	struct hif_target_info *tgt_info = &scn->target_info;

	if (QDF_IS_EPPING_ENABLED(mode)) {
		*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
		*sz_tgt_svc_map_to_use =
			sizeof(target_service_to_ce_map_wlan_epping);
	} else {
		switch (tgt_info->target_type) {
		default:
			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_wlan);
			break;
		case TARGET_TYPE_AR900B:
		case TARGET_TYPE_QCA9984:
		case TARGET_TYPE_IPQ4019:
		case TARGET_TYPE_QCA9888:
		case TARGET_TYPE_AR9888:
		case TARGET_TYPE_AR9888V2:
			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_ar900b);
			break;
		case TARGET_TYPE_QCA6290:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6290);
			break;
		}
	}
}

/**
 * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
 * @ce_state: pointer to the state context of the CE
 *
 * Description:
 *   Sets the htt_rx_data (or htt_tx_data) attribute of the state
 *   structure if the CE serves one of the HTT DATA services.
 *
 * Return:
 *   true if the CE serves an HTT DATA service, false otherwise
 */
static bool ce_mark_datapath(struct CE_state *ce_state)
{
	struct service_to_pipe *svc_map;
	uint32_t map_sz, map_len;
	int i;
	bool rc = false;

	if (ce_state != NULL) {
		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
					       &map_sz);

		map_len = map_sz / sizeof(struct service_to_pipe);
		for (i = 0; i < map_len; i++) {
			if ((svc_map[i].pipenum == ce_state->id) &&
			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
				/* HTT CEs are unidirectional */
				if (svc_map[i].pipedir == PIPEDIR_IN)
					ce_state->htt_rx_data = true;
				else
					ce_state->htt_tx_data = true;
				rc = true;
			}
		}
	}
	return rc;
}

/**
 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
 * @ce_id: ce in question
 * @ring: ring state being examined
 * @type: "src_ring" or "dest_ring" string for identifying the ring
 *
 * Warns on non-zero index values.
 * Causes a kernel panic if the ring is not empty during initialization.
 */
static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
					 char *type)
{
	if (ring->write_index != 0 || ring->sw_index != 0)
		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
			  ce_id, type, ring->sw_index, ring->write_index);
	if (ring->write_index != ring->sw_index)
		QDF_BUG(0);
}

/**
 * ce_srng_based() - Does this target use srng
 * @scn: pointer to the hif context
 *
 * Description:
 *   Returns true if the target is SRNG based.
 *
 * Return:
 *   true if the target uses SRNG copy engines, false otherwise
 */
bool ce_srng_based(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);

	switch (tgt_info->target_type) {
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA6290:
		return true;
	default:
		return false;
	}
	return false;
}

#ifdef QCA_WIFI_SUPPORT_SRNG
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	if (ce_srng_based(scn))
		return ce_services_srng();

	return ce_services_legacy();
}
#else /* QCA_WIFI_SUPPORT_SRNG */
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	return ce_services_legacy();
}
#endif /* QCA_WIFI_SUPPORT_SRNG */

static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
		struct pld_shadow_reg_v2_cfg **shadow_config,
		int *num_shadow_registers_configured)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
			scn, shadow_config, num_shadow_registers_configured);
}

static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
					uint8_t ring_type)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_get_desc_size(ring_type);
}

static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
						 uint8_t ring_type,
						 uint32_t nentries)
{
	uint32_t ce_nbytes;
	char *ptr;
	qdf_dma_addr_t base_addr;
	struct CE_ring_state *ce_ring;
	uint32_t desc_size;
	struct hif_softc *scn = CE_state->scn;

	ce_nbytes = sizeof(struct CE_ring_state)
		+ (nentries * sizeof(void *));
	ptr = qdf_mem_malloc(ce_nbytes);
	if (!ptr)
		return NULL;

	ce_ring = (struct CE_ring_state *)ptr;
	ptr += sizeof(struct CE_ring_state);
	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	ce_ring->low_water_mark_nentries = 0;
	ce_ring->high_water_mark_nentries = nentries;
	ce_ring->per_transfer_context = (void **)ptr;

	desc_size = ce_get_desc_size(scn, ring_type);

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	ce_ring->base_addr_owner_space_unaligned =
		qdf_mem_alloc_consistent(scn->qdf_dev,
					 scn->qdf_dev->dev,
					 (nentries * desc_size +
					  CE_DESC_RING_ALIGN),
					 &base_addr);
	if (ce_ring->base_addr_owner_space_unaligned == NULL) {
		HIF_ERROR("%s: ring has no DMA mem", __func__);
		qdf_mem_free(ptr);
		return NULL;
	}
	ce_ring->base_addr_CE_space_unaligned = base_addr;

	/* Initialize the memory to 0 to prevent garbage data from
	 * crashing the system during firmware download.
	 */
	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
		     nentries * desc_size + CE_DESC_RING_ALIGN);

	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {

		ce_ring->base_addr_CE_space =
			(ce_ring->base_addr_CE_space_unaligned +
			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);

		ce_ring->base_addr_owner_space = (void *)
			(((size_t) ce_ring->base_addr_owner_space_unaligned +
			  CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
	} else {
		ce_ring->base_addr_CE_space =
			ce_ring->base_addr_CE_space_unaligned;
		ce_ring->base_addr_owner_space =
			ce_ring->base_addr_owner_space_unaligned;
	}

	return ce_ring;
}
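
/*
 * Worked example for the align-up arithmetic in ce_alloc_ring_state()
 * (illustrative; assumes CE_DESC_RING_ALIGN is a power of 2, e.g. 8):
 * for an unaligned base of 0x1003, (0x1003 + 8 - 1) & ~(8 - 1) == 0x1008,
 * the next 8-byte boundary. Allocating CE_DESC_RING_ALIGN extra bytes up
 * front guarantees the aligned base still has room for all nentries
 * descriptors.
 */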

static void ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
			  uint32_t ce_id, struct CE_ring_state *ring,
			  struct CE_attr *attr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
					      ring, attr);
}

int hif_ce_bus_early_suspend(struct hif_softc *scn)
{
	uint8_t ul_pipe, dl_pipe;
	int ce_id, status, ul_is_polled, dl_is_polled;
	struct CE_state *ce_state;

	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		HIF_ERROR("%s: pipe_mapping failure", __func__);
		return status;
	}

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (ce_id == ul_pipe)
			continue;
		if (ce_id == dl_pipe)
			continue;

		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_RUNNING)
			ce_state->state = CE_PAUSED;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
	}

	return status;
}

int hif_ce_bus_late_resume(struct hif_softc *scn)
{
	int ce_id;
	struct CE_state *ce_state;
	int write_index;
	bool index_updated;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_PENDING) {
			write_index = ce_state->src_ring->write_index;
			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
						  write_index);
			ce_state->state = CE_RUNNING;
			index_updated = true;
		} else {
			index_updated = false;
		}

		if (ce_state->state == CE_PAUSED)
			ce_state->state = CE_RUNNING;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);

		if (index_updated)
			hif_record_ce_desc_event(scn, ce_id,
						 RESUME_WRITE_INDEX_UPDATE,
						 NULL, NULL, write_index);
	}

	return 0;
}

/**
 * ce_oom_recovery() - try to recover rx ce from oom condition
 * @context: CE_state of the CE with oom rx ring
 *
 * The executing work will continue to be rescheduled until
 * at least 1 descriptor is successfully posted to the rx ring.
 *
 * Return: none
 */
static void ce_oom_recovery(void *context)
{
	struct CE_state *ce_state = context;
	struct hif_softc *scn = ce_state->scn;
	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&ce_softc->pipe_info[ce_state->id];

	hif_post_recv_buffers_for_pipe(pipe_info);
}
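
/*
 * Scheduling sketch (illustrative; assumes the qdf_sched_work() helper
 * from qdf_defer.h): when an rx-buffer post fails for lack of memory,
 * the recovery path queues the work that ce_init() below creates, and
 * the workqueue then runs ce_oom_recovery() in process context:
 *
 *	qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
 */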

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 *
 * This should be called during the initialization sequence before
 * interrupts are enabled, so we don't have to worry about thread safety.
 */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr)
{
	struct CE_state *CE_state;
	uint32_t ctrl_addr;
	unsigned int nentries;
	bool malloc_CE_state = false;
	bool malloc_src_ring = false;

	QDF_ASSERT(CE_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(CE_id);
	CE_state = scn->ce_id_to_state[CE_id];

	if (!CE_state) {
		CE_state =
		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
		if (!CE_state) {
			HIF_ERROR("%s: CE_state has no mem", __func__);
			return NULL;
		}
		malloc_CE_state = true;
		qdf_spinlock_create(&CE_state->ce_index_lock);

		CE_state->id = CE_id;
		CE_state->ctrl_addr = ctrl_addr;
		CE_state->state = CE_RUNNING;
		CE_state->attr_flags = attr->flags;
		qdf_spinlock_create(&CE_state->lro_unloading_lock);
	}
	CE_state->scn = scn;

	qdf_atomic_init(&CE_state->rx_pending);
	if (attr == NULL) {
		/* Already initialized; caller wants the handle */
		return (struct CE_handle *)CE_state;
	}

	if (CE_state->src_sz_max)
		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
	else
		CE_state->src_sz_max = attr->src_sz_max;

	ce_init_ce_desc_event_log(CE_id,
				  attr->src_nentries + attr->dest_nentries);

	/* source ring setup */
	nentries = attr->src_nentries;
	if (nentries) {
		struct CE_ring_state *src_ring;

		nentries = roundup_pwr2(nentries);
		if (CE_state->src_ring) {
			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
		} else {
			src_ring = CE_state->src_ring =
				ce_alloc_ring_state(CE_state,
						    CE_RING_SRC,
						    nentries);
			if (!src_ring) {
				/* cannot allocate src ring. If the
				 * CE_state is allocated locally free
				 * CE_State and return error.
				 */
				HIF_ERROR("%s: src ring has no mem", __func__);
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					qdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			}
			/* we can allocate src ring. Mark that the src ring is
			 * allocated locally
			 */
			malloc_src_ring = true;

			/*
			 * Also allocate a shadow src ring in
			 * regular mem to use for faster access.
			 */
			src_ring->shadow_base_unaligned =
				qdf_mem_malloc(nentries *
					       sizeof(struct CE_src_desc) +
					       CE_DESC_RING_ALIGN);
			if (src_ring->shadow_base_unaligned == NULL) {
				HIF_ERROR("%s: src ring no shadow_base mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->shadow_base = (struct CE_src_desc *)
				(((size_t) src_ring->shadow_base_unaligned +
				  CE_DESC_RING_ALIGN - 1) &
				 ~(CE_DESC_RING_ALIGN - 1));

			if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
				goto error_target_access;

			ce_ring_setup(scn, CE_RING_SRC, CE_id, src_ring, attr);

			if (Q_TARGET_ACCESS_END(scn) < 0)
				goto error_target_access;
			ce_ring_test_initial_indexes(CE_id, src_ring,
						     "src_ring");
		}
	}

	/* destination ring setup */
	nentries = attr->dest_nentries;
	if (nentries) {
		struct CE_ring_state *dest_ring;

		nentries = roundup_pwr2(nentries);
		if (CE_state->dest_ring) {
			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
		} else {
			dest_ring = CE_state->dest_ring =
				ce_alloc_ring_state(CE_state,
						    CE_RING_DEST,
						    nentries);
			if (!dest_ring) {
				/* cannot allocate dst ring. If the CE_state
				 * or src ring is allocated locally free
				 * CE_State and src ring and return error.
				 */
				HIF_ERROR("%s: dest ring has no mem",
					  __func__);
				goto error_no_dma_mem;
			}

			if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
				goto error_target_access;

			ce_ring_setup(scn, CE_RING_DEST, CE_id,
				      dest_ring, attr);

			if (Q_TARGET_ACCESS_END(scn) < 0)
				goto error_target_access;

			ce_ring_test_initial_indexes(CE_id, dest_ring,
						     "dest_ring");

			/* For srng based target, init status ring here */
			if (ce_srng_based(CE_state->scn)) {
				CE_state->status_ring =
					ce_alloc_ring_state(CE_state,
							    CE_RING_STATUS,
							    nentries);
				if (CE_state->status_ring == NULL) {
					/* Allocation failed. Cleanup. */
					qdf_mem_free(CE_state->dest_ring);
					if (malloc_src_ring) {
						qdf_mem_free
							(CE_state->src_ring);
						CE_state->src_ring = NULL;
						malloc_src_ring = false;
					}
					if (malloc_CE_state) {
						/* allocated CE_state locally */
						scn->ce_id_to_state[CE_id] =
							NULL;
						qdf_mem_free(CE_state);
						malloc_CE_state = false;
					}

					return NULL;
				}
				if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
					goto error_target_access;

				ce_ring_setup(scn, CE_RING_STATUS, CE_id,
					      CE_state->status_ring, attr);

				if (Q_TARGET_ACCESS_END(scn) < 0)
					goto error_target_access;

			}

			/* epping */
			/* poll timer */
			if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL)) {
				qdf_timer_init(scn->qdf_dev,
					       &CE_state->poll_timer,
					       ce_poll_timeout,
					       CE_state,
					       QDF_TIMER_TYPE_SW);
				CE_state->timer_inited = true;
				qdf_timer_mod(&CE_state->poll_timer,
					      CE_POLL_TIMEOUT);
			}
		}
	}

	if (!ce_srng_based(scn)) {
		/* Enable CE error interrupts */
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			goto error_target_access;
		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			goto error_target_access;
	}

	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
			ce_oom_recovery, CE_state);

	/* update the htt_data attribute */
	ce_mark_datapath(CE_state);
	scn->ce_id_to_state[CE_id] = CE_state;

	return (struct CE_handle *)CE_state;

error_target_access:
error_no_dma_mem:
	ce_fini((struct CE_handle *)CE_state);
	return NULL;
}

#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_enable_fastpath() - update that we have enabled fastpath mode
 * @hif_ctx: HIF context
 *
 * For use in data path
 *
 * Return: void
 */
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (ce_srng_based(scn)) {
		HIF_INFO("%s, srng rings do not support fastpath", __func__);
		return;
	}
	HIF_DBG("%s, Enabling fastpath mode", __func__);
	scn->fastpath_mode_on = true;
}

/**
 * hif_is_fastpath_mode_enabled() - API to query if fastpath mode is enabled
 * @hif_ctx: HIF Context
 *
 * For use in data path to skip HTC
 *
 * Return: bool
 */
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->fastpath_mode_on;
}

/**
 * hif_get_ce_handle() - API to get CE handle for FastPath mode
 * @hif_ctx: HIF Context
 * @id: CopyEngine Id
 *
 * API to return CE handle for fastpath mode
 *
 * Return: opaque CE handle (void *)
 */
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->ce_id_to_state[id];
}

/**
 * ce_h2t_tx_ce_cleanup() - Placeholder function for H2T CE cleanup.
 * No processing is required inside this function.
 * @ce_hdl: Copy engine handle
 * Using an assert, this function makes sure that,
 * the TX CE has been processed completely.
 *
 * This is called while dismantling CE structures. No other thread
 * should be using these structures while dismantling is occurring,
 * therefore no locking is needed.
 *
 * Return: none
 */
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct hif_softc *sc = ce_state->scn;
	uint32_t sw_index, write_index;

	if (hif_is_nss_wifi_enabled(sc))
		return;

	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
		HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
			__func__, __LINE__);
		sw_index = src_ring->sw_index;
		write_index = src_ring->sw_index;

		/* At this point Tx CE should be clean */
		qdf_assert_always(sw_index == write_index);
	}
}

/**
 * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
 * @ce_hdl: Handle to CE
 *
 * These buffers are never allocated on the fly, but
 * are allocated only once during HIF start and freed
 * only once during HIF stop.
 * NOTE:
 * The assumption here is there is no in-flight DMA in progress
 * currently, so that buffers can be freed up safely.
 *
 * Return: NONE
 */
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *dst_ring = ce_state->dest_ring;
	qdf_nbuf_t nbuf;
	int i;

	if (ce_state->scn->fastpath_mode_on == false)
		return;

	if (!ce_state->htt_rx_data)
		return;

	/*
	 * In fastpath mode, datapath CEs are special: unlike other CEs,
	 * a datapath CE is kept completely full and does not leave one
	 * blank space to distinguish an empty queue from a full one.
	 * So free all the entries.
	 */
	for (i = 0; i < dst_ring->nentries; i++) {
		nbuf = dst_ring->per_transfer_context[i];

		/*
		 * The reasons for doing this check are:
		 * 1) Protect against calling cleanup before allocating buffers
		 * 2) In a corner case, FASTPATH_mode_on may be set, but we
		 *    could have a partially filled ring, because of a memory
		 *    allocation failure in the middle of allocating the ring.
		 *    This check accounts for that case; checking the
		 *    fastpath_mode_on flag or the started flag would not have
		 *    covered it. This is not in the performance path,
		 *    so it is OK to do this.
		 */
		if (nbuf) {
			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
					      QDF_DMA_FROM_DEVICE);
			qdf_nbuf_free(nbuf);
		}
	}
}

/**
 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count of
 *	fastpath pipes by 1
 * @scn: HIF handle
 *
 * Datapath Rx CEs are a special case, where we reuse all the message buffers.
 * Hence we have to post all the entries in the pipe, even, in the beginning,
 * unlike for other CE pipes where one less than dest_nentries are filled in
 * the beginning.
 *
 * Return: None
 */
static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
	int pipe_num;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (scn->fastpath_mode_on == false)
		return;

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info =
			&hif_state->pipe_info[pipe_num];
		struct CE_state *ce_state =
			scn->ce_id_to_state[pipe_info->pipe_num];

		if (ce_state->htt_rx_data)
			atomic_inc(&pipe_info->recv_bufs_needed);
	}
}
#else
static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
}

static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return false;
}

static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
{
	return false;
}
#endif /* WLAN_FEATURE_FASTPATH */

void ce_fini(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int CE_id = CE_state->id;
	struct hif_softc *scn = CE_state->scn;

	CE_state->state = CE_UNUSED;
	scn->ce_id_to_state[CE_id] = NULL;

	qdf_spinlock_destroy(&CE_state->lro_unloading_lock);

	qdf_lro_deinit(CE_state->lro_data);

	if (CE_state->src_ring) {
		/* Cleanup the datapath Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		if (CE_state->src_ring->shadow_base_unaligned)
			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
		if (CE_state->src_ring->base_addr_owner_space_unaligned)
			qdf_mem_free_consistent(scn->qdf_dev,
						scn->qdf_dev->dev,
						(CE_state->src_ring->nentries *
						 sizeof(struct CE_src_desc) +
						 CE_DESC_RING_ALIGN),
						CE_state->src_ring->
						base_addr_owner_space_unaligned,
						CE_state->src_ring->
						base_addr_CE_space, 0);
		qdf_mem_free(CE_state->src_ring);
	}
	if (CE_state->dest_ring) {
		/* Cleanup the datapath Rx ring */
		ce_t2h_msg_ce_cleanup(copyeng);

		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
			qdf_mem_free_consistent(scn->qdf_dev,
						scn->qdf_dev->dev,
						(CE_state->dest_ring->nentries *
						 sizeof(struct CE_dest_desc) +
						 CE_DESC_RING_ALIGN),
						CE_state->dest_ring->
						base_addr_owner_space_unaligned,
						CE_state->dest_ring->
						base_addr_CE_space, 0);
		qdf_mem_free(CE_state->dest_ring);

		/* epping */
		if (CE_state->timer_inited) {
			CE_state->timer_inited = false;
			qdf_timer_free(&CE_state->poll_timer);
		}
	}
	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
		/* Cleanup the datapath Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		if (CE_state->status_ring->shadow_base_unaligned)
			qdf_mem_free(
				CE_state->status_ring->shadow_base_unaligned);

		if (CE_state->status_ring->base_addr_owner_space_unaligned)
			qdf_mem_free_consistent(scn->qdf_dev,
						scn->qdf_dev->dev,
						(CE_state->status_ring->nentries *
						 sizeof(struct CE_src_desc) +
						 CE_DESC_RING_ALIGN),
						CE_state->status_ring->
						base_addr_owner_space_unaligned,
						CE_state->status_ring->
						base_addr_CE_space, 0);
		qdf_mem_free(CE_state->status_ring);
	}

	qdf_spinlock_destroy(&CE_state->ce_index_lock);
	qdf_mem_free(CE_state);
}

void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	qdf_mem_zero(&hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
	qdf_mem_zero(&hif_state->msg_callbacks_current,
		     sizeof(hif_state->msg_callbacks_current));
}

/* Send the first nbytes bytes of the buffer */
QDF_STATUS
hif_send_head(struct hif_opaque_softc *hif_ctx,
	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
	      qdf_nbuf_t nbuf, unsigned int data_attr)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	int bytes = nbytes, nfrags = 0;
	struct ce_sendlist sendlist;
	int status, i = 0;
	unsigned int mux_id = 0;

	QDF_ASSERT(nbytes <= qdf_nbuf_len(nbuf));

	transfer_id =
		(mux_id & MUX_ID_MASK) |
		(transfer_id & TRANSACTION_ID_MASK);
	data_attr &= DESC_DATA_FLAG_MASK;
	/*
	 * The common case involves sending multiple fragments within a
	 * single download (the tx descriptor and the tx frame header).
	 * So, optimize for the case of multiple fragments by not even
	 * checking whether it's necessary to use a sendlist.
	 * The overhead of using a sendlist for a single buffer download
	 * is not a big deal, since it happens rarely (for WMI messages).
	 */
	ce_sendlist_init(&sendlist);
	do {
		qdf_dma_addr_t frag_paddr;
		int frag_bytes;

		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
		/*
		 * Clear the packet offset for all but the first CE desc.
		 */
		if (i++ > 0)
			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;

		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
					     frag_bytes >
					     bytes ? bytes : frag_bytes,
					     qdf_nbuf_get_frag_is_wordstream
					     (nbuf,
					      nfrags) ? 0 :
					     CE_SEND_FLAG_SWAP_DISABLE,
					     data_attr);
		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("%s: error, frag_num %d larger than limit",
				  __func__, nfrags);
			return status;
		}
		bytes -= frag_bytes;
		nfrags++;
	} while (bytes > 0);

	/* Make sure we have resources to handle this request */
	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	if (pipe_info->num_sends_allowed < nfrags) {
		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
		return QDF_STATUS_E_RESOURCES;
	}
	pipe_info->num_sends_allowed -= nfrags;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);

	if (qdf_unlikely(ce_hdl == NULL)) {
		HIF_ERROR("%s: error CE handle is null", __func__);
		return A_ERROR;
	}

	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
			     qdf_nbuf_data_addr(nbuf),
			     sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
	QDF_ASSERT(status == QDF_STATUS_SUCCESS);

	return status;
}
void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
			     int force)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance. If there seem to be plenty of
		 * resources left, then just wait, since checking involves
		 * reading a CE register, which is a relatively expensive
		 * operation.
		 */
		resources = hif_get_free_queue_number(hif_ctx, pipe);
		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
				 1))
			return;
	}
#if ATH_11AC_TXCOMPACT
	ce_per_engine_servicereap(scn, pipe);
#else
	ce_per_engine_service(scn, pipe);
#endif
}

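/**
 * hif_get_free_queue_number() - get the number of sends a pipe can accept
 * @hif_ctx: hif opaque context
 * @pipe: pipe number
 *
 * Reads num_sends_allowed for @pipe under the completion free queue lock.
 *
 * Return: number of send slots currently available on the pipe
 */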
uint16_t
hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	uint16_t rv;

	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	rv = pipe_info->num_sends_allowed;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
	return rv;
}

/**
 * hif_pci_ce_send_done() - called by the CE layer when a send to the
 *	Target completes
 * @copyeng: copy engine handle on which the send completed
 * @ce_context: pipe info registered as the CE send context
 * @transfer_context: network buffer, or CE_SENDLIST_ITEM_CTXT for
 *	intermediate sendlist fragments
 * @CE_data: DMA address of the completed buffer
 * @nbytes: number of bytes transferred
 * @transfer_id: transfer identifier passed at send time
 * @sw_index: software ring index
 * @hw_index: hardware ring index
 * @toeplitz_hash_result: toeplitz hash of the completed buffer
 *
 * Drains all completed send descriptors, invoking the upper layer's
 * txCompletionHandler for the last fragment of each transfer and
 * replenishing num_sends_allowed.
 */
static void
hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int sw_index, unsigned int hw_index,
		     unsigned int toeplitz_hash_result)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	unsigned int sw_idx = sw_index, hw_idx = hw_index;
	struct hif_msg_callbacks *msg_callbacks =
		&pipe_info->pipe_callbacks;

	do {
		/*
		 * The upper layer callback will be triggered
		 * when the last fragment is completed.
		 */
		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
			if (scn->target_status == TARGET_STATUS_RESET) {
				qdf_nbuf_unmap_single(scn->qdf_dev,
						      transfer_context,
						      QDF_DMA_TO_DEVICE);
				qdf_nbuf_free(transfer_context);
			} else
				msg_callbacks->txCompletionHandler(
					msg_callbacks->Context,
					transfer_context, transfer_id,
					toeplitz_hash_result);
		}

		qdf_spin_lock(&pipe_info->completion_freeq_lock);
		pipe_info->num_sends_allowed++;
		qdf_spin_unlock(&pipe_info->completion_freeq_lock);
	} while (ce_completed_send_next(copyeng,
			&ce_context, &transfer_context,
			&CE_data, &nbytes, &transfer_id,
			&sw_idx, &hw_idx,
			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
}

/**
 * hif_ce_do_recv(): send message from copy engine to upper layers
 * @msg_callbacks: structure containing callback and callback context
 * @netbuf: skb containing message
 * @nbytes: number of bytes in the message
 * @pipe_info: used for the pipe_number info
 *
 * Checks the packet length, configures the length in the netbuf,
 * and calls the upper layer callback.
 *
 * Return: None
 */
static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
				  qdf_nbuf_t netbuf, int nbytes,
				  struct HIF_CE_pipe_info *pipe_info) {
	if (nbytes <= pipe_info->buf_sz) {
		qdf_nbuf_set_pktlen(netbuf, nbytes);
		msg_callbacks->
			rxCompletionHandler(msg_callbacks->Context,
					    netbuf, pipe_info->pipe_num);
	} else {
		HIF_ERROR("%s: Invalid Rx msg buf:%p nbytes:%d",
			  __func__, netbuf, nbytes);
		qdf_nbuf_free(netbuf);
	}
}

/**
 * hif_pci_ce_recv_data() - called by the CE layer when data is received
 *	from the Target
 * @copyeng: copy engine handle on which the data arrived
 * @ce_context: pipe info registered as the CE recv context
 * @transfer_context: network buffer holding the received data
 * @CE_data: DMA address of the buffer
 * @nbytes: number of bytes received
 * @transfer_id: transfer identifier
 * @flags: CE receive flags
 *
 * Drains completed receive descriptors, reposts receive buffers, and
 * hands each message to the upper layer via hif_ce_do_recv(), breaking
 * out early once the copy engine should yield.
 */
static void
hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int flags)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct CE_state *ce_state = (struct CE_state *) copyeng;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
#ifdef HIF_PCI
	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
#endif
	struct hif_msg_callbacks *msg_callbacks =
		&pipe_info->pipe_callbacks;

	do {
#ifdef HIF_PCI
		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
#endif
		qdf_nbuf_unmap_single(scn->qdf_dev,
				      (qdf_nbuf_t) transfer_context,
				      QDF_DMA_FROM_DEVICE);

		atomic_inc(&pipe_info->recv_bufs_needed);
		hif_post_recv_buffers_for_pipe(pipe_info);
		if (scn->target_status == TARGET_STATUS_RESET)
			qdf_nbuf_free(transfer_context);
		else
			hif_ce_do_recv(msg_callbacks, transfer_context,
				       nbytes, pipe_info);

		/* Set up force_break flag if num of receives reaches
		 * MAX_NUM_OF_RECEIVES
		 */
		ce_state->receive_count++;
		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
			ce_state->force_break = 1;
			break;
		}
	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
					&CE_data, &nbytes, &transfer_id,
					&flags) == QDF_STATUS_SUCCESS);
}

/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */

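/**
 * hif_post_init() - save upper layer message callbacks
 * @hif_ctx: hif opaque context
 * @unused: unused parameter
 * @callbacks: message callbacks, installed later by
 *	hif_msg_callbacks_install()
 *
 * Return: none
 */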
void
hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
	      struct hif_msg_callbacks *callbacks)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	spin_lock_init(&pcie_access_log_lock);
#endif
	/* Save callbacks for later installation */
	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
		     sizeof(hif_state->msg_callbacks_pending));
}

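/**
 * hif_completion_thread_startup() - register send/recv completion callbacks
 * @hif_state: HIF CE state
 *
 * Verifies that the upper layer completion handlers have been registered,
 * then registers hif_pci_ce_send_done() and hif_pci_ce_recv_data() with
 * each copy engine pipe (the diagnostic CE is skipped) and initializes
 * the per-pipe completion free queue locks and pipe callbacks.
 *
 * Return: 0 on success, -EINVAL or -EFAULT on failure
 */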
static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
{
	struct CE_handle *ce_diag = hif_state->ce_diag;
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	struct hif_msg_callbacks *hif_msg_callbacks =
		&hif_state->msg_callbacks_current;

	/* daemonize("hif_compl_thread"); */

	if (scn->ce_count == 0) {
		HIF_ERROR("%s: Invalid ce_count", __func__);
		return -EINVAL;
	}

	if (!hif_msg_callbacks ||
	    !hif_msg_callbacks->rxCompletionHandler ||
	    !hif_msg_callbacks->txCompletionHandler) {
		HIF_ERROR("%s: no completion handler registered", __func__);
		return -EFAULT;
	}

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr attr;
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl == ce_diag)
			continue;	/* Handle Diagnostic CE specially */
		attr = hif_state->host_ce_config[pipe_num];
		if (attr.src_nentries) {
			/* pipe used to send to target */
			HIF_DBG("%s: pipe_num:%d pipe_info:0x%p",
				__func__, pipe_num, pipe_info);
			ce_send_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_send_done, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
			pipe_info->num_sends_allowed = attr.src_nentries - 1;
		}
		if (attr.dest_nentries) {
			/* pipe used to receive from target */
			ce_recv_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_recv_data, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
		}

		if (attr.src_nentries)
			qdf_spinlock_create(&pipe_info->completion_freeq_lock);

		qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
			     sizeof(pipe_info->pipe_callbacks));
	}

	A_TARGET_ACCESS_UNLIKELY(scn);
	return 0;
}

/*
 * Install pending msg callbacks.
 *
 * TBDXXX: This hack is needed because upper layers install msg callbacks
 * for use with HTC before BMI is done; yet this HIF implementation
 * needs to continue to use BMI msg callbacks. Really, upper layers
 * should not register HTC callbacks until AFTER BMI phase.
 */
static void hif_msg_callbacks_install(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	qdf_mem_copy(&hif_state->msg_callbacks_current,
		     &hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
}

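/**
 * hif_get_default_pipe() - get the default upload/download pipe pair
 * @hif_hdl: hif opaque context
 * @ULPipe: filled with the upload (host->target) pipe number
 * @DLPipe: filled with the download (target->host) pipe number
 *
 * Resolves the pipes mapped to the HTC_CTRL_RSVD_SVC service.
 *
 * Return: none
 */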
void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
			  uint8_t *DLPipe)
{
	int ul_is_polled, dl_is_polled;

	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
}

/**
 * hif_dump_pipe_debug_count() - Log error count
 * @scn: hif_softc pointer.
 *
 * Output the pipe error counts of each pipe to log file
 *
 * Return: N/A
 */
void hif_dump_pipe_debug_count(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num;

	if (hif_state == NULL) {
		HIF_ERROR("%s hif_state is NULL", __func__);
		return;
	}
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];

		if (pipe_info->nbuf_alloc_err_count > 0 ||
		    pipe_info->nbuf_dma_err_count > 0 ||
		    pipe_info->nbuf_ce_enqueue_err_count)
			HIF_ERROR(
				"%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_alloc_err_count,
				pipe_info->nbuf_dma_err_count,
				pipe_info->nbuf_ce_enqueue_err_count);
	}
}

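/**
 * hif_post_recv_buffers_failure() - record and recover from a failed post
 * @pipe_info: pipe on which the receive buffer post failed
 * @nbuf: network buffer involved in the failure (may be NULL on
 *	allocation failure)
 * @error_cnt: per-pipe error counter to increment
 * @failure_type: CE event type to record
 * @failure_type_string: printable name of the failure type
 *
 * Puts the buffer back on the needed count, logs and records the
 * failure, and schedules the OOM allocation work if the very last
 * buffer of the destination ring could not be posted.
 *
 * Return: none
 */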
static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
					  void *nbuf, uint32_t *error_cnt,
					  enum hif_ce_event_type failure_type,
					  const char *failure_type_string)
{
	int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
	struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
	int ce_id = CE_state->id;
	uint32_t error_cnt_tmp;

	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
	error_cnt_tmp = ++(*error_cnt);
	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
	HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
		__func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
		failure_type_string);
	hif_record_ce_desc_event(scn, ce_id, failure_type,
				 NULL, nbuf, bufs_needed_tmp);
	/* if we fail to allocate the last buffer for an rx pipe,
	 * there is no trigger to refill the ce and we will
	 * eventually crash
	 */
	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
		qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
}

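/**
 * hif_post_recv_buffers_for_pipe() - replenish the receive buffers of a pipe
 * @pipe_info: pipe to replenish
 *
 * Allocates, DMA-maps and enqueues one network buffer for each receive
 * buffer the pipe currently needs, adjusting the per-pipe error counters
 * by the number of buffers successfully posted.
 *
 * Return: 0 on success, 1 if a buffer could not be allocated, mapped or
 *	enqueued
 */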
static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	qdf_size_t buf_sz;
	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
	QDF_STATUS ret;
	uint32_t bufs_posted = 0;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return 0;
	}

	ce_hdl = pipe_info->ce_hdl;

	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
		qdf_dma_addr_t CE_data;	/* CE space buffer address */
		qdf_nbuf_t nbuf;
		int status;

		atomic_dec(&pipe_info->recv_bufs_needed);
		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
		if (!nbuf) {
			hif_post_recv_buffers_failure(pipe_info, nbuf,
					&pipe_info->nbuf_alloc_err_count,
					HIF_RX_NBUF_ALLOC_FAILURE,
					"HIF_RX_NBUF_ALLOC_FAILURE");
			return 1;
		}

		/*
		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
		 * CE_data = dma_map_single(dev, data, buf_sz,
		 *			    DMA_FROM_DEVICE);
		 */
		ret = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
					  QDF_DMA_FROM_DEVICE);

		if (unlikely(ret != QDF_STATUS_SUCCESS)) {
			hif_post_recv_buffers_failure(pipe_info, nbuf,
					&pipe_info->nbuf_dma_err_count,
					HIF_RX_NBUF_MAP_FAILURE,
					"HIF_RX_NBUF_MAP_FAILURE");
			qdf_nbuf_free(nbuf);
			return 1;
		}

		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);

		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
						   buf_sz, DMA_FROM_DEVICE);
		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		if (unlikely(status != EOK)) {
			hif_post_recv_buffers_failure(pipe_info, nbuf,
					&pipe_info->nbuf_ce_enqueue_err_count,
					HIF_RX_NBUF_ENQUEUE_FAILURE,
					"HIF_RX_NBUF_ENQUEUE_FAILURE");

			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
					      QDF_DMA_FROM_DEVICE);
			qdf_nbuf_free(nbuf);
			return 1;
		}

		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
		bufs_posted++;
	}
	pipe_info->nbuf_alloc_err_count =
		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
	pipe_info->nbuf_dma_err_count =
		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
	pipe_info->nbuf_ce_enqueue_err_count =
		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
		pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;

	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

	return 0;
}

/*
 * Try to post all desired receive buffers for all pipes.
 * Returns 0 for a non-fastpath rx copy engine, since
 * oom_allocation_work will be scheduled to recover any
 * failures; returns non-zero if receive buffers for a
 * fastpath rx copy engine could not be completely replenished.
 */
static int hif_post_recv_buffers(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num, rv = 0;
	struct CE_state *ce_state;

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		ce_state = scn->ce_id_to_state[pipe_num];
		pipe_info = &hif_state->pipe_info[pipe_num];

		if (hif_is_nss_wifi_enabled(scn) &&
		    ce_state && (ce_state->htt_rx_data))
			continue;

		if (hif_post_recv_buffers_for_pipe(pipe_info) &&
		    ce_state->htt_rx_data &&
		    scn->fastpath_mode_on) {
			rv = 1;
			goto done;
		}
	}

done:
	A_TARGET_ACCESS_UNLIKELY(scn);

	return rv;
}

QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_update_fastpath_recv_bufs_cnt(scn);

	hif_msg_callbacks_install(scn);

	if (hif_completion_thread_startup(hif_state))
		return QDF_STATUS_E_FAILURE;

	/* enable buffer cleanup */
	hif_state->started = true;

	/* Post buffers once to start things off. */
	if (hif_post_recv_buffers(scn)) {
		/* cleanup is done in hif_ce_disable */
		HIF_ERROR("%s:failed to post buffers", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

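/**
 * hif_recv_buffer_cleanup_on_pipe() - reclaim posted receive buffers
 * @pipe_info: pipe to clean up
 *
 * Revokes every receive buffer still enqueued on the pipe's copy engine,
 * unmapping and freeing the underlying network buffers. A no-op for
 * unused copy engines or when HIF has not been started.
 *
 * Return: none
 */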
static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct hif_softc *scn;
	struct CE_handle *ce_hdl;
	uint32_t buf_sz;
	struct HIF_CE_state *hif_state;
	qdf_nbuf_t netbuf;
	qdf_dma_addr_t CE_data;
	void *per_CE_context;

	buf_sz = pipe_info->buf_sz;
	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started)
		return;

	scn = HIF_GET_SOFTC(hif_state);
	ce_hdl = pipe_info->ce_hdl;

	if (scn->qdf_dev == NULL)
		return;
	while (ce_revoke_recv_next
		       (ce_hdl, &per_CE_context, (void **)&netbuf,
			&CE_data) == QDF_STATUS_SUCCESS) {
		if (netbuf) {
			qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
					      QDF_DMA_FROM_DEVICE);
			qdf_nbuf_free(netbuf);
		}
	}
}

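/**
 * hif_send_buffer_cleanup_on_pipe() - complete outstanding send buffers
 * @pipe_info: pipe to clean up
 *
 * Cancels every send still pending on the pipe's copy engine and
 * indicates the completion to the upper layer so the buffers can be
 * freed, skipping packets owned by the HTC/HTT tx endpoint which are
 * freed elsewhere.
 *
 * Return: none
 */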
static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	struct HIF_CE_state *hif_state;
	struct hif_softc *scn;
	qdf_nbuf_t netbuf;
	void *per_CE_context;
	qdf_dma_addr_t CE_data;
	unsigned int nbytes;
	unsigned int id;
	uint32_t buf_sz;
	uint32_t toeplitz_hash_result;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started) {
		return;
	}

	scn = HIF_GET_SOFTC(hif_state);

	ce_hdl = pipe_info->ce_hdl;

	while (ce_cancel_send_next
		       (ce_hdl, &per_CE_context,
			(void **)&netbuf, &CE_data, &nbytes,
			&id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
			/*
			 * Packets enqueued by htt_h2t_ver_req_msg() and
			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
			 * freed in htt_htc_misc_pkt_pool_free() in
			 * wlantl_close(), so do not free them here again;
			 * identify them by checking the endpoint on which
			 * they were queued.
			 */
			if (id == scn->htc_htt_tx_endpoint)
				return;
			/* Indicate the completion to higher
			 * layer to free the buffer
			 */
			if (pipe_info->pipe_callbacks.txCompletionHandler)
				pipe_info->pipe_callbacks.
					txCompletionHandler(pipe_info->
					pipe_callbacks.Context,
					netbuf, id, toeplitz_hash_result);
		}
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
{
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	struct CE_state *ce_state;

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		ce_state = scn->ce_id_to_state[pipe_num];
		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
		    ((ce_state->htt_tx_data) ||
		     (ce_state->htt_rx_data))) {
			continue;
		}

		pipe_info = &hif_state->pipe_info[pipe_num];
		hif_recv_buffer_cleanup_on_pipe(pipe_info);
		hif_send_buffer_cleanup_on_pipe(pipe_info);
	}
}

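/**
 * hif_flush_surprise_remove() - clean up buffers on surprise removal
 * @hif_ctx: hif opaque context
 *
 * Releases all residual send and receive buffers via hif_buffer_cleanup().
 *
 * Return: none
 */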
void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_buffer_cleanup(hif_state);
}

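/**
 * hif_destroy_oom_work() - destroy the per-CE OOM allocation work items
 * @scn: hif context
 *
 * Return: none
 */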
static void hif_destroy_oom_work(struct hif_softc *scn)
{
	struct CE_state *ce_state;
	int ce_id;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		ce_state = scn->ce_id_to_state[ce_id];
		if (ce_state)
			qdf_destroy_work(scn->qdf_dev,
					 &ce_state->oom_allocation_work);
	}
}

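/**
 * hif_ce_stop() - stop copy engine activity and free host-side state
 * @scn: hif context
 *
 * Disables interrupts, destroys the OOM work items, cleans up residual
 * buffers, finalizes each copy engine and stops the sleep timer.
 *
 * Return: none
 */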
void hif_ce_stop(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num;

	/*
	 * before cleaning up any memory, ensure irq &
	 * bottom half contexts will not be re-entered
	 */
	hif_nointrs(scn);
	hif_destroy_oom_work(scn);
	scn->hif_init_done = false;

	/*
	 * At this point, asynchronous threads are stopped,
	 * The Target should not DMA nor interrupt, Host code may
	 * not initiate anything more. So we just need to clean
	 * up Host-side state.
	 */

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	hif_buffer_cleanup(hif_state);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}

	if (hif_state->sleep_timer_init) {
		qdf_timer_stop(&hif_state->sleep_timer);
		qdf_timer_free(&hif_state->sleep_timer);
		hif_state->sleep_timer_init = false;
	}

	hif_state->started = false;
}

/**
 * hif_get_target_ce_config() - get copy engine configuration
 * @scn: hif context
 * @target_ce_config_ret: basic copy engine configuration
 * @target_ce_config_sz_ret: size of the basic configuration in bytes
 * @target_service_to_ce_map_ret: service mapping for the copy engines
 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
 * @target_shadow_reg_cfg_ret: shadow register configuration
 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
 *
 * Provides an accessor to these values outside this file.
 * They are currently stored in static pointers to const sections;
 * there are multiple configurations that are selected from at compile time.
 * Runtime selection would need to consider mode, target type and bus type.
 *
 * Return: return by parameter.
 */
void hif_get_target_ce_config(struct hif_softc *scn,
		struct CE_pipe_config **target_ce_config_ret,
		uint32_t *target_ce_config_sz_ret,
		struct service_to_pipe **target_service_to_ce_map_ret,
		uint32_t *target_service_to_ce_map_sz_ret,
		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
		uint32_t *shadow_cfg_sz_ret)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	*target_ce_config_ret = hif_state->target_ce_config;
	*target_ce_config_sz_ret = hif_state->target_ce_config_sz;

	hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
				       target_service_to_ce_map_sz_ret);

	if (target_shadow_reg_cfg_ret)
		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;

	if (shadow_cfg_sz_ret)
		*shadow_cfg_sz_ret = shadow_cfg_sz;
}

#ifdef CONFIG_SHADOW_V2
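/**
 * hif_print_hal_shadow_register_cfg() - dump the v2 shadow register config
 * @cfg: platform driver enable configuration holding the shadow registers
 *
 * Return: none
 */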
static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
{
	int i;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "%s: num_config %d\n", __func__, cfg->num_shadow_reg_v2_cfg);

	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  "%s: i %d, val %x\n", __func__, i,
			  cfg->shadow_reg_v2_cfg[i].addr);
	}
}

#else
static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
{
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "%s: CONFIG_SHADOW_V2 not defined\n", __func__);
}
#endif

/**
 * hif_wlan_enable(): call the platform driver to enable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode and CE configuration to the
 * platform driver to enable wlan.
 *
 * Return: linux error code
 */
int hif_wlan_enable(struct hif_softc *scn)
{
	struct pld_wlan_enable_cfg cfg;
	enum pld_driver_mode mode;
	uint32_t con_mode = hif_get_conparam(scn);

	hif_get_target_ce_config(scn,
			(struct CE_pipe_config **)&cfg.ce_tgt_cfg,
			&cfg.num_ce_tgt_cfg,
			(struct service_to_pipe **)&cfg.ce_svc_cfg,
			&cfg.num_ce_svc_pipe_cfg,
			(struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
			&cfg.num_shadow_reg_cfg);

	/* translate from structure size to array size */
	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);

	hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
					    &cfg.num_shadow_reg_v2_cfg);

	hif_print_hal_shadow_register_cfg(&cfg);

	if (QDF_GLOBAL_FTM_MODE == con_mode)
		mode = PLD_FTM;
	else if (QDF_IS_EPPING_ENABLED(con_mode))
		mode = PLD_EPPING;
	else
		mode = PLD_MISSION;

	if (BYPASS_QMI)
		return 0;
	else
		return pld_wlan_enable(scn->qdf_dev->dev, &cfg,
				       mode, QWLAN_VERSIONSTR);
}

#define CE_EPPING_USES_IRQ true

/**
 * hif_ce_prepare_config() - load the correct static tables.
 * @scn: hif context
 *
 * Epping uses different static attribute tables than mission mode.
 */
void hif_ce_prepare_config(struct hif_softc *scn)
{
	uint32_t mode = hif_get_conparam(scn);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_state->ce_services = ce_services_attach(scn);

	scn->ce_count = HOST_CE_COUNT;
	/* if epping is enabled we need to use the epping configuration. */
	if (QDF_IS_EPPING_ENABLED(mode)) {
		if (CE_EPPING_USES_IRQ)
			hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
		else
			hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
		hif_state->target_ce_config = target_ce_config_wlan_epping;
		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
		target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
		shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
	}

	switch (tgt_info->target_type) {
	default:
		hif_state->host_ce_config = host_ce_config_wlan;
		hif_state->target_ce_config = target_ce_config_wlan;
		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
		break;
	case TARGET_TYPE_AR900B:
	case TARGET_TYPE_QCA9984:
	case TARGET_TYPE_IPQ4019:
	case TARGET_TYPE_QCA9888:
		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
			hif_state->host_ce_config =
				host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
		} else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
			hif_state->host_ce_config =
				host_lowdesc_ce_cfg_wlan_ar900b;
		} else {
			hif_state->host_ce_config = host_ce_config_wlan_ar900b;
		}

		hif_state->target_ce_config = target_ce_config_wlan_ar900b;
		hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_ar900b);

		break;

	case TARGET_TYPE_AR9888:
	case TARGET_TYPE_AR9888V2:
		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
			hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888;
		} else {
			hif_state->host_ce_config = host_ce_config_wlan_ar9888;
		}

		hif_state->target_ce_config = target_ce_config_wlan_ar9888;
		hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_ar9888);

		break;

	case TARGET_TYPE_QCA8074:
		if (scn->bus_type == QDF_BUS_TYPE_PCI) {
			hif_state->host_ce_config =
					host_ce_config_wlan_qca8074_pci;
			hif_state->target_ce_config =
				target_ce_config_wlan_qca8074_pci;
			hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_qca8074_pci);
		} else {
			hif_state->host_ce_config = host_ce_config_wlan_qca8074;
			hif_state->target_ce_config =
					target_ce_config_wlan_qca8074;
			hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_qca8074);
		}
		break;
	case TARGET_TYPE_QCA6290:
		hif_state->host_ce_config = host_ce_config_wlan_qca6290;
		hif_state->target_ce_config = target_ce_config_wlan_qca6290;
		hif_state->target_ce_config_sz =
					sizeof(target_ce_config_wlan_qca6290);

		scn->ce_count = QCA_6290_CE_COUNT;
		break;
	}
}

/**
 * hif_ce_open() - do ce specific allocations
 * @hif_sc: pointer to hif context
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	qdf_spinlock_create(&hif_state->irq_reg_lock);
	qdf_spinlock_create(&hif_state->keep_awake_lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * hif_ce_close() - do ce specific free
 * @hif_sc: pointer to hif context
 */
void hif_ce_close(struct hif_softc *hif_sc)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
}

/**
 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
 * @hif_sc: hif context
 *
 * uses state variables to support cleaning up when hif_config_ce fails.
 */
void hif_unconfig_ce(struct hif_softc *hif_sc)
{
	int pipe_num;
	struct HIF_CE_pipe_info *pipe_info;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ce_unregister_irq(hif_state, (1 << pipe_num));
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
		}
	}
	if (hif_sc->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		hif_sc->athdiag_procfs_inited = false;
	}
}

#ifdef CONFIG_BYPASS_QMI
#define FW_SHARED_MEM (2 * 1024 * 1024)

/**
 * hif_post_static_buf_to_target() - post static buffer to WLAN FW
 * @scn: pointer to HIF structure
 *
 * WLAN FW needs 2MB memory from DDR when QMI is disabled.
 *
 * Return: void
 */
static void hif_post_static_buf_to_target(struct hif_softc *scn)
{
	void *target_va;
	phys_addr_t target_pa;

	target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					     FW_SHARED_MEM, &target_pa);
	if (NULL == target_va) {
		HIF_TRACE("Memory allocation failed could not post target buf");
		return;
	}
	hif_write32_mb(scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
	HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
}
#else
static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
{
}
#endif

#ifdef WLAN_SUSPEND_RESUME_TEST
static void hif_fake_apps_init_ctx(struct hif_softc *scn)
{
	INIT_WORK(&scn->fake_apps_ctx.resume_work,
		  hif_fake_apps_resume_work);
}
#else
static inline void hif_fake_apps_init_ctx(struct hif_softc *scn) {}
#endif

/**
 * hif_config_ce() - configure copy engines
 * @scn: hif context
 *
 * Prepares fw, copy engine hardware and host sw according
 * to the attributes selected by hif_ce_prepare_config.
 *
 * also calls athdiag_procfs_init
 *
 * Return: 0 for success, nonzero for failure.
 */
int hif_config_ce(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct HIF_CE_pipe_info *pipe_info;
	int pipe_num;
	struct CE_state *ce_state;
#ifdef ADRASTEA_SHADOW_REGISTERS
	int i;
#endif
	QDF_STATUS rv = QDF_STATUS_SUCCESS;

	scn->notice_send = true;

	hif_post_static_buf_to_target(scn);

	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;

	hif_config_rri_on_ddr(scn);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr *attr;

		pipe_info = &hif_state->pipe_info[pipe_num];
		pipe_info->pipe_num = pipe_num;
		pipe_info->HIF_CE_state = hif_state;
		attr = &hif_state->host_ce_config[pipe_num];

		pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
		ce_state = scn->ce_id_to_state[pipe_num];
		qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
		QDF_ASSERT(pipe_info->ce_hdl != NULL);
		if (pipe_info->ce_hdl == NULL) {
			rv = QDF_STATUS_E_FAILURE;
			A_TARGET_ACCESS_UNLIKELY(scn);
			goto err;
		}

		ce_state->lro_data = qdf_lro_init();

		if (attr->flags & CE_ATTR_DIAG) {
			/* Reserve the ultimate CE for
			 * Diagnostic Window support
			 */
			hif_state->ce_diag = pipe_info->ce_hdl;
			continue;
		}

		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
		    (ce_state->htt_rx_data))
			continue;

		pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
		if (attr->dest_nentries > 0) {
			atomic_set(&pipe_info->recv_bufs_needed,
				   init_buffer_count(attr->dest_nentries - 1));
			/* SRNG based CE has one entry less */
			if (ce_srng_based(scn))
				atomic_dec(&pipe_info->recv_bufs_needed);
		} else {
			atomic_set(&pipe_info->recv_bufs_needed, 0);
		}
		ce_tasklet_init(hif_state, (1 << pipe_num));
		ce_register_irq(hif_state, (1 << pipe_num));
	}

	if (athdiag_procfs_init(scn) != 0) {
		A_TARGET_ACCESS_UNLIKELY(scn);
		goto err;
	}
	scn->athdiag_procfs_inited = true;

	HIF_DBG("%s: ce_init done", __func__);

	init_tasklet_workers(hif_hdl);
	hif_fake_apps_init_ctx(scn);

	HIF_DBG("%s: X, ret = %d", __func__, rv);

#ifdef ADRASTEA_SHADOW_REGISTERS
	HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__);
	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
		HIF_DBG("%s Shadow Register%d is mapped to address %x",
			__func__, i,
			(A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
	}
#endif

	return rv != QDF_STATUS_SUCCESS;

err:
	/* Failure, so clean up */
	hif_unconfig_ce(scn);
	HIF_TRACE("%s: X, ret = %d", __func__, rv);
	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
}

#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
 * @hif_ctx: hif opaque context
 * @handler: Callback function
 * @context: handle for callback function
 *
 * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
 */
int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
				fastpath_msg_handler handler,
				void *context)
{
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	int i;

	if (!scn) {
		HIF_ERROR("%s: scn is NULL", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	if (!scn->fastpath_mode_on) {
		HIF_WARN("%s: Fastpath mode disabled", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	for (i = 0; i < scn->ce_count; i++) {
		ce_state = scn->ce_id_to_state[i];
		if (ce_state->htt_rx_data) {
			ce_state->fastpath_handler = handler;
			ce_state->context = context;
		}
	}

	return QDF_STATUS_SUCCESS;
}
#endif

Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002575#ifdef IPA_OFFLOAD
Leo Changd85f78d2015-11-13 10:55:34 -08002576/**
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05302577 * hif_ce_ipa_get_ce_resource() - get uc resource on hif
Leo Changd85f78d2015-11-13 10:55:34 -08002578 * @scn: bus context
2579 * @ce_sr_base_paddr: copyengine source ring base physical address
2580 * @ce_sr_ring_size: copyengine source ring size
2581 * @ce_reg_paddr: copyengine register physical address
2582 *
2583 * IPA micro controller data path offload feature enabled,
2584 * HIF should release copy engine related resource information to IPA UC
2585 * IPA UC will access hardware resource with released information
2586 *
2587 * Return: None
2588 */
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05302589void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302590 qdf_dma_addr_t *ce_sr_base_paddr,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002591 uint32_t *ce_sr_ring_size,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302592 qdf_dma_addr_t *ce_reg_paddr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002593{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302594 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002595 struct HIF_CE_pipe_info *pipe_info =
2596 &(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
2597 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
2598
2599 ce_ipa_get_resource(ce_hdl, ce_sr_base_paddr, ce_sr_ring_size,
2600 ce_reg_paddr);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002601}
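
/*
 * Illustrative sketch (not part of the driver): how an IPA client might
 * query the CE resources that are handed over to the IPA micro
 * controller. Variable names are hypothetical; only the
 * hif_ce_ipa_get_ce_resource() prototype above is real.
 */
#if 0
static void my_ipa_uc_setup(struct hif_softc *scn)
{
	qdf_dma_addr_t ce_sr_base_paddr, ce_reg_paddr;
	uint32_t ce_sr_ring_size;

	hif_ce_ipa_get_ce_resource(scn, &ce_sr_base_paddr,
				   &ce_sr_ring_size, &ce_reg_paddr);
	/* pass the addresses and ring size on to the IPA UC setup path */
}
#endif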
#endif /* IPA_OFFLOAD */

#ifdef ADRASTEA_SHADOW_REGISTERS

/*
 * Current shadow register config
 * (kept in sync with shadow_sr_wr_ind_addr()/shadow_dst_wr_ind_addr() below)
 *
 * -----------------------------------------------------------
 * Shadow Register      |     CE   |    src/dst write index
 * -----------------------------------------------------------
 *     0            |     0    |           src
 *     1     No Config - Doesn't point to anything
 *     2     No Config - Doesn't point to anything
 *     3            |     3    |           src
 *     4            |     4    |           src
 *     5            |     5    |           src
 *     6     No Config - Doesn't point to anything
 *     7            |     7    |           src
 *     8     No Config - Doesn't point to anything
 *     9     No Config - Doesn't point to anything
 *     10    No Config - Doesn't point to anything
 *     11    No Config - Doesn't point to anything
 * -----------------------------------------------------------
 *     12    No Config - Doesn't point to anything
 *     13           |     1    |           dst
 *     14           |     2    |           dst
 *     15    No Config - Doesn't point to anything
 *     16    No Config - Doesn't point to anything
 *     17           |     5    |           dst
 *     18    No Config - Doesn't point to anything
 *     19           |     7    |           dst
 *     20           |     8    |           dst
 *     21           |     9    |           dst
 *     22           |    10    |           dst
 *     23           |    11    |           dst
 * -----------------------------------------------------------
 *
 *
 * ToDo - Move shadow register config to following in the future
 * This helps free up a block of shadow registers towards the end.
 * Can be used for other purposes
 *
 * -----------------------------------------------------------
 * Shadow Register      |     CE   |    src/dst write index
 * -----------------------------------------------------------
 *     0            |     0    |           src
 *     1            |     3    |           src
 *     2            |     4    |           src
 *     3            |     5    |           src
 *     4            |     7    |           src
 * -----------------------------------------------------------
 *     5            |     1    |           dst
 *     6            |     2    |           dst
 *     7            |     7    |           dst
 *     8            |     8    |           dst
 * -----------------------------------------------------------
 *     9     No Config - Doesn't point to anything
 *     10    No Config - Doesn't point to anything
 *     11    No Config - Doesn't point to anything
 *     12    No Config - Doesn't point to anything
 *     13    No Config - Doesn't point to anything
 *     14    No Config - Doesn't point to anything
 *     15    No Config - Doesn't point to anything
 *     16    No Config - Doesn't point to anything
 *     17    No Config - Doesn't point to anything
 *     18    No Config - Doesn't point to anything
 *     19    No Config - Doesn't point to anything
 *     20    No Config - Doesn't point to anything
 *     21    No Config - Doesn't point to anything
 *     22    No Config - Doesn't point to anything
 *     23    No Config - Doesn't point to anything
 * -----------------------------------------------------------
 */

u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 0:
		addr = SHADOW_VALUE0;
		break;
	case 3:
		addr = SHADOW_VALUE3;
		break;
	case 4:
		addr = SHADOW_VALUE4;
		break;
	case 5:
		addr = SHADOW_VALUE5;
		break;
	case 7:
		addr = SHADOW_VALUE7;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}
	return addr;
}
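
/*
 * Illustrative sketch (an assumption about the intended use, not a
 * quote of the real macro): the helper above is meant for the
 * write-index update path, so that the host publishes the CE source
 * ring write index through its shadow register, roughly as below.
 */
#if 0
static void example_ce_src_ring_write_idx_set(struct hif_softc *scn,
					      u32 ctrl_addr, u32 write_index)
{
	A_TARGET_WRITE(scn, shadow_sr_wr_ind_addr(scn, ctrl_addr),
		       write_index);
}
#endif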

u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 1:
		addr = SHADOW_VALUE13;
		break;
	case 2:
		addr = SHADOW_VALUE14;
		break;
	case 5:
		addr = SHADOW_VALUE17;
		break;
	case 7:
		addr = SHADOW_VALUE19;
		break;
	case 8:
		addr = SHADOW_VALUE20;
		break;
	case 9:
		addr = SHADOW_VALUE21;
		break;
	case 10:
		addr = SHADOW_VALUE22;
		break;
	case 11:
		addr = SHADOW_VALUE23;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}

	return addr;
}
#endif

#if defined(FEATURE_LRO)
/**
 * hif_ce_get_lro_ctx() - get LRO context of a copy engine
 * @hif_hdl: HIF opaque context
 * @ctx_id: copy engine id for which the LRO context is needed
 *
 * Return: the LRO data pointer stored in the CE state
 */
void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
{
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	ce_state = scn->ce_id_to_state[ctx_id];

	return ce_state->lro_data;
}
#endif
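
/*
 * Illustrative sketch (not part of the driver): a receive-path client
 * could look up the per-CE LRO context before flushing aggregated
 * flows. The ce_id argument and my_lro_flush_all() are hypothetical.
 */
#if 0
static void example_lro_flush(struct hif_opaque_softc *hif_hdl, int ce_id)
{
	void *lro_ctx = hif_ce_get_lro_ctx(hif_hdl, ce_id);

	if (lro_ctx)
		my_lro_flush_all(lro_ctx); /* hypothetical helper */
}
#endif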

/**
 * hif_map_service_to_pipe() - returns the ce ids pertaining to
 * this service
 * @hif_hdl: hif_opaque_softc pointer.
 * @svc_id: Service ID for which the mapping is needed.
 * @ul_pipe: address of the container in which ul pipe is returned.
 * @dl_pipe: address of the container in which dl pipe is returned.
 * @ul_is_polled: address of the container in which a bool
 *			indicating if the UL CE for this service
 *			is polled is returned.
 * @dl_is_polled: address of the container in which a bool
 *			indicating if the DL CE for this service
 *			is polled is returned.
 *
 * Return: Indicates whether the service has been found in the table.
 *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
 *         There will be warning logs if either leg has not been updated
 *         because it missed the entry in the table (but this is not an
 *         error).
 */
int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
			int *dl_is_polled)
{
	int status = QDF_STATUS_E_INVAL;
	unsigned int i;
	struct service_to_pipe element;
	struct service_to_pipe *tgt_svc_map_to_use;
	uint32_t sz_tgt_svc_map_to_use;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	bool dl_updated = false;
	bool ul_updated = false;

	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
				       &sz_tgt_svc_map_to_use);

	*dl_is_polled = 0;  /* polling for received messages not supported */

	for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
		if (element.service_id == svc_id) {
			if (element.pipedir == PIPEDIR_OUT) {
				*ul_pipe = element.pipenum;
				*ul_is_polled =
					(hif_state->host_ce_config[*ul_pipe].flags &
					 CE_ATTR_DISABLE_INTR) != 0;
				ul_updated = true;
			} else if (element.pipedir == PIPEDIR_IN) {
				*dl_pipe = element.pipenum;
				dl_updated = true;
			}
			status = QDF_STATUS_SUCCESS;
		}
	}
	if (!ul_updated)
		HIF_INFO("%s: ul pipe is NOT updated for service %d",
			 __func__, svc_id);
	if (!dl_updated)
		HIF_INFO("%s: dl pipe is NOT updated for service %d",
			 __func__, svc_id);

	return status;
}
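
/*
 * Illustrative sketch (not part of the driver): resolving the pipes for
 * a service, in the same way hif_get_wake_ce_id() further below does
 * for HTC_CTRL_RSVD_SVC. Using WMI_CONTROL_SVC here is only an example.
 */
#if 0
static void example_resolve_wmi_pipes(struct hif_opaque_softc *hif_hdl)
{
	uint8_t ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;

	if (hif_map_service_to_pipe(hif_hdl, WMI_CONTROL_SVC,
				    &ul_pipe, &dl_pipe,
				    &ul_is_polled, &dl_is_polled) ==
	    QDF_STATUS_SUCCESS)
		HIF_INFO("WMI ul pipe %d dl pipe %d", ul_pipe, dl_pipe);
}
#endif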

#ifdef SHADOW_REG_DEBUG
inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, srri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);

	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != srri_from_ddr) {
		HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
			  __func__, srri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return srri_from_ddr;
}

inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, drri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);

	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != drri_from_ddr) {
		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
			  drri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return drri_from_ddr;
}
#endif

#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_get_src_ring_read_index(): Called to get the SRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the SRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based SRRI.
 *
 * Return: SRRI
 */
inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
	if (attr.flags & CE_ATTR_DISABLE_INTR)
		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	else
		return A_TARGET_READ(scn,
				(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
}

/**
 * hif_get_dst_ring_read_index(): Called to get the DRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the DRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based DRRI.
 *
 * Return: DRRI
 */
inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];

	if (attr.flags & CE_ATTR_DISABLE_INTR)
		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	else
		return A_TARGET_READ(scn,
				(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
}

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This function allocates non cached memory on ddr and sends
 * the physical address of this memory to the CE hardware. The
 * hardware updates the RRI on this particular location.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
	unsigned int i;
	qdf_dma_addr_t paddr_rri_on_ddr;
	uint32_t high_paddr, low_paddr;

	scn->vaddr_rri_on_ddr =
		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
		scn->qdf_dev->dev, (CE_COUNT*sizeof(uint32_t)),
		&paddr_rri_on_ddr);
	if (!scn->vaddr_rri_on_ddr) {
		HIF_ERROR("%s: RRI on DDR memory allocation failed", __func__);
		return;
	}

	low_paddr = BITS0_TO_31(paddr_rri_on_ddr);
	high_paddr = BITS32_TO_35(paddr_rri_on_ddr);

	HIF_DBG("%s using srri and drri from DDR", __func__);

	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);

	for (i = 0; i < CE_COUNT; i++)
		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));

	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT*sizeof(uint32_t));
}
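
/*
 * Illustrative sketch (an assumption about the layout, inferred from
 * the CE_COUNT * sizeof(uint32_t) allocation above): hardware publishes
 * each CE's read index into one uint32_t slot of vaddr_rri_on_ddr, so a
 * DDR-based read index lookup reduces to an array access.
 */
#if 0
static inline uint32_t example_rri_from_ddr(struct hif_softc *scn, int ce_id)
{
	return scn->vaddr_rri_on_ddr[ce_id];
}
#endif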
#else

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This is a dummy implementation for platforms that don't
 * support this functionality.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
}
#endif

/**
 * hif_dump_ce_registers() - dump ce registers
 * @scn: hif_softc pointer.
 *
 * Output the copy engine registers
 *
 * Return: 0 for success or error code
 */
int hif_dump_ce_registers(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
	uint16_t i;
	QDF_STATUS status;

	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
		if (!scn->ce_id_to_state[i]) {
			HIF_DBG("CE%d not used.", i);
			continue;
		}

		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
					   (uint8_t *) &ce_reg_values[0],
					   ce_reg_word_size * sizeof(uint32_t));

		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("Dumping CE register failed!");
			return -EACCES;
		}
		HIF_ERROR("CE%d=>\n", i);
		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
				   (uint8_t *) &ce_reg_values[0],
				   ce_reg_word_size * sizeof(uint32_t));
		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d\n", (ce_reg_address
			  + SR_WR_INDEX_ADDRESS),
			  ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d\n", (ce_reg_address
			  + CURRENT_SRRI_ADDRESS),
			  ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d\n", (ce_reg_address
			  + DST_WR_INDEX_ADDRESS),
			  ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d\n", (ce_reg_address
			  + CURRENT_DRRI_ADDRESS),
			  ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
		qdf_print("---\n");
	}
	return 0;
}
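
/*
 * Illustrative sketch (not part of the driver): a debug or recovery
 * path could dump all CE registers when a target access problem is
 * suspected. The surrounding handler below is hypothetical.
 */
#if 0
static void example_on_target_failure(struct hif_softc *scn)
{
	if (hif_dump_ce_registers(scn))
		HIF_ERROR("CE register dump unavailable");
}
#endif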
#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct CE_ring_state *dest_ring = ce_state->dest_ring;

	if (src_ring) {
		hif_info->ul_pipe.nentries = src_ring->nentries;
		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
		hif_info->ul_pipe.sw_index = src_ring->sw_index;
		hif_info->ul_pipe.write_index = src_ring->write_index;
		hif_info->ul_pipe.hw_index = src_ring->hw_index;
		hif_info->ul_pipe.base_addr_CE_space =
			src_ring->base_addr_CE_space;
		hif_info->ul_pipe.base_addr_owner_space =
			src_ring->base_addr_owner_space;
	}

	if (dest_ring) {
		hif_info->dl_pipe.nentries = dest_ring->nentries;
		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
		hif_info->dl_pipe.write_index = dest_ring->write_index;
		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
		hif_info->dl_pipe.base_addr_CE_space =
			dest_ring->base_addr_CE_space;
		hif_info->dl_pipe.base_addr_owner_space =
			dest_ring->base_addr_owner_space;
	}

	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
	hif_info->ctrl_addr = ce_state->ctrl_addr;

	return hif_info;
}

uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->nss_wifi_ol_mode = mode;
	return 0;
}
#endif

void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->hif_attribute = hif_attrib;
}

void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
	uint32_t ctrl_addr = CE_state->ctrl_addr;

	Q_TARGET_ACCESS_BEGIN(scn);
	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
	Q_TARGET_ACCESS_END(scn);
}

/**
 * hif_fw_event_handler() - hif fw event handler
 * @hif_state: pointer to hif ce state structure
 *
 * Process fw events and raise HTC callback to process fw events.
 *
 * Return: none
 */
static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
{
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	if (!msg_callbacks->fwEventHandler)
		return;

	msg_callbacks->fwEventHandler(msg_callbacks->Context,
			QDF_STATUS_E_FAILURE);
}

#ifndef QCA_WIFI_3_0
/**
 * hif_fw_interrupt_handler() - FW interrupt handler
 * @irq: irq number
 * @arg: the user pointer
 *
 * Called from the PCI interrupt handler when the Target raises a
 * firmware-generated interrupt to the Host.
 *
 * Return: status of handled irq
 */
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	struct hif_softc *scn = arg;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	uint32_t fw_indicator_address, fw_indicator;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return ATH_ISR_NOSCHED;

	fw_indicator_address = hif_state->fw_indicator_address;
	/* For sudden unplug this will return ~0 */
	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);

	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
		/* ACK: clear Target-side pending event */
		A_TARGET_WRITE(scn, fw_indicator_address,
			       fw_indicator & ~FW_IND_EVENT_PENDING);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;

		if (hif_state->started) {
			hif_fw_event_handler(hif_state);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 */
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
				("%s: Early firmware event indicated\n",
				 __func__));
		}
	} else {
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;
	}

	return ATH_ISR_SCHED;
}
#else
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	return ATH_ISR_SCHED;
}
#endif /* #ifndef QCA_WIFI_3_0 */

/**
 * hif_wlan_disable(): call the platform driver to disable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode to platform driver to disable
 * wlan.
 *
 * Return: void
 */
void hif_wlan_disable(struct hif_softc *scn)
{
	enum pld_driver_mode mode;
	uint32_t con_mode = hif_get_conparam(scn);

	if (QDF_GLOBAL_FTM_MODE == con_mode)
		mode = PLD_FTM;
	else if (QDF_IS_EPPING_ENABLED(con_mode))
		mode = PLD_EPPING;
	else
		mode = PLD_MISSION;

	pld_wlan_disable(scn->qdf_dev->dev, mode);
}

/**
 * hif_get_wake_ce_id() - get the copy engine id used for wake events
 * @scn: the HIF context to use
 * @ce_id: output parameter populated with the wake CE id
 *
 * Return: 0 on success, errno otherwise
 */
int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
{
	QDF_STATUS status;
	uint8_t ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;

	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
					 HTC_CTRL_RSVD_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
		return qdf_status_to_os_return(status);
	}

	*ce_id = dl_pipe;

	return 0;
}
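
/*
 * Illustrative sketch (not part of the driver): a bus layer could use
 * hif_get_wake_ce_id() to learn which CE services HTC_CTRL_RSVD_SVC
 * wake events, e.g. to keep that CE's interrupt armed across suspend.
 * The helper below is hypothetical.
 */
#if 0
static int example_mark_wake_ce(struct hif_softc *scn)
{
	uint8_t wake_ce_id;
	int ret = hif_get_wake_ce_id(scn, &wake_ce_id);

	if (ret)
		return ret;

	HIF_INFO("CE %d services HTC_CTRL_RSVD_SVC wake events", wake_ce_id);
	return 0;
}
#endif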