blob: 74e7f0435b974b9d70afa3ff0c47503468b5e8bf [file] [log] [blame]
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001/*
yeshwanth sriram guntuka78ee68f2016-10-25 11:57:58 +05302 * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080027#include "targcfg.h"
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +053028#include "qdf_lock.h"
29#include "qdf_status.h"
30#include "qdf_status.h"
31#include <qdf_atomic.h> /* qdf_atomic_read */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080032#include <targaddrs.h>
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080033#include "hif_io32.h"
34#include <hif.h>
35#include "regtable.h"
36#define ATH_MODULE_NAME hif
37#include <a_debug.h>
38#include "hif_main.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080039#include "ce_api.h"
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +053040#include "qdf_trace.h"
Yuanyuan Liufd594c22016-04-25 13:59:19 -070041#include "pld_common.h"
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080042#include "hif_debug.h"
43#include "ce_internal.h"
44#include "ce_reg.h"
45#include "ce_assignment.h"
46#include "ce_tasklet.h"
Houston Hoffman56e0d702016-05-05 17:48:06 -070047#ifndef CONFIG_WIN
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080048#include "qwlan_version.h"
Houston Hoffman56e0d702016-05-05 17:48:06 -070049#endif
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080050
51#define CE_POLL_TIMEOUT 10 /* ms */
52
Poddar, Siddarthe41943f2016-04-27 15:33:48 +053053#define AGC_DUMP 1
54#define CHANINFO_DUMP 2
55#define BB_WATCHDOG_DUMP 3
56#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
57#define PCIE_ACCESS_DUMP 4
58#endif
59#include "mp_dev.h"
60
Houston Hoffman5141f9d2017-01-05 10:49:17 -080061#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290)) && \
62 !defined(QCA_WIFI_SUPPORT_SRNG)
63#define QCA_WIFI_SUPPORT_SRNG
64#endif
65
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080066/* Forward references */
67static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);
68
69/*
70 * Fix EV118783, poll to check whether a BMI response comes
71 * other than waiting for the interruption which may be lost.
72 */
73/* #define BMI_RSP_POLLING */
74#define BMI_RSP_TO_MILLISEC 1000
75
Yuanyuan Liua7a282f2016-04-15 12:55:04 -070076#ifdef CONFIG_BYPASS_QMI
77#define BYPASS_QMI 1
78#else
79#define BYPASS_QMI 0
80#endif
81
Houston Hoffmanabd00772016-05-06 17:02:48 -070082#ifdef CONFIG_WIN
Pratik Gandhi424c62e2016-08-23 19:47:09 +053083#if ENABLE_10_4_FW_HDR
Houston Hoffmanabd00772016-05-06 17:02:48 -070084#define WDI_IPA_SERVICE_GROUP 5
85#define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
86#define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
87#define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
Pratik Gandhi424c62e2016-08-23 19:47:09 +053088#endif /* ENABLE_10_4_FW_HDR */
Houston Hoffmanabd00772016-05-06 17:02:48 -070089#endif
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080090
Komal Seelam644263d2016-02-22 20:45:49 +053091static int hif_post_recv_buffers(struct hif_softc *scn);
92static void hif_config_rri_on_ddr(struct hif_softc *scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080093
/**
 * hif_target_access_log_dump() - dump the target access log
 *
 * Thin wrapper around hif_target_dump_access_log(); compiled in only
 * when CONFIG_ATH_PCIE_ACCESS_DEBUG is enabled (see PCIE_ACCESS_DUMP
 * handling in hif_trigger_dump()).
 *
 * Return: n/a
 */
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
static void hif_target_access_log_dump(void)
{
	hif_target_dump_access_log();
}
#endif
107
108
109void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
110 uint8_t cmd_id, bool start)
111{
112 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
113
114 switch (cmd_id) {
115 case AGC_DUMP:
116 if (start)
117 priv_start_agc(scn);
118 else
119 priv_dump_agc(scn);
120 break;
121 case CHANINFO_DUMP:
122 if (start)
123 priv_start_cap_chaninfo(scn);
124 else
125 priv_dump_chaninfo(scn);
126 break;
127 case BB_WATCHDOG_DUMP:
128 priv_dump_bbwatchdog(scn);
129 break;
130#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
131 case PCIE_ACCESS_DUMP:
132 hif_target_access_log_dump();
133 break;
134#endif
135 default:
136 HIF_ERROR("%s: Invalid htc dump command", __func__);
137 break;
138 }
139}
140
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800141static void ce_poll_timeout(void *arg)
142{
143 struct CE_state *CE_state = (struct CE_state *)arg;
144 if (CE_state->timer_inited) {
145 ce_per_engine_service(CE_state->scn, CE_state->id);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530146 qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800147 }
148}
149
/**
 * roundup_pwr2() - round n up to the next power of two
 * @n: value to round up
 *
 * Returns @n unchanged when it is already a power of two (0 is treated
 * as such and returned as-is), otherwise the smallest power of two
 * greater than @n.  Asserts and returns 0 when the result would not
 * fit in an unsigned int.
 *
 * Bug fix: the loop previously ran 29 iterations, so its largest
 * candidate was 1U << 30; values in (2^30, 2^31) wrongly hit the
 * assert even though 2^31 is representable.  30 iterations reach
 * 1U << 31, the largest representable power of two.
 */
static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 30; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	QDF_ASSERT(0); /* n too large */
	return 0;
}
168
/* Byte offsets of the source/destination ring write-index registers
 * within an Adrastea CE register block.
 */
#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40

/*
 * Mission-mode shadow register configuration: one { CE id, write-index
 * register offset } entry per shadowed ring.  CE 7 appears with both
 * offsets -- presumably because it is used in both directions (diag CE
 * per the CE_PCI table above); TODO(review) confirm.
 */
static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
#ifdef QCA_WIFI_3_0_ADRASTEA
	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
#endif
};
188
/* Shadow register configuration used in epping (test) mode; note the
 * CE assignments differ from the mission-mode table above.
 */
static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700200
201/* CE_PCI TABLE */
202/*
203 * NOTE: the table below is out of date, though still a useful reference.
204 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
205 * mapping of HTC services to HIF pipes.
206 */
207/*
208 * This authoritative table defines Copy Engine configuration and the mapping
209 * of services/endpoints to CEs. A subset of this information is passed to
210 * the Target during startup as a prerequisite to entering BMI phase.
211 * See:
212 * target_service_to_ce_map - Target-side mapping
213 * hif_map_service_to_pipe - Host-side mapping
214 * target_ce_config - Target-side configuration
215 * host_ce_config - Host-side configuration
216 ============================================================================
217 Purpose | Service / Endpoint | CE | Dire | Xfer | Xfer
218 | | | ctio | Size | Frequency
219 | | | n | |
220 ============================================================================
221 tx | HTT_DATA (downlink) | CE 0 | h->t | medium - | very frequent
222 descriptor | | | | O(100B) | and regular
223 download | | | | |
224 ----------------------------------------------------------------------------
225 rx | HTT_DATA (uplink) | CE 1 | t->h | small - | frequent and
226 indication | | | | O(10B) | regular
227 upload | | | | |
228 ----------------------------------------------------------------------------
229 MSDU | DATA_BK (uplink) | CE 2 | t->h | large - | rare
230 upload | | | | O(1000B) | (frequent
231 e.g. noise | | | | | during IP1.0
232 packets | | | | | testing)
233 ----------------------------------------------------------------------------
234 MSDU | DATA_BK (downlink) | CE 3 | h->t | large - | very rare
235 download | | | | O(1000B) | (frequent
236 e.g. | | | | | during IP1.0
237 misdirecte | | | | | testing)
238 d EAPOL | | | | |
239 packets | | | | |
240 ----------------------------------------------------------------------------
241 n/a | DATA_BE, DATA_VI | CE 2 | t->h | | never(?)
242 | DATA_VO (uplink) | | | |
243 ----------------------------------------------------------------------------
244 n/a | DATA_BE, DATA_VI | CE 3 | h->t | | never(?)
245 | DATA_VO (downlink) | | | |
246 ----------------------------------------------------------------------------
247 WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
248 | | | | O(100B) |
249 ----------------------------------------------------------------------------
250 WMI | WMI_CONTROL | CE 5 | h->t | medium - | infrequent
251 messages | (downlink) | | | O(100B) |
252 | | | | |
253 ----------------------------------------------------------------------------
254 n/a | HTC_CTRL_RSVD, | CE 1 | t->h | | never(?)
255 | HTC_RAW_STREAMS | | | |
256 | (uplink) | | | |
257 ----------------------------------------------------------------------------
258 n/a | HTC_CTRL_RSVD, | CE 0 | h->t | | never(?)
259 | HTC_RAW_STREAMS | | | |
260 | (downlink) | | | |
261 ----------------------------------------------------------------------------
262 diag | none (raw CE) | CE 7 | t<>h | 4 | Diag Window
263 | | | | | infrequent
264 ============================================================================
265 */
266
267/*
268 * Map from service/endpoint to Copy Engine.
269 * This table is derived from the CE_PCI TABLE, above.
270 * It is passed to the Target at startup for use by firmware.
271 */
272static struct service_to_pipe target_service_to_ce_map_wlan[] = {
273 {
274 WMI_DATA_VO_SVC,
275 PIPEDIR_OUT, /* out = UL = host -> target */
276 3,
277 },
278 {
279 WMI_DATA_VO_SVC,
280 PIPEDIR_IN, /* in = DL = target -> host */
281 2,
282 },
283 {
284 WMI_DATA_BK_SVC,
285 PIPEDIR_OUT, /* out = UL = host -> target */
286 3,
287 },
288 {
289 WMI_DATA_BK_SVC,
290 PIPEDIR_IN, /* in = DL = target -> host */
291 2,
292 },
293 {
294 WMI_DATA_BE_SVC,
295 PIPEDIR_OUT, /* out = UL = host -> target */
296 3,
297 },
298 {
299 WMI_DATA_BE_SVC,
300 PIPEDIR_IN, /* in = DL = target -> host */
301 2,
302 },
303 {
304 WMI_DATA_VI_SVC,
305 PIPEDIR_OUT, /* out = UL = host -> target */
306 3,
307 },
308 {
309 WMI_DATA_VI_SVC,
310 PIPEDIR_IN, /* in = DL = target -> host */
311 2,
312 },
313 {
314 WMI_CONTROL_SVC,
315 PIPEDIR_OUT, /* out = UL = host -> target */
316 3,
317 },
318 {
319 WMI_CONTROL_SVC,
320 PIPEDIR_IN, /* in = DL = target -> host */
321 2,
322 },
323 {
Kiran Venkatappae17e3b62017-02-10 16:31:49 +0530324 WMI_CONTROL_SVC_WMAC1,
325 PIPEDIR_OUT, /* out = UL = host -> target */
326 7,
327 },
328 {
329 WMI_CONTROL_SVC_WMAC1,
330 PIPEDIR_IN, /* in = DL = target -> host */
331 2,
332 },
333 {
334 WMI_CONTROL_SVC_WMAC2,
335 PIPEDIR_OUT, /* out = UL = host -> target */
336 9,
337 },
338 {
339 WMI_CONTROL_SVC_WMAC2,
340 PIPEDIR_IN, /* in = DL = target -> host */
341 2,
342 },
343 {
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700344 HTC_CTRL_RSVD_SVC,
345 PIPEDIR_OUT, /* out = UL = host -> target */
346 0, /* could be moved to 3 (share with WMI) */
347 },
348 {
349 HTC_CTRL_RSVD_SVC,
350 PIPEDIR_IN, /* in = DL = target -> host */
351 2,
352 },
353 {
354 HTC_RAW_STREAMS_SVC, /* not currently used */
355 PIPEDIR_OUT, /* out = UL = host -> target */
356 0,
357 },
358 {
359 HTC_RAW_STREAMS_SVC, /* not currently used */
360 PIPEDIR_IN, /* in = DL = target -> host */
361 2,
362 },
363 {
364 HTT_DATA_MSG_SVC,
365 PIPEDIR_OUT, /* out = UL = host -> target */
366 4,
367 },
368 {
369 HTT_DATA_MSG_SVC,
370 PIPEDIR_IN, /* in = DL = target -> host */
371 1,
372 },
373 {
374 WDI_IPA_TX_SVC,
375 PIPEDIR_OUT, /* in = DL = target -> host */
376 5,
377 },
Houston Hoffmane6330442016-02-26 12:19:11 -0800378#if defined(QCA_WIFI_3_0_ADRASTEA)
379 {
380 HTT_DATA2_MSG_SVC,
381 PIPEDIR_IN, /* in = DL = target -> host */
382 9,
383 },
384 {
385 HTT_DATA3_MSG_SVC,
386 PIPEDIR_IN, /* in = DL = target -> host */
387 10,
388 },
Nirav Shah75cc5c82016-05-25 10:52:38 +0530389 {
390 PACKET_LOG_SVC,
391 PIPEDIR_IN, /* in = DL = target -> host */
392 11,
393 },
Houston Hoffmane6330442016-02-26 12:19:11 -0800394#endif
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700395 /* (Additions here) */
396
397 { /* Must be last */
398 0,
399 0,
400 0,
401 },
402};
403
Houston Hoffman88c896f2016-12-14 09:56:35 -0800404/* PIPEDIR_OUT = HOST to Target */
405/* PIPEDIR_IN = TARGET to HOST */
406static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
407 { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
408 { WMI_DATA_VO_SVC, PIPEDIR_IN , 2, },
409 { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
410 { WMI_DATA_BK_SVC, PIPEDIR_IN , 2, },
411 { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
412 { WMI_DATA_BE_SVC, PIPEDIR_IN , 2, },
413 { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
414 { WMI_DATA_VI_SVC, PIPEDIR_IN , 2, },
415 { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
416 { WMI_CONTROL_SVC, PIPEDIR_IN , 2, },
417 { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
418 { HTC_CTRL_RSVD_SVC, PIPEDIR_IN , 2, },
419 { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
420 { HTT_DATA_MSG_SVC, PIPEDIR_IN , 1, },
Houston Hoffman88c896f2016-12-14 09:56:35 -0800421 /* (Additions here) */
422 { 0, 0, 0, },
423};
424
Houston Hoffmanfb698ef2016-05-05 19:50:44 -0700425static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
426 {
427 WMI_DATA_VO_SVC,
428 PIPEDIR_OUT, /* out = UL = host -> target */
429 3,
430 },
431 {
432 WMI_DATA_VO_SVC,
433 PIPEDIR_IN, /* in = DL = target -> host */
434 2,
435 },
436 {
437 WMI_DATA_BK_SVC,
438 PIPEDIR_OUT, /* out = UL = host -> target */
439 3,
440 },
441 {
442 WMI_DATA_BK_SVC,
443 PIPEDIR_IN, /* in = DL = target -> host */
444 2,
445 },
446 {
447 WMI_DATA_BE_SVC,
448 PIPEDIR_OUT, /* out = UL = host -> target */
449 3,
450 },
451 {
452 WMI_DATA_BE_SVC,
453 PIPEDIR_IN, /* in = DL = target -> host */
454 2,
455 },
456 {
457 WMI_DATA_VI_SVC,
458 PIPEDIR_OUT, /* out = UL = host -> target */
459 3,
460 },
461 {
462 WMI_DATA_VI_SVC,
463 PIPEDIR_IN, /* in = DL = target -> host */
464 2,
465 },
466 {
467 WMI_CONTROL_SVC,
468 PIPEDIR_OUT, /* out = UL = host -> target */
469 3,
470 },
471 {
472 WMI_CONTROL_SVC,
473 PIPEDIR_IN, /* in = DL = target -> host */
474 2,
475 },
476 {
477 HTC_CTRL_RSVD_SVC,
478 PIPEDIR_OUT, /* out = UL = host -> target */
479 0, /* could be moved to 3 (share with WMI) */
480 },
481 {
482 HTC_CTRL_RSVD_SVC,
483 PIPEDIR_IN, /* in = DL = target -> host */
484 1,
485 },
486 {
487 HTC_RAW_STREAMS_SVC, /* not currently used */
488 PIPEDIR_OUT, /* out = UL = host -> target */
489 0,
490 },
491 {
492 HTC_RAW_STREAMS_SVC, /* not currently used */
493 PIPEDIR_IN, /* in = DL = target -> host */
494 1,
495 },
496 {
497 HTT_DATA_MSG_SVC,
498 PIPEDIR_OUT, /* out = UL = host -> target */
499 4,
500 },
501#if WLAN_FEATURE_FASTPATH
502 {
503 HTT_DATA_MSG_SVC,
504 PIPEDIR_IN, /* in = DL = target -> host */
505 5,
506 },
507#else /* WLAN_FEATURE_FASTPATH */
508 {
509 HTT_DATA_MSG_SVC,
510 PIPEDIR_IN, /* in = DL = target -> host */
511 1,
512 },
513#endif /* WLAN_FEATURE_FASTPATH */
514
515 /* (Additions here) */
516
517 { /* Must be last */
518 0,
519 0,
520 0,
521 },
522};
523
524
/* Shadow register configuration currently in effect.  Defaults to the
 * mission-mode table; presumably repointed at target_shadow_reg_cfg_epping
 * for epping mode elsewhere -- not visible in this chunk, TODO confirm.
 */
static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);
527
/* Service-to-CE map used when epping (test) mode is enabled;
 * selected by hif_select_service_to_pipe_map().
 */
static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},	/* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},	/* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},	/* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},	/* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},	/* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},	/* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},	/* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},	/* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},	/* in = DL = target -> host */
	{0, 0, 0,},				/* Must be last */
};
547
Houston Hoffman748e1a62017-03-30 17:20:42 -0700548static void hif_select_service_to_pipe_map(struct hif_softc *scn,
549 struct service_to_pipe **tgt_svc_map_to_use,
550 uint32_t *sz_tgt_svc_map_to_use)
551{
552 uint32_t mode = hif_get_conparam(scn);
553 struct hif_target_info *tgt_info = &scn->target_info;
554
555 if (QDF_IS_EPPING_ENABLED(mode)) {
556 *tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
557 *sz_tgt_svc_map_to_use =
558 sizeof(target_service_to_ce_map_wlan_epping);
559 } else {
560 switch (tgt_info->target_type) {
561 default:
562 *tgt_svc_map_to_use = target_service_to_ce_map_wlan;
563 *sz_tgt_svc_map_to_use =
564 sizeof(target_service_to_ce_map_wlan);
565 break;
566 case TARGET_TYPE_AR900B:
567 case TARGET_TYPE_QCA9984:
568 case TARGET_TYPE_IPQ4019:
569 case TARGET_TYPE_QCA9888:
570 case TARGET_TYPE_AR9888:
571 case TARGET_TYPE_AR9888V2:
572 *tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
573 *sz_tgt_svc_map_to_use =
574 sizeof(target_service_to_ce_map_ar900b);
575 break;
576 case TARGET_TYPE_QCA6290:
577 *tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
578 *sz_tgt_svc_map_to_use =
579 sizeof(target_service_to_ce_map_qca6290);
580 break;
581 }
582 }
583}
584
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700585/**
586 * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
587 * @ce_state : pointer to the state context of the CE
588 *
589 * Description:
590 * Sets htt_rx_data attribute of the state structure if the
591 * CE serves one of the HTT DATA services.
592 *
593 * Return:
594 * false (attribute set to false)
595 * true (attribute set to true);
596 */
Jeff Johnson6950fdb2016-10-07 13:00:59 -0700597static bool ce_mark_datapath(struct CE_state *ce_state)
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700598{
599 struct service_to_pipe *svc_map;
Kiran Venkatappac0687092017-04-13 16:45:03 +0530600 uint32_t map_sz, map_len;
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700601 int i;
602 bool rc = false;
603
604 if (ce_state != NULL) {
Houston Hoffman748e1a62017-03-30 17:20:42 -0700605 hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
606 &map_sz);
Houston Hoffman55fcf5a2016-09-27 23:21:51 -0700607
Kiran Venkatappac0687092017-04-13 16:45:03 +0530608 map_len = map_sz / sizeof(struct service_to_pipe);
609 for (i = 0; i < map_len; i++) {
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700610 if ((svc_map[i].pipenum == ce_state->id) &&
611 ((svc_map[i].service_id == HTT_DATA_MSG_SVC) ||
612 (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
613 (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
614 /* HTT CEs are unidirectional */
615 if (svc_map[i].pipedir == PIPEDIR_IN)
616 ce_state->htt_rx_data = true;
617 else
618 ce_state->htt_tx_data = true;
619 rc = true;
620 }
621 }
622 }
623 return rc;
624}
625
/**
 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
 * @ce_id: ce in question
 * @ring: ring state being examined
 * @type: "src_ring" or "dest_ring" string for identifying the ring
 *
 * Warns on non-zero index values.
 * Causes a kernel panic if the ring is not empty during initialization.
 */
static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
					 char *type)
{
	if (ring->write_index != 0 || ring->sw_index != 0)
		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
			  ce_id, type, ring->sw_index, ring->write_index);
	if (ring->write_index != ring->sw_index)
		QDF_BUG(0);
}
644
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530645/**
646 * ce_srng_based() - Does this target use srng
647 * @ce_state : pointer to the state context of the CE
648 *
649 * Description:
650 * returns true if the target is SRNG based
651 *
652 * Return:
653 * false (attribute set to false)
654 * true (attribute set to true);
655 */
656bool ce_srng_based(struct hif_softc *scn)
657{
658 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
659 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
660
661 switch (tgt_info->target_type) {
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530662 case TARGET_TYPE_QCA8074:
Houston Hoffman31b25ec2016-09-19 13:12:30 -0700663 case TARGET_TYPE_QCA6290:
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530664 return true;
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530665 default:
666 return false;
667 }
668 return false;
669}
670
#ifdef QCA_WIFI_SUPPORT_SRNG
/* Select the srng or legacy CE service implementation for this target. */
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	return ce_srng_based(scn) ? ce_services_srng() : ce_services_legacy();
}
#else /* QCA_WIFI_SUPPORT_SRNG */
/* Without SRNG support only the legacy CE services exist. */
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	return ce_services_legacy();
}
#endif /* QCA_WIFI_SUPPORT_SRNG */
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530687
Houston Hoffman403c2df2017-01-27 12:51:15 -0800688static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
Houston Hoffman10fedfc2017-01-23 15:23:09 -0800689 struct pld_shadow_reg_v2_cfg **shadow_config,
690 int *num_shadow_registers_configured) {
691 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
692
693 return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
694 scn, shadow_config, num_shadow_registers_configured);
695}
696
/*
 * ce_get_desc_size() - size in bytes of one descriptor for @ring_type,
 * as reported by the attached CE service implementation.
 */
static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
					uint8_t ring_type)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_get_desc_size(ring_type);
}
704
705
/**
 * ce_alloc_ring_state() - allocate and initialize a CE ring state
 * @CE_state: copy engine this ring belongs to
 * @ring_type: ring type, forwarded to ce_get_desc_size()
 * @nentries: number of ring entries; assumed to be a power of two
 *            (nentries_mask relies on it -- callers round up first)
 *
 * Allocates the ring bookkeeping struct together with the
 * per_transfer_context pointer array in one qdf_mem_malloc, then the
 * DMA-coherent descriptor ring, over-allocated by CE_DESC_RING_ALIGN
 * so the base can be aligned.
 *
 * Return: new ring state, or NULL on any allocation failure (partially
 * allocated memory is freed before returning).
 */
static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
		uint8_t ring_type, uint32_t nentries)
{
	uint32_t ce_nbytes;
	char *ptr;
	qdf_dma_addr_t base_addr;
	struct CE_ring_state *ce_ring;
	uint32_t desc_size;
	struct hif_softc *scn = CE_state->scn;

	/* One allocation: ring state followed by nentries context pointers */
	ce_nbytes = sizeof(struct CE_ring_state)
		+ (nentries * sizeof(void *));
	ptr = qdf_mem_malloc(ce_nbytes);
	if (!ptr)
		return NULL;

	ce_ring = (struct CE_ring_state *)ptr;
	ptr += sizeof(struct CE_ring_state);
	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;	/* valid only for power of 2 */

	ce_ring->low_water_mark_nentries = 0;
	ce_ring->high_water_mark_nentries = nentries;
	ce_ring->per_transfer_context = (void **)ptr;

	desc_size = ce_get_desc_size(scn, ring_type);

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	ce_ring->base_addr_owner_space_unaligned =
		qdf_mem_alloc_consistent(scn->qdf_dev,
					 scn->qdf_dev->dev,
					 (nentries *
					  desc_size +
					  CE_DESC_RING_ALIGN),
					 &base_addr);
	if (ce_ring->base_addr_owner_space_unaligned
			== NULL) {
		HIF_ERROR("%s: ring has no DMA mem",
			  __func__);
		qdf_mem_free(ptr);
		return NULL;
	}
	ce_ring->base_addr_CE_space_unaligned = base_addr;

	/* Correctly initialize memory to 0 to
	 * prevent garbage data crashing system
	 * when download firmware
	 */
	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
		     nentries * desc_size +
		     CE_DESC_RING_ALIGN);

	/* Align both the CPU and device views of the ring base identically */
	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {

		ce_ring->base_addr_CE_space =
			(ce_ring->base_addr_CE_space_unaligned +
			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);

		ce_ring->base_addr_owner_space = (void *)
			(((size_t) ce_ring->base_addr_owner_space_unaligned +
			  CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
	} else {
		ce_ring->base_addr_CE_space =
			ce_ring->base_addr_CE_space_unaligned;
		ce_ring->base_addr_owner_space =
			ce_ring->base_addr_owner_space_unaligned;
	}

	return ce_ring;
}
778
/*
 * ce_ring_setup() - program the HW for @ring via the attached CE
 * service implementation's ce_ring_setup callback.
 */
static void ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
		uint32_t ce_id, struct CE_ring_state *ring,
		struct CE_attr *attr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id, ring, attr);
}
787
Houston Hoffmancbcd8392017-02-08 17:43:13 -0800788int hif_ce_bus_early_suspend(struct hif_softc *scn)
789{
790 uint8_t ul_pipe, dl_pipe;
791 int ce_id, status, ul_is_polled, dl_is_polled;
792 struct CE_state *ce_state;
793 status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
794 &ul_pipe, &dl_pipe,
795 &ul_is_polled, &dl_is_polled);
796 if (status) {
797 HIF_ERROR("%s: pipe_mapping failure", __func__);
798 return status;
799 }
800
801 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
802 if (ce_id == ul_pipe)
803 continue;
804 if (ce_id == dl_pipe)
805 continue;
806
807 ce_state = scn->ce_id_to_state[ce_id];
808 qdf_spin_lock_bh(&ce_state->ce_index_lock);
809 if (ce_state->state == CE_RUNNING)
810 ce_state->state = CE_PAUSED;
811 qdf_spin_unlock_bh(&ce_state->ce_index_lock);
812 }
813
814 return status;
815}
816
817int hif_ce_bus_late_resume(struct hif_softc *scn)
818{
819 int ce_id;
820 struct CE_state *ce_state;
821 int write_index;
822 bool index_updated;
823
824 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
825 ce_state = scn->ce_id_to_state[ce_id];
826 qdf_spin_lock_bh(&ce_state->ce_index_lock);
827 if (ce_state->state == CE_PENDING) {
828 write_index = ce_state->src_ring->write_index;
829 CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
830 write_index);
831 ce_state->state = CE_RUNNING;
832 index_updated = true;
833 } else {
834 index_updated = false;
835 }
836
837 if (ce_state->state == CE_PAUSED)
838 ce_state->state = CE_RUNNING;
839 qdf_spin_unlock_bh(&ce_state->ce_index_lock);
840
841 if (index_updated)
842 hif_record_ce_desc_event(scn, ce_id,
843 RESUME_WRITE_INDEX_UPDATE,
844 NULL, NULL, write_index);
845 }
846
847 return 0;
848}
849
/**
 * ce_oom_recovery() - try to recover rx ce from oom condition
 * @context: CE_state of the CE with oom rx ring
 *
 * The executing work will continue to be rescheduled until
 * at least 1 descriptor is successfully posted to the rx ring.
 *
 * Return: none
 */
static void ce_oom_recovery(void *context)
{
	struct CE_state *ce_state = context;
	struct hif_softc *scn = ce_state->scn;
	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&ce_softc->pipe_info[ce_state->id];

	/* repost rx buffers; reschedules itself on continued failure */
	hif_post_recv_buffers_for_pipe(pipe_info);
}
869
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800870/*
871 * Initialize a Copy Engine based on caller-supplied attributes.
872 * This may be called once to initialize both source and destination
873 * rings or it may be called twice for separate source and destination
874 * initialization. It may be that only one side or the other is
875 * initialized by software/firmware.
Houston Hoffman233e9092015-09-02 13:37:21 -0700876 *
877 * This should be called durring the initialization sequence before
878 * interupts are enabled, so we don't have to worry about thread safety.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800879 */
Komal Seelam644263d2016-02-22 20:45:49 +0530880struct CE_handle *ce_init(struct hif_softc *scn,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800881 unsigned int CE_id, struct CE_attr *attr)
882{
883 struct CE_state *CE_state;
884 uint32_t ctrl_addr;
885 unsigned int nentries;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800886 bool malloc_CE_state = false;
887 bool malloc_src_ring = false;
888
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530889 QDF_ASSERT(CE_id < scn->ce_count);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800890 ctrl_addr = CE_BASE_ADDRESS(CE_id);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800891 CE_state = scn->ce_id_to_state[CE_id];
892
893 if (!CE_state) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800894 CE_state =
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530895 (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800896 if (!CE_state) {
897 HIF_ERROR("%s: CE_state has no mem", __func__);
898 return NULL;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800899 }
Houston Hoffman233e9092015-09-02 13:37:21 -0700900 malloc_CE_state = true;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530901 qdf_spinlock_create(&CE_state->ce_index_lock);
Houston Hoffman233e9092015-09-02 13:37:21 -0700902
903 CE_state->id = CE_id;
904 CE_state->ctrl_addr = ctrl_addr;
905 CE_state->state = CE_RUNNING;
906 CE_state->attr_flags = attr->flags;
Manjunathappa Prakash2146da32016-10-13 14:47:47 -0700907 qdf_spinlock_create(&CE_state->lro_unloading_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800908 }
909 CE_state->scn = scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800910
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530911 qdf_atomic_init(&CE_state->rx_pending);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800912 if (attr == NULL) {
913 /* Already initialized; caller wants the handle */
914 return (struct CE_handle *)CE_state;
915 }
916
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800917 if (CE_state->src_sz_max)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530918 QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800919 else
920 CE_state->src_sz_max = attr->src_sz_max;
921
Houston Hoffman68e837e2015-12-04 12:57:24 -0800922 ce_init_ce_desc_event_log(CE_id,
923 attr->src_nentries + attr->dest_nentries);
924
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800925 /* source ring setup */
926 nentries = attr->src_nentries;
927 if (nentries) {
928 struct CE_ring_state *src_ring;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800929 nentries = roundup_pwr2(nentries);
930 if (CE_state->src_ring) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530931 QDF_ASSERT(CE_state->src_ring->nentries == nentries);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800932 } else {
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530933 src_ring = CE_state->src_ring =
934 ce_alloc_ring_state(CE_state,
935 CE_RING_SRC,
936 nentries);
937 if (!src_ring) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800938 /* cannot allocate src ring. If the
939 * CE_state is allocated locally free
940 * CE_State and return error.
941 */
942 HIF_ERROR("%s: src ring has no mem", __func__);
943 if (malloc_CE_state) {
944 /* allocated CE_state locally */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530945 qdf_mem_free(CE_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800946 malloc_CE_state = false;
947 }
948 return NULL;
949 } else {
950 /* we can allocate src ring.
951 * Mark that the src ring is
952 * allocated locally
953 */
954 malloc_src_ring = true;
955 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800956 /*
957 * Also allocate a shadow src ring in
958 * regular mem to use for faster access.
959 */
960 src_ring->shadow_base_unaligned =
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530961 qdf_mem_malloc(nentries *
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800962 sizeof(struct CE_src_desc) +
963 CE_DESC_RING_ALIGN);
964 if (src_ring->shadow_base_unaligned == NULL) {
965 HIF_ERROR("%s: src ring no shadow_base mem",
966 __func__);
967 goto error_no_dma_mem;
968 }
969 src_ring->shadow_base = (struct CE_src_desc *)
970 (((size_t) src_ring->shadow_base_unaligned +
971 CE_DESC_RING_ALIGN - 1) &
972 ~(CE_DESC_RING_ALIGN - 1));
973
Houston Hoffman4411ad42016-03-14 21:12:04 -0700974 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
975 goto error_target_access;
Houston Hoffmanf789c662016-04-12 15:39:04 -0700976
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530977 ce_ring_setup(scn, CE_RING_SRC, CE_id, src_ring, attr);
978
Houston Hoffman4411ad42016-03-14 21:12:04 -0700979 if (Q_TARGET_ACCESS_END(scn) < 0)
980 goto error_target_access;
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530981 ce_ring_test_initial_indexes(CE_id, src_ring,
982 "src_ring");
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800983 }
984 }
985
986 /* destination ring setup */
987 nentries = attr->dest_nentries;
988 if (nentries) {
989 struct CE_ring_state *dest_ring;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800990
991 nentries = roundup_pwr2(nentries);
992 if (CE_state->dest_ring) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530993 QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800994 } else {
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +0530995 dest_ring = CE_state->dest_ring =
996 ce_alloc_ring_state(CE_state,
997 CE_RING_DEST,
998 nentries);
999 if (!dest_ring) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001000 /* cannot allocate dst ring. If the CE_state
1001 * or src ring is allocated locally free
1002 * CE_State and src ring and return error.
1003 */
1004 HIF_ERROR("%s: dest ring has no mem",
1005 __func__);
1006 if (malloc_src_ring) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301007 qdf_mem_free(CE_state->src_ring);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001008 CE_state->src_ring = NULL;
1009 malloc_src_ring = false;
1010 }
1011 if (malloc_CE_state) {
1012 /* allocated CE_state locally */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301013 qdf_mem_free(CE_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001014 malloc_CE_state = false;
1015 }
1016 return NULL;
1017 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001018
Houston Hoffman4411ad42016-03-14 21:12:04 -07001019 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1020 goto error_target_access;
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301021
1022 ce_ring_setup(scn, CE_RING_DEST, CE_id, dest_ring, attr);
1023
1024 if (Q_TARGET_ACCESS_END(scn) < 0)
1025 goto error_target_access;
Houston Hoffman47808172016-05-06 10:04:21 -07001026
1027 ce_ring_test_initial_indexes(CE_id, dest_ring,
1028 "dest_ring");
1029
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301030 /* For srng based target, init status ring here */
1031 if (ce_srng_based(CE_state->scn)) {
1032 CE_state->status_ring =
1033 ce_alloc_ring_state(CE_state,
1034 CE_RING_STATUS,
1035 nentries);
1036 if (CE_state->status_ring == NULL) {
1037 /*Allocation failed. Cleanup*/
1038 qdf_mem_free(CE_state->dest_ring);
1039 if (malloc_src_ring) {
1040 qdf_mem_free
1041 (CE_state->src_ring);
1042 CE_state->src_ring = NULL;
1043 malloc_src_ring = false;
1044 }
1045 if (malloc_CE_state) {
1046 /* allocated CE_state locally */
1047 scn->ce_id_to_state[CE_id] =
1048 NULL;
1049 qdf_mem_free(CE_state);
1050 malloc_CE_state = false;
1051 }
Houston Hoffman4411ad42016-03-14 21:12:04 -07001052
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301053 return NULL;
1054 }
1055 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1056 goto error_target_access;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001057
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301058 ce_ring_setup(scn, CE_RING_STATUS, CE_id,
1059 CE_state->status_ring, attr);
1060
1061 if (Q_TARGET_ACCESS_END(scn) < 0)
1062 goto error_target_access;
1063
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001064 }
Houston Hoffman31b25ec2016-09-19 13:12:30 -07001065
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001066 /* epping */
1067 /* poll timer */
1068 if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL)) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301069 qdf_timer_init(scn->qdf_dev,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001070 &CE_state->poll_timer,
1071 ce_poll_timeout,
1072 CE_state,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301073 QDF_TIMER_TYPE_SW);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001074 CE_state->timer_inited = true;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301075 qdf_timer_mod(&CE_state->poll_timer,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001076 CE_POLL_TIMEOUT);
1077 }
1078 }
1079 }
1080
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301081 if (!ce_srng_based(scn)) {
1082 /* Enable CE error interrupts */
1083 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1084 goto error_target_access;
1085 CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
1086 if (Q_TARGET_ACCESS_END(scn) < 0)
1087 goto error_target_access;
1088 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001089
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08001090 qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
1091 ce_oom_recovery, CE_state);
1092
Houston Hoffmanc7d54292016-04-13 18:55:37 -07001093 /* update the htt_data attribute */
1094 ce_mark_datapath(CE_state);
Houston Hoffmanb01db182017-03-13 14:38:09 -07001095 scn->ce_id_to_state[CE_id] = CE_state;
Houston Hoffmanc7d54292016-04-13 18:55:37 -07001096
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001097 return (struct CE_handle *)CE_state;
1098
Houston Hoffman4411ad42016-03-14 21:12:04 -07001099error_target_access:
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001100error_no_dma_mem:
1101 ce_fini((struct CE_handle *)CE_state);
1102 return NULL;
1103}
1104
1105#ifdef WLAN_FEATURE_FASTPATH
1106/**
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001107 * hif_enable_fastpath() Update that we have enabled fastpath mode
1108 * @hif_ctx: HIF context
1109 *
1110 * For use in data path
1111 *
1112 * Retrun: void
1113 */
1114void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
1115{
1116 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1117
Houston Hoffmand63cd742016-12-05 11:59:56 -08001118 if (ce_srng_based(scn)) {
1119 HIF_INFO("%s, srng rings do not support fastpath", __func__);
1120 return;
1121 }
Houston Hoffmanc50572b2016-06-08 19:49:46 -07001122 HIF_INFO("%s, Enabling fastpath mode", __func__);
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001123 scn->fastpath_mode_on = true;
1124}
1125
1126/**
1127 * hif_is_fastpath_mode_enabled - API to query if fasthpath mode is enabled
1128 * @hif_ctx: HIF Context
1129 *
1130 * For use in data path to skip HTC
1131 *
1132 * Return: bool
1133 */
1134bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
1135{
1136 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1137
1138 return scn->fastpath_mode_on;
1139}
1140
/**
 * hif_get_ce_handle - API to get CE handle for FastPath mode
 * @hif_ctx: HIF Context
 * @id: CopyEngine Id (no bounds check is performed here; caller must
 *      pass a valid CE id)
 *
 * API to return CE handle for fastpath mode
 *
 * Return: opaque CE state pointer for @id (may be NULL if that copy
 *         engine has not been initialized)
 */
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->ce_id_to_state[id];
}
1156
1157/**
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001158 * ce_h2t_tx_ce_cleanup() Place holder function for H2T CE cleanup.
1159 * No processing is required inside this function.
1160 * @ce_hdl: Cope engine handle
1161 * Using an assert, this function makes sure that,
1162 * the TX CE has been processed completely.
Houston Hoffman9a831ef2015-09-03 14:42:40 -07001163 *
1164 * This is called while dismantling CE structures. No other thread
1165 * should be using these structures while dismantling is occuring
1166 * therfore no locking is needed.
1167 *
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001168 * Return: none
1169 */
1170void
1171ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
1172{
1173 struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1174 struct CE_ring_state *src_ring = ce_state->src_ring;
Komal Seelam644263d2016-02-22 20:45:49 +05301175 struct hif_softc *sc = ce_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001176 uint32_t sw_index, write_index;
Houston Hoffman85925072016-05-06 17:02:18 -07001177 if (hif_is_nss_wifi_enabled(sc))
1178 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001179
Houston Hoffmanc7d54292016-04-13 18:55:37 -07001180 if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
Houston Hoffman85925072016-05-06 17:02:18 -07001181 HIF_INFO("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
1182 __func__, __LINE__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001183 sw_index = src_ring->sw_index;
1184 write_index = src_ring->sw_index;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001185
1186 /* At this point Tx CE should be clean */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301187 qdf_assert_always(sw_index == write_index);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001188 }
1189}
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001190
1191/**
1192 * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
1193 * @ce_hdl: Handle to CE
1194 *
1195 * These buffers are never allocated on the fly, but
1196 * are allocated only once during HIF start and freed
1197 * only once during HIF stop.
1198 * NOTE:
1199 * The assumption here is there is no in-flight DMA in progress
1200 * currently, so that buffers can be freed up safely.
1201 *
1202 * Return: NONE
1203 */
1204void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
1205{
1206 struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1207 struct CE_ring_state *dst_ring = ce_state->dest_ring;
1208 qdf_nbuf_t nbuf;
1209 int i;
1210
Houston Hoffman7fe51b12016-11-14 18:01:05 -08001211 if (ce_state->scn->fastpath_mode_on == false)
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001212 return;
Houston Hoffman7fe51b12016-11-14 18:01:05 -08001213
1214 if (!ce_state->htt_rx_data)
1215 return;
1216
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001217 /*
1218 * when fastpath_mode is on and for datapath CEs. Unlike other CE's,
1219 * this CE is completely full: does not leave one blank space, to
1220 * distinguish between empty queue & full queue. So free all the
1221 * entries.
1222 */
1223 for (i = 0; i < dst_ring->nentries; i++) {
1224 nbuf = dst_ring->per_transfer_context[i];
1225
1226 /*
1227 * The reasons for doing this check are:
1228 * 1) Protect against calling cleanup before allocating buffers
1229 * 2) In a corner case, FASTPATH_mode_on may be set, but we
1230 * could have a partially filled ring, because of a memory
1231 * allocation failure in the middle of allocating ring.
1232 * This check accounts for that case, checking
1233 * fastpath_mode_on flag or started flag would not have
1234 * covered that case. This is not in performance path,
1235 * so OK to do this.
1236 */
Houston Hoffman1c728302017-03-10 16:58:49 -08001237 if (nbuf) {
1238 qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
1239 QDF_DMA_FROM_DEVICE);
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001240 qdf_nbuf_free(nbuf);
Houston Hoffman1c728302017-03-10 16:58:49 -08001241 }
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001242 }
1243}
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001244
1245/**
1246 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
1247 * @scn: HIF handle
1248 *
1249 * Datapath Rx CEs are special case, where we reuse all the message buffers.
1250 * Hence we have to post all the entries in the pipe, even, in the beginning
1251 * unlike for other CE pipes where one less than dest_nentries are filled in
1252 * the beginning.
1253 *
1254 * Return: None
1255 */
1256static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1257{
1258 int pipe_num;
1259 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1260
1261 if (scn->fastpath_mode_on == false)
1262 return;
1263
1264 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1265 struct HIF_CE_pipe_info *pipe_info =
1266 &hif_state->pipe_info[pipe_num];
1267 struct CE_state *ce_state =
1268 scn->ce_id_to_state[pipe_info->pipe_num];
1269
1270 if (ce_state->htt_rx_data)
1271 atomic_inc(&pipe_info->recv_bufs_needed);
1272 }
1273}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001274#else
/* Fastpath compiled out: no-op stub so callers need no #ifdef */
static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
}

/* Fastpath compiled out: always reports disabled */
static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return false;
}

/* Fastpath compiled out: no handler can be registered */
static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
{
	return false;
}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001288#endif /* WLAN_FEATURE_FASTPATH */
1289
/**
 * ce_fini() - tear down one copy engine and free all of its resources
 * @copyeng: CE handle returned at init time
 *
 * Marks the CE unused, unhooks it from the per-id lookup table, drains
 * the datapath rings (asserting the TX ring is clean, freeing RX
 * buffers), releases the ring descriptor memory (both the DMA-coherent
 * rings and the host-side shadow ring), stops the epping poll timer if
 * it was started, and finally frees the CE state itself.
 *
 * NOTE(review): callers are expected to be single-threaded at teardown;
 * no locking is taken around the ring frees.
 *
 * Return: none
 */
void ce_fini(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int CE_id = CE_state->id;
	struct hif_softc *scn = CE_state->scn;

	/* unpublish the CE before freeing anything under it */
	CE_state->state = CE_UNUSED;
	scn->ce_id_to_state[CE_id] = NULL;

	qdf_spinlock_destroy(&CE_state->lro_unloading_lock);

	if (CE_state->src_ring) {
		/* Cleanup the datapath Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		if (CE_state->src_ring->shadow_base_unaligned)
			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
		if (CE_state->src_ring->base_addr_owner_space_unaligned)
			qdf_mem_free_consistent(scn->qdf_dev,
						scn->qdf_dev->dev,
					    (CE_state->src_ring->nentries *
					     sizeof(struct CE_src_desc) +
					     CE_DESC_RING_ALIGN),
					    CE_state->src_ring->
					    base_addr_owner_space_unaligned,
					    CE_state->src_ring->
					    base_addr_CE_space, 0);
		qdf_mem_free(CE_state->src_ring);
	}
	if (CE_state->dest_ring) {
		/* Cleanup the datapath Rx ring */
		ce_t2h_msg_ce_cleanup(copyeng);

		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
			qdf_mem_free_consistent(scn->qdf_dev,
						scn->qdf_dev->dev,
					    (CE_state->dest_ring->nentries *
					     sizeof(struct CE_dest_desc) +
					     CE_DESC_RING_ALIGN),
					    CE_state->dest_ring->
					    base_addr_owner_space_unaligned,
					    CE_state->dest_ring->
					    base_addr_CE_space, 0);
		qdf_mem_free(CE_state->dest_ring);

		/* epping */
		if (CE_state->timer_inited) {
			CE_state->timer_inited = false;
			qdf_timer_free(&CE_state->poll_timer);
		}
	}
	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
		/*
		 * srng targets carry an extra status ring.
		 * NOTE(review): this re-runs the Tx drain check and sizes
		 * the ring with sizeof(struct CE_src_desc) — presumably
		 * status descriptors share the src descriptor size;
		 * confirm against the srng ring allocation.
		 */
		ce_h2t_tx_ce_cleanup(copyeng);

		if (CE_state->status_ring->shadow_base_unaligned)
			qdf_mem_free(
				CE_state->status_ring->shadow_base_unaligned);

		if (CE_state->status_ring->base_addr_owner_space_unaligned)
			qdf_mem_free_consistent(scn->qdf_dev,
						scn->qdf_dev->dev,
					    (CE_state->status_ring->nentries *
					     sizeof(struct CE_src_desc) +
					     CE_DESC_RING_ALIGN),
					    CE_state->status_ring->
					    base_addr_owner_space_unaligned,
					    CE_state->status_ring->
					    base_addr_CE_space, 0);
		qdf_mem_free(CE_state->status_ring);
	}

	qdf_spinlock_destroy(&CE_state->ce_index_lock);
	qdf_mem_free(CE_state);
}
1365
Komal Seelam5584a7c2016-02-24 19:22:48 +05301366void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001367{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301368 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001369
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301370 qdf_mem_zero(&hif_state->msg_callbacks_pending,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001371 sizeof(hif_state->msg_callbacks_pending));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301372 qdf_mem_zero(&hif_state->msg_callbacks_current,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001373 sizeof(hif_state->msg_callbacks_current));
1374}
1375
1376/* Send the first nbytes bytes of the buffer */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301377QDF_STATUS
Komal Seelam5584a7c2016-02-24 19:22:48 +05301378hif_send_head(struct hif_opaque_softc *hif_ctx,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001379 uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301380 qdf_nbuf_t nbuf, unsigned int data_attr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001381{
Komal Seelam644263d2016-02-22 20:45:49 +05301382 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05301383 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001384 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1385 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
1386 int bytes = nbytes, nfrags = 0;
1387 struct ce_sendlist sendlist;
1388 int status, i = 0;
1389 unsigned int mux_id = 0;
1390
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301391 QDF_ASSERT(nbytes <= qdf_nbuf_len(nbuf));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001392
1393 transfer_id =
1394 (mux_id & MUX_ID_MASK) |
1395 (transfer_id & TRANSACTION_ID_MASK);
1396 data_attr &= DESC_DATA_FLAG_MASK;
1397 /*
1398 * The common case involves sending multiple fragments within a
1399 * single download (the tx descriptor and the tx frame header).
1400 * So, optimize for the case of multiple fragments by not even
1401 * checking whether it's necessary to use a sendlist.
1402 * The overhead of using a sendlist for a single buffer download
1403 * is not a big deal, since it happens rarely (for WMI messages).
1404 */
1405 ce_sendlist_init(&sendlist);
1406 do {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301407 qdf_dma_addr_t frag_paddr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001408 int frag_bytes;
1409
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301410 frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
1411 frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001412 /*
1413 * Clear the packet offset for all but the first CE desc.
1414 */
1415 if (i++ > 0)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301416 data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001417
1418 status = ce_sendlist_buf_add(&sendlist, frag_paddr,
1419 frag_bytes >
1420 bytes ? bytes : frag_bytes,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301421 qdf_nbuf_get_frag_is_wordstream
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001422 (nbuf,
1423 nfrags) ? 0 :
1424 CE_SEND_FLAG_SWAP_DISABLE,
1425 data_attr);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301426 if (status != QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001427 HIF_ERROR("%s: error, frag_num %d larger than limit",
1428 __func__, nfrags);
1429 return status;
1430 }
1431 bytes -= frag_bytes;
1432 nfrags++;
1433 } while (bytes > 0);
1434
1435 /* Make sure we have resources to handle this request */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301436 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001437 if (pipe_info->num_sends_allowed < nfrags) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301438 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001439 ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301440 return QDF_STATUS_E_RESOURCES;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001441 }
1442 pipe_info->num_sends_allowed -= nfrags;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301443 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001444
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301445 if (qdf_unlikely(ce_hdl == NULL)) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001446 HIF_ERROR("%s: error CE handle is null", __func__);
1447 return A_ERROR;
1448 }
1449
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301450 QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301451 DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
Nirav Shaheaa20d82016-04-25 18:01:05 +05301452 qdf_nbuf_data_addr(nbuf),
Nirav Shah29beae02016-04-26 22:58:54 +05301453 sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001454 status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301455 QDF_ASSERT(status == QDF_STATUS_SUCCESS);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001456
1457 return status;
1458}
1459
Komal Seelam5584a7c2016-02-24 19:22:48 +05301460void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
1461 int force)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001462{
Komal Seelam644263d2016-02-22 20:45:49 +05301463 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05301464 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Komal Seelam644263d2016-02-22 20:45:49 +05301465
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001466 if (!force) {
1467 int resources;
1468 /*
1469 * Decide whether to actually poll for completions, or just
1470 * wait for a later chance. If there seem to be plenty of
1471 * resources left, then just wait, since checking involves
1472 * reading a CE register, which is a relatively expensive
1473 * operation.
1474 */
Komal Seelam644263d2016-02-22 20:45:49 +05301475 resources = hif_get_free_queue_number(hif_ctx, pipe);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001476 /*
1477 * If at least 50% of the total resources are still available,
1478 * don't bother checking again yet.
1479 */
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05301480 if (resources > (hif_state->host_ce_config[pipe].src_nentries >> 1)) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001481 return;
1482 }
1483 }
Houston Hoffman56e0d702016-05-05 17:48:06 -07001484#if ATH_11AC_TXCOMPACT
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001485 ce_per_engine_servicereap(scn, pipe);
1486#else
1487 ce_per_engine_service(scn, pipe);
1488#endif
1489}
1490
Komal Seelam5584a7c2016-02-24 19:22:48 +05301491uint16_t
1492hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001493{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301494 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001495 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1496 uint16_t rv;
1497
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301498 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001499 rv = pipe_info->num_sends_allowed;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301500 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001501 return rv;
1502}
1503
/**
 * hif_pci_ce_send_done() - CE-layer callback when a send to Target completes
 * @copyeng: CE handle the completion arrived on
 * @ce_context: pipe info registered with the CE (cast back below)
 * @transfer_context: caller's nbuf, or CE_SENDLIST_ITEM_CTXT for
 *                    intermediate sendlist fragments
 * @CE_data: DMA address of the completed descriptor's buffer
 * @nbytes: completed byte count
 * @transfer_id: upper-layer transfer id
 * @sw_index: ring software index at completion
 * @hw_index: ring hardware index at completion
 * @toeplitz_hash_result: hash result passed through to the tx handler
 *
 * Drains every available completion via ce_completed_send_next(),
 * returning one send credit per descriptor.  The upper-layer tx
 * completion handler fires only for the final fragment; during target
 * reset the buffer is unmapped and freed locally instead.
 *
 * Return: none
 */
static void
hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int sw_index, unsigned int hw_index,
		     unsigned int toeplitz_hash_result)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	unsigned int sw_idx = sw_index, hw_idx = hw_index;
	struct hif_msg_callbacks *msg_callbacks =
		&pipe_info->pipe_callbacks;

	do {
		/*
		 * The upper layer callback will be triggered
		 * when the last fragment is completed.
		 */
		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
			if (scn->target_status == TARGET_STATUS_RESET) {

				qdf_nbuf_unmap_single(scn->qdf_dev,
						      transfer_context,
						      QDF_DMA_TO_DEVICE);
				qdf_nbuf_free(transfer_context);
			} else
				msg_callbacks->txCompletionHandler(
					msg_callbacks->Context,
					transfer_context, transfer_id,
					toeplitz_hash_result);
		}

		/* return one send credit per reaped descriptor */
		qdf_spin_lock(&pipe_info->completion_freeq_lock);
		pipe_info->num_sends_allowed++;
		qdf_spin_unlock(&pipe_info->completion_freeq_lock);
	} while (ce_completed_send_next(copyeng,
			&ce_context, &transfer_context,
			&CE_data, &nbytes, &transfer_id,
			&sw_idx, &hw_idx,
			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
}
1548
Houston Hoffman910c6262015-09-28 12:56:25 -07001549/**
1550 * hif_ce_do_recv(): send message from copy engine to upper layers
1551 * @msg_callbacks: structure containing callback and callback context
1552 * @netbuff: skb containing message
1553 * @nbytes: number of bytes in the message
1554 * @pipe_info: used for the pipe_number info
1555 *
1556 * Checks the packet length, configures the lenght in the netbuff,
1557 * and calls the upper layer callback.
1558 *
1559 * return: None
1560 */
1561static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301562 qdf_nbuf_t netbuf, int nbytes,
Houston Hoffman910c6262015-09-28 12:56:25 -07001563 struct HIF_CE_pipe_info *pipe_info) {
1564 if (nbytes <= pipe_info->buf_sz) {
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301565 qdf_nbuf_set_pktlen(netbuf, nbytes);
Houston Hoffman910c6262015-09-28 12:56:25 -07001566 msg_callbacks->
1567 rxCompletionHandler(msg_callbacks->Context,
1568 netbuf, pipe_info->pipe_num);
1569 } else {
1570 HIF_ERROR("%s: Invalid Rx msg buf:%p nbytes:%d",
1571 __func__, netbuf, nbytes);
Houston Hoffman1c728302017-03-10 16:58:49 -08001572
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301573 qdf_nbuf_free(netbuf);
Houston Hoffman910c6262015-09-28 12:56:25 -07001574 }
1575}
1576
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001577/* Called by lower (CE) layer when data is received from the Target. */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001578static void
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001579hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301580 void *transfer_context, qdf_dma_addr_t CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001581 unsigned int nbytes, unsigned int transfer_id,
1582 unsigned int flags)
1583{
1584 struct HIF_CE_pipe_info *pipe_info =
1585 (struct HIF_CE_pipe_info *)ce_context;
1586 struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
Houston Hoffman18c7fc52015-09-02 11:44:42 -07001587 struct CE_state *ce_state = (struct CE_state *) copyeng;
Komal Seelam644263d2016-02-22 20:45:49 +05301588 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffmane02e12d2016-03-14 21:11:36 -07001589#ifdef HIF_PCI
1590 struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
1591#endif
Houston Hoffman910c6262015-09-28 12:56:25 -07001592 struct hif_msg_callbacks *msg_callbacks =
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05301593 &pipe_info->pipe_callbacks;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001594
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001595 do {
Houston Hoffmane02e12d2016-03-14 21:11:36 -07001596#ifdef HIF_PCI
1597 hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
1598#endif
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301599 qdf_nbuf_unmap_single(scn->qdf_dev,
1600 (qdf_nbuf_t) transfer_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301601 QDF_DMA_FROM_DEVICE);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001602
Houston Hoffman910c6262015-09-28 12:56:25 -07001603 atomic_inc(&pipe_info->recv_bufs_needed);
1604 hif_post_recv_buffers_for_pipe(pipe_info);
Komal Seelam6ee55902016-04-11 17:11:07 +05301605 if (scn->target_status == TARGET_STATUS_RESET)
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301606 qdf_nbuf_free(transfer_context);
Houston Hoffman49794a32015-12-21 12:14:56 -08001607 else
1608 hif_ce_do_recv(msg_callbacks, transfer_context,
Houston Hoffman9c0f80a2015-09-28 18:36:36 -07001609 nbytes, pipe_info);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001610
1611 /* Set up force_break flag if num of receices reaches
1612 * MAX_NUM_OF_RECEIVES */
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001613 ce_state->receive_count++;
Houston Hoffman05652722016-04-29 16:58:59 -07001614 if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
Houston Hoffman18c7fc52015-09-02 11:44:42 -07001615 ce_state->force_break = 1;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001616 break;
1617 }
1618 } while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
1619 &CE_data, &nbytes, &transfer_id,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301620 &flags) == QDF_STATUS_SUCCESS);
Houston Hoffmanf4607852015-12-17 17:14:40 -08001621
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001622}
1623
1624/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
1625
1626void
Komal Seelam5584a7c2016-02-24 19:22:48 +05301627hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001628 struct hif_msg_callbacks *callbacks)
1629{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301630 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001631
1632#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
1633 spin_lock_init(&pcie_access_log_lock);
1634#endif
1635 /* Save callbacks for later installation */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301636 qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001637 sizeof(hif_state->msg_callbacks_pending));
1638
1639}
1640
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001641static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001642{
1643 struct CE_handle *ce_diag = hif_state->ce_diag;
1644 int pipe_num;
Komal Seelam644263d2016-02-22 20:45:49 +05301645 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07001646 struct hif_msg_callbacks *hif_msg_callbacks =
1647 &hif_state->msg_callbacks_current;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001648
1649 /* daemonize("hif_compl_thread"); */
1650
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001651 if (scn->ce_count == 0) {
Houston Hoffmanc50572b2016-06-08 19:49:46 -07001652 HIF_ERROR("%s: Invalid ce_count", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001653 return -EINVAL;
1654 }
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07001655
1656 if (!hif_msg_callbacks ||
1657 !hif_msg_callbacks->rxCompletionHandler ||
1658 !hif_msg_callbacks->txCompletionHandler) {
1659 HIF_ERROR("%s: no completion handler registered", __func__);
1660 return -EFAULT;
1661 }
1662
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001663 A_TARGET_ACCESS_LIKELY(scn);
1664 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1665 struct CE_attr attr;
1666 struct HIF_CE_pipe_info *pipe_info;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001667
1668 pipe_info = &hif_state->pipe_info[pipe_num];
1669 if (pipe_info->ce_hdl == ce_diag) {
1670 continue; /* Handle Diagnostic CE specially */
1671 }
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05301672 attr = hif_state->host_ce_config[pipe_num];
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001673 if (attr.src_nentries) {
1674 /* pipe used to send to target */
1675 HIF_INFO_MED("%s: pipe_num:%d pipe_info:0x%p",
1676 __func__, pipe_num, pipe_info);
1677 ce_send_cb_register(pipe_info->ce_hdl,
1678 hif_pci_ce_send_done, pipe_info,
1679 attr.flags & CE_ATTR_DISABLE_INTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001680 pipe_info->num_sends_allowed = attr.src_nentries - 1;
1681 }
1682 if (attr.dest_nentries) {
1683 /* pipe used to receive from target */
1684 ce_recv_cb_register(pipe_info->ce_hdl,
1685 hif_pci_ce_recv_data, pipe_info,
1686 attr.flags & CE_ATTR_DISABLE_INTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001687 }
Houston Hoffman6666df72015-11-30 16:48:35 -08001688
1689 if (attr.src_nentries)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301690 qdf_spinlock_create(&pipe_info->completion_freeq_lock);
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05301691
1692 qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
1693 sizeof(pipe_info->pipe_callbacks));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001694 }
Houston Hoffman6666df72015-11-30 16:48:35 -08001695
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001696 A_TARGET_ACCESS_UNLIKELY(scn);
1697 return 0;
1698}
1699
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001700/*
1701 * Install pending msg callbacks.
1702 *
1703 * TBDXXX: This hack is needed because upper layers install msg callbacks
1704 * for use with HTC before BMI is done; yet this HIF implementation
1705 * needs to continue to use BMI msg callbacks. Really, upper layers
1706 * should not register HTC callbacks until AFTER BMI phase.
1707 */
Komal Seelam644263d2016-02-22 20:45:49 +05301708static void hif_msg_callbacks_install(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001709{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301710 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001711
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301712 qdf_mem_copy(&hif_state->msg_callbacks_current,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001713 &hif_state->msg_callbacks_pending,
1714 sizeof(hif_state->msg_callbacks_pending));
1715}
1716
Komal Seelam5584a7c2016-02-24 19:22:48 +05301717void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
1718 uint8_t *DLPipe)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001719{
1720 int ul_is_polled, dl_is_polled;
1721
Komal Seelam644263d2016-02-22 20:45:49 +05301722 (void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001723 ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
1724}
1725
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001726/**
1727 * hif_dump_pipe_debug_count() - Log error count
Komal Seelam644263d2016-02-22 20:45:49 +05301728 * @scn: hif_softc pointer.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001729 *
1730 * Output the pipe error counts of each pipe to log file
1731 *
1732 * Return: N/A
1733 */
Komal Seelam644263d2016-02-22 20:45:49 +05301734void hif_dump_pipe_debug_count(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001735{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301736 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001737 int pipe_num;
1738
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001739 if (hif_state == NULL) {
1740 HIF_ERROR("%s hif_state is NULL", __func__);
1741 return;
1742 }
1743 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1744 struct HIF_CE_pipe_info *pipe_info;
1745
1746 pipe_info = &hif_state->pipe_info[pipe_num];
1747
1748 if (pipe_info->nbuf_alloc_err_count > 0 ||
1749 pipe_info->nbuf_dma_err_count > 0 ||
1750 pipe_info->nbuf_ce_enqueue_err_count)
1751 HIF_ERROR(
1752 "%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
1753 __func__, pipe_info->pipe_num,
1754 atomic_read(&pipe_info->recv_bufs_needed),
1755 pipe_info->nbuf_alloc_err_count,
1756 pipe_info->nbuf_dma_err_count,
1757 pipe_info->nbuf_ce_enqueue_err_count);
1758 }
1759}
1760
/**
 * hif_post_recv_buffers_failure() - account for a failed rx buffer post
 * @pipe_info: pipe on which the post attempt failed
 * @nbuf: buffer involved (NULL when the allocation itself failed)
 * @error_cnt: per-pipe counter for this failure type
 * @failure_type: event type recorded into the CE descriptor history
 * @failure_type_string: human-readable failure name for the log message
 *
 * Returns the buffer credit to the pipe, bumps the matching error
 * counter under the pipe lock, logs and records the event.  If this was
 * the pipe's last outstanding buffer, schedules the OOM replenish work
 * item, because no further rx completion would otherwise trigger a
 * refill.
 */
static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
					  void *nbuf, uint32_t *error_cnt,
					  enum hif_ce_event_type failure_type,
					  const char *failure_type_string)
{
	/* give the unposted buffer's credit back to the pipe */
	int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
	struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
	int ce_id = CE_state->id;
	uint32_t error_cnt_tmp;

	/* error counters are also adjusted by the refill path; update
	 * under the pipe lock
	 */
	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
	error_cnt_tmp = ++(*error_cnt);
	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
	HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
		  __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
		  failure_type_string);
	hif_record_ce_desc_event(scn, ce_id, failure_type,
				 NULL, nbuf, bufs_needed_tmp);
	/* if we fail to allocate the last buffer for an rx pipe,
	 *	there is no trigger to refill the ce and we will
	 *	eventually crash
	 */
	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
		qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);

}
1788
1789
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08001790
1791
/**
 * hif_post_recv_buffers_for_pipe() - replenish rx buffers on one pipe
 * @pipe_info: pipe to refill
 *
 * Allocates, DMA-maps and enqueues one nbuf for every outstanding unit
 * of recv_bufs_needed.  On any failure the matching per-failure-type
 * counter is bumped via hif_post_recv_buffers_failure() and the
 * function gives up for now; the credit it returned there means a later
 * completion (or the OOM work item) retries.
 *
 * Locking: recv_bufs_needed_lock is held while the needed-count is
 * tested and while the trailing error-count adjustment runs, but is
 * dropped across each buffer's allocate/map/enqueue.
 *
 * Return: 0 when the pipe is fully replenished, 1 on any failure.
 */
static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	qdf_size_t buf_sz;
	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
	QDF_STATUS ret;
	uint32_t bufs_posted = 0;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return 0;
	}

	ce_hdl = pipe_info->ce_hdl;

	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
		qdf_dma_addr_t CE_data;	/* CE space buffer address */
		qdf_nbuf_t nbuf;
		int status;

		/* claim one credit, then drop the lock for the slow work */
		atomic_dec(&pipe_info->recv_bufs_needed);
		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
		if (!nbuf) {
			hif_post_recv_buffers_failure(pipe_info, nbuf,
					&pipe_info->nbuf_alloc_err_count,
					HIF_RX_NBUF_ALLOC_FAILURE,
					"HIF_RX_NBUF_ALLOC_FAILURE");
			return 1;
		}

		/*
		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
		 * CE_data = dma_map_single(dev, data, buf_sz, );
		 * DMA_FROM_DEVICE);
		 */
		ret = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
					  QDF_DMA_FROM_DEVICE);

		if (unlikely(ret != QDF_STATUS_SUCCESS)) {
			hif_post_recv_buffers_failure(pipe_info, nbuf,
					&pipe_info->nbuf_dma_err_count,
					HIF_RX_NBUF_MAP_FAILURE,
					"HIF_RX_NBUF_MAP_FAILURE");
			qdf_nbuf_free(nbuf);
			return 1;
		}

		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);

		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
					       buf_sz, DMA_FROM_DEVICE);
		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		if (unlikely(status != EOK)) {
			hif_post_recv_buffers_failure(pipe_info, nbuf,
					&pipe_info->nbuf_ce_enqueue_err_count,
					HIF_RX_NBUF_ENQUEUE_FAILURE,
					"HIF_RX_NBUF_ENQUEUE_FAILURE");

			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
						QDF_DMA_FROM_DEVICE);
			qdf_nbuf_free(nbuf);
			return 1;
		}

		/* retake the lock before re-testing the loop condition */
		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
		bufs_posted++;
	}
	/* each successful post pays down one recorded error of each type,
	 * never letting the counters underflow
	 */
	pipe_info->nbuf_alloc_err_count =
		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
	pipe_info->nbuf_dma_err_count =
		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
	pipe_info->nbuf_ce_enqueue_err_count =
		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
		pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;

	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

	return 0;
}
1878
1879/*
1880 * Try to post all desired receive buffers for all pipes.
1881 * Returns 0 if all desired buffers are posted,
1882 * non-zero if were were unable to completely
1883 * replenish receive buffers.
1884 */
Komal Seelam644263d2016-02-22 20:45:49 +05301885static int hif_post_recv_buffers(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001886{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301887 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001888 int pipe_num, rv = 0;
Houston Hoffman85925072016-05-06 17:02:18 -07001889 struct CE_state *ce_state;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001890
1891 A_TARGET_ACCESS_LIKELY(scn);
1892 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1893 struct HIF_CE_pipe_info *pipe_info;
Houston Hoffman85925072016-05-06 17:02:18 -07001894 ce_state = scn->ce_id_to_state[pipe_num];
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001895 pipe_info = &hif_state->pipe_info[pipe_num];
Houston Hoffman85925072016-05-06 17:02:18 -07001896
1897 if (hif_is_nss_wifi_enabled(scn) &&
1898 ce_state && (ce_state->htt_rx_data)) {
1899 continue;
1900 }
1901
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001902 if (hif_post_recv_buffers_for_pipe(pipe_info)) {
1903 rv = 1;
1904 goto done;
1905 }
1906 }
1907
1908done:
1909 A_TARGET_ACCESS_UNLIKELY(scn);
1910
1911 return rv;
1912}
1913
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301914QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001915{
Komal Seelam644263d2016-02-22 20:45:49 +05301916 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05301917 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001918
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001919 hif_update_fastpath_recv_bufs_cnt(scn);
1920
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07001921 hif_msg_callbacks_install(scn);
1922
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001923 if (hif_completion_thread_startup(hif_state))
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301924 return QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001925
Houston Hoffman271951f2016-11-12 15:24:27 -08001926 /* enable buffer cleanup */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001927 hif_state->started = true;
1928
Houston Hoffman271951f2016-11-12 15:24:27 -08001929 /* Post buffers once to start things off. */
1930 if (hif_post_recv_buffers(scn)) {
1931 /* cleanup is done in hif_ce_disable */
1932 HIF_ERROR("%s:failed to post buffers", __func__);
1933 return QDF_STATUS_E_FAILURE;
1934 }
1935
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301936 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001937}
1938
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001939static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001940{
Komal Seelam644263d2016-02-22 20:45:49 +05301941 struct hif_softc *scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001942 struct CE_handle *ce_hdl;
1943 uint32_t buf_sz;
1944 struct HIF_CE_state *hif_state;
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301945 qdf_nbuf_t netbuf;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301946 qdf_dma_addr_t CE_data;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001947 void *per_CE_context;
1948
1949 buf_sz = pipe_info->buf_sz;
1950 if (buf_sz == 0) {
1951 /* Unused Copy Engine */
1952 return;
1953 }
1954
1955 hif_state = pipe_info->HIF_CE_state;
1956 if (!hif_state->started) {
1957 return;
1958 }
1959
Komal Seelam02cf2f82016-02-22 20:44:25 +05301960 scn = HIF_GET_SOFTC(hif_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001961 ce_hdl = pipe_info->ce_hdl;
1962
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301963 if (scn->qdf_dev == NULL) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001964 return;
1965 }
1966 while (ce_revoke_recv_next
1967 (ce_hdl, &per_CE_context, (void **)&netbuf,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301968 &CE_data) == QDF_STATUS_SUCCESS) {
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301969 qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301970 QDF_DMA_FROM_DEVICE);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301971 qdf_nbuf_free(netbuf);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001972 }
1973}
1974
/**
 * hif_send_buffer_cleanup_on_pipe() - reclaim unsent tx buffers on a pipe
 * @pipe_info: pipe to drain
 *
 * Cancels every pending send on the pipe's copy engine and hands each
 * buffer back via the registered txCompletionHandler so the upper layer
 * can free it.  Buffers queued on the HTT tx endpoint are not completed
 * here because they are freed elsewhere (see inline comment).  No-op
 * for unused pipes or before hif_start().
 */
static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	struct HIF_CE_state *hif_state;
	struct hif_softc *scn;
	qdf_nbuf_t netbuf;
	void *per_CE_context;
	qdf_dma_addr_t CE_data;
	unsigned int nbytes;
	unsigned int id;
	uint32_t buf_sz;
	uint32_t toeplitz_hash_result;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started) {
		/* nothing was ever posted, so there is nothing to reclaim */
		return;
	}

	scn = HIF_GET_SOFTC(hif_state);

	ce_hdl = pipe_info->ce_hdl;

	while (ce_cancel_send_next
		       (ce_hdl, &per_CE_context,
		       (void **)&netbuf, &CE_data, &nbytes,
		       &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
			/*
			 * Packets enqueued by htt_h2t_ver_req_msg() and
			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
			 * freed in htt_htc_misc_pkt_pool_free() in
			 * wlantl_close(), so do not free them here again
			 * by checking whether it's the endpoint
			 * which they are queued in.
			 */
			/* NOTE(review): this return also stops draining any
			 * remaining entries on this pipe - confirm that is
			 * the intended behavior
			 */
			if (id == scn->htc_htt_tx_endpoint)
				return;
			/* Indicate the completion to higher
			 * layer to free the buffer */
			if (pipe_info->pipe_callbacks.
			    txCompletionHandler)
				pipe_info->pipe_callbacks.
				txCompletionHandler(pipe_info->
					pipe_callbacks.Context,
					netbuf, id, toeplitz_hash_result);
		}
	}
}
2029
2030/*
2031 * Cleanup residual buffers for device shutdown:
2032 * buffers that were enqueued for receive
2033 * buffers that were to be sent
2034 * Note: Buffers that had completed but which were
2035 * not yet processed are on a completion queue. They
2036 * are handled when the completion thread shuts down.
2037 */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002038static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002039{
2040 int pipe_num;
Komal Seelam644263d2016-02-22 20:45:49 +05302041 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffman85925072016-05-06 17:02:18 -07002042 struct CE_state *ce_state;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002043
Komal Seelam02cf2f82016-02-22 20:44:25 +05302044 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002045 struct HIF_CE_pipe_info *pipe_info;
2046
Houston Hoffman85925072016-05-06 17:02:18 -07002047 ce_state = scn->ce_id_to_state[pipe_num];
2048 if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2049 ((ce_state->htt_tx_data) ||
2050 (ce_state->htt_rx_data))) {
2051 continue;
2052 }
2053
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002054 pipe_info = &hif_state->pipe_info[pipe_num];
2055 hif_recv_buffer_cleanup_on_pipe(pipe_info);
2056 hif_send_buffer_cleanup_on_pipe(pipe_info);
2057 }
2058}
2059
/* Drop all residual rx/tx buffers after a surprise device removal. */
void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_buffer_cleanup(HIF_GET_CE_STATE(scn));
}
2067
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08002068static void hif_destroy_oom_work(struct hif_softc *scn)
2069{
2070 struct CE_state *ce_state;
2071 int ce_id;
2072
2073 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2074 ce_state = scn->ce_id_to_state[ce_id];
2075 if (ce_state)
2076 qdf_destroy_work(scn->qdf_dev,
2077 &ce_state->oom_allocation_work);
2078 }
2079}
2080
/**
 * hif_ce_stop() - stop CE activity and release host-side CE resources
 * @scn: hif context
 *
 * Quiesces interrupts and deferred OOM work first, then tears down
 * host-side state: diagnostic procfs, residual rx/tx buffers, the copy
 * engines themselves, and the sleep timer.  The ordering matters: no
 * memory is released until irq/bottom-half contexts can no longer run.
 */
void hif_ce_stop(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num;

	/*
	 * before cleaning up any memory, ensure irq &
	 * bottom half contexts will not be re-entered
	 */
	hif_nointrs(scn);
	hif_destroy_oom_work(scn);
	scn->hif_init_done = false;

	/*
	 * At this point, asynchronous threads are stopped,
	 * The Target should not DMA nor interrupt, Host code may
	 * not initiate anything more.  So we just need to clean
	 * up Host-side state.
	 */

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	/* reclaim buffers still posted for rx or queued for tx */
	hif_buffer_cleanup(hif_state);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}

	if (hif_state->sleep_timer_init) {
		qdf_timer_stop(&hif_state->sleep_timer);
		qdf_timer_free(&hif_state->sleep_timer);
		hif_state->sleep_timer_init = false;
	}

	hif_state->started = false;
}
2127
Houston Hoffman748e1a62017-03-30 17:20:42 -07002128
Houston Hoffman854e67f2016-03-14 21:11:39 -07002129/**
2130 * hif_get_target_ce_config() - get copy engine configuration
2131 * @target_ce_config_ret: basic copy engine configuration
2132 * @target_ce_config_sz_ret: size of the basic configuration in bytes
2133 * @target_service_to_ce_map_ret: service mapping for the copy engines
2134 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
2135 * @target_shadow_reg_cfg_ret: shadow register configuration
2136 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
2137 *
2138 * providing accessor to these values outside of this file.
2139 * currently these are stored in static pointers to const sections.
2140 * there are multiple configurations that are selected from at compile time.
2141 * Runtime selection would need to consider mode, target type and bus type.
2142 *
2143 * Return: return by parameter.
2144 */
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302145void hif_get_target_ce_config(struct hif_softc *scn,
2146 struct CE_pipe_config **target_ce_config_ret,
Houston Hoffman748e1a62017-03-30 17:20:42 -07002147 uint32_t *target_ce_config_sz_ret,
Houston Hoffman854e67f2016-03-14 21:11:39 -07002148 struct service_to_pipe **target_service_to_ce_map_ret,
Houston Hoffman748e1a62017-03-30 17:20:42 -07002149 uint32_t *target_service_to_ce_map_sz_ret,
Houston Hoffman854e67f2016-03-14 21:11:39 -07002150 struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
Houston Hoffman748e1a62017-03-30 17:20:42 -07002151 uint32_t *shadow_cfg_sz_ret)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002152{
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302153 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2154
2155 *target_ce_config_ret = hif_state->target_ce_config;
2156 *target_ce_config_sz_ret = hif_state->target_ce_config_sz;
Houston Hoffman748e1a62017-03-30 17:20:42 -07002157
2158 hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
2159 target_service_to_ce_map_sz_ret);
Houston Hoffman854e67f2016-03-14 21:11:39 -07002160
2161 if (target_shadow_reg_cfg_ret)
2162 *target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
2163
2164 if (shadow_cfg_sz_ret)
2165 *shadow_cfg_sz_ret = shadow_cfg_sz;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002166}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002167
#ifdef CONFIG_SHADOW_V2
/* Dump the shadow-register v2 addresses being passed to the platform
 * driver in @cfg.
 */
static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
{
	int i;
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "%s: num_config %d\n", __func__, cfg->num_shadow_reg_v2_cfg);

	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		     "%s: i %d, val %x\n", __func__, i,
		     cfg->shadow_reg_v2_cfg[i].addr);
	}
}

#else
/* Shadow-register v2 support is compiled out; just log that fact. */
static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
{
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "%s: CONFIG_SHADOW_V2 not defined\n", __func__);
}
#endif
2189
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002190/**
2191 * hif_wlan_enable(): call the platform driver to enable wlan
Komal Seelambd7c51d2016-02-24 10:27:30 +05302192 * @scn: HIF Context
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002193 *
2194 * This function passes the con_mode and CE configuration to
2195 * platform driver to enable wlan.
2196 *
Houston Hoffman108da402016-03-14 21:11:24 -07002197 * Return: linux error code
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002198 */
Houston Hoffman108da402016-03-14 21:11:24 -07002199int hif_wlan_enable(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002200{
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002201 struct pld_wlan_enable_cfg cfg;
2202 enum pld_driver_mode mode;
Komal Seelambd7c51d2016-02-24 10:27:30 +05302203 uint32_t con_mode = hif_get_conparam(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002204
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302205 hif_get_target_ce_config(scn,
2206 (struct CE_pipe_config **)&cfg.ce_tgt_cfg,
Houston Hoffman854e67f2016-03-14 21:11:39 -07002207 &cfg.num_ce_tgt_cfg,
2208 (struct service_to_pipe **)&cfg.ce_svc_cfg,
2209 &cfg.num_ce_svc_pipe_cfg,
2210 (struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
2211 &cfg.num_shadow_reg_cfg);
2212
2213 /* translate from structure size to array size */
2214 cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
2215 cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
2216 cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002217
Houston Hoffman5141f9d2017-01-05 10:49:17 -08002218 hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
2219 &cfg.num_shadow_reg_v2_cfg);
2220
2221 hif_print_hal_shadow_register_cfg(&cfg);
2222
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302223 if (QDF_GLOBAL_FTM_MODE == con_mode)
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002224 mode = PLD_FTM;
Houston Hoffman75ef5a52016-04-14 17:15:49 -07002225 else if (QDF_IS_EPPING_ENABLED(con_mode))
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002226 mode = PLD_EPPING;
Peng Xu7b962532015-10-02 17:17:03 -07002227 else
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002228 mode = PLD_MISSION;
Peng Xu7b962532015-10-02 17:17:03 -07002229
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07002230 if (BYPASS_QMI)
2231 return 0;
2232 else
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002233 return pld_wlan_enable(scn->qdf_dev->dev, &cfg,
2234 mode, QWLAN_VERSIONSTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002235}
2236
Houston Hoffman75ef5a52016-04-14 17:15:49 -07002237#define CE_EPPING_USES_IRQ true
2238
Houston Hoffman108da402016-03-14 21:11:24 -07002239/**
2240 * hif_ce_prepare_config() - load the correct static tables.
2241 * @scn: hif context
2242 *
2243 * Epping uses different static attribute tables than mission mode.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002244 */
Houston Hoffman108da402016-03-14 21:11:24 -07002245void hif_ce_prepare_config(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002246{
Komal Seelambd7c51d2016-02-24 10:27:30 +05302247 uint32_t mode = hif_get_conparam(scn);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002248 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2249 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302250 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002251
Houston Hoffman10fedfc2017-01-23 15:23:09 -08002252 hif_state->ce_services = ce_services_attach(scn);
2253
Houston Hoffman710af5a2016-11-22 21:59:03 -08002254 scn->ce_count = HOST_CE_COUNT;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002255 /* if epping is enabled we need to use the epping configuration. */
Houston Hoffman75ef5a52016-04-14 17:15:49 -07002256 if (QDF_IS_EPPING_ENABLED(mode)) {
2257 if (CE_EPPING_USES_IRQ)
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302258 hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002259 else
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302260 hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
2261 hif_state->target_ce_config = target_ce_config_wlan_epping;
2262 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
Vishwajith Upendra70efc752016-04-18 11:23:49 -07002263 target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
2264 shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002265 }
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002266
2267 switch (tgt_info->target_type) {
2268 default:
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302269 hif_state->host_ce_config = host_ce_config_wlan;
2270 hif_state->target_ce_config = target_ce_config_wlan;
2271 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002272 break;
2273 case TARGET_TYPE_AR900B:
2274 case TARGET_TYPE_QCA9984:
2275 case TARGET_TYPE_IPQ4019:
2276 case TARGET_TYPE_QCA9888:
Venkateswara Swamy Bandaru5432c1b2016-10-12 19:00:40 +05302277 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
2278 hif_state->host_ce_config =
2279 host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
2280 } else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
2281 hif_state->host_ce_config =
2282 host_lowdesc_ce_cfg_wlan_ar900b;
2283 } else {
2284 hif_state->host_ce_config = host_ce_config_wlan_ar900b;
2285 }
2286
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302287 hif_state->target_ce_config = target_ce_config_wlan_ar900b;
2288 hif_state->target_ce_config_sz =
2289 sizeof(target_ce_config_wlan_ar900b);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002290
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002291 break;
2292
2293 case TARGET_TYPE_AR9888:
2294 case TARGET_TYPE_AR9888V2:
Venkateswara Swamy Bandaru5432c1b2016-10-12 19:00:40 +05302295 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
2296 hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888;
2297 } else {
2298 hif_state->host_ce_config = host_ce_config_wlan_ar9888;
2299 }
2300
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302301 hif_state->target_ce_config = target_ce_config_wlan_ar9888;
2302 hif_state->target_ce_config_sz =
2303 sizeof(target_ce_config_wlan_ar9888);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002304
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002305 break;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07002306
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05302307 case TARGET_TYPE_QCA8074:
Karunakar Dasinenif61cb072016-09-29 11:50:45 -07002308 if (scn->bus_type == QDF_BUS_TYPE_PCI) {
2309 hif_state->host_ce_config =
2310 host_ce_config_wlan_qca8074_pci;
2311 hif_state->target_ce_config =
2312 target_ce_config_wlan_qca8074_pci;
2313 hif_state->target_ce_config_sz =
2314 sizeof(target_ce_config_wlan_qca8074_pci);
2315 } else {
2316 hif_state->host_ce_config = host_ce_config_wlan_qca8074;
2317 hif_state->target_ce_config =
2318 target_ce_config_wlan_qca8074;
2319 hif_state->target_ce_config_sz =
2320 sizeof(target_ce_config_wlan_qca8074);
2321 }
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05302322 break;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07002323 case TARGET_TYPE_QCA6290:
2324 hif_state->host_ce_config = host_ce_config_wlan_qca6290;
2325 hif_state->target_ce_config = target_ce_config_wlan_qca6290;
2326 hif_state->target_ce_config_sz =
2327 sizeof(target_ce_config_wlan_qca6290);
Houston Hoffman748e1a62017-03-30 17:20:42 -07002328
Houston Hoffman710af5a2016-11-22 21:59:03 -08002329 scn->ce_count = QCA_6290_CE_COUNT;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07002330 break;
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002331 }
Houston Hoffman108da402016-03-14 21:11:24 -07002332}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002333
Houston Hoffman108da402016-03-14 21:11:24 -07002334/**
2335 * hif_ce_open() - do ce specific allocations
2336 * @hif_sc: pointer to hif context
2337 *
2338 * return: 0 for success or QDF_STATUS_E_NOMEM
2339 */
2340QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
2341{
2342 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002343
Venkateswara Swamy Bandaru9fd9af02016-09-20 20:27:31 +05302344 qdf_spinlock_create(&hif_state->irq_reg_lock);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302345 qdf_spinlock_create(&hif_state->keep_awake_lock);
Houston Hoffman108da402016-03-14 21:11:24 -07002346 return QDF_STATUS_SUCCESS;
2347}
2348
2349/**
2350 * hif_ce_close() - do ce specific free
2351 * @hif_sc: pointer to hif context
2352 */
2353void hif_ce_close(struct hif_softc *hif_sc)
2354{
Venkateswara Swamy Bandaru9fd9af02016-09-20 20:27:31 +05302355 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2356
2357 qdf_spinlock_destroy(&hif_state->irq_reg_lock);
Houston Hoffman108da402016-03-14 21:11:24 -07002358}
2359
2360/**
2361 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
2362 * @hif_sc: hif context
2363 *
2364 * uses state variables to support cleaning up when hif_config_ce fails.
2365 */
2366void hif_unconfig_ce(struct hif_softc *hif_sc)
2367{
2368 int pipe_num;
2369 struct HIF_CE_pipe_info *pipe_info;
2370 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2371
2372 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
2373 pipe_info = &hif_state->pipe_info[pipe_num];
2374 if (pipe_info->ce_hdl) {
2375 ce_unregister_irq(hif_state, (1 << pipe_num));
Houston Hoffman108da402016-03-14 21:11:24 -07002376 ce_fini(pipe_info->ce_hdl);
2377 pipe_info->ce_hdl = NULL;
2378 pipe_info->buf_sz = 0;
Houston Hoffman03f46572016-12-12 12:53:56 -08002379 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
Houston Hoffman108da402016-03-14 21:11:24 -07002380 }
2381 }
Houston Hoffman108da402016-03-14 21:11:24 -07002382 if (hif_sc->athdiag_procfs_inited) {
2383 athdiag_procfs_remove();
2384 hif_sc->athdiag_procfs_inited = false;
2385 }
2386}
2387
#ifdef CONFIG_BYPASS_QMI
#define FW_SHARED_MEM (2 * 1024 * 1024)

/**
 * hif_post_static_buf_to_target() - post static buffer to WLAN FW
 * @scn: pointer to HIF structure
 *
 * WLAN FW needs 2MB memory from DDR when QMI is disabled.
 * The DMA-coherent buffer is handed over to the firmware for the
 * lifetime of the target and is intentionally never freed by the host.
 *
 * Return: void
 */
static void hif_post_static_buf_to_target(struct hif_softc *scn)
{
	void *target_va;
	phys_addr_t target_pa;

	target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
				FW_SHARED_MEM, &target_pa);
	if (NULL == target_va) {
		HIF_TRACE("Memory allocation failed could not post target buf");
		return;
	}
	/* publish the buffer's physical address to the target */
	hif_write32_mb(scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
	HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
}
#else
/* No-op when QMI is in use; firmware memory is negotiated over QMI. */
static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
{
}
#endif
2419
#ifdef WLAN_SUSPEND_RESUME_TEST
/* Initialize the work item used by the suspend/resume unit-test to
 * simulate an apps-side resume request.
 */
static void hif_fake_apps_init_ctx(struct hif_softc *scn)
{
	INIT_WORK(&scn->fake_apps_ctx.resume_work,
		  hif_fake_apps_resume_work);
}
#else
/* Stub when the suspend/resume test feature is compiled out. */
static inline void hif_fake_apps_init_ctx(struct hif_softc *scn) {}
#endif
2429
/**
 * hif_config_ce() - configure copy engines
 * @scn: hif context
 *
 * Prepares fw, copy engine hardware and host sw according
 * to the attributes selected by hif_ce_prepare_config.
 *
 * also calls athdiag_procfs_init
 *
 * On any failure, hif_unconfig_ce() is invoked to release whatever
 * was set up so far (it tolerates partial initialization).
 *
 * return: 0 for success nonzero for failure.
 */
int hif_config_ce(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct HIF_CE_pipe_info *pipe_info;
	int pipe_num;
	struct CE_state *ce_state;
#ifdef ADRASTEA_SHADOW_REGISTERS
	int i;
#endif
	QDF_STATUS rv = QDF_STATUS_SUCCESS;

	scn->notice_send = true;

	/* post the 2MB static firmware buffer (no-op unless BYPASS_QMI) */
	hif_post_static_buf_to_target(scn);

	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;

	hif_config_rri_on_ddr(scn);

	/* bring up each CE pipe: ce_init, then tasklet, then irq */
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr *attr;
		pipe_info = &hif_state->pipe_info[pipe_num];
		pipe_info->pipe_num = pipe_num;
		pipe_info->HIF_CE_state = hif_state;
		attr = &hif_state->host_ce_config[pipe_num];

		pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
		ce_state = scn->ce_id_to_state[pipe_num];
		qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
		QDF_ASSERT(pipe_info->ce_hdl != NULL);
		if (pipe_info->ce_hdl == NULL) {
			rv = QDF_STATUS_E_FAILURE;
			A_TARGET_ACCESS_UNLIKELY(scn);
			goto err;
		}

		if (attr->flags & CE_ATTR_DIAG) {
			/* Reserve the ultimate CE for
			 * Diagnostic Window support; no tasklet/irq for it. */
			hif_state->ce_diag = pipe_info->ce_hdl;
			continue;
		}

		/* NSS offload owns the HTT rx data CEs; skip host setup */
		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
				(ce_state->htt_rx_data))
			continue;

		pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
		if (attr->dest_nentries > 0) {
			atomic_set(&pipe_info->recv_bufs_needed,
				   init_buffer_count(attr->dest_nentries - 1));
			/*SRNG based CE has one entry less */
			if (ce_srng_based(scn))
				atomic_dec(&pipe_info->recv_bufs_needed);
		} else {
			atomic_set(&pipe_info->recv_bufs_needed, 0);
		}
		ce_tasklet_init(hif_state, (1 << pipe_num));
		ce_register_irq(hif_state, (1 << pipe_num));
	}

	if (athdiag_procfs_init(scn) != 0) {
		A_TARGET_ACCESS_UNLIKELY(scn);
		goto err;
	}
	scn->athdiag_procfs_inited = true;

	HIF_INFO_MED("%s: ce_init done", __func__);

	init_tasklet_workers(hif_hdl);
	hif_fake_apps_init_ctx(scn);

	HIF_TRACE("%s: X, ret = %d", __func__, rv);

#ifdef ADRASTEA_SHADOW_REGISTERS
	HIF_INFO("%s, Using Shadow Registers instead of CE Registers", __func__);
	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
		HIF_INFO("%s Shadow Register%d is mapped to address %x",
			 __func__, i,
			 (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
	}
#endif

	/* rv is SUCCESS here, so this evaluates to 0 (success) */
	return rv != QDF_STATUS_SUCCESS;

err:
	/* Failure, so clean up */
	hif_unconfig_ce(scn);
	HIF_TRACE("%s: X, ret = %d", __func__, rv);
	/* always nonzero: reports failure to the caller */
	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
}
2533
#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
 * @hif_ctx: hif opaque context
 * @handler: Callback funtcion
 * @context: handle for callback function
 *
 * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
 */
int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
				fastpath_msg_handler handler,
				void *context)
{
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	int i;

	if (!scn) {
		HIF_ERROR("%s: scn is NULL", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	if (!scn->fastpath_mode_on) {
		HIF_WARN("%s: Fastpath mode disabled", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	/* Install the handler on every HTT rx data CE. Guard against NULL
	 * entries in ce_id_to_state, consistent with the LRO registration
	 * paths in this file.
	 */
	for (i = 0; i < scn->ce_count; i++) {
		ce_state = scn->ce_id_to_state[i];
		if (ce_state && ce_state->htt_rx_data) {
			ce_state->fastpath_handler = handler;
			ce_state->context = context;
		}
	}

	return QDF_STATUS_SUCCESS;
}
#endif
2572
#ifdef IPA_OFFLOAD
/**
 * hif_ce_ipa_get_ce_resource() - get uc resource on hif
 * @scn: bus context
 * @ce_sr_base_paddr: copyengine source ring base physical address
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * IPA micro controller data path offload feature enabled,
 * HIF should release copy engine related resource information to IPA UC
 * IPA UC will access hardware resource with released information
 *
 * Return: None
 */
void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
			     qdf_dma_addr_t *ce_sr_base_paddr,
			     uint32_t *ce_sr_ring_size,
			     qdf_dma_addr_t *ce_reg_paddr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	/* the CE dedicated to the IPA micro controller */
	struct HIF_CE_pipe_info *pipe_info =
		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;

	/* CE layer fills in the ring/register addresses for IPA UC */
	ce_ipa_get_resource(ce_hdl, ce_sr_base_paddr, ce_sr_ring_size,
			    ce_reg_paddr);
}
#endif /* IPA_OFFLOAD */
2602
2603
2604#ifdef ADRASTEA_SHADOW_REGISTERS
2605
2606/*
2607 Current shadow register config
2608
2609 -----------------------------------------------------------
2610 Shadow Register | CE | src/dst write index
2611 -----------------------------------------------------------
2612 0 | 0 | src
2613 1 No Config - Doesn't point to anything
2614 2 No Config - Doesn't point to anything
2615 3 | 3 | src
2616 4 | 4 | src
2617 5 | 5 | src
2618 6 No Config - Doesn't point to anything
2619 7 | 7 | src
2620 8 No Config - Doesn't point to anything
2621 9 No Config - Doesn't point to anything
2622 10 No Config - Doesn't point to anything
2623 11 No Config - Doesn't point to anything
2624 -----------------------------------------------------------
2625 12 No Config - Doesn't point to anything
2626 13 | 1 | dst
2627 14 | 2 | dst
2628 15 No Config - Doesn't point to anything
2629 16 No Config - Doesn't point to anything
2630 17 No Config - Doesn't point to anything
2631 18 No Config - Doesn't point to anything
2632 19 | 7 | dst
2633 20 | 8 | dst
2634 21 No Config - Doesn't point to anything
2635 22 No Config - Doesn't point to anything
2636 23 No Config - Doesn't point to anything
2637 -----------------------------------------------------------
2638
2639
2640 ToDo - Move shadow register config to following in the future
2641 This helps free up a block of shadow registers towards the end.
2642 Can be used for other purposes
2643
2644 -----------------------------------------------------------
2645 Shadow Register | CE | src/dst write index
2646 -----------------------------------------------------------
2647 0 | 0 | src
2648 1 | 3 | src
2649 2 | 4 | src
2650 3 | 5 | src
2651 4 | 7 | src
2652 -----------------------------------------------------------
2653 5 | 1 | dst
2654 6 | 2 | dst
2655 7 | 7 | dst
2656 8 | 8 | dst
2657 -----------------------------------------------------------
2658 9 No Config - Doesn't point to anything
2659 12 No Config - Doesn't point to anything
2660 13 No Config - Doesn't point to anything
2661 14 No Config - Doesn't point to anything
2662 15 No Config - Doesn't point to anything
2663 16 No Config - Doesn't point to anything
2664 17 No Config - Doesn't point to anything
2665 18 No Config - Doesn't point to anything
2666 19 No Config - Doesn't point to anything
2667 20 No Config - Doesn't point to anything
2668 21 No Config - Doesn't point to anything
2669 22 No Config - Doesn't point to anything
2670 23 No Config - Doesn't point to anything
2671 -----------------------------------------------------------
2672*/
2673
Komal Seelam644263d2016-02-22 20:45:49 +05302674u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002675{
2676 u32 addr = 0;
Houston Hoffmane6330442016-02-26 12:19:11 -08002677 u32 ce = COPY_ENGINE_ID(ctrl_addr);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002678
Houston Hoffmane6330442016-02-26 12:19:11 -08002679 switch (ce) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002680 case 0:
2681 addr = SHADOW_VALUE0;
2682 break;
2683 case 3:
2684 addr = SHADOW_VALUE3;
2685 break;
2686 case 4:
2687 addr = SHADOW_VALUE4;
2688 break;
2689 case 5:
2690 addr = SHADOW_VALUE5;
2691 break;
2692 case 7:
2693 addr = SHADOW_VALUE7;
2694 break;
2695 default:
Houston Hoffmane6330442016-02-26 12:19:11 -08002696 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302697 QDF_ASSERT(0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002698 }
2699 return addr;
2700
2701}
2702
Komal Seelam644263d2016-02-22 20:45:49 +05302703u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002704{
2705 u32 addr = 0;
Houston Hoffmane6330442016-02-26 12:19:11 -08002706 u32 ce = COPY_ENGINE_ID(ctrl_addr);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002707
Houston Hoffmane6330442016-02-26 12:19:11 -08002708 switch (ce) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002709 case 1:
2710 addr = SHADOW_VALUE13;
2711 break;
2712 case 2:
2713 addr = SHADOW_VALUE14;
2714 break;
Vishwajith Upendra70efc752016-04-18 11:23:49 -07002715 case 5:
2716 addr = SHADOW_VALUE17;
2717 break;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002718 case 7:
2719 addr = SHADOW_VALUE19;
2720 break;
2721 case 8:
2722 addr = SHADOW_VALUE20;
2723 break;
Houston Hoffmane6330442016-02-26 12:19:11 -08002724 case 9:
2725 addr = SHADOW_VALUE21;
2726 break;
2727 case 10:
2728 addr = SHADOW_VALUE22;
2729 break;
Nirav Shah75cc5c82016-05-25 10:52:38 +05302730 case 11:
2731 addr = SHADOW_VALUE23;
2732 break;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002733 default:
Houston Hoffmane6330442016-02-26 12:19:11 -08002734 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302735 QDF_ASSERT(0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002736 }
2737
2738 return addr;
2739
2740}
2741#endif
2742
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002743#if defined(FEATURE_LRO)
Manjunathappa Prakash2146da32016-10-13 14:47:47 -07002744void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
2745{
2746 struct CE_state *ce_state;
2747 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
2748
Manjunathappa Prakash2146da32016-10-13 14:47:47 -07002749 ce_state = scn->ce_id_to_state[ctx_id];
2750
2751 return ce_state->lro_data;
2752}
2753
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002754/**
2755 * ce_lro_flush_cb_register() - register the LRO flush
2756 * callback
2757 * @scn: HIF context
2758 * @handler: callback function
2759 * @data: opaque data pointer to be passed back
2760 *
2761 * Store the LRO flush callback provided
2762 *
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002763 * Return: Number of instances the callback is registered for
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002764 */
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002765int ce_lro_flush_cb_register(struct hif_opaque_softc *hif_hdl,
Manjunathappa Prakash2146da32016-10-13 14:47:47 -07002766 void (handler)(void *),
2767 void *(lro_init_handler)(void))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002768{
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002769 int rc = 0;
Houston Hoffmanc7d54292016-04-13 18:55:37 -07002770 int i;
2771 struct CE_state *ce_state;
Komal Seelam5584a7c2016-02-24 19:22:48 +05302772 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
Manjunathappa Prakash2146da32016-10-13 14:47:47 -07002773 void *data = NULL;
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002774
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302775 QDF_ASSERT(scn != NULL);
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002776
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002777 if (scn != NULL) {
2778 for (i = 0; i < scn->ce_count; i++) {
2779 ce_state = scn->ce_id_to_state[i];
2780 if ((ce_state != NULL) && (ce_state->htt_rx_data)) {
Manjunathappa Prakash2146da32016-10-13 14:47:47 -07002781 data = lro_init_handler();
2782 if (data == NULL) {
2783 HIF_ERROR("%s: Failed to init LRO for CE %d",
2784 __func__, i);
2785 continue;
2786 }
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002787 ce_state->lro_flush_cb = handler;
2788 ce_state->lro_data = data;
2789 rc++;
2790 }
Houston Hoffmanc7d54292016-04-13 18:55:37 -07002791 }
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002792 } else {
2793 HIF_ERROR("%s: hif_state NULL!", __func__);
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002794 }
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002795 return rc;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002796}
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002797
2798/**
2799 * ce_lro_flush_cb_deregister() - deregister the LRO flush
2800 * callback
2801 * @scn: HIF context
2802 *
2803 * Remove the LRO flush callback
2804 *
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002805 * Return: Number of instances the callback is de-registered
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002806 */
Manjunathappa Prakash2146da32016-10-13 14:47:47 -07002807int ce_lro_flush_cb_deregister(struct hif_opaque_softc *hif_hdl,
2808 void (lro_deinit_cb)(void *))
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002809{
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002810 int rc = 0;
Houston Hoffmanc7d54292016-04-13 18:55:37 -07002811 int i;
2812 struct CE_state *ce_state;
Komal Seelam5584a7c2016-02-24 19:22:48 +05302813 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002814
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302815 QDF_ASSERT(scn != NULL);
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002816 if (scn != NULL) {
2817 for (i = 0; i < scn->ce_count; i++) {
2818 ce_state = scn->ce_id_to_state[i];
2819 if ((ce_state != NULL) && (ce_state->htt_rx_data)) {
Manjunathappa Prakash2146da32016-10-13 14:47:47 -07002820 qdf_spin_lock_bh(
2821 &ce_state->lro_unloading_lock);
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002822 ce_state->lro_flush_cb = NULL;
Manjunathappa Prakash2146da32016-10-13 14:47:47 -07002823 lro_deinit_cb(ce_state->lro_data);
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002824 ce_state->lro_data = NULL;
Manjunathappa Prakash2146da32016-10-13 14:47:47 -07002825 qdf_spin_unlock_bh(
2826 &ce_state->lro_unloading_lock);
2827 qdf_spinlock_destroy(
2828 &ce_state->lro_unloading_lock);
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002829 rc++;
2830 }
Houston Hoffmanc7d54292016-04-13 18:55:37 -07002831 }
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002832 } else {
2833 HIF_ERROR("%s: hif_state NULL!", __func__);
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002834 }
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002835 return rc;
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002836}
2837#endif
Sanjay Devnanic319c822015-11-06 16:44:28 -08002838
2839/**
2840 * hif_map_service_to_pipe() - returns the ce ids pertaining to
2841 * this service
Komal Seelam644263d2016-02-22 20:45:49 +05302842 * @scn: hif_softc pointer.
Sanjay Devnanic319c822015-11-06 16:44:28 -08002843 * @svc_id: Service ID for which the mapping is needed.
2844 * @ul_pipe: address of the container in which ul pipe is returned.
2845 * @dl_pipe: address of the container in which dl pipe is returned.
2846 * @ul_is_polled: address of the container in which a bool
2847 * indicating if the UL CE for this service
2848 * is polled is returned.
2849 * @dl_is_polled: address of the container in which a bool
2850 * indicating if the DL CE for this service
2851 * is polled is returned.
2852 *
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002853 * Return: Indicates whether the service has been found in the table.
2854 * Upon return, ul_is_polled is updated only if ul_pipe is updated.
2855 * There will be warning logs if either leg has not been updated
2856 * because it missed the entry in the table (but this is not an err).
Sanjay Devnanic319c822015-11-06 16:44:28 -08002857 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05302858int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
Sanjay Devnanic319c822015-11-06 16:44:28 -08002859 uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
2860 int *dl_is_polled)
2861{
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002862 int status = QDF_STATUS_E_INVAL;
Sanjay Devnanic319c822015-11-06 16:44:28 -08002863 unsigned int i;
2864 struct service_to_pipe element;
Sanjay Devnanic319c822015-11-06 16:44:28 -08002865 struct service_to_pipe *tgt_svc_map_to_use;
Houston Hoffman748e1a62017-03-30 17:20:42 -07002866 uint32_t sz_tgt_svc_map_to_use;
Komal Seelambd7c51d2016-02-24 10:27:30 +05302867 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
Houston Hoffman748e1a62017-03-30 17:20:42 -07002868 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002869 bool dl_updated = false;
2870 bool ul_updated = false;
Sanjay Devnanic319c822015-11-06 16:44:28 -08002871
Houston Hoffman748e1a62017-03-30 17:20:42 -07002872 hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
2873 &sz_tgt_svc_map_to_use);
Sanjay Devnanic319c822015-11-06 16:44:28 -08002874
2875 *dl_is_polled = 0; /* polling for received messages not supported */
2876
2877 for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
2878
2879 memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
2880 if (element.service_id == svc_id) {
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002881 if (element.pipedir == PIPEDIR_OUT) {
Sanjay Devnanic319c822015-11-06 16:44:28 -08002882 *ul_pipe = element.pipenum;
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002883 *ul_is_polled =
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302884 (hif_state->host_ce_config[*ul_pipe].flags &
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002885 CE_ATTR_DISABLE_INTR) != 0;
2886 ul_updated = true;
2887 } else if (element.pipedir == PIPEDIR_IN) {
Sanjay Devnanic319c822015-11-06 16:44:28 -08002888 *dl_pipe = element.pipenum;
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002889 dl_updated = true;
2890 }
2891 status = QDF_STATUS_SUCCESS;
Sanjay Devnanic319c822015-11-06 16:44:28 -08002892 }
2893 }
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002894 if (ul_updated == false)
Poddar, Siddarthf53a9b02017-03-14 20:30:17 +05302895 HIF_INFO("%s: ul pipe is NOT updated for service %d",
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002896 __func__, svc_id);
2897 if (dl_updated == false)
Poddar, Siddarthf53a9b02017-03-14 20:30:17 +05302898 HIF_INFO("%s: dl pipe is NOT updated for service %d",
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07002899 __func__, svc_id);
Sanjay Devnanic319c822015-11-06 16:44:28 -08002900
2901 return status;
2902}
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002903
2904#ifdef SHADOW_REG_DEBUG
Komal Seelam644263d2016-02-22 20:45:49 +05302905inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002906 uint32_t CE_ctrl_addr)
2907{
2908 uint32_t read_from_hw, srri_from_ddr = 0;
2909
2910 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
2911
2912 srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
2913
2914 if (read_from_hw != srri_from_ddr) {
Houston Hoffmanc50572b2016-06-08 19:49:46 -07002915 HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
2916 __func__, srri_from_ddr, read_from_hw,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002917 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302918 QDF_ASSERT(0);
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002919 }
2920 return srri_from_ddr;
2921}
2922
2923
Komal Seelam644263d2016-02-22 20:45:49 +05302924inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002925 uint32_t CE_ctrl_addr)
2926{
2927 uint32_t read_from_hw, drri_from_ddr = 0;
2928
2929 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
2930
2931 drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
2932
2933 if (read_from_hw != drri_from_ddr) {
Houston Hoffmanc50572b2016-06-08 19:49:46 -07002934 HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002935 drri_from_ddr, read_from_hw,
2936 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302937 QDF_ASSERT(0);
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002938 }
2939 return drri_from_ddr;
2940}
2941
2942#endif
2943
Houston Hoffman3d0cda82015-12-03 13:25:05 -08002944#ifdef ADRASTEA_RRI_ON_DDR
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002945/**
2946 * hif_get_src_ring_read_index(): Called to get the SRRI
2947 *
Komal Seelam644263d2016-02-22 20:45:49 +05302948 * @scn: hif_softc pointer
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002949 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2950 *
2951 * This function returns the SRRI to the caller. For CEs that
2952 * dont have interrupts enabled, we look at the DDR based SRRI
2953 *
2954 * Return: SRRI
2955 */
Komal Seelam644263d2016-02-22 20:45:49 +05302956inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002957 uint32_t CE_ctrl_addr)
2958{
2959 struct CE_attr attr;
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302960 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002961
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302962 attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002963 if (attr.flags & CE_ATTR_DISABLE_INTR)
2964 return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2965 else
2966 return A_TARGET_READ(scn,
2967 (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
2968}
2969
2970/**
2971 * hif_get_dst_ring_read_index(): Called to get the DRRI
2972 *
Komal Seelam644263d2016-02-22 20:45:49 +05302973 * @scn: hif_softc pointer
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002974 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2975 *
2976 * This function returns the DRRI to the caller. For CEs that
2977 * dont have interrupts enabled, we look at the DDR based DRRI
2978 *
2979 * Return: DRRI
2980 */
Komal Seelam644263d2016-02-22 20:45:49 +05302981inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002982 uint32_t CE_ctrl_addr)
2983{
2984 struct CE_attr attr;
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302985 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002986
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302987 attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002988
2989 if (attr.flags & CE_ATTR_DISABLE_INTR)
2990 return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2991 else
2992 return A_TARGET_READ(scn,
2993 (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
2994}
2995
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002996/**
2997 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2998 *
Komal Seelam644263d2016-02-22 20:45:49 +05302999 * @scn: hif_softc pointer
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003000 *
3001 * This function allocates non cached memory on ddr and sends
3002 * the physical address of this memory to the CE hardware. The
3003 * hardware updates the RRI on this particular location.
3004 *
3005 * Return: None
3006 */
Komal Seelam644263d2016-02-22 20:45:49 +05303007static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003008{
3009 unsigned int i;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303010 qdf_dma_addr_t paddr_rri_on_ddr;
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003011 uint32_t high_paddr, low_paddr;
3012 scn->vaddr_rri_on_ddr =
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303013 (uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
3014 scn->qdf_dev->dev, (CE_COUNT*sizeof(uint32_t)),
3015 &paddr_rri_on_ddr);
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003016
3017 low_paddr = BITS0_TO_31(paddr_rri_on_ddr);
3018 high_paddr = BITS32_TO_35(paddr_rri_on_ddr);
3019
Houston Hoffmanc50572b2016-06-08 19:49:46 -07003020 HIF_INFO("%s using srri and drri from DDR", __func__);
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003021
3022 WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
3023 WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
3024
3025 for (i = 0; i < CE_COUNT; i++)
3026 CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
3027
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303028 qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT*sizeof(uint32_t));
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003029
3030 return;
3031}
3032#else
3033
/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * No-op stub for platforms built without ADRASTEA_RRI_ON_DDR support.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
}
3048#endif
Govind Singh2443fb32016-01-13 17:44:48 +05303049
/**
 * hif_dump_ce_registers() - dump ce registers
 * @scn: hif_softc pointer (the opaque diag handle is derived from it).
 *
 * For every copy engine that is in use, reads CE_USEFUL_SIZE bytes of
 * its register space via the diagnostic read interface, hex-dumps the
 * raw words, and prints the source/destination write indices and the
 * current read indices.
 *
 * Return: 0 for success or error code
 */
int hif_dump_ce_registers(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
	uint16_t i;
	QDF_STATUS status;

	/* each CE's register block sits CE_OFFSET bytes after the previous */
	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
		if (scn->ce_id_to_state[i] == NULL) {
			HIF_DBG("CE%d not used.", i);
			continue;
		}

		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
					   (uint8_t *) &ce_reg_values[0],
					   ce_reg_word_size * sizeof(uint32_t));

		if (status != QDF_STATUS_SUCCESS) {
			/* abort the whole dump on the first failed read */
			HIF_ERROR("Dumping CE register failed!");
			return -EACCES;
		}
		HIF_ERROR("CE%d=>\n", i);
		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
				   (uint8_t *) &ce_reg_values[0],
				   ce_reg_word_size * sizeof(uint32_t));
		/* offsets are byte offsets; the value array is word-indexed,
		 * hence the /4 when picking individual registers out of it
		 */
		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d\n", (ce_reg_address
			  + SR_WR_INDEX_ADDRESS),
			  ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d\n", (ce_reg_address
			  + CURRENT_SRRI_ADDRESS),
			  ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d\n", (ce_reg_address
			  + DST_WR_INDEX_ADDRESS),
			  ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d\n", (ce_reg_address
			  + CURRENT_DRRI_ADDRESS),
			  ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
		qdf_print("---\n");
	}
	return 0;
}
Houston Hoffman85925072016-05-06 17:02:18 -07003101#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
3102struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
3103 struct hif_pipe_addl_info *hif_info, uint32_t pipe)
3104{
3105 struct hif_softc *scn = HIF_GET_SOFTC(osc);
3106 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3107 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
3108 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
3109 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3110 struct CE_state *ce_state = (struct CE_state *)ce_hdl;
3111 struct CE_ring_state *src_ring = ce_state->src_ring;
3112 struct CE_ring_state *dest_ring = ce_state->dest_ring;
3113
3114 if (src_ring) {
3115 hif_info->ul_pipe.nentries = src_ring->nentries;
3116 hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
3117 hif_info->ul_pipe.sw_index = src_ring->sw_index;
3118 hif_info->ul_pipe.write_index = src_ring->write_index;
3119 hif_info->ul_pipe.hw_index = src_ring->hw_index;
3120 hif_info->ul_pipe.base_addr_CE_space =
3121 src_ring->base_addr_CE_space;
3122 hif_info->ul_pipe.base_addr_owner_space =
3123 src_ring->base_addr_owner_space;
3124 }
3125
3126
3127 if (dest_ring) {
3128 hif_info->dl_pipe.nentries = dest_ring->nentries;
3129 hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
3130 hif_info->dl_pipe.sw_index = dest_ring->sw_index;
3131 hif_info->dl_pipe.write_index = dest_ring->write_index;
3132 hif_info->dl_pipe.hw_index = dest_ring->hw_index;
3133 hif_info->dl_pipe.base_addr_CE_space =
3134 dest_ring->base_addr_CE_space;
3135 hif_info->dl_pipe.base_addr_owner_space =
3136 dest_ring->base_addr_owner_space;
3137 }
3138
3139 hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
3140 hif_info->ctrl_addr = ce_state->ctrl_addr;
3141
3142 return hif_info;
3143}
3144
3145uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
3146{
3147 struct hif_softc *scn = HIF_GET_SOFTC(osc);
3148
3149 scn->nss_wifi_ol_mode = mode;
3150 return 0;
3151}
3152
3153#endif
3154
Venkateswara Swamy Bandaru5432c1b2016-10-12 19:00:40 +05303155void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
3156{
3157 struct hif_softc *scn = HIF_GET_SOFTC(osc);
3158 scn->hif_attribute = hif_attrib;
3159}
3160
Houston Hoffman85925072016-05-06 17:02:18 -07003161void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
3162{
3163 struct hif_softc *scn = HIF_GET_SOFTC(osc);
3164 struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
3165 uint32_t ctrl_addr = CE_state->ctrl_addr;
3166
3167 Q_TARGET_ACCESS_BEGIN(scn);
3168 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
3169 Q_TARGET_ACCESS_END(scn);
3170}
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05303171
3172/**
3173 * hif_fw_event_handler() - hif fw event handler
3174 * @hif_state: pointer to hif ce state structure
3175 *
3176 * Process fw events and raise HTC callback to process fw events.
3177 *
3178 * Return: none
3179 */
3180static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
3181{
3182 struct hif_msg_callbacks *msg_callbacks =
3183 &hif_state->msg_callbacks_current;
3184
3185 if (!msg_callbacks->fwEventHandler)
3186 return;
3187
3188 msg_callbacks->fwEventHandler(msg_callbacks->Context,
3189 QDF_STATUS_E_FAILURE);
3190}
3191
3192#ifndef QCA_WIFI_3_0
/**
 * hif_fw_interrupt_handler() - FW interrupt handler
 * @irq: irq number
 * @arg: the user pointer (the hif_softc of this device)
 *
 * Called from the PCI interrupt handler when a
 * firmware-generated interrupt to the Host.
 *
 * Reads the fw indicator register and, if an event is pending, ACKs it
 * and forwards it to the HTC layer via hif_fw_event_handler().
 *
 * Return: status of handled irq
 */
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	struct hif_softc *scn = arg;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	uint32_t fw_indicator_address, fw_indicator;

	/* wake the target; don't schedule the tasklet if that fails */
	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return ATH_ISR_NOSCHED;

	fw_indicator_address = hif_state->fw_indicator_address;
	/* For sudden unplug this will return ~0 */
	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);

	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
		/* ACK: clear Target-side pending event */
		A_TARGET_WRITE(scn, fw_indicator_address,
			       fw_indicator & ~FW_IND_EVENT_PENDING);
		/* release target access before running the event callback */
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;

		if (hif_state->started) {
			hif_fw_event_handler(hif_state);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 */
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
				("%s: Early firmware event indicated\n",
				 __func__));
		}
	} else {
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;
	}

	return ATH_ISR_SCHED;
}
3241#else
/**
 * hif_fw_interrupt_handler() - FW interrupt handler stub (QCA_WIFI_3_0)
 * @irq: irq number (unused)
 * @arg: the user pointer (unused)
 *
 * Stub used when QCA_WIFI_3_0 is defined; always reports ATH_ISR_SCHED.
 *
 * Return: ATH_ISR_SCHED
 */
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	return ATH_ISR_SCHED;
}
3246#endif /* #ifdef QCA_WIFI_3_0 */
3247
3248
3249/**
3250 * hif_wlan_disable(): call the platform driver to disable wlan
3251 * @scn: HIF Context
3252 *
3253 * This function passes the con_mode to platform driver to disable
3254 * wlan.
3255 *
3256 * Return: void
3257 */
3258void hif_wlan_disable(struct hif_softc *scn)
3259{
Yuanyuan Liufd594c22016-04-25 13:59:19 -07003260 enum pld_driver_mode mode;
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05303261 uint32_t con_mode = hif_get_conparam(scn);
3262
3263 if (QDF_GLOBAL_FTM_MODE == con_mode)
Yuanyuan Liufd594c22016-04-25 13:59:19 -07003264 mode = PLD_FTM;
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05303265 else if (QDF_IS_EPPING_ENABLED(con_mode))
Yuanyuan Liufd594c22016-04-25 13:59:19 -07003266 mode = PLD_EPPING;
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05303267 else
Yuanyuan Liufd594c22016-04-25 13:59:19 -07003268 mode = PLD_MISSION;
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05303269
Yuanyuan Liufd594c22016-04-25 13:59:19 -07003270 pld_wlan_disable(scn->qdf_dev->dev, mode);
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05303271}
Dustin Brown6bdbda52016-09-27 15:52:30 -07003272
Dustin Brown6834d322017-03-20 15:02:48 -07003273int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
3274{
3275 QDF_STATUS status;
3276 uint8_t ul_pipe, dl_pipe;
3277 int ul_is_polled, dl_is_polled;
3278
3279 /* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
3280 status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
3281 HTC_CTRL_RSVD_SVC,
3282 &ul_pipe, &dl_pipe,
3283 &ul_is_polled, &dl_is_polled);
3284 if (status) {
3285 HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
3286 return qdf_status_to_os_return(status);
3287 }
3288
3289 *ce_id = dl_pipe;
3290
3291 return 0;
3292}