/*
 * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */
#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include <target_type.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#ifndef CONFIG_WIN
#include "qwlan_version.h"
#endif
#include "qdf_module.h"

#define CE_POLL_TIMEOUT 10      /* ms */

#define AGC_DUMP         1
#define CHANINFO_DUMP    2
#define BB_WATCHDOG_DUMP 3
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
#define PCIE_ACCESS_DUMP 4
#endif
#include "mp_dev.h"

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290)) && \
	!defined(QCA_WIFI_SUPPORT_SRNG)
#define QCA_WIFI_SUPPORT_SRNG
#endif

/* Forward references */
QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix EV118783: poll to check whether a BMI response has arrived,
 * rather than waiting for an interrupt that may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC  1000

#ifdef CONFIG_BYPASS_QMI
#define BYPASS_QMI 1
#else
#define BYPASS_QMI 0
#endif

#ifdef CONFIG_WIN
#if ENABLE_10_4_FW_HDR
#define WDI_IPA_SERVICE_GROUP 5
#define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
#define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
#define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
#endif /* ENABLE_10_4_FW_HDR */
#endif

QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn);
static void hif_config_rri_on_ddr(struct hif_softc *scn);

/**
 * hif_target_access_log_dump() - dump access log
 *
 * Dump the target register access log.
 *
 * Return: n/a
 */
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
static void hif_target_access_log_dump(void)
{
	hif_target_dump_access_log();
}
#endif

void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
		      uint8_t cmd_id, bool start)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	switch (cmd_id) {
	case AGC_DUMP:
		if (start)
			priv_start_agc(scn);
		else
			priv_dump_agc(scn);
		break;
	case CHANINFO_DUMP:
		if (start)
			priv_start_cap_chaninfo(scn);
		else
			priv_dump_chaninfo(scn);
		break;
	case BB_WATCHDOG_DUMP:
		priv_dump_bbwatchdog(scn);
		break;
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	case PCIE_ACCESS_DUMP:
		hif_target_access_log_dump();
		break;
#endif
	default:
		HIF_ERROR("%s: Invalid htc dump command", __func__);
		break;
	}
}
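
/*
 * Illustrative usage sketch (editorial note, not compiled into the driver):
 * a debug caller would start and later dump an AGC capture like this,
 * assuming a valid hif_ctx obtained from the usual HIF attach path:
 *
 *	hif_trigger_dump(hif_ctx, AGC_DUMP, true);	start the capture
 *	...
 *	hif_trigger_dump(hif_ctx, AGC_DUMP, false);	dump the results
 *
 * BB_WATCHDOG_DUMP and PCIE_ACCESS_DUMP ignore the start flag and dump
 * immediately, as the switch above shows.
 */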

static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;

	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}

static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	QDF_ASSERT(0); /* n too large */
	return 0;
}
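
/*
 * Worked example (editorial note): roundup_pwr2() returns its argument
 * unchanged when it is already a power of 2 (the n & (n - 1) test),
 * otherwise the next power of 2 at or above 4:
 *
 *	roundup_pwr2(512)  == 512
 *	roundup_pwr2(1000) == 1024
 *	roundup_pwr2(3)    == 4
 *
 * Ring sizes passed in via CE_attr nentries are rounded this way in
 * ce_init() so that nentries_mask = nentries - 1 works as an index mask.
 */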

#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40

static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
#ifdef QCA_WIFI_3_0_ADRASTEA
	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
#endif
};

#ifdef WLAN_FEATURE_EPPING
static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
 * mapping of HTC services to HIF pipes.
 */
/*
 * This authoritative table defines Copy Engine configuration and the mapping
 * of services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
   ============================================================================
   Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
              |                      |      | ctio | Size     | Frequency
              |                      |      | n    |          |
   ============================================================================
   tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
   descriptor |                      |      |      | O(100B)  | and regular
   download   |                      |      |      |          |
   ----------------------------------------------------------------------------
   rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
   indication |                      |      |      | O(10B)   | regular
   upload     |                      |      |      |          |
   ----------------------------------------------------------------------------
   MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
   upload     |                      |      |      | O(1000B) | (frequent
   e.g. noise |                      |      |      |          | during IP1.0
   packets    |                      |      |      |          | testing)
   ----------------------------------------------------------------------------
   MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
   download   |                      |      |      | O(1000B) | (frequent
   e.g.       |                      |      |      |          | during IP1.0
   misdirecte |                      |      |      |          | testing)
   d EAPOL    |                      |      |      |          |
   packets    |                      |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
              | DATA_VO (uplink)     |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
              | DATA_VO (downlink)   |      |      |          |
   ----------------------------------------------------------------------------
   WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
              |                      |      |      | O(100B)  |
   ----------------------------------------------------------------------------
   WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
   messages   | (downlink)           |      |      | O(100B)  |
              |                      |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
              | HTC_RAW_STREAMS      |      |      |          |
              | (uplink)             |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
              | HTC_RAW_STREAMS      |      |      |          |
              | (downlink)           |      |      |          |
   ----------------------------------------------------------------------------
   diag       | none (raw CE)        | CE 7 | t<>h |    4     | Diag Window
              |                      |      |      |          | infrequent
   ============================================================================
 */

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		5,
	},
#if defined(QCA_WIFI_3_0_ADRASTEA)
	{
		HTT_DATA2_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		9,
	},
	{
		HTT_DATA3_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		10,
	},
	{
		PACKET_LOG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		11,
	},
#endif
	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};
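
/*
 * Illustrative lookup sketch (editorial note, not compiled): with the map
 * above, WMI_CONTROL_SVC resolves to uplink pipe 3 and downlink pipe 2.
 * A caller would normally go through hif_map_service_to_pipe(), as
 * hif_ce_bus_early_suspend() below does:
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_is_polled, dl_is_polled;
 *	int status = hif_map_service_to_pipe(hif_ctx, WMI_CONTROL_SVC,
 *					     &ul_pipe, &dl_pipe,
 *					     &ul_is_polled, &dl_is_polled);
 *
 * On success (status == 0), ul_pipe == 3 and dl_pipe == 2 for this map.
 */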

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN  = TARGET to HOST */
#if (defined(QCA_WIFI_QCA8074))
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6290))
#ifdef CONFIG_WIN
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
};
#endif

static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
#ifdef WLAN_FEATURE_FASTPATH
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		5,
	},
#else /* WLAN_FEATURE_FASTPATH */
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
#endif /* WLAN_FEATURE_FASTPATH */

	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

#ifdef WLAN_FEATURE_EPPING
static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
	{0, 0, 0,},                             /* Must be last */
};

void hif_select_epping_service_to_pipe_map(struct service_to_pipe
					   **tgt_svc_map_to_use,
					   uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
	*sz_tgt_svc_map_to_use =
			sizeof(target_service_to_ce_map_wlan_epping);
}
#endif

static void hif_select_service_to_pipe_map(struct hif_softc *scn,
				struct service_to_pipe **tgt_svc_map_to_use,
				uint32_t *sz_tgt_svc_map_to_use)
{
	uint32_t mode = hif_get_conparam(scn);
	struct hif_target_info *tgt_info = &scn->target_info;

	if (QDF_IS_EPPING_ENABLED(mode)) {
		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
						      sz_tgt_svc_map_to_use);
	} else {
		switch (tgt_info->target_type) {
		default:
			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_wlan);
			break;
		case TARGET_TYPE_AR900B:
		case TARGET_TYPE_QCA9984:
		case TARGET_TYPE_IPQ4019:
		case TARGET_TYPE_QCA9888:
		case TARGET_TYPE_AR9888:
		case TARGET_TYPE_AR9888V2:
			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_ar900b);
			break;
		case TARGET_TYPE_QCA6290:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6290);
			break;
		case TARGET_TYPE_QCA8074:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074);
			break;
		}
	}
}

/**
 * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
 * @ce_state : pointer to the state context of the CE
 *
 * Description:
 * Sets the htt_rx_data or htt_tx_data attribute of the state structure
 * if the CE serves one of the HTT DATA services.
 *
 * Return:
 * true if the CE serves an HTT DATA service, false otherwise
 */
static bool ce_mark_datapath(struct CE_state *ce_state)
{
	struct service_to_pipe *svc_map;
	uint32_t map_sz, map_len;
	int i;
	bool rc = false;

	if (ce_state != NULL) {
		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
					       &map_sz);

		map_len = map_sz / sizeof(struct service_to_pipe);
		for (i = 0; i < map_len; i++) {
			if ((svc_map[i].pipenum == ce_state->id) &&
			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
				/* HTT CEs are unidirectional */
				if (svc_map[i].pipedir == PIPEDIR_IN)
					ce_state->htt_rx_data = true;
				else
					ce_state->htt_tx_data = true;
				rc = true;
			}
		}
	}
	return rc;
}

/**
 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
 * @ce_id: ce in question
 * @ring: ring state being examined
 * @type: "src_ring" or "dest_ring" string for identifying the ring
 *
 * Warns on non-zero index values.
 * Causes a kernel panic if the ring is not empty during initialization.
 */
static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
					 char *type)
{
	if (ring->write_index != 0 || ring->sw_index != 0)
		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
			  ce_id, type, ring->sw_index, ring->write_index);
	if (ring->write_index != ring->sw_index)
		QDF_BUG(0);
}

#ifdef IPA_OFFLOAD
/**
 * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @base_addr: pointer to copyengine ring base address
 * @ce_ring: copyengine instance
 * @nentries: number of entries to be allocated
 * @desc_size: ce desc size
 *
 * Return: QDF_STATUS_SUCCESS - for success
 */
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	if (CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) {
		scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(scn->qdf_dev,
			nentries * desc_size + CE_DESC_RING_ALIGN);
		if (!scn->ipa_ce_ring) {
			HIF_ERROR("%s: Failed to allocate memory for IPA ce ring",
				  __func__);
			return QDF_STATUS_E_NOMEM;
		}
		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
						&scn->ipa_ce_ring->mem_info);
		ce_ring->base_addr_owner_space_unaligned =
						scn->ipa_ce_ring->vaddr;
	} else {
		ce_ring->base_addr_owner_space_unaligned =
			qdf_mem_alloc_consistent(scn->qdf_dev,
						 scn->qdf_dev->dev,
						 (nentries * desc_size +
						 CE_DESC_RING_ALIGN),
						 base_addr);
		if (!ce_ring->base_addr_owner_space_unaligned) {
			HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
				  __func__, CE_id);
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * ce_free_desc_ring() - Frees copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @ce_ring: copyengine instance
 * @desc_size: ce desc size
 *
 * Return: None
 */
static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	if (CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) {
		qdf_mem_shared_mem_free(scn->qdf_dev,
					scn->ipa_ce_ring);
		ce_ring->base_addr_owner_space_unaligned = NULL;
	} else {
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
			ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
			ce_ring->base_addr_owner_space_unaligned,
			ce_ring->base_addr_CE_space, 0);
		ce_ring->base_addr_owner_space_unaligned = NULL;
	}
}
#else
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	ce_ring->base_addr_owner_space_unaligned =
		qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					 (nentries * desc_size +
					 CE_DESC_RING_ALIGN), base_addr);
	if (!ce_ring->base_addr_owner_space_unaligned) {
		HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
			  __func__, CE_id);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
		ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
		ce_ring->base_addr_owner_space_unaligned,
		ce_ring->base_addr_CE_space, 0);
	ce_ring->base_addr_owner_space_unaligned = NULL;
}
#endif /* IPA_OFFLOAD */
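
/*
 * Illustrative pairing sketch (editorial note, not compiled): every ring
 * allocated with ce_alloc_desc_ring() is expected to be released with
 * ce_free_desc_ring() using the same CE id and descriptor size, e.g.:
 *
 *	qdf_dma_addr_t base_addr;
 *	uint32_t desc_size = ce_get_desc_size(scn, CE_RING_SRC);
 *
 *	if (ce_alloc_desc_ring(scn, CE_id, &base_addr, ce_ring,
 *			       nentries, desc_size) == QDF_STATUS_SUCCESS) {
 *		...
 *		ce_free_desc_ring(scn, CE_id, ce_ring, desc_size);
 *	}
 *
 * With IPA_OFFLOAD, the CE id decides whether the memory comes from the
 * shared-memory allocator or from ordinary consistent DMA memory; the
 * caller does not need to know which.
 */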

/**
 * ce_srng_based() - Does this target use srng
 * @scn: pointer to the hif context
 *
 * Description:
 * Returns true if the target is SRNG based.
 *
 * Return:
 * true if the target is SRNG based, false otherwise
 */
bool ce_srng_based(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);

	switch (tgt_info->target_type) {
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA6290:
		return true;
	default:
		return false;
	}
}
qdf_export_symbol(ce_srng_based);

#ifdef QCA_WIFI_SUPPORT_SRNG
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	if (ce_srng_based(scn))
		return ce_services_srng();

	return ce_services_legacy();
}
#else /* QCA_WIFI_SUPPORT_SRNG */
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	return ce_services_legacy();
}
#endif /* QCA_WIFI_SUPPORT_SRNG */

static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
		struct pld_shadow_reg_v2_cfg **shadow_config,
		int *num_shadow_registers_configured)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
			scn, shadow_config, num_shadow_registers_configured);
}

static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
					uint8_t ring_type)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_get_desc_size(ring_type);
}

static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
						 uint8_t ring_type,
						 uint32_t nentries)
{
	uint32_t ce_nbytes;
	char *ptr;
	qdf_dma_addr_t base_addr;
	struct CE_ring_state *ce_ring;
	uint32_t desc_size;
	struct hif_softc *scn = CE_state->scn;

	ce_nbytes = sizeof(struct CE_ring_state)
		+ (nentries * sizeof(void *));
	ptr = qdf_mem_malloc(ce_nbytes);
	if (!ptr)
		return NULL;

	ce_ring = (struct CE_ring_state *)ptr;
	ptr += sizeof(struct CE_ring_state);
	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	ce_ring->low_water_mark_nentries = 0;
	ce_ring->high_water_mark_nentries = nentries;
	ce_ring->per_transfer_context = (void **)ptr;

	desc_size = ce_get_desc_size(scn, ring_type);

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
			       ce_ring, nentries,
			       desc_size) !=
	    QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: ring has no DMA mem",
			  __func__);
		qdf_mem_free(ptr);
		return NULL;
	}
	ce_ring->base_addr_CE_space_unaligned = base_addr;

	/* Correctly initialize memory to 0 to prevent garbage
	 * data from crashing the system when downloading firmware
	 */
	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
		     nentries * desc_size +
		     CE_DESC_RING_ALIGN);

	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {

		ce_ring->base_addr_CE_space =
			(ce_ring->base_addr_CE_space_unaligned +
			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);

		ce_ring->base_addr_owner_space = (void *)
			(((size_t) ce_ring->base_addr_owner_space_unaligned +
			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
	} else {
		ce_ring->base_addr_CE_space =
				ce_ring->base_addr_CE_space_unaligned;
		ce_ring->base_addr_owner_space =
				ce_ring->base_addr_owner_space_unaligned;
	}

	return ce_ring;
}
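
/*
 * Worked example (editorial note): the alignment arithmetic above rounds the
 * unaligned DMA address up to the next CE_DESC_RING_ALIGN boundary. With a
 * hypothetical CE_DESC_RING_ALIGN of 8 and an unaligned address of 0x1006:
 *
 *	(0x1006 + 8 - 1) & ~(8 - 1)  ==  0x100d & ~0x7  ==  0x1008
 *
 * An extra CE_DESC_RING_ALIGN bytes are allocated in ce_alloc_desc_ring()
 * so this upward shift always stays inside the buffer.
 */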

static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
			 uint32_t ce_id, struct CE_ring_state *ring,
			 struct CE_attr *attr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
						     ring, attr);
}

int hif_ce_bus_early_suspend(struct hif_softc *scn)
{
	uint8_t ul_pipe, dl_pipe;
	int ce_id, status, ul_is_polled, dl_is_polled;
	struct CE_state *ce_state;

	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		HIF_ERROR("%s: pipe_mapping failure", __func__);
		return status;
	}

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (ce_id == ul_pipe)
			continue;
		if (ce_id == dl_pipe)
			continue;

		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_RUNNING)
			ce_state->state = CE_PAUSED;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
	}

	return status;
}

int hif_ce_bus_late_resume(struct hif_softc *scn)
{
	int ce_id;
	struct CE_state *ce_state;
	int write_index;
	bool index_updated;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_PENDING) {
			write_index = ce_state->src_ring->write_index;
			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
						  write_index);
			ce_state->state = CE_RUNNING;
			index_updated = true;
		} else {
			index_updated = false;
		}

		if (ce_state->state == CE_PAUSED)
			ce_state->state = CE_RUNNING;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);

		if (index_updated)
			hif_record_ce_desc_event(scn, ce_id,
						 RESUME_WRITE_INDEX_UPDATE,
						 NULL, NULL, write_index, 0);
	}

	return 0;
}

/**
 * ce_oom_recovery() - try to recover rx ce from oom condition
 * @context: CE_state of the CE with oom rx ring
 *
 * The executing work will continue to be rescheduled until
 * at least 1 descriptor is successfully posted to the rx ring.
 *
 * Return: none
 */
static void ce_oom_recovery(void *context)
{
	struct CE_state *ce_state = context;
	struct hif_softc *scn = ce_state->scn;
	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&ce_softc->pipe_info[ce_state->id];

	hif_post_recv_buffers_for_pipe(pipe_info);
}

#if HIF_CE_DEBUG_DATA_BUF
/**
 * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed by
 * the CE descriptors.
 * Allocate HIF_CE_HISTORY_MAX records of CE_DEBUG_MAX_DATA_BUF_SIZE each.
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev = (struct hif_ce_desc_event *)
			scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return QDF_STATUS_E_NOMEM;

	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		event->data =
			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
		if (event->data == NULL)
			return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * free_mem_ce_debug_hist_data() - Free mem of the data pointed by
 * the CE descriptors.
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: None
 */
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev = (struct hif_ce_desc_event *)
			scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return;

	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		if (event->data != NULL)
			qdf_mem_free(event->data);
		event->data = NULL;
		event = NULL;
	}
}
#endif /* HIF_CE_DEBUG_DATA_BUF */
1084
1085/*
1086 * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
1087 * for defined here
1088 */
1089#if HIF_CE_DEBUG_DATA_BUF
1090/**
1091 * alloc_mem_ce_debug_history() - Allocate mem for the CE descriptors storing
1092 * @scn: hif scn handle
1093 * ce_id: Copy Engine Id
1094 *
1095 * Return: QDF_STATUS
1096 */
1097static inline QDF_STATUS alloc_mem_ce_debug_history(struct hif_softc *scn,
1098 unsigned int CE_id)
1099{
1100 scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
1101 qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));
1102
1103 if (scn->hif_ce_desc_hist.hist_ev[CE_id] == NULL) {
1104 scn->hif_ce_desc_hist.enable[CE_id] = 0;
1105 return QDF_STATUS_E_NOMEM;
1106 } else {
1107 scn->hif_ce_desc_hist.enable[CE_id] = 1;
1108 return QDF_STATUS_SUCCESS;
1109 }
1110}
1111
1112/**
1113 * free_mem_ce_debug_history() - Free mem allocated for the CE descriptors
1114 * storing.
1115 * @scn: hif scn handle
1116 * ce_id: Copy Engine Id
1117 *
1118 * Return:
1119 */
1120static inline void free_mem_ce_debug_history(struct hif_softc *scn,
1121 unsigned int CE_id)
1122{
1123 struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1124 struct hif_ce_desc_event *hist_ev =
1125 (struct hif_ce_desc_event *)ce_hist->hist_ev[CE_id];
1126
1127 if (!hist_ev)
1128 return;
1129
1130#if HIF_CE_DEBUG_DATA_BUF
1131 if (ce_hist->data_enable[CE_id] == 1) {
1132 ce_hist->data_enable[CE_id] = 0;
1133 free_mem_ce_debug_hist_data(scn, CE_id);
1134 }
1135#endif
1136 ce_hist->enable[CE_id] = 0;
1137 qdf_mem_free(ce_hist->hist_ev[CE_id]);
1138 ce_hist->hist_ev[CE_id] = NULL;
1139}
1140
1141/**
1142 * reset_ce_debug_history() - reset the index and ce id used for dumping the
1143 * CE records on the console using sysfs.
1144 * @scn: hif scn handle
1145 *
1146 * Return:
1147 */
1148static inline void reset_ce_debug_history(struct hif_softc *scn)
1149{
1150 struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1151 /* Initialise the CE debug history sysfs interface inputs ce_id and
1152 * index. Disable data storing
1153 */
1154 ce_hist->hist_index = 0;
1155 ce_hist->hist_id = 0;
1156}
1157#else /*Note: #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
1158static inline QDF_STATUS alloc_mem_ce_debug_history(struct hif_softc *scn,
1159 unsigned int CE_id)
1160{
1161 return QDF_STATUS_SUCCESS;
1162}
1163
1164static inline void free_mem_ce_debug_history(struct hif_softc *scn,
1165 unsigned int CE_id)
1166{
1167}
1168
1169static inline void reset_ce_debug_history(struct hif_softc *scn)
1170{
1171}
1172#endif /*Note: defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 *
 * This should be called during the initialization sequence before
 * interrupts are enabled, so we don't have to worry about thread safety.
 */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr)
{
	struct CE_state *CE_state;
	uint32_t ctrl_addr;
	unsigned int nentries;
	bool malloc_CE_state = false;
	bool malloc_src_ring = false;
	int status;

	QDF_ASSERT(CE_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(CE_id);
	CE_state = scn->ce_id_to_state[CE_id];

	if (!CE_state) {
		CE_state =
		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
		if (!CE_state) {
			HIF_ERROR("%s: CE_state has no mem", __func__);
			return NULL;
		}
		malloc_CE_state = true;
		qdf_spinlock_create(&CE_state->ce_index_lock);

		CE_state->id = CE_id;
		CE_state->ctrl_addr = ctrl_addr;
		CE_state->state = CE_RUNNING;
		CE_state->attr_flags = attr->flags;
	}
	CE_state->scn = scn;

	qdf_atomic_init(&CE_state->rx_pending);
	if (attr == NULL) {
		/* Already initialized; caller wants the handle */
		return (struct CE_handle *)CE_state;
	}

	if (CE_state->src_sz_max)
		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
	else
		CE_state->src_sz_max = attr->src_sz_max;

	ce_init_ce_desc_event_log(scn, CE_id,
				  attr->src_nentries + attr->dest_nentries);

	/* source ring setup */
	nentries = attr->src_nentries;
	if (nentries) {
		struct CE_ring_state *src_ring;

		nentries = roundup_pwr2(nentries);
		if (CE_state->src_ring) {
			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
		} else {
			src_ring = CE_state->src_ring =
				ce_alloc_ring_state(CE_state,
						    CE_RING_SRC,
						    nentries);
			if (!src_ring) {
				/* cannot allocate src ring. If the
				 * CE_state is allocated locally free
				 * CE_State and return error.
				 */
				HIF_ERROR("%s: src ring has no mem", __func__);
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					qdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			}
			/* we can allocate src ring. Mark that the src ring is
			 * allocated locally
			 */
			malloc_src_ring = true;

			/*
			 * Also allocate a shadow src ring in
			 * regular mem to use for faster access.
			 */
			src_ring->shadow_base_unaligned =
				qdf_mem_malloc(nentries *
					       sizeof(struct CE_src_desc) +
					       CE_DESC_RING_ALIGN);
			if (src_ring->shadow_base_unaligned == NULL) {
				HIF_ERROR("%s: src ring no shadow_base mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->shadow_base = (struct CE_src_desc *)
				(((size_t) src_ring->shadow_base_unaligned +
				CE_DESC_RING_ALIGN - 1) &
				 ~(CE_DESC_RING_ALIGN - 1));

			status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
					       src_ring, attr);
			if (status < 0)
				goto error_target_access;

			ce_ring_test_initial_indexes(CE_id, src_ring,
						     "src_ring");
		}
	}

	/* destination ring setup */
	nentries = attr->dest_nentries;
	if (nentries) {
		struct CE_ring_state *dest_ring;

		nentries = roundup_pwr2(nentries);
		if (CE_state->dest_ring) {
			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
		} else {
			dest_ring = CE_state->dest_ring =
				ce_alloc_ring_state(CE_state,
						    CE_RING_DEST,
						    nentries);
			if (!dest_ring) {
				/* cannot allocate dst ring. If the CE_state
				 * or src ring is allocated locally free
				 * CE_State and src ring and return error.
				 */
				HIF_ERROR("%s: dest ring has no mem",
					  __func__);
				goto error_no_dma_mem;
			}

			status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
					       dest_ring, attr);
			if (status < 0)
				goto error_target_access;

			ce_ring_test_initial_indexes(CE_id, dest_ring,
						     "dest_ring");

			/* For srng based target, init status ring here */
			if (ce_srng_based(CE_state->scn)) {
				CE_state->status_ring =
					ce_alloc_ring_state(CE_state,
							    CE_RING_STATUS,
							    nentries);
				if (CE_state->status_ring == NULL) {
					/* Allocation failed. Cleanup */
					qdf_mem_free(CE_state->dest_ring);
					if (malloc_src_ring) {
						qdf_mem_free
							(CE_state->src_ring);
						CE_state->src_ring = NULL;
						malloc_src_ring = false;
					}
					if (malloc_CE_state) {
						/* allocated CE_state locally */
						scn->ce_id_to_state[CE_id] =
							NULL;
						qdf_mem_free(CE_state);
						malloc_CE_state = false;
					}

					return NULL;
				}

				status = ce_ring_setup(scn, CE_RING_STATUS,
						CE_id, CE_state->status_ring,
						attr);
				if (status < 0)
					goto error_target_access;
			}

			/* epping */
			/* poll timer */
			if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL) ||
			    scn->polled_mode_on) {
				qdf_timer_init(scn->qdf_dev,
					       &CE_state->poll_timer,
					       ce_poll_timeout,
					       CE_state,
					       QDF_TIMER_TYPE_SW);
				CE_state->timer_inited = true;
				qdf_timer_mod(&CE_state->poll_timer,
					      CE_POLL_TIMEOUT);
			}
		}
	}

	if (!ce_srng_based(scn)) {
		/* Enable CE error interrupts */
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			goto error_target_access;
		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			goto error_target_access;
	}

	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
			ce_oom_recovery, CE_state);

	/* update the htt_data attribute */
	ce_mark_datapath(CE_state);
	scn->ce_id_to_state[CE_id] = CE_state;

	alloc_mem_ce_debug_history(scn, CE_id);

	return (struct CE_handle *)CE_state;

error_target_access:
error_no_dma_mem:
	ce_fini((struct CE_handle *)CE_state);
	return NULL;
}
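
/*
 * Illustrative usage sketch (editorial note, not compiled): a bus layer
 * typically creates each CE from its per-target attribute table and later
 * tears it down with ce_fini():
 *
 *	struct CE_attr *attr = &hif_state->host_ce_config[CE_id];
 *	struct CE_handle *ce_hdl = ce_init(scn, CE_id, attr);
 *
 *	if (!ce_hdl)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	ce_fini(ce_hdl);
 *
 * host_ce_config here refers to the host-side configuration table named in
 * the CE_PCI TABLE comment above; the exact field name on hif_state is an
 * assumption for illustration.
 */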

#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_enable_fastpath() - update that we have enabled fastpath mode
 * @hif_ctx: HIF context
 *
 * For use in data path
 *
 * Return: void
 */
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (ce_srng_based(scn)) {
		HIF_INFO("%s, srng rings do not support fastpath", __func__);
		return;
	}
	HIF_DBG("%s, Enabling fastpath mode", __func__);
	scn->fastpath_mode_on = true;
}

void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	HIF_DBG("%s, Enabling polled mode", __func__);
	scn->polled_mode_on = true;
}

/**
 * hif_is_fastpath_mode_enabled() - API to query if fastpath mode is enabled
 * @hif_ctx: HIF Context
 *
 * For use in data path to skip HTC
 *
 * Return: bool
 */
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->fastpath_mode_on;
}

bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->polled_mode_on;
}

/**
 * hif_get_ce_handle() - API to get CE handle for FastPath mode
 * @hif_ctx: HIF Context
 * @id: CopyEngine Id
 *
 * API to return CE handle for fastpath mode
 *
 * Return: void *
 */
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->ce_id_to_state[id];
}

/**
 * ce_h2t_tx_ce_cleanup() - place holder function for H2T CE cleanup
 * @ce_hdl: Copy engine handle
 *
 * No processing is required inside this function.
 * Using an assert, this function makes sure that
 * the TX CE has been processed completely.
 *
 * This is called while dismantling CE structures. No other thread
 * should be using these structures while dismantling is occurring,
 * therefore no locking is needed.
 *
 * Return: none
 */
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct hif_softc *sc = ce_state->scn;
	uint32_t sw_index, write_index;

	if (hif_is_nss_wifi_enabled(sc))
		return;

	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
		HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
			__func__, __LINE__);
		sw_index = src_ring->sw_index;
		write_index = src_ring->sw_index;

		/* At this point Tx CE should be clean */
		qdf_assert_always(sw_index == write_index);
	}
}

/**
 * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
 * @ce_hdl: Handle to CE
 *
 * These buffers are never allocated on the fly, but are allocated only
 * once during HIF start and freed only once during HIF stop.
 * NOTE:
 * The assumption here is that there is no in-flight DMA in progress
 * currently, so that buffers can be freed up safely.
 *
 * Return: NONE
 */
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
        struct CE_state *ce_state = (struct CE_state *)ce_hdl;
        struct CE_ring_state *dst_ring = ce_state->dest_ring;
        qdf_nbuf_t nbuf;
        int i;

        if (ce_state->scn->fastpath_mode_on == false)
                return;

        if (!ce_state->htt_rx_data)
                return;

        /*
         * When fastpath mode is on, datapath CEs are kept completely
         * full: unlike other CEs, no blank space is left to distinguish
         * between an empty queue and a full queue. So free all the
         * entries.
         */
        for (i = 0; i < dst_ring->nentries; i++) {
                nbuf = dst_ring->per_transfer_context[i];

                /*
                 * The reasons for doing this check are:
                 * 1) Protect against calling cleanup before allocating buffers
                 * 2) In a corner case, fastpath_mode_on may be set, but we
                 *    could have a partially filled ring, because of a memory
                 *    allocation failure in the middle of allocating the ring.
                 *    This check accounts for that case; checking the
                 *    fastpath_mode_on flag or the started flag would not have
                 *    covered it. This is not in the performance path,
                 *    so it is OK to do this here.
                 */
                if (nbuf) {
                        qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
                                              QDF_DMA_FROM_DEVICE);
                        qdf_nbuf_free(nbuf);
                }
        }
}
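
/*
 * Illustrative sketch (not compiled): an ordinary CE ring keeps one
 * slot empty so that, assuming a power-of-two nentries,
 *
 *      empty = (write_index == sw_index);
 *      full  = (((write_index + 1) & (nentries - 1)) == sw_index);
 *
 * can be told apart, capping occupancy at nentries - 1. Fastpath RX
 * rings drop that convention and keep all nentries slots filled, which
 * is why the loop above frees every per_transfer_context entry.
 */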

/**
 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
 * @scn: HIF handle
 *
 * Datapath Rx CEs are a special case, where we reuse all the message buffers.
 * Hence we have to post all the entries in the pipe even at the beginning,
 * unlike other CE pipes where one less than dest_nentries are filled in
 * the beginning.
 *
 * Return: None
 */
static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
        int pipe_num;
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

        if (scn->fastpath_mode_on == false)
                return;

        for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
                struct HIF_CE_pipe_info *pipe_info =
                        &hif_state->pipe_info[pipe_num];
                struct CE_state *ce_state =
                        scn->ce_id_to_state[pipe_info->pipe_num];

                if (ce_state->htt_rx_data)
                        atomic_inc(&pipe_info->recv_bufs_needed);
        }
}
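
/*
 * Worked example, assuming recv_bufs_needed starts at dest_nentries - 1
 * (the usual initial fill) for a pipe with dest_nentries = 512:
 *
 *      normal pipe:   511 buffers posted initially
 *      fastpath pipe: 511 + 1 = 512, after the increment above
 *
 * so the fastpath RX ring starts out completely full.
 */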
#else
static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
}

static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
        return false;
}

static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
{
        return false;
}
#endif /* WLAN_FEATURE_FASTPATH */

void ce_fini(struct CE_handle *copyeng)
{
        struct CE_state *CE_state = (struct CE_state *)copyeng;
        unsigned int CE_id = CE_state->id;
        struct hif_softc *scn = CE_state->scn;
        uint32_t desc_size;
        bool inited = CE_state->timer_inited;

        CE_state->state = CE_UNUSED;
        scn->ce_id_to_state[CE_id] = NULL;
        /* Set the flag to false first to stop processing in ce_poll_timeout */
        CE_state->timer_inited = false;
        qdf_lro_deinit(CE_state->lro_data);

        if (CE_state->src_ring) {
                /* Cleanup the datapath Tx ring */
                ce_h2t_tx_ce_cleanup(copyeng);

                desc_size = ce_get_desc_size(scn, CE_RING_SRC);
                if (CE_state->src_ring->shadow_base_unaligned)
                        qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
                if (CE_state->src_ring->base_addr_owner_space_unaligned)
                        ce_free_desc_ring(scn, CE_state->id,
                                          CE_state->src_ring,
                                          desc_size);
                qdf_mem_free(CE_state->src_ring);
        }
        if (CE_state->dest_ring) {
                /* Cleanup the datapath Rx ring */
                ce_t2h_msg_ce_cleanup(copyeng);

                desc_size = ce_get_desc_size(scn, CE_RING_DEST);
                if (CE_state->dest_ring->base_addr_owner_space_unaligned)
                        ce_free_desc_ring(scn, CE_state->id,
                                          CE_state->dest_ring,
                                          desc_size);
                qdf_mem_free(CE_state->dest_ring);

                /* epping */
                if (inited)
                        qdf_timer_free(&CE_state->poll_timer);
        }
        if (ce_srng_based(CE_state->scn) && CE_state->status_ring) {
                /* Cleanup the datapath Tx ring */
                ce_h2t_tx_ce_cleanup(copyeng);

                if (CE_state->status_ring->shadow_base_unaligned)
                        qdf_mem_free(
                                CE_state->status_ring->shadow_base_unaligned);

                desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
                if (CE_state->status_ring->base_addr_owner_space_unaligned)
                        ce_free_desc_ring(scn, CE_state->id,
                                          CE_state->status_ring,
                                          desc_size);
                qdf_mem_free(CE_state->status_ring);
        }

        free_mem_ce_debug_history(scn, CE_id);
        reset_ce_debug_history(scn);
        ce_deinit_ce_desc_event_log(scn, CE_id);

        qdf_spinlock_destroy(&CE_state->ce_index_lock);
        qdf_mem_free(CE_state);
}

void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
{
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

        qdf_mem_zero(&hif_state->msg_callbacks_pending,
                     sizeof(hif_state->msg_callbacks_pending));
        qdf_mem_zero(&hif_state->msg_callbacks_current,
                     sizeof(hif_state->msg_callbacks_current));
}

/* Send the first nbytes bytes of the buffer */
QDF_STATUS
hif_send_head(struct hif_opaque_softc *hif_ctx,
              uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
              qdf_nbuf_t nbuf, unsigned int data_attr)
{
        struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
        struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
        struct CE_handle *ce_hdl = pipe_info->ce_hdl;
        int bytes = nbytes, nfrags = 0;
        struct ce_sendlist sendlist;
        int status, i = 0;
        unsigned int mux_id = 0;

        QDF_ASSERT(nbytes <= qdf_nbuf_len(nbuf));

        transfer_id =
                (mux_id & MUX_ID_MASK) |
                (transfer_id & TRANSACTION_ID_MASK);
        data_attr &= DESC_DATA_FLAG_MASK;
        /*
         * The common case involves sending multiple fragments within a
         * single download (the tx descriptor and the tx frame header).
         * So, optimize for the case of multiple fragments by not even
         * checking whether it's necessary to use a sendlist.
         * The overhead of using a sendlist for a single buffer download
         * is not a big deal, since it happens rarely (for WMI messages).
         */
        ce_sendlist_init(&sendlist);
        do {
                qdf_dma_addr_t frag_paddr;
                int frag_bytes;

                frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
                frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
                /*
                 * Clear the packet offset for all but the first CE desc.
                 */
                if (i++ > 0)
                        data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;

                status = ce_sendlist_buf_add(&sendlist, frag_paddr,
                                             frag_bytes >
                                             bytes ? bytes : frag_bytes,
                                             qdf_nbuf_get_frag_is_wordstream
                                             (nbuf,
                                             nfrags) ? 0 :
                                             CE_SEND_FLAG_SWAP_DISABLE,
                                             data_attr);
                if (status != QDF_STATUS_SUCCESS) {
                        HIF_ERROR("%s: error, frag_num %d larger than limit",
                                  __func__, nfrags);
                        return status;
                }
                bytes -= frag_bytes;
                nfrags++;
        } while (bytes > 0);

        /* Make sure we have resources to handle this request */
        qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
        if (pipe_info->num_sends_allowed < nfrags) {
                qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
                ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
                return QDF_STATUS_E_RESOURCES;
        }
        pipe_info->num_sends_allowed -= nfrags;
        qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);

        if (qdf_unlikely(ce_hdl == NULL)) {
                HIF_ERROR("%s: error CE handle is null", __func__);
                return A_ERROR;
        }

        QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
        DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
                             QDF_TRACE_DEFAULT_PDEV_ID,
                             qdf_nbuf_data_addr(nbuf),
                             sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
        status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
        QDF_ASSERT(status == QDF_STATUS_SUCCESS);

        return status;
}
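
/*
 * Illustrative sketch (not compiled) of a hypothetical caller; the
 * names below are placeholders, not the actual HTC code. The nbuf must
 * already be DMA-mapped, since hif_send_head() reads fragment physical
 * addresses from it:
 *
 *      if (qdf_nbuf_map_single(scn->qdf_dev, nbuf, QDF_DMA_TO_DEVICE) !=
 *          QDF_STATUS_SUCCESS)
 *              return QDF_STATUS_E_FAILURE;
 *      status = hif_send_head(hif_ctx, pipe, transfer_id,
 *                             qdf_nbuf_len(nbuf), nbuf, 0);
 *      if (status != QDF_STATUS_SUCCESS)
 *              qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
 *                                    QDF_DMA_TO_DEVICE);
 */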

void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
                             int force)
{
        struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

        if (!force) {
                int resources;
                /*
                 * Decide whether to actually poll for completions, or just
                 * wait for a later chance. If there seem to be plenty of
                 * resources left, then just wait, since checking involves
                 * reading a CE register, which is a relatively expensive
                 * operation.
                 */
                resources = hif_get_free_queue_number(hif_ctx, pipe);
                /*
                 * If at least 50% of the total resources are still available,
                 * don't bother checking again yet.
                 */
                if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
                                 1))
                        return;
        }
#if ATH_11AC_TXCOMPACT
        ce_per_engine_servicereap(scn, pipe);
#else
        ce_per_engine_service(scn, pipe);
#endif
}
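
/*
 * Worked example for the 50% threshold above: with src_nentries = 32,
 * the shift yields 16, so the CE register read and completion reaping
 * are skipped while more than 16 send slots remain free.
 */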

uint16_t
hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
{
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
        struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
        uint16_t rv;

        qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
        rv = pipe_info->num_sends_allowed;
        qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
        return rv;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void
hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
                     void *transfer_context, qdf_dma_addr_t CE_data,
                     unsigned int nbytes, unsigned int transfer_id,
                     unsigned int sw_index, unsigned int hw_index,
                     unsigned int toeplitz_hash_result)
{
        struct HIF_CE_pipe_info *pipe_info =
                (struct HIF_CE_pipe_info *)ce_context;
        struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
        struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
        unsigned int sw_idx = sw_index, hw_idx = hw_index;
        struct hif_msg_callbacks *msg_callbacks =
                &pipe_info->pipe_callbacks;

        do {
                /*
                 * The upper layer callback will be triggered
                 * when the last fragment is completed.
                 */
                if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
                        if (scn->target_status == TARGET_STATUS_RESET) {
                                qdf_nbuf_unmap_single(scn->qdf_dev,
                                                      transfer_context,
                                                      QDF_DMA_TO_DEVICE);
                                qdf_nbuf_free(transfer_context);
                        } else {
                                msg_callbacks->txCompletionHandler(
                                        msg_callbacks->Context,
                                        transfer_context, transfer_id,
                                        toeplitz_hash_result);
                        }
                }

                qdf_spin_lock(&pipe_info->completion_freeq_lock);
                pipe_info->num_sends_allowed++;
                qdf_spin_unlock(&pipe_info->completion_freeq_lock);
        } while (ce_completed_send_next(copyeng,
                        &ce_context, &transfer_context,
                        &CE_data, &nbytes, &transfer_id,
                        &sw_idx, &hw_idx,
                        &toeplitz_hash_result) == QDF_STATUS_SUCCESS);
}

/**
 * hif_ce_do_recv(): send message from copy engine to upper layers
 * @msg_callbacks: structure containing callback and callback context
 * @netbuf: skb containing message
 * @nbytes: number of bytes in the message
 * @pipe_info: used for the pipe_number info
 *
 * Checks the packet length, configures the length in the netbuf,
 * and calls the upper layer callback.
 *
 * Return: None
 */
static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
                                  qdf_nbuf_t netbuf, int nbytes,
                                  struct HIF_CE_pipe_info *pipe_info)
{
        if (nbytes <= pipe_info->buf_sz) {
                qdf_nbuf_set_pktlen(netbuf, nbytes);
                msg_callbacks->
                        rxCompletionHandler(msg_callbacks->Context,
                                            netbuf, pipe_info->pipe_num);
        } else {
                HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d",
                          __func__, netbuf, nbytes);

                qdf_nbuf_free(netbuf);
        }
}

/* Called by lower (CE) layer when data is received from the Target. */
static void
hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
                     void *transfer_context, qdf_dma_addr_t CE_data,
                     unsigned int nbytes, unsigned int transfer_id,
                     unsigned int flags)
{
        struct HIF_CE_pipe_info *pipe_info =
                (struct HIF_CE_pipe_info *)ce_context;
        struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
        struct CE_state *ce_state = (struct CE_state *)copyeng;
        struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
#ifdef HIF_PCI
        struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
#endif
        struct hif_msg_callbacks *msg_callbacks =
                &pipe_info->pipe_callbacks;

        do {
#ifdef HIF_PCI
                hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
#endif
                qdf_nbuf_unmap_single(scn->qdf_dev,
                                      (qdf_nbuf_t) transfer_context,
                                      QDF_DMA_FROM_DEVICE);

                atomic_inc(&pipe_info->recv_bufs_needed);
                hif_post_recv_buffers_for_pipe(pipe_info);
                if (scn->target_status == TARGET_STATUS_RESET)
                        qdf_nbuf_free(transfer_context);
                else
                        hif_ce_do_recv(msg_callbacks, transfer_context,
                                       nbytes, pipe_info);

                /* Set up force_break flag if num of receives reaches
                 * MAX_NUM_OF_RECEIVES
                 */
                ce_state->receive_count++;
                if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
                        ce_state->force_break = 1;
                        break;
                }
        } while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
                                        &CE_data, &nbytes, &transfer_id,
                                        &flags) == QDF_STATUS_SUCCESS);
}

/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */

void
hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
              struct hif_msg_callbacks *callbacks)
{
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
        spin_lock_init(&pcie_access_log_lock);
#endif
        /* Save callbacks for later installation */
        qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
                     sizeof(hif_state->msg_callbacks_pending));
}

static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
{
        struct CE_handle *ce_diag = hif_state->ce_diag;
        int pipe_num;
        struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
        struct hif_msg_callbacks *hif_msg_callbacks =
                &hif_state->msg_callbacks_current;

        /* daemonize("hif_compl_thread"); */

        if (scn->ce_count == 0) {
                HIF_ERROR("%s: Invalid ce_count", __func__);
                return -EINVAL;
        }

        if (!hif_msg_callbacks ||
            !hif_msg_callbacks->rxCompletionHandler ||
            !hif_msg_callbacks->txCompletionHandler) {
                HIF_ERROR("%s: no completion handler registered", __func__);
                return -EFAULT;
        }

        A_TARGET_ACCESS_LIKELY(scn);
        for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
                struct CE_attr attr;
                struct HIF_CE_pipe_info *pipe_info;

                pipe_info = &hif_state->pipe_info[pipe_num];
                if (pipe_info->ce_hdl == ce_diag)
                        continue;       /* Handle Diagnostic CE specially */
                attr = hif_state->host_ce_config[pipe_num];
                if (attr.src_nentries) {
                        /* pipe used to send to target */
                        HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK",
                                __func__, pipe_num, pipe_info);
                        ce_send_cb_register(pipe_info->ce_hdl,
                                            hif_pci_ce_send_done, pipe_info,
                                            attr.flags & CE_ATTR_DISABLE_INTR);
                        pipe_info->num_sends_allowed = attr.src_nentries - 1;
                }
                if (attr.dest_nentries) {
                        /* pipe used to receive from target */
                        ce_recv_cb_register(pipe_info->ce_hdl,
                                            hif_pci_ce_recv_data, pipe_info,
                                            attr.flags & CE_ATTR_DISABLE_INTR);
                }

                if (attr.src_nentries)
                        qdf_spinlock_create(&pipe_info->completion_freeq_lock);

                qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
                             sizeof(pipe_info->pipe_callbacks));
        }

        A_TARGET_ACCESS_UNLIKELY(scn);
        return 0;
}

/*
 * Install pending msg callbacks.
 *
 * TBDXXX: This hack is needed because upper layers install msg callbacks
 * for use with HTC before BMI is done; yet this HIF implementation
 * needs to continue to use BMI msg callbacks. Really, upper layers
 * should not register HTC callbacks until AFTER BMI phase.
 */
static void hif_msg_callbacks_install(struct hif_softc *scn)
{
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

        qdf_mem_copy(&hif_state->msg_callbacks_current,
                     &hif_state->msg_callbacks_pending,
                     sizeof(hif_state->msg_callbacks_pending));
}

void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
                          uint8_t *DLPipe)
{
        int ul_is_polled, dl_is_polled;

        (void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
                ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
}

/**
 * hif_dump_pipe_debug_count() - Log error count
 * @scn: hif_softc pointer.
 *
 * Output the pipe error counts of each pipe to log file
 *
 * Return: N/A
 */
void hif_dump_pipe_debug_count(struct hif_softc *scn)
{
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
        int pipe_num;

        if (hif_state == NULL) {
                HIF_ERROR("%s hif_state is NULL", __func__);
                return;
        }
        for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
                struct HIF_CE_pipe_info *pipe_info;

                pipe_info = &hif_state->pipe_info[pipe_num];

                if (pipe_info->nbuf_alloc_err_count > 0 ||
                    pipe_info->nbuf_dma_err_count > 0 ||
                    pipe_info->nbuf_ce_enqueue_err_count)
                        HIF_ERROR(
                                "%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
                                __func__, pipe_info->pipe_num,
                                atomic_read(&pipe_info->recv_bufs_needed),
                                pipe_info->nbuf_alloc_err_count,
                                pipe_info->nbuf_dma_err_count,
                                pipe_info->nbuf_ce_enqueue_err_count);
        }
}

static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
                                          void *nbuf, uint32_t *error_cnt,
                                          enum hif_ce_event_type failure_type,
                                          const char *failure_type_string)
{
        int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
        struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
        struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
        int ce_id = CE_state->id;
        uint32_t error_cnt_tmp;

        qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
        error_cnt_tmp = ++(*error_cnt);
        qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
        HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
                __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
                failure_type_string);
        hif_record_ce_desc_event(scn, ce_id, failure_type,
                                 NULL, nbuf, bufs_needed_tmp, 0);
        /* if we fail to allocate the last buffer for an rx pipe,
         * there is no trigger to refill the ce and we will
         * eventually crash
         */
        if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
                qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
}
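
/*
 * Worked example for the trigger above, assuming dest_nentries = 512:
 * a pipe normally keeps dest_nentries - 1 = 511 buffers posted, so
 * recv_bufs_needed climbing back to 511 after a failed allocation
 * means no buffer is left in the ring and no RX completion will fire
 * to replenish it; the deferred oom_allocation_work is then the only
 * remaining refill path.
 */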

QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
{
        struct CE_handle *ce_hdl;
        qdf_size_t buf_sz;
        struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
        QDF_STATUS status;
        uint32_t bufs_posted = 0;

        buf_sz = pipe_info->buf_sz;
        if (buf_sz == 0) {
                /* Unused Copy Engine */
                return QDF_STATUS_SUCCESS;
        }

        ce_hdl = pipe_info->ce_hdl;

        qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
        while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
                qdf_dma_addr_t CE_data;	/* CE space buffer address */
                qdf_nbuf_t nbuf;

                atomic_dec(&pipe_info->recv_bufs_needed);
                qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

                nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
                if (!nbuf) {
                        hif_post_recv_buffers_failure(pipe_info, nbuf,
                                        &pipe_info->nbuf_alloc_err_count,
                                        HIF_RX_NBUF_ALLOC_FAILURE,
                                        "HIF_RX_NBUF_ALLOC_FAILURE");
                        return QDF_STATUS_E_NOMEM;
                }

                /*
                 * qdf_nbuf_peek_header(nbuf, &data, &unused);
                 * CE_data = dma_map_single(dev, data, buf_sz,
                 *                          DMA_FROM_DEVICE);
                 */
                status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
                                             QDF_DMA_FROM_DEVICE);

                if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
                        hif_post_recv_buffers_failure(pipe_info, nbuf,
                                        &pipe_info->nbuf_dma_err_count,
                                        HIF_RX_NBUF_MAP_FAILURE,
                                        "HIF_RX_NBUF_MAP_FAILURE");
                        qdf_nbuf_free(nbuf);
                        return status;
                }

                CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);

                qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
                                                   buf_sz, DMA_FROM_DEVICE);
                status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
                if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
                        hif_post_recv_buffers_failure(pipe_info, nbuf,
                                        &pipe_info->nbuf_ce_enqueue_err_count,
                                        HIF_RX_NBUF_ENQUEUE_FAILURE,
                                        "HIF_RX_NBUF_ENQUEUE_FAILURE");

                        qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
                                              QDF_DMA_FROM_DEVICE);
                        qdf_nbuf_free(nbuf);
                        return status;
                }

                qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
                bufs_posted++;
        }
        pipe_info->nbuf_alloc_err_count =
                (pipe_info->nbuf_alloc_err_count > bufs_posted) ?
                pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
        pipe_info->nbuf_dma_err_count =
                (pipe_info->nbuf_dma_err_count > bufs_posted) ?
                pipe_info->nbuf_dma_err_count - bufs_posted : 0;
        pipe_info->nbuf_ce_enqueue_err_count =
                (pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
                pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;

        qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

        return QDF_STATUS_SUCCESS;
}
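
/*
 * Illustrative note on the error counters above: they use a saturating
 * subtraction so a successful refill burst cannot drive them negative,
 * e.g.
 *
 *      count = (count > bufs_posted) ? count - bufs_posted : 0;
 *
 * with count = 3 and bufs_posted = 5 yields 0, not -2.
 */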

/*
 * Try to post all desired receive buffers for all pipes.
 * Returns QDF_STATUS_SUCCESS for a non-fastpath rx copy engine, since
 * oom_allocation_work will be scheduled to recover any failures, and an
 * error status if receive buffers could not be completely replenished
 * for a fastpath rx copy engine.
 */
QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
{
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
        int pipe_num;
        struct CE_state *ce_state = NULL;
        QDF_STATUS qdf_status;

        A_TARGET_ACCESS_LIKELY(scn);
        for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
                struct HIF_CE_pipe_info *pipe_info;

                ce_state = scn->ce_id_to_state[pipe_num];
                pipe_info = &hif_state->pipe_info[pipe_num];

                if (hif_is_nss_wifi_enabled(scn) &&
                    ce_state && (ce_state->htt_rx_data))
                        continue;

                qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
                if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state &&
                    ce_state->htt_rx_data &&
                    scn->fastpath_mode_on) {
                        A_TARGET_ACCESS_UNLIKELY(scn);
                        return qdf_status;
                }
        }

        A_TARGET_ACCESS_UNLIKELY(scn);

        return QDF_STATUS_SUCCESS;
}

QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
{
        struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
        QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

        hif_update_fastpath_recv_bufs_cnt(scn);

        hif_msg_callbacks_install(scn);

        if (hif_completion_thread_startup(hif_state))
                return QDF_STATUS_E_FAILURE;

        /* enable buffer cleanup */
        hif_state->started = true;

        /* Post buffers once to start things off. */
        qdf_status = hif_post_recv_buffers(scn);
        if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
                /* cleanup is done in hif_ce_disable */
                HIF_ERROR("%s: failed to post buffers", __func__);
                return qdf_status;
        }

        return qdf_status;
}

static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
        struct hif_softc *scn;
        struct CE_handle *ce_hdl;
        uint32_t buf_sz;
        struct HIF_CE_state *hif_state;
        qdf_nbuf_t netbuf;
        qdf_dma_addr_t CE_data;
        void *per_CE_context;

        buf_sz = pipe_info->buf_sz;
        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        hif_state = pipe_info->HIF_CE_state;
        if (!hif_state->started)
                return;

        scn = HIF_GET_SOFTC(hif_state);
        ce_hdl = pipe_info->ce_hdl;

        if (scn->qdf_dev == NULL)
                return;

        while (ce_revoke_recv_next
                       (ce_hdl, &per_CE_context, (void **)&netbuf,
                        &CE_data) == QDF_STATUS_SUCCESS) {
                if (netbuf) {
                        qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
                                              QDF_DMA_FROM_DEVICE);
                        qdf_nbuf_free(netbuf);
                }
        }
}

static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
        struct CE_handle *ce_hdl;
        struct HIF_CE_state *hif_state;
        struct hif_softc *scn;
        qdf_nbuf_t netbuf;
        void *per_CE_context;
        qdf_dma_addr_t CE_data;
        unsigned int nbytes;
        unsigned int id;
        uint32_t buf_sz;
        uint32_t toeplitz_hash_result;

        buf_sz = pipe_info->buf_sz;
        if (buf_sz == 0) {
                /* Unused Copy Engine */
                return;
        }

        hif_state = pipe_info->HIF_CE_state;
        if (!hif_state->started)
                return;

        scn = HIF_GET_SOFTC(hif_state);

        ce_hdl = pipe_info->ce_hdl;

        while (ce_cancel_send_next
                       (ce_hdl, &per_CE_context,
                        (void **)&netbuf, &CE_data, &nbytes,
                        &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
                if (netbuf != CE_SENDLIST_ITEM_CTXT) {
                        /*
                         * Packets enqueued by htt_h2t_ver_req_msg() and
                         * htt_h2t_rx_ring_cfg_msg_ll() have already been
                         * freed in htt_htc_misc_pkt_pool_free() in
                         * wlantl_close(), so do not free them here again;
                         * identify them by the endpoint on which they
                         * were queued.
                         */
                        if (id == scn->htc_htt_tx_endpoint)
                                return;
                        /* Indicate the completion to higher layers so
                         * they can free the buffer.
                         */
                        if (pipe_info->pipe_callbacks.txCompletionHandler)
                                pipe_info->pipe_callbacks.
                                txCompletionHandler(pipe_info->
                                        pipe_callbacks.Context,
                                        netbuf, id, toeplitz_hash_result);
                }
        }
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
{
        int pipe_num;
        struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
        struct CE_state *ce_state;

        for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
                struct HIF_CE_pipe_info *pipe_info;

                ce_state = scn->ce_id_to_state[pipe_num];
                if (hif_is_nss_wifi_enabled(scn) && ce_state &&
                    ((ce_state->htt_tx_data) ||
                     (ce_state->htt_rx_data))) {
                        continue;
                }

                pipe_info = &hif_state->pipe_info[pipe_num];
                hif_recv_buffer_cleanup_on_pipe(pipe_info);
                hif_send_buffer_cleanup_on_pipe(pipe_info);
        }
}

void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
{
        struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

        hif_buffer_cleanup(hif_state);
}

static void hif_destroy_oom_work(struct hif_softc *scn)
{
        struct CE_state *ce_state;
        int ce_id;

        for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
                ce_state = scn->ce_id_to_state[ce_id];
                if (ce_state)
                        qdf_destroy_work(scn->qdf_dev,
                                         &ce_state->oom_allocation_work);
        }
}

void hif_ce_stop(struct hif_softc *scn)
{
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
        int pipe_num;

        /*
         * before cleaning up any memory, ensure irq &
         * bottom half contexts will not be re-entered
         */
        hif_disable_isr(&scn->osc);
        hif_destroy_oom_work(scn);
        scn->hif_init_done = false;

        /*
         * At this point, asynchronous threads are stopped,
         * The Target should not DMA nor interrupt, Host code may
         * not initiate anything more. So we just need to clean
         * up Host-side state.
         */

        if (scn->athdiag_procfs_inited) {
                athdiag_procfs_remove();
                scn->athdiag_procfs_inited = false;
        }

        hif_buffer_cleanup(hif_state);

        for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
                struct HIF_CE_pipe_info *pipe_info;
                struct CE_attr attr;
                struct CE_handle *ce_diag = hif_state->ce_diag;

                pipe_info = &hif_state->pipe_info[pipe_num];
                if (pipe_info->ce_hdl) {
                        if (pipe_info->ce_hdl != ce_diag) {
                                attr = hif_state->host_ce_config[pipe_num];
                                if (attr.src_nentries)
                                        qdf_spinlock_destroy(&pipe_info->
                                                        completion_freeq_lock);
                        }
                        ce_fini(pipe_info->ce_hdl);
                        pipe_info->ce_hdl = NULL;
                        pipe_info->buf_sz = 0;
                        qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
                }
        }

        if (hif_state->sleep_timer_init) {
                qdf_timer_stop(&hif_state->sleep_timer);
                qdf_timer_free(&hif_state->sleep_timer);
                hif_state->sleep_timer_init = false;
        }

        hif_state->started = false;
}

/**
 * hif_get_target_ce_config() - get copy engine configuration
 * @scn: HIF context
 * @target_ce_config_ret: basic copy engine configuration
 * @target_ce_config_sz_ret: size of the basic configuration in bytes
 * @target_service_to_ce_map_ret: service mapping for the copy engines
 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
 * @target_shadow_reg_cfg_ret: shadow register configuration
 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
 *
 * Provides an accessor to these values outside of this file.
 * Currently these are stored in static pointers to const sections.
 * There are multiple configurations that are selected from at compile time.
 * Runtime selection would need to consider mode, target type and bus type.
 *
 * Return: return by parameter.
 */
void hif_get_target_ce_config(struct hif_softc *scn,
                struct CE_pipe_config **target_ce_config_ret,
                uint32_t *target_ce_config_sz_ret,
                struct service_to_pipe **target_service_to_ce_map_ret,
                uint32_t *target_service_to_ce_map_sz_ret,
                struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
                uint32_t *shadow_cfg_sz_ret)
{
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

        *target_ce_config_ret = hif_state->target_ce_config;
        *target_ce_config_sz_ret = hif_state->target_ce_config_sz;

        hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
                                       target_service_to_ce_map_sz_ret);

        if (target_shadow_reg_cfg_ret)
                *target_shadow_reg_cfg_ret = target_shadow_reg_cfg;

        if (shadow_cfg_sz_ret)
                *shadow_cfg_sz_ret = shadow_cfg_sz;
}

#ifdef CONFIG_SHADOW_V2
static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
{
        int i;

        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                  "%s: num_config %d\n", __func__, cfg->num_shadow_reg_v2_cfg);

        for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
                          "%s: i %d, val %x\n", __func__, i,
                          cfg->shadow_reg_v2_cfg[i].addr);
        }
}
#else
static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
{
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                  "%s: CONFIG_SHADOW_V2 not defined\n", __func__);
}
#endif

/**
 * hif_wlan_enable(): call the platform driver to enable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode and CE configuration to
 * platform driver to enable wlan.
 *
 * Return: linux error code
 */
int hif_wlan_enable(struct hif_softc *scn)
{
        struct pld_wlan_enable_cfg cfg;
        enum pld_driver_mode mode;
        uint32_t con_mode = hif_get_conparam(scn);

        hif_get_target_ce_config(scn,
                        (struct CE_pipe_config **)&cfg.ce_tgt_cfg,
                        &cfg.num_ce_tgt_cfg,
                        (struct service_to_pipe **)&cfg.ce_svc_cfg,
                        &cfg.num_ce_svc_pipe_cfg,
                        (struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
                        &cfg.num_shadow_reg_cfg);

        /* translate from structure size to array size */
        cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
        cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
        cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);

        hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
                                            &cfg.num_shadow_reg_v2_cfg);

        hif_print_hal_shadow_register_cfg(&cfg);

        if (QDF_GLOBAL_FTM_MODE == con_mode)
                mode = PLD_FTM;
        else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
                mode = PLD_COLDBOOT_CALIBRATION;
        else if (QDF_IS_EPPING_ENABLED(con_mode))
                mode = PLD_EPPING;
        else
                mode = PLD_MISSION;

        if (BYPASS_QMI)
                return 0;
        else
                return pld_wlan_enable(scn->qdf_dev->dev, &cfg,
                                       mode, QWLAN_VERSIONSTR);
}
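
/*
 * Worked example for the size-to-count translation above: the sizes
 * returned by hif_get_target_ce_config() are in bytes, so with a
 * 12-entry target CE table,
 *
 *      num_ce_tgt_cfg = 12 * sizeof(struct CE_pipe_config)
 *                          / sizeof(struct CE_pipe_config) = 12;
 *
 * i.e. the platform driver receives element counts, not byte sizes.
 */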
2547
Nirav Shah0d0cce82018-01-17 17:00:31 +05302548#ifdef WLAN_FEATURE_EPPING
2549
Houston Hoffman75ef5a52016-04-14 17:15:49 -07002550#define CE_EPPING_USES_IRQ true
2551
Nirav Shah0d0cce82018-01-17 17:00:31 +05302552void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state)
2553{
2554 if (CE_EPPING_USES_IRQ)
2555 hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
2556 else
2557 hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
2558 hif_state->target_ce_config = target_ce_config_wlan_epping;
2559 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
2560 target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
2561 shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
2562}
2563#endif
2564
Houston Hoffman108da402016-03-14 21:11:24 -07002565/**
2566 * hif_ce_prepare_config() - load the correct static tables.
2567 * @scn: hif context
2568 *
2569 * Epping uses different static attribute tables than mission mode.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002570 */
Houston Hoffman108da402016-03-14 21:11:24 -07002571void hif_ce_prepare_config(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002572{
Komal Seelambd7c51d2016-02-24 10:27:30 +05302573 uint32_t mode = hif_get_conparam(scn);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002574 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2575 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302576 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002577
Houston Hoffman10fedfc2017-01-23 15:23:09 -08002578 hif_state->ce_services = ce_services_attach(scn);
2579
Houston Hoffman710af5a2016-11-22 21:59:03 -08002580 scn->ce_count = HOST_CE_COUNT;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002581 /* if epping is enabled we need to use the epping configuration. */
Houston Hoffman75ef5a52016-04-14 17:15:49 -07002582 if (QDF_IS_EPPING_ENABLED(mode)) {
Nirav Shah0d0cce82018-01-17 17:00:31 +05302583 hif_ce_prepare_epping_config(hif_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002584 }
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002585
2586 switch (tgt_info->target_type) {
2587 default:
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302588 hif_state->host_ce_config = host_ce_config_wlan;
2589 hif_state->target_ce_config = target_ce_config_wlan;
2590 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002591 break;
2592 case TARGET_TYPE_AR900B:
2593 case TARGET_TYPE_QCA9984:
2594 case TARGET_TYPE_IPQ4019:
2595 case TARGET_TYPE_QCA9888:
Venkateswara Swamy Bandaru5432c1b2016-10-12 19:00:40 +05302596 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
2597 hif_state->host_ce_config =
2598 host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
2599 } else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
2600 hif_state->host_ce_config =
2601 host_lowdesc_ce_cfg_wlan_ar900b;
2602 } else {
2603 hif_state->host_ce_config = host_ce_config_wlan_ar900b;
2604 }
2605
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302606 hif_state->target_ce_config = target_ce_config_wlan_ar900b;
2607 hif_state->target_ce_config_sz =
2608 sizeof(target_ce_config_wlan_ar900b);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002609
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002610 break;
2611
2612 case TARGET_TYPE_AR9888:
2613 case TARGET_TYPE_AR9888V2:
Venkateswara Swamy Bandaru5432c1b2016-10-12 19:00:40 +05302614 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
2615 hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888;
2616 } else {
2617 hif_state->host_ce_config = host_ce_config_wlan_ar9888;
2618 }
2619
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302620 hif_state->target_ce_config = target_ce_config_wlan_ar9888;
2621 hif_state->target_ce_config_sz =
2622 sizeof(target_ce_config_wlan_ar9888);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002623
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002624 break;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07002625
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05302626 case TARGET_TYPE_QCA8074:
Karunakar Dasinenif61cb072016-09-29 11:50:45 -07002627 if (scn->bus_type == QDF_BUS_TYPE_PCI) {
2628 hif_state->host_ce_config =
2629 host_ce_config_wlan_qca8074_pci;
2630 hif_state->target_ce_config =
2631 target_ce_config_wlan_qca8074_pci;
2632 hif_state->target_ce_config_sz =
2633 sizeof(target_ce_config_wlan_qca8074_pci);
2634 } else {
2635 hif_state->host_ce_config = host_ce_config_wlan_qca8074;
2636 hif_state->target_ce_config =
2637 target_ce_config_wlan_qca8074;
2638 hif_state->target_ce_config_sz =
2639 sizeof(target_ce_config_wlan_qca8074);
2640 }
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05302641 break;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07002642 case TARGET_TYPE_QCA6290:
2643 hif_state->host_ce_config = host_ce_config_wlan_qca6290;
2644 hif_state->target_ce_config = target_ce_config_wlan_qca6290;
2645 hif_state->target_ce_config_sz =
2646 sizeof(target_ce_config_wlan_qca6290);
Houston Hoffman748e1a62017-03-30 17:20:42 -07002647
Houston Hoffman710af5a2016-11-22 21:59:03 -08002648 scn->ce_count = QCA_6290_CE_COUNT;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07002649 break;
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002650 }
Yun parkc80eea72017-10-06 15:33:36 -07002651 QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
Houston Hoffman108da402016-03-14 21:11:24 -07002652}
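
/*
 * Illustrative sketch only: a hypothetical new target would hook into
 * the switch in hif_ce_prepare_config() with the same three
 * assignments the existing cases make. TARGET_TYPE_FOO and the two
 * foo tables below do not exist in this driver; they stand in for a
 * real target type and its static CE configuration tables.
 *
 *	case TARGET_TYPE_FOO:
 *		hif_state->host_ce_config = host_ce_config_wlan_foo;
 *		hif_state->target_ce_config = target_ce_config_wlan_foo;
 *		hif_state->target_ce_config_sz =
 *				sizeof(target_ce_config_wlan_foo);
 *		break;
 */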

/**
 * hif_ce_open() - do ce specific allocations
 * @hif_sc: pointer to hif context
 *
 * Return: QDF_STATUS_SUCCESS for success or QDF_STATUS_E_NOMEM
 */
QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	qdf_spinlock_create(&hif_state->irq_reg_lock);
	qdf_spinlock_create(&hif_state->keep_awake_lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * hif_ce_close() - do ce specific free
 * @hif_sc: pointer to hif context
 */
void hif_ce_close(struct hif_softc *hif_sc)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
	qdf_spinlock_destroy(&hif_state->keep_awake_lock);
}

/**
 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
 * @hif_sc: hif context
 *
 * uses state variables to support cleaning up when hif_config_ce fails.
 */
void hif_unconfig_ce(struct hif_softc *hif_sc)
{
	int pipe_num;
	struct HIF_CE_pipe_info *pipe_info;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ce_unregister_irq(hif_state, (1 << pipe_num));
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
		}
	}
	if (hif_sc->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		hif_sc->athdiag_procfs_inited = false;
	}
}

#ifdef CONFIG_BYPASS_QMI
#define FW_SHARED_MEM (2 * 1024 * 1024)

/**
 * hif_post_static_buf_to_target() - post static buffer to WLAN FW
 * @scn: pointer to HIF structure
 *
 * WLAN FW needs 2MB memory from DDR when QMI is disabled.
 *
 * Return: void
 */
static void hif_post_static_buf_to_target(struct hif_softc *scn)
{
	void *target_va;
	phys_addr_t target_pa;

	target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					     FW_SHARED_MEM, &target_pa);
	if (NULL == target_va) {
		HIF_TRACE("Memory allocation failed, could not post target buf");
		return;
	}
	hif_write32_mb(scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
	HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
}
#else
static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
{
}
#endif

static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
				       bool wait_for_it)
{
	/* todo */
	return 0;
}

/**
 * hif_config_ce() - configure copy engines
 * @scn: hif context
 *
 * Prepares fw, copy engine hardware and host sw according
 * to the attributes selected by hif_ce_prepare_config.
 *
 * also calls athdiag_procfs_init
 *
 * Return: 0 for success, nonzero for failure.
 */
int hif_config_ce(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct HIF_CE_pipe_info *pipe_info;
	int pipe_num;
	struct CE_state *ce_state = NULL;

#ifdef ADRASTEA_SHADOW_REGISTERS
	int i;
#endif
	QDF_STATUS rv = QDF_STATUS_SUCCESS;

	scn->notice_send = true;
	scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;

	hif_post_static_buf_to_target(scn);

	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;

	hif_config_rri_on_ddr(scn);

	if (ce_srng_based(scn))
		scn->bus_ops.hif_target_sleep_state_adjust =
			&hif_srng_sleep_state_adjust;

	/* Initialise the CE debug history sysfs interface inputs ce_id and
	 * index. Disable data storing
	 */
	reset_ce_debug_history(scn);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr *attr;

		pipe_info = &hif_state->pipe_info[pipe_num];
		pipe_info->pipe_num = pipe_num;
		pipe_info->HIF_CE_state = hif_state;
		attr = &hif_state->host_ce_config[pipe_num];

		pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
		ce_state = scn->ce_id_to_state[pipe_num];
		if (!ce_state) {
			A_TARGET_ACCESS_UNLIKELY(scn);
			goto err;
		}
		qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
		QDF_ASSERT(pipe_info->ce_hdl != NULL);
		if (pipe_info->ce_hdl == NULL) {
			rv = QDF_STATUS_E_FAILURE;
			A_TARGET_ACCESS_UNLIKELY(scn);
			goto err;
		}

		ce_state->lro_data = qdf_lro_init();

		if (attr->flags & CE_ATTR_DIAG) {
			/* Reserve the ultimate CE for
			 * Diagnostic Window support
			 */
			hif_state->ce_diag = pipe_info->ce_hdl;
			continue;
		}

		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
		    (ce_state->htt_rx_data))
			continue;

		pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
		if (attr->dest_nentries > 0) {
			atomic_set(&pipe_info->recv_bufs_needed,
				   init_buffer_count(attr->dest_nentries - 1));
			/* SRNG based CE has one entry less */
			if (ce_srng_based(scn))
				atomic_dec(&pipe_info->recv_bufs_needed);
		} else {
			atomic_set(&pipe_info->recv_bufs_needed, 0);
		}
		ce_tasklet_init(hif_state, (1 << pipe_num));
		ce_register_irq(hif_state, (1 << pipe_num));
	}

	if (athdiag_procfs_init(scn) != 0) {
		A_TARGET_ACCESS_UNLIKELY(scn);
		goto err;
	}
	scn->athdiag_procfs_inited = true;

	HIF_DBG("%s: ce_init done", __func__);

	init_tasklet_workers(hif_hdl);

	HIF_DBG("%s: X, ret = %d", __func__, rv);

#ifdef ADRASTEA_SHADOW_REGISTERS
	HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__);
	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
		HIF_DBG("%s Shadow Register%d is mapped to address %x",
			__func__, i,
			(A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
	}
#endif

	/* 0 when rv is QDF_STATUS_SUCCESS, nonzero otherwise */
	return rv != QDF_STATUS_SUCCESS;

err:
	/* Failure, so clean up */
	hif_unconfig_ce(scn);
	HIF_TRACE("%s: X, ret = %d", __func__, rv);
	/* this comparison is always true (1), signalling failure */
	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
}
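
/*
 * Illustrative bring-up sketch (not code from this driver): a bus
 * layer is expected to allocate the CE state, select the static
 * tables and then program the hardware, roughly in this order. The
 * exact call sites and error codes are assumptions for the example;
 * note that hif_config_ce() already calls hif_unconfig_ce() on its
 * own error path.
 *
 *	if (hif_ce_open(scn) != QDF_STATUS_SUCCESS)
 *		return -ENOMEM;
 *	hif_ce_prepare_config(scn);
 *	if (hif_config_ce(scn)) {
 *		hif_ce_close(scn);
 *		return -EIO;
 *	}
 */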

#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
 * @hif_ctx: hif opaque context
 * @handler: Callback function
 * @context: handle for callback function
 *
 * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
 */
int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
				fastpath_msg_handler handler,
				void *context)
{
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	int i;

	if (!scn) {
		HIF_ERROR("%s: scn is NULL", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	if (!scn->fastpath_mode_on) {
		HIF_WARN("%s: Fastpath mode disabled", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	for (i = 0; i < scn->ce_count; i++) {
		ce_state = scn->ce_id_to_state[i];
		if (ce_state->htt_rx_data) {
			ce_state->fastpath_handler = handler;
			ce_state->context = context;
		}
	}

	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(hif_ce_fastpath_cb_register);
#endif
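
/*
 * Example registration (illustrative only): htt_t2h_msg_handler and
 * htt_pdev are hypothetical stand-ins for the HTT layer's fastpath
 * handler and its context; scn->fastpath_mode_on must already be set
 * for the call to succeed.
 *
 *	if (hif_ce_fastpath_cb_register(hif_ctx, htt_t2h_msg_handler,
 *					htt_pdev) != QDF_STATUS_SUCCESS)
 *		HIF_ERROR("could not register fastpath handler");
 */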

#ifdef IPA_OFFLOAD
/**
 * hif_ce_ipa_get_ce_resource() - get uc resource on hif
 * @scn: bus context
 * @ce_sr: copyengine source ring shared memory info
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * IPA micro controller data path offload feature enabled,
 * HIF should release copy engine related resource information to IPA UC
 * IPA UC will access hardware resource with released information
 *
 * Return: None
 */
void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
				qdf_shared_mem_t **ce_sr,
				uint32_t *ce_sr_ring_size,
				qdf_dma_addr_t *ce_reg_paddr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;

	ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
			    ce_reg_paddr);
}
#endif /* IPA_OFFLOAD */
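
/*
 * Illustrative caller sketch: the IPA glue layer would hand these
 * values to the IPA micro controller. The local variable names are
 * made up for the example; only the hif_ce_ipa_get_ce_resource()
 * signature above is real.
 *
 *	qdf_shared_mem_t *ce_sr;
 *	uint32_t ce_sr_ring_size;
 *	qdf_dma_addr_t ce_reg_paddr;
 *
 *	hif_ce_ipa_get_ce_resource(scn, &ce_sr, &ce_sr_ring_size,
 *				   &ce_reg_paddr);
 */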

#ifdef ADRASTEA_SHADOW_REGISTERS

/*
 * Current shadow register config
 *
 * ----------------------------------------------------------------
 * Shadow Register   |    CE   |  src/dst write index
 * ----------------------------------------------------------------
 *        0          |    0    |         src
 *        1          No Config - Doesn't point to anything
 *        2          No Config - Doesn't point to anything
 *        3          |    3    |         src
 *        4          |    4    |         src
 *        5          |    5    |         src
 *        6          No Config - Doesn't point to anything
 *        7          |    7    |         src
 *        8          No Config - Doesn't point to anything
 *        9          No Config - Doesn't point to anything
 *       10          No Config - Doesn't point to anything
 *       11          No Config - Doesn't point to anything
 * ----------------------------------------------------------------
 *       12          No Config - Doesn't point to anything
 *       13          |    1    |         dst
 *       14          |    2    |         dst
 *       15          No Config - Doesn't point to anything
 *       16          No Config - Doesn't point to anything
 *       17          |    5    |         dst
 *       18          No Config - Doesn't point to anything
 *       19          |    7    |         dst
 *       20          |    8    |         dst
 *       21          |    9    |         dst
 *       22          |   10    |         dst
 *       23          |   11    |         dst
 * ----------------------------------------------------------------
 *
 * ToDo - Move shadow register config to following in the future
 * This helps free up a block of shadow registers towards the end.
 * Can be used for other purposes
 *
 * ----------------------------------------------------------------
 * Shadow Register   |    CE   |  src/dst write index
 * ----------------------------------------------------------------
 *        0          |    0    |         src
 *        1          |    3    |         src
 *        2          |    4    |         src
 *        3          |    5    |         src
 *        4          |    7    |         src
 * ----------------------------------------------------------------
 *        5          |    1    |         dst
 *        6          |    2    |         dst
 *        7          |    7    |         dst
 *        8          |    8    |         dst
 * ----------------------------------------------------------------
 *        9          No Config - Doesn't point to anything
 *       12          No Config - Doesn't point to anything
 *       13          No Config - Doesn't point to anything
 *       14          No Config - Doesn't point to anything
 *       15          No Config - Doesn't point to anything
 *       16          No Config - Doesn't point to anything
 *       17          No Config - Doesn't point to anything
 *       18          No Config - Doesn't point to anything
 *       19          No Config - Doesn't point to anything
 *       20          No Config - Doesn't point to anything
 *       21          No Config - Doesn't point to anything
 *       22          No Config - Doesn't point to anything
 *       23          No Config - Doesn't point to anything
 * ----------------------------------------------------------------
 */

u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 0:
		addr = SHADOW_VALUE0;
		break;
	case 3:
		addr = SHADOW_VALUE3;
		break;
	case 4:
		addr = SHADOW_VALUE4;
		break;
	case 5:
		addr = SHADOW_VALUE5;
		break;
	case 7:
		addr = SHADOW_VALUE7;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}
	return addr;
}

u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 1:
		addr = SHADOW_VALUE13;
		break;
	case 2:
		addr = SHADOW_VALUE14;
		break;
	case 5:
		addr = SHADOW_VALUE17;
		break;
	case 7:
		addr = SHADOW_VALUE19;
		break;
	case 8:
		addr = SHADOW_VALUE20;
		break;
	case 9:
		addr = SHADOW_VALUE21;
		break;
	case 10:
		addr = SHADOW_VALUE22;
		break;
	case 11:
		addr = SHADOW_VALUE23;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}

	return addr;
}
#endif
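
/*
 * Worked example, derived from the table and switch statements above:
 * for the source ring of CE 4, COPY_ENGINE_ID(ctrl_addr) yields 4, so
 * shadow_sr_wr_ind_addr() returns SHADOW_VALUE4 and the host writes
 * the CE 4 source-ring write index through that shadow register
 * rather than the CE register itself.
 *
 *	u32 shadow = shadow_sr_wr_ind_addr(scn, CE_BASE_ADDRESS(4));
 *	(shadow == SHADOW_VALUE4 under the current mapping)
 */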

#if defined(FEATURE_LRO)
void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
{
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	ce_state = scn->ce_id_to_state[ctx_id];

	return ce_state->lro_data;
}
#endif

/**
 * hif_map_service_to_pipe() - returns the ce ids pertaining to
 * this service
 * @hif_hdl: hif_opaque_softc pointer.
 * @svc_id: Service ID for which the mapping is needed.
 * @ul_pipe: address of the container in which ul pipe is returned.
 * @dl_pipe: address of the container in which dl pipe is returned.
 * @ul_is_polled: address of the container in which a bool
 *			indicating if the UL CE for this service
 *			is polled is returned.
 * @dl_is_polled: address of the container in which a bool
 *			indicating if the DL CE for this service
 *			is polled is returned.
 *
 * Return: Indicates whether the service has been found in the table.
 *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
 *         There will be warning logs if either leg has not been updated
 *         because it missed the entry in the table (but this is not an err).
 */
int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
			int *dl_is_polled)
{
	int status = QDF_STATUS_E_INVAL;
	unsigned int i;
	struct service_to_pipe element;
	struct service_to_pipe *tgt_svc_map_to_use;
	uint32_t sz_tgt_svc_map_to_use;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	bool dl_updated = false;
	bool ul_updated = false;

	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
				       &sz_tgt_svc_map_to_use);

	*dl_is_polled = 0;  /* polling for received messages not supported */

	for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
		if (element.service_id == svc_id) {
			if (element.pipedir == PIPEDIR_OUT) {
				*ul_pipe = element.pipenum;
				*ul_is_polled =
				    (hif_state->host_ce_config[*ul_pipe].flags &
				     CE_ATTR_DISABLE_INTR) != 0;
				ul_updated = true;
			} else if (element.pipedir == PIPEDIR_IN) {
				*dl_pipe = element.pipenum;
				dl_updated = true;
			}
			status = QDF_STATUS_SUCCESS;
		}
	}
	if (ul_updated == false)
		HIF_INFO("%s: ul pipe is NOT updated for service %d",
			 __func__, svc_id);
	if (dl_updated == false)
		HIF_INFO("%s: dl pipe is NOT updated for service %d",
			 __func__, svc_id);

	return status;
}
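
/*
 * Usage sketch: resolving the pipes for the WMI control service.
 * WMI_CONTROL_SVC is assumed to be present in the selected service
 * table, and the error handling is illustrative. hif_get_wake_ce_id()
 * at the end of this file is a concrete in-tree caller.
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	if (hif_map_service_to_pipe(hif_hdl, WMI_CONTROL_SVC,
 *				    &ul_pipe, &dl_pipe,
 *				    &ul_polled, &dl_polled))
 *		HIF_ERROR("no pipe mapping for WMI control service");
 */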

#ifdef SHADOW_REG_DEBUG
inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
					       uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, srri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);

	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != srri_from_ddr) {
		HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
			  __func__, srri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return srri_from_ddr;
}

inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
						uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, drri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);

	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != drri_from_ddr) {
		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
			  drri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return drri_from_ddr;
}

#endif

#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_get_src_ring_read_index(): Called to get the SRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the SRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based SRRI.
 *
 * Return: SRRI
 */
inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
						uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
	if (attr.flags & CE_ATTR_DISABLE_INTR) {
		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	} else {
		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
			return A_TARGET_READ(scn,
					(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
		else
			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
					CE_ctrl_addr);
	}
}

/**
 * hif_get_dst_ring_read_index(): Called to get the DRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the DRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based DRRI.
 *
 * Return: DRRI
 */
inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
						uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];

	if (attr.flags & CE_ATTR_DISABLE_INTR) {
		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	} else {
		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
			return A_TARGET_READ(scn,
					(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
		else
			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
					CE_ctrl_addr);
	}
}

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This function allocates non cached memory on ddr and sends
 * the physical address of this memory to the CE hardware. The
 * hardware updates the RRI on this particular location.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
	unsigned int i;
	qdf_dma_addr_t paddr_rri_on_ddr;
	uint32_t high_paddr, low_paddr;

	scn->vaddr_rri_on_ddr =
		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
			scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
			&paddr_rri_on_ddr);

	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
	low_paddr = BITS0_TO_31(paddr_rri_on_ddr);
	high_paddr = BITS32_TO_35(paddr_rri_on_ddr);

	HIF_DBG("%s using srri and drri from DDR", __func__);

	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);

	for (i = 0; i < CE_COUNT; i++)
		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));

	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));
}
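
/*
 * Worked example, assuming BITS0_TO_31()/BITS32_TO_35() extract those
 * bit ranges of the dma address: for a 36-bit paddr_rri_on_ddr of
 * 0x8ABCD1230, low_paddr is 0xABCD1230 (bits 0..31) and high_paddr is
 * 0x8 (bits 32..35); the two WRITE_CE_DDR_ADDRESS_FOR_RRI_* calls
 * above hand the CE hardware the full address in those two pieces.
 */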
#else

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This is a dummy implementation for platforms that don't
 * support this functionality.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
}
#endif

/**
 * hif_dump_ce_registers() - dump ce registers
 * @scn: hif_softc pointer.
 *
 * Output the copy engine registers
 *
 * Return: 0 for success or error code
 */
int hif_dump_ce_registers(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
	uint16_t i;
	QDF_STATUS status;

	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
		if (scn->ce_id_to_state[i] == NULL) {
			HIF_DBG("CE%d not used.", i);
			continue;
		}

		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
					   (uint8_t *) &ce_reg_values[0],
					   ce_reg_word_size * sizeof(uint32_t));

		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("Dumping CE register failed!");
			return -EACCES;
		}
		HIF_ERROR("CE%d=>\n", i);
		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
				   (uint8_t *) &ce_reg_values[0],
				   ce_reg_word_size * sizeof(uint32_t));
		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d\n", (ce_reg_address
			  + SR_WR_INDEX_ADDRESS),
			  ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d\n", (ce_reg_address
			  + CURRENT_SRRI_ADDRESS),
			  ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d\n", (ce_reg_address
			  + DST_WR_INDEX_ADDRESS),
			  ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d\n", (ce_reg_address
			  + CURRENT_DRRI_ADDRESS),
			  ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
		qdf_print("---\n");
	}
	return 0;
}
qdf_export_symbol(hif_dump_ce_registers);

#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct CE_ring_state *dest_ring = ce_state->dest_ring;

	if (src_ring) {
		hif_info->ul_pipe.nentries = src_ring->nentries;
		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
		hif_info->ul_pipe.sw_index = src_ring->sw_index;
		hif_info->ul_pipe.write_index = src_ring->write_index;
		hif_info->ul_pipe.hw_index = src_ring->hw_index;
		hif_info->ul_pipe.base_addr_CE_space =
			src_ring->base_addr_CE_space;
		hif_info->ul_pipe.base_addr_owner_space =
			src_ring->base_addr_owner_space;
	}

	if (dest_ring) {
		hif_info->dl_pipe.nentries = dest_ring->nentries;
		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
		hif_info->dl_pipe.write_index = dest_ring->write_index;
		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
		hif_info->dl_pipe.base_addr_CE_space =
			dest_ring->base_addr_CE_space;
		hif_info->dl_pipe.base_addr_owner_space =
			dest_ring->base_addr_owner_space;
	}

	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
	hif_info->ctrl_addr = ce_state->ctrl_addr;

	return hif_info;
}
qdf_export_symbol(hif_get_addl_pipe_info);

uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->nss_wifi_ol_mode = mode;
	return 0;
}
qdf_export_symbol(hif_set_nss_wifiol_mode);
#endif

void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->hif_attribute = hif_attrib;
}

/* disable interrupts (only applicable for legacy copy engines currently) */
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
	uint32_t ctrl_addr = CE_state->ctrl_addr;

	Q_TARGET_ACCESS_BEGIN(scn);
	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
	Q_TARGET_ACCESS_END(scn);
}
qdf_export_symbol(hif_disable_interrupt);

/**
 * hif_fw_event_handler() - hif fw event handler
 * @hif_state: pointer to hif ce state structure
 *
 * Process fw events and raise HTC callback to process fw events.
 *
 * Return: none
 */
static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
{
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	if (!msg_callbacks->fwEventHandler)
		return;

	msg_callbacks->fwEventHandler(msg_callbacks->Context,
				      QDF_STATUS_E_FAILURE);
}

#ifndef QCA_WIFI_3_0
/**
 * hif_fw_interrupt_handler() - FW interrupt handler
 * @irq: irq number
 * @arg: the user pointer
 *
 * Called from the PCI interrupt handler when the target raises a
 * firmware-generated interrupt to the host.
 *
 * Only registered for legacy CE devices.
 *
 * Return: status of handled irq
 */
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	struct hif_softc *scn = arg;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	uint32_t fw_indicator_address, fw_indicator;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return ATH_ISR_NOSCHED;

	fw_indicator_address = hif_state->fw_indicator_address;
	/* For sudden unplug this will return ~0 */
	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);

	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
		/* ACK: clear Target-side pending event */
		A_TARGET_WRITE(scn, fw_indicator_address,
			       fw_indicator & ~FW_IND_EVENT_PENDING);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;

		if (hif_state->started) {
			hif_fw_event_handler(hif_state);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 */
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
				("%s: Early firmware event indicated\n",
				 __func__));
		}
	} else {
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;
	}

	return ATH_ISR_SCHED;
}
#else
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	return ATH_ISR_SCHED;
}
#endif /* #ifdef QCA_WIFI_3_0 */

/**
 * hif_wlan_disable(): call the platform driver to disable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode to platform driver to disable
 * wlan.
 *
 * Return: void
 */
void hif_wlan_disable(struct hif_softc *scn)
{
	enum pld_driver_mode mode;
	uint32_t con_mode = hif_get_conparam(scn);

	if (scn->target_status == TARGET_STATUS_RESET)
		return;

	if (QDF_GLOBAL_FTM_MODE == con_mode)
		mode = PLD_FTM;
	else if (QDF_IS_EPPING_ENABLED(con_mode))
		mode = PLD_EPPING;
	else
		mode = PLD_MISSION;

	pld_wlan_disable(scn->qdf_dev->dev, mode);
}

int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
{
	QDF_STATUS status;
	uint8_t ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;

	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
					 HTC_CTRL_RSVD_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
		return qdf_status_to_os_return(status);
	}

	*ce_id = dl_pipe;

	return 0;
}
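
/*
 * Usage sketch (illustrative): a bus layer that needs to know which
 * copy engine can wake the host could resolve it like this; wake_ce
 * and the logging are made up for the example.
 *
 *	uint8_t wake_ce;
 *
 *	if (hif_get_wake_ce_id(scn, &wake_ce))
 *		HIF_ERROR("wake CE lookup failed");
 *	else
 *		HIF_DBG("wake CE is %d", wake_ce);
 */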