/*
 * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>		/* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include <target_type.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#include "qdf_module.h"

#define CE_POLL_TIMEOUT 10	/* ms */

#define AGC_DUMP 1
#define CHANINFO_DUMP 2
#define BB_WATCHDOG_DUMP 3
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
#define PCIE_ACCESS_DUMP 4
#endif
#include "mp_dev.h"
#ifdef HIF_CE_LOG_INFO
#include "qdf_hang_event_notifier.h"
#endif

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290) || \
	defined(QCA_WIFI_QCA6018)) && !defined(QCA_WIFI_SUPPORT_SRNG)
#define QCA_WIFI_SUPPORT_SRNG
#endif

/* Forward references */
QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix EV118783: poll to check whether a BMI response has arrived,
 * rather than only waiting for the interrupt, which may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC  1000

#ifdef CONFIG_BYPASS_QMI
#define BYPASS_QMI 1
#else
#define BYPASS_QMI 0
#endif

#ifdef ENABLE_10_4_FW_HDR
#if (ENABLE_10_4_FW_HDR == 1)
#define WDI_IPA_SERVICE_GROUP 5
#define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
#define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
#define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
#endif /* ENABLE_10_4_FW_HDR == 1 */
#endif /* ENABLE_10_4_FW_HDR */

QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn);
static void hif_config_rri_on_ddr(struct hif_softc *scn);

/**
 * hif_target_access_log_dump() - dump access log
 *
 * Dump the recorded target register access log.
 *
 * Return: n/a
 */
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
static void hif_target_access_log_dump(void)
{
	hif_target_dump_access_log();
}
#endif

void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
		      uint8_t cmd_id, bool start)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	switch (cmd_id) {
	case AGC_DUMP:
		if (start)
			priv_start_agc(scn);
		else
			priv_dump_agc(scn);
		break;
	case CHANINFO_DUMP:
		if (start)
			priv_start_cap_chaninfo(scn);
		else
			priv_dump_chaninfo(scn);
		break;
	case BB_WATCHDOG_DUMP:
		priv_dump_bbwatchdog(scn);
		break;
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	case PCIE_ACCESS_DUMP:
		hif_target_access_log_dump();
		break;
#endif
	default:
		HIF_ERROR("%s: Invalid htc dump command", __func__);
		break;
	}
}

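/**
 * ce_poll_timeout() - CE poll timer callback
 * @arg: opaque pointer to the CE_state being polled
 *
 * Services the copy engine and re-arms the poll timer, as long as
 * polling is still enabled for this CE (timer_inited is true).
 *
 * Return: None
 */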
static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;

	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}

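/**
 * roundup_pwr2() - round a value up to the next power of 2
 * @n: value to round up
 *
 * For example, roundup_pwr2(1000) returns 1024, while an exact
 * power of 2 is returned unchanged.
 *
 * Return: the smallest power of 2 >= @n, or 0 (after asserting)
 * if @n is too large
 */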
static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	QDF_ASSERT(0); /* n too large */
	return 0;
}

#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40

static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
#ifdef QCA_WIFI_3_0_ADRASTEA
	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
#endif
};

#ifdef QCN7605_SUPPORT
static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

#ifdef WLAN_FEATURE_EPPING
static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
 * mapping of HTC services to HIF pipes.
 */
/*
 * This authoritative table defines Copy Engine configuration and the mapping
 * of services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
 ============================================================================
   Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
              |                      |      | ctio | Size     | Frequency
              |                      |      | n    |          |
 ============================================================================
   tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
   descriptor |                      |      |      | O(100B)  | and regular
   download   |                      |      |      |          |
 ----------------------------------------------------------------------------
   rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
   indication |                      |      |      | O(10B)   | regular
   upload     |                      |      |      |          |
 ----------------------------------------------------------------------------
   MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
   upload     |                      |      |      | O(1000B) | (frequent
   e.g. noise |                      |      |      |          | during IP1.0
   packets    |                      |      |      |          | testing)
 ----------------------------------------------------------------------------
   MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
   download   |                      |      |      | O(1000B) | (frequent
   e.g.       |                      |      |      |          | during IP1.0
   misdirected|                      |      |      |          | testing)
   EAPOL      |                      |      |      |          |
   packets    |                      |      |      |          |
 ----------------------------------------------------------------------------
   n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
              | DATA_VO (uplink)     |      |      |          |
 ----------------------------------------------------------------------------
   n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
              | DATA_VO (downlink)   |      |      |          |
 ----------------------------------------------------------------------------
   WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
              |                      |      |      | O(100B)  |
 ----------------------------------------------------------------------------
   WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
   messages   | (downlink)           |      |      | O(100B)  |
              |                      |      |      |          |
 ----------------------------------------------------------------------------
   n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
              | HTC_RAW_STREAMS      |      |      |          |
              | (uplink)             |      |      |          |
 ----------------------------------------------------------------------------
   n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
              | HTC_RAW_STREAMS      |      |      |          |
              | (downlink)           |      |      |          |
 ----------------------------------------------------------------------------
   diag       | none (raw CE)        | CE 7 | t<>h | 4        | Diag Window
              |                      |      |      |          | infrequent
 ============================================================================
 */

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		0,		/* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC,	/* not currently used */
		PIPEDIR_OUT,	/* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC,	/* not currently used */
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		5,
	},
#if defined(QCA_WIFI_3_0_ADRASTEA)
	{
		HTT_DATA2_MSG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		9,
	},
	{
		HTT_DATA3_MSG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		10,
	},
	{
		PACKET_LOG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		11,
	},
#endif
	/* (Additions here) */

	{	/* Must be last */
		0,
		0,
		0,
	},
};

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN = TARGET to HOST */
#if (defined(QCA_WIFI_QCA8074))
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
};
#endif

#if (defined(QCA_WIFI_QCA8074V2))
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9},
	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2},
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6018))
static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
};
#endif

#if (defined(QCA_WIFI_QCN9000))
static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
};
#endif

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN = TARGET to HOST */
#ifdef QCN7605_SUPPORT
static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
#ifdef IPA_OFFLOAD
	{ WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
#else
	{ HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
#endif
	{ PACKET_LOG_SVC, PIPEDIR_IN, 7, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif

#if (defined(QCA_WIFI_QCA6290))
#ifdef QCA_6290_AP_MODE
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6390))
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
};
#endif

static struct service_to_pipe target_service_to_ce_map_qca6490[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};

#if (defined(QCA_WIFI_QCA6750))
static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
};
#endif

static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		0,		/* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		1,
	},
	{
		HTC_RAW_STREAMS_SVC,	/* not currently used */
		PIPEDIR_OUT,	/* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC,	/* not currently used */
		PIPEDIR_IN,	/* in = DL = target -> host */
		1,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		4,
	},
#ifdef WLAN_FEATURE_FASTPATH
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		5,
	},
#else /* WLAN_FEATURE_FASTPATH */
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		1,
	},
#endif /* WLAN_FEATURE_FASTPATH */

	/* (Additions here) */

	{	/* Must be last */
		0,
		0,
		0,
	},
};

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

#ifdef WLAN_FEATURE_EPPING
static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},	/* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},	/* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},	/* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},	/* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},	/* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},	/* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},	/* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,},	/* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},	/* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},	/* in = DL = target -> host */
	{0, 0, 0,},				/* Must be last */
};

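/**
 * hif_select_epping_service_to_pipe_map() - get the epping service map
 * @tgt_svc_map_to_use: address in which the service map is returned
 * @sz_tgt_svc_map_to_use: address in which the map size is returned
 *
 * Returns the epping service-to-pipe map and its size through the
 * supplied output parameters.
 *
 * Return: None
 */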
void hif_select_epping_service_to_pipe_map(struct service_to_pipe
					   **tgt_svc_map_to_use,
					   uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
	*sz_tgt_svc_map_to_use =
			sizeof(target_service_to_ce_map_wlan_epping);
}
#endif

#ifdef QCN7605_SUPPORT
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
}
#else
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	HIF_ERROR("%s: QCN7605 not supported", __func__);
}
#endif

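/**
 * hif_select_service_to_pipe_map() - select a service-to-pipe map
 * @scn: HIF context
 * @tgt_svc_map_to_use: address in which the selected map is returned
 * @sz_tgt_svc_map_to_use: address in which the map size is returned
 *
 * Picks the service-to-pipe map that matches the current mode (epping
 * or mission) and the target type.
 *
 * Return: None
 */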
static void hif_select_service_to_pipe_map(struct hif_softc *scn,
					   struct service_to_pipe
					   **tgt_svc_map_to_use,
					   uint32_t *sz_tgt_svc_map_to_use)
{
	uint32_t mode = hif_get_conparam(scn);
	struct hif_target_info *tgt_info = &scn->target_info;

	if (QDF_IS_EPPING_ENABLED(mode)) {
		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
						      sz_tgt_svc_map_to_use);
	} else {
		switch (tgt_info->target_type) {
		default:
			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_wlan);
			break;
		case TARGET_TYPE_QCN7605:
			hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
						  sz_tgt_svc_map_to_use);
			break;
		case TARGET_TYPE_AR900B:
		case TARGET_TYPE_QCA9984:
		case TARGET_TYPE_IPQ4019:
		case TARGET_TYPE_QCA9888:
		case TARGET_TYPE_AR9888:
		case TARGET_TYPE_AR9888V2:
			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_ar900b);
			break;
		case TARGET_TYPE_QCA6290:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6290);
			break;
		case TARGET_TYPE_QCA6390:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6390);
			break;
		case TARGET_TYPE_QCA6490:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6490;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6490);
			break;
		case TARGET_TYPE_QCA6750:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6750;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6750);
			break;
		case TARGET_TYPE_QCA8074:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074);
			break;
		case TARGET_TYPE_QCA8074V2:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca8074_v2;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074_v2);
			break;
		case TARGET_TYPE_QCA6018:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca6018;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6018);
			break;
		case TARGET_TYPE_QCN9000:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qcn9000;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qcn9000);
			break;
		}
	}
}

/**
 * ce_mark_datapath() - mark a CE that serves an HTT data service
 * @ce_state: pointer to the state context of the CE
 *
 * Description:
 *   Sets the htt_rx_data or htt_tx_data attribute of the state
 *   structure if the CE serves one of the HTT DATA services.
 *
 * Return: true if the CE serves an HTT data service, false otherwise
 */
static bool ce_mark_datapath(struct CE_state *ce_state)
{
	struct service_to_pipe *svc_map;
	uint32_t map_sz, map_len;
	int i;
	bool rc = false;

	if (ce_state) {
		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
					       &map_sz);

		map_len = map_sz / sizeof(struct service_to_pipe);
		for (i = 0; i < map_len; i++) {
			if ((svc_map[i].pipenum == ce_state->id) &&
			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
				/* HTT CEs are unidirectional */
				if (svc_map[i].pipedir == PIPEDIR_IN)
					ce_state->htt_rx_data = true;
				else
					ce_state->htt_tx_data = true;
				rc = true;
			}
		}
	}
	return rc;
}

/**
 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
 * @ce_id: ce in question
 * @ring: ring state being examined
 * @type: "src_ring" or "dest_ring" string for identifying the ring
 *
 * Warns on non-zero index values.
 * Causes a kernel panic if the ring is not empty during initialization.
 */
static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
					 char *type)
{
	if (ring->write_index != 0 || ring->sw_index != 0)
		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
			  ce_id, type, ring->sw_index, ring->write_index);
	if (ring->write_index != ring->sw_index)
		QDF_BUG(0);
}

#ifdef IPA_OFFLOAD
/**
 * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @base_addr: pointer to copyengine ring base address
 * @ce_ring: copyengine instance
 * @nentries: number of entries to be allocated
 * @desc_size: ce desc size
 *
 * Return: QDF_STATUS_SUCCESS - for success
 */
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
	    !ce_srng_based(scn)) {
		if (!scn->ipa_ce_ring) {
			scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(
				scn->qdf_dev,
				nentries * desc_size + CE_DESC_RING_ALIGN);
			if (!scn->ipa_ce_ring) {
				HIF_ERROR(
				"%s: Failed to allocate memory for IPA ce ring",
				__func__);
				return QDF_STATUS_E_NOMEM;
			}
		}
		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
						  &scn->ipa_ce_ring->mem_info);
		ce_ring->base_addr_owner_space_unaligned =
						scn->ipa_ce_ring->vaddr;
	} else {
		ce_ring->base_addr_owner_space_unaligned =
			qdf_mem_alloc_consistent(scn->qdf_dev,
						 scn->qdf_dev->dev,
						 (nentries * desc_size +
						 CE_DESC_RING_ALIGN),
						 base_addr);
		if (!ce_ring->base_addr_owner_space_unaligned) {
			HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
				  __func__, CE_id);
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * ce_free_desc_ring() - Frees copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @ce_ring: copyengine instance
 * @desc_size: ce desc size
 *
 * Return: None
 */
static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
	    !ce_srng_based(scn)) {
		if (scn->ipa_ce_ring) {
			qdf_mem_shared_mem_free(scn->qdf_dev,
						scn->ipa_ce_ring);
			scn->ipa_ce_ring = NULL;
		}
		ce_ring->base_addr_owner_space_unaligned = NULL;
	} else {
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
			ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
			ce_ring->base_addr_owner_space_unaligned,
			ce_ring->base_addr_CE_space, 0);
		ce_ring->base_addr_owner_space_unaligned = NULL;
	}
}
#else
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	ce_ring->base_addr_owner_space_unaligned =
		qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					 (nentries * desc_size +
					 CE_DESC_RING_ALIGN), base_addr);
	if (!ce_ring->base_addr_owner_space_unaligned) {
		HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
			  __func__, CE_id);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
		ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
		ce_ring->base_addr_owner_space_unaligned,
		ce_ring->base_addr_CE_space, 0);
	ce_ring->base_addr_owner_space_unaligned = NULL;
}
#endif /* IPA_OFFLOAD */

/*
 * TODO: Need to explore the possibility of having this as part of a
 * target context instead of a global array.
 */
static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void);

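/**
 * ce_service_register_module() - register a CE service attach callback
 * @target_type: CE service type (CE_SVC_LEGACY or CE_SVC_SRNG)
 * @ce_attach: function that returns the ce_ops for this service type
 *
 * Return: None
 */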
void ce_service_register_module(enum ce_target_type target_type,
				struct ce_ops* (*ce_attach)(void))
{
	if (target_type < CE_MAX_TARGET_TYPE)
		ce_attach_register[target_type] = ce_attach;
}

qdf_export_symbol(ce_service_register_module);

/**
 * ce_srng_based() - Does this target use srng
 * @scn: pointer to the hif context
 *
 * Description:
 *   Checks the target type against the list of SRNG-based targets.
 *
 * Return: true if the target is SRNG based, false otherwise
 */
bool ce_srng_based(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);

	switch (tgt_info->target_type) {
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6290:
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6490:
	case TARGET_TYPE_QCA6750:
	case TARGET_TYPE_QCA6018:
	case TARGET_TYPE_QCN9000:
		return true;
	default:
		return false;
	}
	return false;
}
qdf_export_symbol(ce_srng_based);

#ifdef QCA_WIFI_SUPPORT_SRNG
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	struct ce_ops *ops = NULL;

	if (ce_srng_based(scn)) {
		if (ce_attach_register[CE_SVC_SRNG])
			ops = ce_attach_register[CE_SVC_SRNG]();
	} else if (ce_attach_register[CE_SVC_LEGACY]) {
		ops = ce_attach_register[CE_SVC_LEGACY]();
	}

	return ops;
}
#else /* QCA_WIFI_SUPPORT_SRNG */
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	if (ce_attach_register[CE_SVC_LEGACY])
		return ce_attach_register[CE_SVC_LEGACY]();

	return NULL;
}
#endif /* QCA_WIFI_SUPPORT_SRNG */

static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
		struct pld_shadow_reg_v2_cfg **shadow_config,
		int *num_shadow_registers_configured)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
			scn, shadow_config, num_shadow_registers_configured);
}

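/**
 * ce_get_desc_size() - get the CE descriptor size for a ring type
 * @scn: HIF context
 * @ring_type: CE ring type
 *
 * Return: descriptor size in bytes, as reported by the CE service
 */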
static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
					uint8_t ring_type)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_get_desc_size(ring_type);
}

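/**
 * ce_alloc_ring_state() - allocate and initialize a CE ring state
 * @CE_state: CE state that the ring belongs to
 * @ring_type: CE ring type
 * @nentries: number of ring entries (must be a power of 2)
 *
 * Allocates the ring state structure together with its
 * per-transfer-context array and the DMA-coherent descriptor ring,
 * aligning the ring base to CE_DESC_RING_ALIGN.
 *
 * Return: pointer to the ring state, or NULL on allocation failure
 */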
static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
		uint8_t ring_type, uint32_t nentries)
{
	uint32_t ce_nbytes;
	char *ptr;
	qdf_dma_addr_t base_addr;
	struct CE_ring_state *ce_ring;
	uint32_t desc_size;
	struct hif_softc *scn = CE_state->scn;

	ce_nbytes = sizeof(struct CE_ring_state)
		+ (nentries * sizeof(void *));
	ptr = qdf_mem_malloc(ce_nbytes);
	if (!ptr)
		return NULL;

	ce_ring = (struct CE_ring_state *)ptr;
	ptr += sizeof(struct CE_ring_state);
	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	ce_ring->low_water_mark_nentries = 0;
	ce_ring->high_water_mark_nentries = nentries;
	ce_ring->per_transfer_context = (void **)ptr;

	desc_size = ce_get_desc_size(scn, ring_type);

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
			       ce_ring, nentries,
			       desc_size) !=
	    QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: ring has no DMA mem",
			  __func__);
		qdf_mem_free(ce_ring);
		return NULL;
	}
	ce_ring->base_addr_CE_space_unaligned = base_addr;

	/* Correctly initialize memory to 0 to
	 * prevent garbage data from crashing the system
	 * when downloading firmware
	 */
	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
		     nentries * desc_size +
		     CE_DESC_RING_ALIGN);

	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {
		ce_ring->base_addr_CE_space =
			(ce_ring->base_addr_CE_space_unaligned +
			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);

		ce_ring->base_addr_owner_space = (void *)
			(((size_t) ce_ring->base_addr_owner_space_unaligned +
			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
	} else {
		ce_ring->base_addr_CE_space =
				ce_ring->base_addr_CE_space_unaligned;
		ce_ring->base_addr_owner_space =
				ce_ring->base_addr_owner_space_unaligned;
	}

	return ce_ring;
}

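/**
 * ce_ring_setup() - hook up a CE ring through the CE service
 * @scn: HIF context
 * @ring_type: CE ring type
 * @ce_id: CE in question
 * @ring: ring state to set up
 * @attr: CE attributes for this copy engine
 *
 * Return: status of the underlying ce_ring_setup service call
 */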
static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
			 uint32_t ce_id, struct CE_ring_state *ring,
			 struct CE_attr *attr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
						     ring, attr);
}

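/**
 * hif_ce_bus_early_suspend() - pause non-WMI copy engines for suspend
 * @scn: HIF context
 *
 * Moves every running CE except the WMI control pipes from CE_RUNNING
 * to CE_PAUSED ahead of a bus suspend.
 *
 * Return: 0 on success, pipe-mapping error code on failure
 */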
int hif_ce_bus_early_suspend(struct hif_softc *scn)
{
	uint8_t ul_pipe, dl_pipe;
	int ce_id, status, ul_is_polled, dl_is_polled;
	struct CE_state *ce_state;

	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		HIF_ERROR("%s: pipe_mapping failure", __func__);
		return status;
	}

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (ce_id == ul_pipe)
			continue;
		if (ce_id == dl_pipe)
			continue;

		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_RUNNING)
			ce_state->state = CE_PAUSED;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
	}

	return status;
}

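/**
 * hif_ce_bus_late_resume() - resume copy engines after a bus resume
 * @scn: HIF context
 *
 * Returns paused CEs to CE_RUNNING and, for CEs that went CE_PENDING
 * while suspended, flushes the pending source-ring write index to
 * hardware.
 *
 * Return: 0
 */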
int hif_ce_bus_late_resume(struct hif_softc *scn)
{
	int ce_id;
	struct CE_state *ce_state;
	int write_index = 0;
	bool index_updated;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_PENDING) {
			write_index = ce_state->src_ring->write_index;
			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
						  write_index);
			ce_state->state = CE_RUNNING;
			index_updated = true;
		} else {
			index_updated = false;
		}

		if (ce_state->state == CE_PAUSED)
			ce_state->state = CE_RUNNING;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);

		if (index_updated)
			hif_record_ce_desc_event(scn, ce_id,
						 RESUME_WRITE_INDEX_UPDATE,
						 NULL, NULL, write_index, 0);
	}

	return 0;
}

/**
 * ce_oom_recovery() - try to recover rx ce from oom condition
 * @context: CE_state of the CE with oom rx ring
 *
 * The executing work will continue to be rescheduled until
 * at least 1 descriptor is successfully posted to the rx ring.
 *
 * Return: none
 */
static void ce_oom_recovery(void *context)
{
	struct CE_state *ce_state = context;
	struct hif_softc *scn = ce_state->scn;
	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&ce_softc->pipe_info[ce_state->id];

	hif_post_recv_buffers_for_pipe(pipe_info);
}

#ifdef HIF_CE_DEBUG_DATA_BUF
/**
 * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed by
 * the CE descriptors.
 * Allocates HIF_CE_HISTORY_MAX records of CE_DEBUG_MAX_DATA_BUF_SIZE each.
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev =
	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return QDF_STATUS_E_NOMEM;

	scn->hif_ce_desc_hist.data_enable[ce_id] = true;
	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		event->data =
			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
		if (!event->data) {
			hif_err_rl("ce debug data alloc failed");
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * free_mem_ce_debug_hist_data() - Free mem of the data pointed by
 * the CE descriptors.
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: None
 */
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev =
	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return;

	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		if (event->data)
			qdf_mem_free(event->data);
		event->data = NULL;
		event = NULL;
	}
}
#endif /* HIF_CE_DEBUG_DATA_BUF */

#ifndef HIF_CE_DEBUG_DATA_DYNAMIC_BUF
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];

/**
 * alloc_mem_ce_debug_history() - Allocate CE descriptor history
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 * @src_nentries: source ce ring entries
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id,
			   uint32_t src_nentries)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

	ce_hist->hist_ev[ce_id] = hif_ce_desc_history[ce_id];
	ce_hist->enable[ce_id] = 1;

	if (src_nentries)
		alloc_mem_ce_debug_hist_data(scn, ce_id);
	else
		ce_hist->data_enable[ce_id] = false;

	return QDF_STATUS_SUCCESS;
}

/**
 * free_mem_ce_debug_history() - Free CE descriptor history
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: None
 */
static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

	ce_hist->enable[ce_id] = 0;
	if (ce_hist->data_enable[ce_id]) {
		ce_hist->data_enable[ce_id] = false;
		free_mem_ce_debug_hist_data(scn, ce_id);
	}
	ce_hist->hist_ev[ce_id] = NULL;
}
#else
static inline QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
			   uint32_t src_nentries)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
#endif /* (HIF_CONFIG_SLUB_DEBUG_ON) || (HIF_CE_DEBUG_DATA_BUF) */
#else
#if defined(HIF_CE_DEBUG_DATA_BUF)

static QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
			   uint32_t src_nentries)
{
	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));

	if (!scn->hif_ce_desc_hist.hist_ev[CE_id]) {
		scn->hif_ce_desc_hist.enable[CE_id] = 0;
		return QDF_STATUS_E_NOMEM;
	}

	scn->hif_ce_desc_hist.enable[CE_id] = 1;
	return QDF_STATUS_SUCCESS;
}

static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	struct hif_ce_desc_event *hist_ev = ce_hist->hist_ev[CE_id];

	if (!hist_ev)
		return;

	if (ce_hist->data_enable[CE_id]) {
		ce_hist->data_enable[CE_id] = false;
		free_mem_ce_debug_hist_data(scn, CE_id);
	}

	ce_hist->enable[CE_id] = 0;
	qdf_mem_free(ce_hist->hist_ev[CE_id]);
	ce_hist->hist_ev[CE_id] = NULL;
}

#else

static inline QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
			   uint32_t src_nentries)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
#endif /* HIF_CE_DEBUG_DATA_BUF */
#endif /* HIF_CE_DEBUG_DATA_DYNAMIC_BUF */
Dustin Brown2f750872018-10-17 12:16:20 -07001483
Pavankumar Nandeshwar7eddedd2018-10-25 16:57:08 +05301484#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
c_cgodavfda96ad2017-09-07 16:16:00 +05301485/**
1486 * reset_ce_debug_history() - reset the index and ce id used for dumping the
1487 * CE records on the console using sysfs.
1488 * @scn: hif scn handle
1489 *
1490 * Return: none
1491 */
1492static inline void reset_ce_debug_history(struct hif_softc *scn)
1493{
1494 struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1495	/* Initialise the CE debug history sysfs interface inputs, ce_id and
1496	 * index, and disable data storing.
1497 */
1498 ce_hist->hist_index = 0;
1499 ce_hist->hist_id = 0;
1500}
Pavankumar Nandeshwar7eddedd2018-10-25 16:57:08 +05301501#else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
Dustin Brown2f750872018-10-17 12:16:20 -07001502static inline void reset_ce_debug_history(struct hif_softc *scn) { }
Pavankumar Nandeshwar7eddedd2018-10-25 16:57:08 +05301503#endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
c_cgodavfda96ad2017-09-07 16:16:00 +05301504
Nandha Kishore Easwaran7cdaae22018-07-30 15:02:51 +05301505void ce_enable_polling(void *cestate)
1506{
1507 struct CE_state *CE_state = (struct CE_state *)cestate;
1508
1509 if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
1510 CE_state->timer_inited = true;
1511}
1512
1513void ce_disable_polling(void *cestate)
1514{
1515 struct CE_state *CE_state = (struct CE_state *)cestate;
1516
1517 if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
1518 CE_state->timer_inited = false;
1519}
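
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a CE
 * honors ce_enable_polling()/ce_disable_polling() only when
 * CE_ATTR_ENABLE_POLL is set in its attributes, so a configuration that
 * wants polled completions would look roughly like this. The ring sizes
 * are assumptions for illustration.
 *
 *	struct CE_attr attr = { 0 };
 *
 *	attr.flags |= CE_ATTR_ENABLE_POLL;
 *	attr.src_nentries = 32;
 *	attr.dest_nentries = 32;
 *	// ce_init() below then arms poll_timer and calls
 *	// ce_enable_polling() for this CE.
 */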
1520
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001521/*
1522 * Initialize a Copy Engine based on caller-supplied attributes.
1523 * This may be called once to initialize both source and destination
1524 * rings or it may be called twice for separate source and destination
1525 * initialization. It may be that only one side or the other is
1526 * initialized by software/firmware.
Houston Hoffman233e9092015-09-02 13:37:21 -07001527 *
1528 * This should be called during the initialization sequence before
1529 * interrupts are enabled, so we don't have to worry about thread safety.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001530 */
Komal Seelam644263d2016-02-22 20:45:49 +05301531struct CE_handle *ce_init(struct hif_softc *scn,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001532 unsigned int CE_id, struct CE_attr *attr)
1533{
1534 struct CE_state *CE_state;
1535 uint32_t ctrl_addr;
1536 unsigned int nentries;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001537 bool malloc_CE_state = false;
1538 bool malloc_src_ring = false;
Yun Park3fb36442017-08-17 17:37:53 -07001539 int status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001540
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301541 QDF_ASSERT(CE_id < scn->ce_count);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001542 ctrl_addr = CE_BASE_ADDRESS(CE_id);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001543 CE_state = scn->ce_id_to_state[CE_id];
1544
1545 if (!CE_state) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001546 CE_state =
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301547 (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
Madhvapathi Srirambfb01122019-01-07 09:17:29 +05301548 if (!CE_state)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001549 return NULL;
Madhvapathi Srirambfb01122019-01-07 09:17:29 +05301550
Houston Hoffman233e9092015-09-02 13:37:21 -07001551 malloc_CE_state = true;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301552 qdf_spinlock_create(&CE_state->ce_index_lock);
Houston Hoffman233e9092015-09-02 13:37:21 -07001553
1554 CE_state->id = CE_id;
1555 CE_state->ctrl_addr = ctrl_addr;
1556 CE_state->state = CE_RUNNING;
1557 CE_state->attr_flags = attr->flags;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001558 }
1559 CE_state->scn = scn;
Aditya Sathish80bbaef2018-10-25 10:02:05 +05301560 CE_state->service = ce_engine_service_reg;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001561
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301562 qdf_atomic_init(&CE_state->rx_pending);
Jeff Johnson8d639a02019-03-18 09:51:11 -07001563 if (!attr) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001564 /* Already initialized; caller wants the handle */
1565 return (struct CE_handle *)CE_state;
1566 }
1567
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001568 if (CE_state->src_sz_max)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301569 QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001570 else
1571 CE_state->src_sz_max = attr->src_sz_max;
1572
c_cgodavfda96ad2017-09-07 16:16:00 +05301573 ce_init_ce_desc_event_log(scn, CE_id,
1574 attr->src_nentries + attr->dest_nentries);
Houston Hoffman68e837e2015-12-04 12:57:24 -08001575
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001576 /* source ring setup */
1577 nentries = attr->src_nentries;
1578 if (nentries) {
1579 struct CE_ring_state *src_ring;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001580
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001581 nentries = roundup_pwr2(nentries);
1582 if (CE_state->src_ring) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301583 QDF_ASSERT(CE_state->src_ring->nentries == nentries);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001584 } else {
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301585 src_ring = CE_state->src_ring =
1586 ce_alloc_ring_state(CE_state,
1587 CE_RING_SRC,
1588 nentries);
1589 if (!src_ring) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001590 /* cannot allocate src ring. If the
1591				 * CE_state was allocated locally, free
1592				 * CE_state and return error.
1593 */
1594 HIF_ERROR("%s: src ring has no mem", __func__);
1595 if (malloc_CE_state) {
1596 /* allocated CE_state locally */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301597 qdf_mem_free(CE_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001598 malloc_CE_state = false;
1599 }
1600 return NULL;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001601 }
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001602			/* src ring allocated successfully; mark that it was
1603			 * allocated locally
1604 */
1605 malloc_src_ring = true;
1606
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001607 /*
1608 * Also allocate a shadow src ring in
1609 * regular mem to use for faster access.
1610 */
1611 src_ring->shadow_base_unaligned =
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301612 qdf_mem_malloc(nentries *
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001613 sizeof(struct CE_src_desc) +
1614 CE_DESC_RING_ALIGN);
Madhvapathi Srirambfb01122019-01-07 09:17:29 +05301615 if (!src_ring->shadow_base_unaligned)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001616 goto error_no_dma_mem;
Madhvapathi Srirambfb01122019-01-07 09:17:29 +05301617
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001618 src_ring->shadow_base = (struct CE_src_desc *)
1619 (((size_t) src_ring->shadow_base_unaligned +
1620 CE_DESC_RING_ALIGN - 1) &
1621 ~(CE_DESC_RING_ALIGN - 1));
1622
Yun Park3fb36442017-08-17 17:37:53 -07001623 status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
1624 src_ring, attr);
1625 if (status < 0)
Houston Hoffman4411ad42016-03-14 21:12:04 -07001626 goto error_target_access;
Houston Hoffmanf789c662016-04-12 15:39:04 -07001627
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301628 ce_ring_test_initial_indexes(CE_id, src_ring,
1629 "src_ring");
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001630 }
1631 }
1632
1633 /* destination ring setup */
1634 nentries = attr->dest_nentries;
1635 if (nentries) {
1636 struct CE_ring_state *dest_ring;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001637
1638 nentries = roundup_pwr2(nentries);
1639 if (CE_state->dest_ring) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301640 QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001641 } else {
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301642 dest_ring = CE_state->dest_ring =
1643 ce_alloc_ring_state(CE_state,
1644 CE_RING_DEST,
1645 nentries);
1646 if (!dest_ring) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001647 /* cannot allocate dst ring. If the CE_state
1648			 * or src ring was allocated locally, free
1649			 * CE_state and the src ring, and return error.
1650 */
1651 HIF_ERROR("%s: dest ring has no mem",
1652 __func__);
Poddar, Siddarth55d6da02017-03-31 18:42:54 +05301653 goto error_no_dma_mem;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001654 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001655
Yun Park3fb36442017-08-17 17:37:53 -07001656 status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001657 dest_ring, attr);
Yun Park3fb36442017-08-17 17:37:53 -07001658 if (status < 0)
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301659 goto error_target_access;
Houston Hoffman47808172016-05-06 10:04:21 -07001660
1661 ce_ring_test_initial_indexes(CE_id, dest_ring,
1662 "dest_ring");
1663
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301664 /* For srng based target, init status ring here */
1665 if (ce_srng_based(CE_state->scn)) {
1666 CE_state->status_ring =
1667 ce_alloc_ring_state(CE_state,
1668 CE_RING_STATUS,
1669 nentries);
Jeff Johnson8d639a02019-03-18 09:51:11 -07001670 if (!CE_state->status_ring) {
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301671				/* Allocation failed. Cleanup */
1672 qdf_mem_free(CE_state->dest_ring);
1673 if (malloc_src_ring) {
1674 qdf_mem_free
1675 (CE_state->src_ring);
1676 CE_state->src_ring = NULL;
1677 malloc_src_ring = false;
1678 }
1679 if (malloc_CE_state) {
1680 /* allocated CE_state locally */
1681 scn->ce_id_to_state[CE_id] =
1682 NULL;
1683 qdf_mem_free(CE_state);
1684 malloc_CE_state = false;
1685 }
Houston Hoffman4411ad42016-03-14 21:12:04 -07001686
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301687 return NULL;
1688 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001689
Yun Park3fb36442017-08-17 17:37:53 -07001690 status = ce_ring_setup(scn, CE_RING_STATUS,
1691 CE_id, CE_state->status_ring,
1692 attr);
1693 if (status < 0)
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301694 goto error_target_access;
1695
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001696 }
Houston Hoffman31b25ec2016-09-19 13:12:30 -07001697
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001698 /* epping */
1699 /* poll timer */
Nandha Kishore Easwaran7cdaae22018-07-30 15:02:51 +05301700 if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301701 qdf_timer_init(scn->qdf_dev,
Nandha Kishore Easwaran7cdaae22018-07-30 15:02:51 +05301702 &CE_state->poll_timer,
1703 ce_poll_timeout,
1704 CE_state,
1705 QDF_TIMER_TYPE_WAKE_APPS);
1706 ce_enable_polling(CE_state);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301707 qdf_timer_mod(&CE_state->poll_timer,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001708 CE_POLL_TIMEOUT);
1709 }
1710 }
1711 }
1712
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301713 if (!ce_srng_based(scn)) {
1714 /* Enable CE error interrupts */
1715 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1716 goto error_target_access;
1717 CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
1718 if (Q_TARGET_ACCESS_END(scn) < 0)
1719 goto error_target_access;
1720 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001721
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08001722 qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
1723 ce_oom_recovery, CE_state);
1724
Houston Hoffmanc7d54292016-04-13 18:55:37 -07001725 /* update the htt_data attribute */
1726 ce_mark_datapath(CE_state);
Houston Hoffmanb01db182017-03-13 14:38:09 -07001727 scn->ce_id_to_state[CE_id] = CE_state;
Houston Hoffmanc7d54292016-04-13 18:55:37 -07001728
Venkata Sharath Chandra Manchalaec01bbc2019-04-25 13:31:34 -07001729 alloc_mem_ce_debug_history(scn, CE_id, attr->src_nentries);
c_cgodavfda96ad2017-09-07 16:16:00 +05301730
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001731 return (struct CE_handle *)CE_state;
1732
Houston Hoffman4411ad42016-03-14 21:12:04 -07001733error_target_access:
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001734error_no_dma_mem:
1735 ce_fini((struct CE_handle *)CE_state);
1736 return NULL;
1737}
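
/*
 * Illustrative usage sketch for ce_init() (hypothetical caller, not part
 * of this file). The CE id and ring sizes are assumptions, and "scn" is
 * assumed to be the caller's hif_softc handle; real values come from the
 * host CE configuration table. A NULL return means allocation or target
 * access failed and ce_init() has already cleaned up via ce_fini().
 *
 *	struct CE_attr attr = { 0 };
 *	struct CE_handle *ce_hdl;
 *
 *	attr.src_nentries = 32;		// rounded up to a power of 2
 *	attr.dest_nentries = 0;		// source-only pipe in this sketch
 *	attr.src_sz_max = 2048;
 *
 *	ce_hdl = ce_init(scn, 3, &attr);
 *	if (!ce_hdl)
 *		return QDF_STATUS_E_NOMEM;
 */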
1738
Aditya Sathish80bbaef2018-10-25 10:02:05 +05301739/**
1740 * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs
1741 * @hif_ctx: HIF Context
1742 *
1743 * API to check if polling is enabled on all CEs. Returns true only when
1744 * every CE that has a destination ring is configured for polling.
1745 *
1746 * Return: bool
1747 */
1748bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
1749{
1750 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1751 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1752 struct CE_attr *attr;
1753 int id;
1754
1755 for (id = 0; id < scn->ce_count; id++) {
1756 attr = &hif_state->host_ce_config[id];
1757 if (attr && (attr->dest_nentries) &&
1758 !(attr->flags & CE_ATTR_ENABLE_POLL))
1759 return false;
1760 }
1761 return true;
1762}
1763qdf_export_symbol(hif_is_polled_mode_enabled);
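
/*
 * Illustrative check (hypothetical caller): since the query returns true
 * only when every CE with a destination ring carries CE_ATTR_ENABLE_POLL,
 * a caller can branch once at init time. The helper names are
 * assumptions for illustration.
 *
 *	if (hif_is_polled_mode_enabled(hif_ctx))
 *		start_poll_timers(scn);		// hypothetical helper
 *	else
 *		register_ce_interrupts(scn);	// hypothetical helper
 */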
1764
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001765#ifdef WLAN_FEATURE_FASTPATH
1766/**
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001767 * hif_enable_fastpath() - flag that fastpath mode is enabled
1768 * @hif_ctx: HIF context
1769 *
1770 * For use in data path
1771 *
1772 * Return: void
1773 */
1774void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
1775{
1776 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1777
Houston Hoffmand63cd742016-12-05 11:59:56 -08001778 if (ce_srng_based(scn)) {
1779 HIF_INFO("%s, srng rings do not support fastpath", __func__);
1780 return;
1781 }
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -08001782 HIF_DBG("%s, Enabling fastpath mode", __func__);
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001783 scn->fastpath_mode_on = true;
1784}
1785
1786/**
1787 * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
1788 * @hif_ctx: HIF Context
1789 *
1790 * For use in data path to skip HTC
1791 *
1792 * Return: bool
1793 */
1794bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
1795{
1796 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1797
1798 return scn->fastpath_mode_on;
1799}
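
/*
 * Illustrative fastpath toggle (hypothetical caller): enable fastpath
 * before the datapath starts, then branch on the query. On srng-based
 * targets hif_enable_fastpath() is a no-op, so the query still returns
 * false there.
 *
 *	hif_enable_fastpath(hif_ctx);
 *	if (hif_is_fastpath_mode_enabled(hif_ctx))
 *		setup_htt_fastpath_tx(hif_ctx);	// hypothetical helper
 */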
1800
Nandha Kishore Easwaran7cdaae22018-07-30 15:02:51 +05301801/**
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001802 * hif_get_ce_handle - API to get CE handle for FastPath mode
1803 * @hif_ctx: HIF Context
1804 * @id: CopyEngine Id
1805 *
1806 * API to return CE handle for fastpath mode
1807 *
1808 * Return: CE handle corresponding to @id
1809 */
1810void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
1811{
1812 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1813
1814 return scn->ce_id_to_state[id];
1815}
Aditya Sathish80bbaef2018-10-25 10:02:05 +05301816qdf_export_symbol(hif_get_ce_handle);
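
/*
 * Illustrative lookup (hypothetical caller): fetch the CE state for a
 * datapath pipe once and cache it, avoiding per-packet pipe lookups.
 * The CE id is an assumption for illustration.
 *
 *	struct CE_state *ce_tx = hif_get_ce_handle(hif_ctx, 4);
 */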
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001817
1818/**
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001819 * ce_h2t_tx_ce_cleanup() - Placeholder function for H2T CE cleanup.
1820 * No processing is required inside this function.
1821 * @ce_hdl: Copy engine handle
1822 * Using an assert, this function makes sure that
1823 * the TX CE has been processed completely.
Houston Hoffman9a831ef2015-09-03 14:42:40 -07001824 *
1825 * This is called while dismantling CE structures. No other thread
Jeff Johnson1002ca52018-05-12 11:29:24 -07001826 * should be using these structures while dismantling is occurring,
Houston Hoffman9a831ef2015-09-03 14:42:40 -07001827 * therefore no locking is needed.
1828 *
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001829 * Return: none
1830 */
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001831void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001832{
1833 struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1834 struct CE_ring_state *src_ring = ce_state->src_ring;
Komal Seelam644263d2016-02-22 20:45:49 +05301835 struct hif_softc *sc = ce_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001836 uint32_t sw_index, write_index;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001837
Houston Hoffman85925072016-05-06 17:02:18 -07001838 if (hif_is_nss_wifi_enabled(sc))
1839 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001840
Houston Hoffmanc7d54292016-04-13 18:55:37 -07001841 if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -08001842 HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
Houston Hoffman85925072016-05-06 17:02:18 -07001843 __func__, __LINE__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001844 sw_index = src_ring->sw_index;
1845 write_index = src_ring->sw_index;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001846
1847 /* At this point Tx CE should be clean */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301848 qdf_assert_always(sw_index == write_index);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001849 }
1850}
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001851
1852/**
1853 * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
1854 * @ce_hdl: Handle to CE
1855 *
1856 * These buffers are never allocated on the fly, but
1857 * are allocated only once during HIF start and freed
1858 * only once during HIF stop.
1859 * NOTE:
1860 * The assumption here is there is no in-flight DMA in progress
1861 * currently, so that buffers can be freed up safely.
1862 *
1863 * Return: NONE
1864 */
1865void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
1866{
1867 struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1868 struct CE_ring_state *dst_ring = ce_state->dest_ring;
1869 qdf_nbuf_t nbuf;
1870 int i;
1871
Houston Hoffman7fe51b12016-11-14 18:01:05 -08001872 if (ce_state->scn->fastpath_mode_on == false)
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001873 return;
Houston Hoffman7fe51b12016-11-14 18:01:05 -08001874
1875 if (!ce_state->htt_rx_data)
1876 return;
1877
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001878 /*
1879	 * This applies when fastpath_mode is on, for the datapath CEs. Unlike
1880	 * other CEs, this CE is kept completely full: it does not leave one
1881	 * blank space to distinguish between an empty and a full queue, so
1882	 * free all the entries.
1883 */
1884 for (i = 0; i < dst_ring->nentries; i++) {
1885 nbuf = dst_ring->per_transfer_context[i];
1886
1887 /*
1888 * The reasons for doing this check are:
1889 * 1) Protect against calling cleanup before allocating buffers
1890 * 2) In a corner case, FASTPATH_mode_on may be set, but we
1891 * could have a partially filled ring, because of a memory
1892 * allocation failure in the middle of allocating ring.
1893 * This check accounts for that case, checking
1894 * fastpath_mode_on flag or started flag would not have
1895 * covered that case. This is not in performance path,
1896 * so OK to do this.
1897 */
Houston Hoffman1c728302017-03-10 16:58:49 -08001898 if (nbuf) {
1899 qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
1900 QDF_DMA_FROM_DEVICE);
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001901 qdf_nbuf_free(nbuf);
Houston Hoffman1c728302017-03-10 16:58:49 -08001902 }
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001903 }
1904}
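
/*
 * Sizing note (worked example, values assumed): a regular CE ring keeps
 * one slot empty, so dest_nentries = 512 means at most 511 buffers are
 * queued. A fastpath HTT Rx CE keeps all 512 slots filled, which is why
 * the loop above walks dst_ring->nentries entries rather than
 * nentries - 1.
 */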
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001905
1906/**
1907 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
1908 * @scn: HIF handle
1909 *
1910 * Datapath Rx CEs are a special case, where we reuse all the message buffers.
1911 * Hence we have to post all the entries in the pipe, even in the beginning,
1912 * unlike other CE pipes where one less than dest_nentries is filled in
1913 * the beginning.
1914 *
1915 * Return: None
1916 */
1917static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1918{
1919 int pipe_num;
1920 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1921
1922 if (scn->fastpath_mode_on == false)
1923 return;
1924
1925 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1926 struct HIF_CE_pipe_info *pipe_info =
1927 &hif_state->pipe_info[pipe_num];
1928 struct CE_state *ce_state =
1929 scn->ce_id_to_state[pipe_info->pipe_num];
1930
1931 if (ce_state->htt_rx_data)
1932 atomic_inc(&pipe_info->recv_bufs_needed);
1933 }
1934}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001935#else
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001936static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001937{
1938}
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001939
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001940static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001941{
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001942 return false;
1943}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001944#endif /* WLAN_FEATURE_FASTPATH */
1945
1946void ce_fini(struct CE_handle *copyeng)
1947{
1948 struct CE_state *CE_state = (struct CE_state *)copyeng;
1949 unsigned int CE_id = CE_state->id;
Komal Seelam644263d2016-02-22 20:45:49 +05301950 struct hif_softc *scn = CE_state->scn;
Kiran Venkatappaae1a3702017-12-29 21:08:10 +05301951 uint32_t desc_size;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001952
Balamurugan Mahalingamf6d30352018-01-31 16:17:24 +05301953 bool inited = CE_state->timer_inited;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001954 CE_state->state = CE_UNUSED;
1955 scn->ce_id_to_state[CE_id] = NULL;
Balamurugan Mahalingamf6d30352018-01-31 16:17:24 +05301956 /* Set the flag to false first to stop processing in ce_poll_timeout */
Nandha Kishore Easwaran7cdaae22018-07-30 15:02:51 +05301957 ce_disable_polling(CE_state);
1958
Dhanashri Atre991ee4d2017-05-03 19:03:10 -07001959 qdf_lro_deinit(CE_state->lro_data);
1960
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001961 if (CE_state->src_ring) {
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001962 /* Cleanup the datapath Tx ring */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001963 ce_h2t_tx_ce_cleanup(copyeng);
1964
Kiran Venkatappaae1a3702017-12-29 21:08:10 +05301965 desc_size = ce_get_desc_size(scn, CE_RING_SRC);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001966 if (CE_state->src_ring->shadow_base_unaligned)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301967 qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001968 if (CE_state->src_ring->base_addr_owner_space_unaligned)
Sravan Kumar Kairam58e0adf2018-02-27 18:37:40 +05301969 ce_free_desc_ring(scn, CE_state->id,
1970 CE_state->src_ring,
1971 desc_size);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301972 qdf_mem_free(CE_state->src_ring);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001973 }
1974 if (CE_state->dest_ring) {
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001975 /* Cleanup the datapath Rx ring */
1976 ce_t2h_msg_ce_cleanup(copyeng);
1977
Kiran Venkatappaae1a3702017-12-29 21:08:10 +05301978 desc_size = ce_get_desc_size(scn, CE_RING_DEST);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001979 if (CE_state->dest_ring->base_addr_owner_space_unaligned)
Sravan Kumar Kairam58e0adf2018-02-27 18:37:40 +05301980 ce_free_desc_ring(scn, CE_state->id,
1981 CE_state->dest_ring,
1982 desc_size);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301983 qdf_mem_free(CE_state->dest_ring);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001984
1985 /* epping */
Balamurugan Mahalingamf6d30352018-01-31 16:17:24 +05301986 if (inited) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301987 qdf_timer_free(&CE_state->poll_timer);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001988 }
1989 }
Houston Hoffman31b25ec2016-09-19 13:12:30 -07001990 if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301991 /* Cleanup the datapath Tx ring */
1992 ce_h2t_tx_ce_cleanup(copyeng);
1993
1994 if (CE_state->status_ring->shadow_base_unaligned)
1995 qdf_mem_free(
1996 CE_state->status_ring->shadow_base_unaligned);
1997
Kiran Venkatappaae1a3702017-12-29 21:08:10 +05301998 desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301999 if (CE_state->status_ring->base_addr_owner_space_unaligned)
Sravan Kumar Kairam58e0adf2018-02-27 18:37:40 +05302000 ce_free_desc_ring(scn, CE_state->id,
2001 CE_state->status_ring,
2002 desc_size);
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05302003 qdf_mem_free(CE_state->status_ring);
2004 }
Houston Hoffman03f46572016-12-12 12:53:56 -08002005
c_cgodavfda96ad2017-09-07 16:16:00 +05302006 free_mem_ce_debug_history(scn, CE_id);
2007 reset_ce_debug_history(scn);
2008 ce_deinit_ce_desc_event_log(scn, CE_id);
2009
Houston Hoffman03f46572016-12-12 12:53:56 -08002010 qdf_spinlock_destroy(&CE_state->ce_index_lock);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302011 qdf_mem_free(CE_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002012}
2013
Komal Seelam5584a7c2016-02-24 19:22:48 +05302014void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002015{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302016 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002017
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302018 qdf_mem_zero(&hif_state->msg_callbacks_pending,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002019 sizeof(hif_state->msg_callbacks_pending));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302020 qdf_mem_zero(&hif_state->msg_callbacks_current,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002021 sizeof(hif_state->msg_callbacks_current));
2022}
2023
2024/* Send the first nbytes bytes of the buffer */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302025QDF_STATUS
Komal Seelam5584a7c2016-02-24 19:22:48 +05302026hif_send_head(struct hif_opaque_softc *hif_ctx,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002027 uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302028 qdf_nbuf_t nbuf, unsigned int data_attr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002029{
Komal Seelam644263d2016-02-22 20:45:49 +05302030 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05302031 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002032 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
2033 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
2034 int bytes = nbytes, nfrags = 0;
2035 struct ce_sendlist sendlist;
2036 int status, i = 0;
2037 unsigned int mux_id = 0;
2038
Santosh Anbudbfae9b2018-07-12 15:40:49 +05302039 if (nbytes > qdf_nbuf_len(nbuf)) {
2040 HIF_ERROR("%s: nbytes:%d nbuf_len:%d", __func__, nbytes,
2041 (uint32_t)qdf_nbuf_len(nbuf));
2042 QDF_ASSERT(0);
2043 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002044
2045 transfer_id =
2046 (mux_id & MUX_ID_MASK) |
2047 (transfer_id & TRANSACTION_ID_MASK);
2048 data_attr &= DESC_DATA_FLAG_MASK;
2049 /*
2050 * The common case involves sending multiple fragments within a
2051 * single download (the tx descriptor and the tx frame header).
2052 * So, optimize for the case of multiple fragments by not even
2053 * checking whether it's necessary to use a sendlist.
2054 * The overhead of using a sendlist for a single buffer download
2055 * is not a big deal, since it happens rarely (for WMI messages).
2056 */
2057 ce_sendlist_init(&sendlist);
2058 do {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302059 qdf_dma_addr_t frag_paddr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002060 int frag_bytes;
2061
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302062 frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
2063 frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002064 /*
2065 * Clear the packet offset for all but the first CE desc.
2066 */
2067 if (i++ > 0)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302068 data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002069
2070 status = ce_sendlist_buf_add(&sendlist, frag_paddr,
2071 frag_bytes >
2072 bytes ? bytes : frag_bytes,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302073 qdf_nbuf_get_frag_is_wordstream
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002074 (nbuf,
2075 nfrags) ? 0 :
2076 CE_SEND_FLAG_SWAP_DISABLE,
2077 data_attr);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302078 if (status != QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002079 HIF_ERROR("%s: error, frag_num %d larger than limit",
2080 __func__, nfrags);
2081 return status;
2082 }
2083 bytes -= frag_bytes;
2084 nfrags++;
2085 } while (bytes > 0);
2086
2087 /* Make sure we have resources to handle this request */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302088 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002089 if (pipe_info->num_sends_allowed < nfrags) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302090 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002091 ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302092 return QDF_STATUS_E_RESOURCES;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002093 }
2094 pipe_info->num_sends_allowed -= nfrags;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302095 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002096
Jeff Johnson8d639a02019-03-18 09:51:11 -07002097 if (qdf_unlikely(!ce_hdl)) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002098 HIF_ERROR("%s: error CE handle is null", __func__);
2099 return A_ERROR;
2100 }
2101
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302102 QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302103 DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
Nandha Kishore Easwarane43583f2017-05-15 21:01:13 +05302104 QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
2105 sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002106 status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302107 QDF_ASSERT(status == QDF_STATUS_SUCCESS);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002108
2109 return status;
2110}
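
/*
 * Illustrative send (hypothetical caller): download a frame on a TX
 * pipe; the fragments of nbuf are gathered into one sendlist internally.
 * The pipe id and transfer id are assumptions for illustration, and on
 * failure the caller still owns nbuf in this sketch.
 *
 *	QDF_STATUS status;
 *
 *	status = hif_send_head(hif_ctx, 4, transfer_id,
 *			       qdf_nbuf_len(nbuf), nbuf, 0);
 *	if (status != QDF_STATUS_SUCCESS)
 *		qdf_nbuf_free(nbuf);
 */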
2111
Komal Seelam5584a7c2016-02-24 19:22:48 +05302112void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
2113 int force)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002114{
Komal Seelam644263d2016-02-22 20:45:49 +05302115 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302116 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Komal Seelam644263d2016-02-22 20:45:49 +05302117
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002118 if (!force) {
2119 int resources;
2120 /*
2121 * Decide whether to actually poll for completions, or just
2122 * wait for a later chance. If there seem to be plenty of
2123 * resources left, then just wait, since checking involves
2124 * reading a CE register, which is a relatively expensive
2125 * operation.
2126 */
Komal Seelam644263d2016-02-22 20:45:49 +05302127 resources = hif_get_free_queue_number(hif_ctx, pipe);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002128 /*
2129 * If at least 50% of the total resources are still available,
2130 * don't bother checking again yet.
2131 */
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002132 if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
2133 1))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002134 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002135 }
Houston Hoffman56e0d702016-05-05 17:48:06 -07002136#if ATH_11AC_TXCOMPACT
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002137 ce_per_engine_servicereap(scn, pipe);
2138#else
2139 ce_per_engine_service(scn, pipe);
2140#endif
2141}
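
/*
 * Threshold arithmetic (worked example, values assumed): with
 * src_nentries = 32, the "resources > (src_nentries >> 1)" check above
 * skips completion reaping while more than 16 send slots remain, so the
 * relatively expensive CE register read happens only once the ring is at
 * least half consumed.
 */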
2142
Komal Seelam5584a7c2016-02-24 19:22:48 +05302143uint16_t
2144hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002145{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302146 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002147 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
2148 uint16_t rv;
2149
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302150 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002151 rv = pipe_info->num_sends_allowed;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302152 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002153 return rv;
2154}
2155
2156/* Called by lower (CE) layer when a send to Target completes. */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002157static void
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002158hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302159 void *transfer_context, qdf_dma_addr_t CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002160 unsigned int nbytes, unsigned int transfer_id,
2161 unsigned int sw_index, unsigned int hw_index,
2162 unsigned int toeplitz_hash_result)
2163{
2164 struct HIF_CE_pipe_info *pipe_info =
2165 (struct HIF_CE_pipe_info *)ce_context;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002166 unsigned int sw_idx = sw_index, hw_idx = hw_index;
Houston Hoffman85118512015-09-28 14:17:11 -07002167 struct hif_msg_callbacks *msg_callbacks =
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05302168 &pipe_info->pipe_callbacks;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002169
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002170 do {
2171 /*
Houston Hoffman85118512015-09-28 14:17:11 -07002172 * The upper layer callback will be triggered
2173		 * when the last fragment is completed.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002174 */
Rakesh Pillai48633522019-11-15 15:07:53 +05302175 if (transfer_context != CE_SENDLIST_ITEM_CTXT)
2176 msg_callbacks->txCompletionHandler(
2177 msg_callbacks->Context,
2178 transfer_context, transfer_id,
2179 toeplitz_hash_result);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002180
Pavankumar Nandeshwar5bdd94b2018-09-05 18:16:21 +05302181 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
Houston Hoffman85118512015-09-28 14:17:11 -07002182 pipe_info->num_sends_allowed++;
Pavankumar Nandeshwar5bdd94b2018-09-05 18:16:21 +05302183 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002184 } while (ce_completed_send_next(copyeng,
2185 &ce_context, &transfer_context,
2186 &CE_data, &nbytes, &transfer_id,
2187 &sw_idx, &hw_idx,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302188 &toeplitz_hash_result) == QDF_STATUS_SUCCESS);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002189}
2190
Houston Hoffman910c6262015-09-28 12:56:25 -07002191/**
2192 * hif_ce_do_recv(): send message from copy engine to upper layers
2193 * @msg_callbacks: structure containing callback and callback context
2194 * @netbuff: skb containing message
2195 * @nbytes: number of bytes in the message
2196 * @pipe_info: used for the pipe_number info
2197 *
Jeff Johnsondc9c5592018-05-06 15:40:42 -07002198 * Checks the packet length, configures the length in the netbuff,
Houston Hoffman910c6262015-09-28 12:56:25 -07002199 * and calls the upper layer callback.
2200 *
2201 * return: None
2202 */
2203static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302204 qdf_nbuf_t netbuf, int nbytes,
Houston Hoffman910c6262015-09-28 12:56:25 -07002205 struct HIF_CE_pipe_info *pipe_info) {
2206 if (nbytes <= pipe_info->buf_sz) {
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302207 qdf_nbuf_set_pktlen(netbuf, nbytes);
Houston Hoffman910c6262015-09-28 12:56:25 -07002208 msg_callbacks->
2209 rxCompletionHandler(msg_callbacks->Context,
2210 netbuf, pipe_info->pipe_num);
2211 } else {
Jeff Johnsonb9450212017-09-18 10:12:38 -07002212 HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d",
Houston Hoffman910c6262015-09-28 12:56:25 -07002213 __func__, netbuf, nbytes);
Houston Hoffman1c728302017-03-10 16:58:49 -08002214
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302215 qdf_nbuf_free(netbuf);
Houston Hoffman910c6262015-09-28 12:56:25 -07002216 }
2217}
2218
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002219/* Called by lower (CE) layer when data is received from the Target. */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002220static void
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002221hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302222 void *transfer_context, qdf_dma_addr_t CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002223 unsigned int nbytes, unsigned int transfer_id,
2224 unsigned int flags)
2225{
2226 struct HIF_CE_pipe_info *pipe_info =
2227 (struct HIF_CE_pipe_info *)ce_context;
2228 struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
Houston Hoffman18c7fc52015-09-02 11:44:42 -07002229 struct CE_state *ce_state = (struct CE_state *) copyeng;
Komal Seelam644263d2016-02-22 20:45:49 +05302230 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Yue Maac6b2752019-05-08 17:17:12 -07002231 struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);
Houston Hoffman910c6262015-09-28 12:56:25 -07002232 struct hif_msg_callbacks *msg_callbacks =
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05302233 &pipe_info->pipe_callbacks;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002234
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002235 do {
Yue Maac6b2752019-05-08 17:17:12 -07002236 hif_pm_runtime_mark_last_busy(hif_ctx);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302237 qdf_nbuf_unmap_single(scn->qdf_dev,
2238 (qdf_nbuf_t) transfer_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302239 QDF_DMA_FROM_DEVICE);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002240
Houston Hoffman910c6262015-09-28 12:56:25 -07002241 atomic_inc(&pipe_info->recv_bufs_needed);
2242 hif_post_recv_buffers_for_pipe(pipe_info);
Komal Seelam6ee55902016-04-11 17:11:07 +05302243 if (scn->target_status == TARGET_STATUS_RESET)
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302244 qdf_nbuf_free(transfer_context);
Houston Hoffman49794a32015-12-21 12:14:56 -08002245 else
2246 hif_ce_do_recv(msg_callbacks, transfer_context,
Houston Hoffman9c0f80a2015-09-28 18:36:36 -07002247 nbytes, pipe_info);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002248
2249		/* Set up force_break flag if the number of receives reaches
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002250 * MAX_NUM_OF_RECEIVES
2251 */
Houston Hoffman5bf441a2015-09-02 11:52:10 -07002252 ce_state->receive_count++;
Houston Hoffman05652722016-04-29 16:58:59 -07002253 if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
Houston Hoffman18c7fc52015-09-02 11:44:42 -07002254 ce_state->force_break = 1;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002255 break;
2256 }
2257 } while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
2258 &CE_data, &nbytes, &transfer_id,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302259 &flags) == QDF_STATUS_SUCCESS);
Houston Hoffmanf4607852015-12-17 17:14:40 -08002260
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002261}
2262
2263/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
2264
2265void
Komal Seelam5584a7c2016-02-24 19:22:48 +05302266hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002267 struct hif_msg_callbacks *callbacks)
2268{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302269 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002270
2271#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
2272 spin_lock_init(&pcie_access_log_lock);
2273#endif
2274 /* Save callbacks for later installation */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302275 qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002276 sizeof(hif_state->msg_callbacks_pending));
2277
2278}
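
/*
 * Illustrative registration (hypothetical caller such as HTC): the
 * callbacks stay in msg_callbacks_pending until hif_start() installs
 * them. The handler names are assumptions for illustration.
 *
 *	struct hif_msg_callbacks cbs = { 0 };
 *
 *	cbs.Context = htc_handle;			// hypothetical
 *	cbs.txCompletionHandler = htc_tx_done;		// hypothetical
 *	cbs.rxCompletionHandler = htc_rx_indicate;	// hypothetical
 *	hif_post_init(hif_ctx, NULL, &cbs);
 */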
2279
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002280static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002281{
2282 struct CE_handle *ce_diag = hif_state->ce_diag;
2283 int pipe_num;
Komal Seelam644263d2016-02-22 20:45:49 +05302284 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07002285 struct hif_msg_callbacks *hif_msg_callbacks =
2286 &hif_state->msg_callbacks_current;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002287
2288 /* daemonize("hif_compl_thread"); */
2289
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002290 if (scn->ce_count == 0) {
Houston Hoffmanc50572b2016-06-08 19:49:46 -07002291 HIF_ERROR("%s: Invalid ce_count", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002292 return -EINVAL;
2293 }
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07002294
2295 if (!hif_msg_callbacks ||
2296 !hif_msg_callbacks->rxCompletionHandler ||
2297 !hif_msg_callbacks->txCompletionHandler) {
2298 HIF_ERROR("%s: no completion handler registered", __func__);
2299 return -EFAULT;
2300 }
2301
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002302 A_TARGET_ACCESS_LIKELY(scn);
2303 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2304 struct CE_attr attr;
2305 struct HIF_CE_pipe_info *pipe_info;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002306
2307 pipe_info = &hif_state->pipe_info[pipe_num];
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002308 if (pipe_info->ce_hdl == ce_diag)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002309 continue; /* Handle Diagnostic CE specially */
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302310 attr = hif_state->host_ce_config[pipe_num];
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002311 if (attr.src_nentries) {
2312 /* pipe used to send to target */
Jeff Johnsonb9450212017-09-18 10:12:38 -07002313 HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK",
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002314 __func__, pipe_num, pipe_info);
2315 ce_send_cb_register(pipe_info->ce_hdl,
2316 hif_pci_ce_send_done, pipe_info,
2317 attr.flags & CE_ATTR_DISABLE_INTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002318 pipe_info->num_sends_allowed = attr.src_nentries - 1;
2319 }
2320 if (attr.dest_nentries) {
2321 /* pipe used to receive from target */
2322 ce_recv_cb_register(pipe_info->ce_hdl,
2323 hif_pci_ce_recv_data, pipe_info,
2324 attr.flags & CE_ATTR_DISABLE_INTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002325 }
Houston Hoffman6666df72015-11-30 16:48:35 -08002326
2327 if (attr.src_nentries)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302328 qdf_spinlock_create(&pipe_info->completion_freeq_lock);
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05302329
2330 qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
2331 sizeof(pipe_info->pipe_callbacks));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002332 }
Houston Hoffman6666df72015-11-30 16:48:35 -08002333
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002334 A_TARGET_ACCESS_UNLIKELY(scn);
2335 return 0;
2336}
2337
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002338/*
2339 * Install pending msg callbacks.
2340 *
2341 * TBDXXX: This hack is needed because upper layers install msg callbacks
2342 * for use with HTC before BMI is done; yet this HIF implementation
2343 * needs to continue to use BMI msg callbacks. Really, upper layers
2344 * should not register HTC callbacks until AFTER BMI phase.
2345 */
Komal Seelam644263d2016-02-22 20:45:49 +05302346static void hif_msg_callbacks_install(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002347{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302348 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002349
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302350 qdf_mem_copy(&hif_state->msg_callbacks_current,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002351 &hif_state->msg_callbacks_pending,
2352 sizeof(hif_state->msg_callbacks_pending));
2353}
2354
Komal Seelam5584a7c2016-02-24 19:22:48 +05302355void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
2356 uint8_t *DLPipe)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002357{
2358 int ul_is_polled, dl_is_polled;
2359
Komal Seelam644263d2016-02-22 20:45:49 +05302360 (void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002361 ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
2362}
2363
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002364/**
2365 * hif_dump_pipe_debug_count() - Log error count
Komal Seelam644263d2016-02-22 20:45:49 +05302366 * @scn: hif_softc pointer.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002367 *
2368 * Output the error counts of each pipe to the log file
2369 *
2370 * Return: N/A
2371 */
Komal Seelam644263d2016-02-22 20:45:49 +05302372void hif_dump_pipe_debug_count(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002373{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302374 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002375 int pipe_num;
2376
Jeff Johnson8d639a02019-03-18 09:51:11 -07002377 if (!hif_state) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002378 HIF_ERROR("%s hif_state is NULL", __func__);
2379 return;
2380 }
2381 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2382 struct HIF_CE_pipe_info *pipe_info;
2383
2384 pipe_info = &hif_state->pipe_info[pipe_num];
2385
2386 if (pipe_info->nbuf_alloc_err_count > 0 ||
2387 pipe_info->nbuf_dma_err_count > 0 ||
2388 pipe_info->nbuf_ce_enqueue_err_count)
2389 HIF_ERROR(
2390 "%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
2391 __func__, pipe_info->pipe_num,
2392 atomic_read(&pipe_info->recv_bufs_needed),
2393 pipe_info->nbuf_alloc_err_count,
2394 pipe_info->nbuf_dma_err_count,
2395 pipe_info->nbuf_ce_enqueue_err_count);
2396 }
2397}
2398
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002399static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
2400 void *nbuf, uint32_t *error_cnt,
2401 enum hif_ce_event_type failure_type,
2402 const char *failure_type_string)
2403{
2404 int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
2405 struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
2406 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2407 int ce_id = CE_state->id;
2408 uint32_t error_cnt_tmp;
2409
2410 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2411 error_cnt_tmp = ++(*error_cnt);
2412 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Himanshu Agarwal38cea4a2017-03-30 19:02:52 +05302413 HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002414 __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
2415 failure_type_string);
2416 hif_record_ce_desc_event(scn, ce_id, failure_type,
c_cgodavfda96ad2017-09-07 16:16:00 +05302417 NULL, nbuf, bufs_needed_tmp, 0);
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002418 /* if we fail to allocate the last buffer for an rx pipe,
2419 * there is no trigger to refill the ce and we will
2420 * eventually crash
2421 */
Himanshu Agarwalbedeed92017-03-21 14:05:10 +05302422 if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08002423 qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
Himanshu Agarwalbedeed92017-03-21 14:05:10 +05302424
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002425}
2426
2427
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08002428
2429
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302430QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002431{
2432 struct CE_handle *ce_hdl;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302433 qdf_size_t buf_sz;
Komal Seelam644263d2016-02-22 20:45:49 +05302434 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302435 QDF_STATUS status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002436 uint32_t bufs_posted = 0;
Yeshwanth Sriram Guntuka2a7ed0a2019-10-16 15:29:28 +05302437 unsigned int ce_id;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002438
2439 buf_sz = pipe_info->buf_sz;
2440 if (buf_sz == 0) {
2441 /* Unused Copy Engine */
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302442 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002443 }
2444
2445 ce_hdl = pipe_info->ce_hdl;
Yeshwanth Sriram Guntuka2a7ed0a2019-10-16 15:29:28 +05302446 ce_id = ((struct CE_state *)ce_hdl)->id;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002447
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302448 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002449 while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302450 qdf_dma_addr_t CE_data; /* CE space buffer address */
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302451 qdf_nbuf_t nbuf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002452
2453 atomic_dec(&pipe_info->recv_bufs_needed);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302454 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002455
Yeshwanth Sriram Guntuka2a7ed0a2019-10-16 15:29:28 +05302456 hif_record_ce_desc_event(scn, ce_id,
2457 HIF_RX_DESC_PRE_NBUF_ALLOC, NULL, NULL,
2458 0, 0);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302459 nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002460 if (!nbuf) {
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002461 hif_post_recv_buffers_failure(pipe_info, nbuf,
2462 &pipe_info->nbuf_alloc_err_count,
2463 HIF_RX_NBUF_ALLOC_FAILURE,
2464 "HIF_RX_NBUF_ALLOC_FAILURE");
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302465 return QDF_STATUS_E_NOMEM;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002466 }
2467
Yeshwanth Sriram Guntuka2a7ed0a2019-10-16 15:29:28 +05302468 hif_record_ce_desc_event(scn, ce_id,
2469 HIF_RX_DESC_PRE_NBUF_MAP, NULL, nbuf,
2470 0, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002471 /*
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302472 * qdf_nbuf_peek_header(nbuf, &data, &unused);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002473 * CE_data = dma_map_single(dev, data, buf_sz, );
2474 * DMA_FROM_DEVICE);
2475 */
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302476 status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302477 QDF_DMA_FROM_DEVICE);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002478
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302479 if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002480 hif_post_recv_buffers_failure(pipe_info, nbuf,
2481 &pipe_info->nbuf_dma_err_count,
2482 HIF_RX_NBUF_MAP_FAILURE,
2483 "HIF_RX_NBUF_MAP_FAILURE");
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302484 qdf_nbuf_free(nbuf);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302485 return status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002486 }
2487
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302488 CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
Yeshwanth Sriram Guntuka2a7ed0a2019-10-16 15:29:28 +05302489 hif_record_ce_desc_event(scn, ce_id,
2490 HIF_RX_DESC_POST_NBUF_MAP, NULL, nbuf,
2491 0, 0);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302492 qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002493 buf_sz, DMA_FROM_DEVICE);
2494 status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302495 if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002496 hif_post_recv_buffers_failure(pipe_info, nbuf,
2497 &pipe_info->nbuf_ce_enqueue_err_count,
2498 HIF_RX_NBUF_ENQUEUE_FAILURE,
2499 "HIF_RX_NBUF_ENQUEUE_FAILURE");
2500
Govind Singh4fcafd42016-08-08 12:37:31 +05302501 qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
2502 QDF_DMA_FROM_DEVICE);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302503 qdf_nbuf_free(nbuf);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302504 return status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002505 }
2506
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302507 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002508 bufs_posted++;
2509 }
2510 pipe_info->nbuf_alloc_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07002511 (pipe_info->nbuf_alloc_err_count > bufs_posted) ?
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002512 pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
2513 pipe_info->nbuf_dma_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07002514 (pipe_info->nbuf_dma_err_count > bufs_posted) ?
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002515 pipe_info->nbuf_dma_err_count - bufs_posted : 0;
2516 pipe_info->nbuf_ce_enqueue_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07002517 (pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002518 pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002519
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302520 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002521
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302522 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002523}
2524
2525/*
2526 * Try to post all desired receive buffers for all pipes.
Govind Singhcaa850e2017-04-20 16:41:36 +05302527 * Returns QDF_STATUS_SUCCESS for non-fastpath rx copy engines, as
2528 * oom_allocation_work will be scheduled to recover any
2529 * failures, and an error status if unable to completely replenish
2530 * receive buffers for a fastpath rx copy engine.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002531 */
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302532QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002533{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302534 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302535 int pipe_num;
Aditya Sathish61f7fa32018-03-27 17:16:33 +05302536 struct CE_state *ce_state = NULL;
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302537 QDF_STATUS qdf_status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002538
2539 A_TARGET_ACCESS_LIKELY(scn);
2540 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2541 struct HIF_CE_pipe_info *pipe_info;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002542
Houston Hoffman85925072016-05-06 17:02:18 -07002543 ce_state = scn->ce_id_to_state[pipe_num];
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002544 pipe_info = &hif_state->pipe_info[pipe_num];
Houston Hoffman85925072016-05-06 17:02:18 -07002545
2546 if (hif_is_nss_wifi_enabled(scn) &&
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002547 ce_state && (ce_state->htt_rx_data))
Houston Hoffman85925072016-05-06 17:02:18 -07002548 continue;
Houston Hoffman85925072016-05-06 17:02:18 -07002549
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302550 qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
Aditya Sathish61f7fa32018-03-27 17:16:33 +05302551 if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state &&
Govind Singhcaa850e2017-04-20 16:41:36 +05302552 ce_state->htt_rx_data &&
2553 scn->fastpath_mode_on) {
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302554 A_TARGET_ACCESS_UNLIKELY(scn);
2555 return qdf_status;
Govind Singhcaa850e2017-04-20 16:41:36 +05302556 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002557 }
2558
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002559 A_TARGET_ACCESS_UNLIKELY(scn);
2560
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302561 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002562}
2563
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302564QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002565{
Komal Seelam644263d2016-02-22 20:45:49 +05302566 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05302567 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302568 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002569
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07002570 hif_update_fastpath_recv_bufs_cnt(scn);
2571
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07002572 hif_msg_callbacks_install(scn);
2573
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002574 if (hif_completion_thread_startup(hif_state))
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302575 return QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002576
Houston Hoffman271951f2016-11-12 15:24:27 -08002577 /* enable buffer cleanup */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002578 hif_state->started = true;
2579
Houston Hoffman271951f2016-11-12 15:24:27 -08002580 /* Post buffers once to start things off. */
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302581 qdf_status = hif_post_recv_buffers(scn);
2582 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
Houston Hoffman271951f2016-11-12 15:24:27 -08002583 /* cleanup is done in hif_ce_disable */
2584 HIF_ERROR("%s: failed to post buffers", __func__);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302585 return qdf_status;
Houston Hoffman271951f2016-11-12 15:24:27 -08002586 }
2587
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302588 return qdf_status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002589}
2590
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002591static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002592{
Komal Seelam644263d2016-02-22 20:45:49 +05302593 struct hif_softc *scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002594 struct CE_handle *ce_hdl;
2595 uint32_t buf_sz;
2596 struct HIF_CE_state *hif_state;
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302597 qdf_nbuf_t netbuf;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302598 qdf_dma_addr_t CE_data;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002599 void *per_CE_context;
2600
2601 buf_sz = pipe_info->buf_sz;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002602 /* Unused Copy Engine */
2603 if (buf_sz == 0)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002604 return;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002605
2607 hif_state = pipe_info->HIF_CE_state;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002608 if (!hif_state->started)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002609 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002610
Komal Seelam02cf2f82016-02-22 20:44:25 +05302611 scn = HIF_GET_SOFTC(hif_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002612 ce_hdl = pipe_info->ce_hdl;
2613
Jeff Johnson8d639a02019-03-18 09:51:11 -07002614 if (!scn->qdf_dev)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002615 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002616 while (ce_revoke_recv_next
2617 (ce_hdl, &per_CE_context, (void **)&netbuf,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302618 &CE_data) == QDF_STATUS_SUCCESS) {
Govind Singhcaa850e2017-04-20 16:41:36 +05302619 if (netbuf) {
2620 qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
2621 QDF_DMA_FROM_DEVICE);
2622 qdf_nbuf_free(netbuf);
2623 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002624 }
2625}
2626
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002627static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002628{
2629 struct CE_handle *ce_hdl;
2630 struct HIF_CE_state *hif_state;
Komal Seelam644263d2016-02-22 20:45:49 +05302631 struct hif_softc *scn;
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302632 qdf_nbuf_t netbuf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002633 void *per_CE_context;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302634 qdf_dma_addr_t CE_data;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002635 unsigned int nbytes;
2636 unsigned int id;
2637 uint32_t buf_sz;
2638 uint32_t toeplitz_hash_result;
2639
2640 buf_sz = pipe_info->buf_sz;
2641 if (buf_sz == 0) {
2642 /* Unused Copy Engine */
2643 return;
2644 }
2645
2646 hif_state = pipe_info->HIF_CE_state;
2647 if (!hif_state->started)
2648 return;
2650
Komal Seelam02cf2f82016-02-22 20:44:25 +05302651 scn = HIF_GET_SOFTC(hif_state);
2652
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002653 ce_hdl = pipe_info->ce_hdl;
2654
2655 while (ce_cancel_send_next
2656 (ce_hdl, &per_CE_context,
2657 (void **)&netbuf, &CE_data, &nbytes,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302658 &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002659 if (netbuf != CE_SENDLIST_ITEM_CTXT) {
2660 /*
2661 * Packets enqueued by htt_h2t_ver_req_msg() and
2662 * htt_h2t_rx_ring_cfg_msg_ll() have already been
2663 * freed in htt_htc_misc_pkt_pool_free() in
2664 * wlantl_close(), so do not free them here again;
Houston Hoffman29573d92015-10-20 17:49:44 -07002665 * identify them by checking the endpoint
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002666 * on which they are queued.
2667 */
Nirav Shahd7f91592016-04-21 14:18:43 +05302668 if (id == scn->htc_htt_tx_endpoint)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002669 return;
Nirav Shahd7f91592016-04-21 14:18:43 +05302670 /* Indicate completion to the higher
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002671 * layer so it can free the buffer
2672 */
2673 if (pipe_info->pipe_callbacks.txCompletionHandler)
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05302674 pipe_info->pipe_callbacks.
2675 txCompletionHandler(pipe_info->
2676 pipe_callbacks.Context,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002677 netbuf, id, toeplitz_hash_result);
2678 }
2679 }
2680}
2681
2682/*
2683 * Clean up residual buffers for device shutdown:
2684 * buffers that were enqueued for receive, and
2685 * buffers that were queued to be sent.
2686 * Note: buffers that had completed but were
2687 * not yet processed are on a completion queue. They
2688 * are handled when the completion thread shuts down.
2689 */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002690static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002691{
2692 int pipe_num;
Komal Seelam644263d2016-02-22 20:45:49 +05302693 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffman85925072016-05-06 17:02:18 -07002694 struct CE_state *ce_state;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002695
Komal Seelam02cf2f82016-02-22 20:44:25 +05302696 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002697 struct HIF_CE_pipe_info *pipe_info;
2698
Houston Hoffman85925072016-05-06 17:02:18 -07002699 ce_state = scn->ce_id_to_state[pipe_num];
2700 if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2701 ((ce_state->htt_tx_data) ||
2702 (ce_state->htt_rx_data))) {
2703 continue;
2704 }
2705
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002706 pipe_info = &hif_state->pipe_info[pipe_num];
2707 hif_recv_buffer_cleanup_on_pipe(pipe_info);
2708 hif_send_buffer_cleanup_on_pipe(pipe_info);
2709 }
2710}
2711
Komal Seelam5584a7c2016-02-24 19:22:48 +05302712void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002713{
Komal Seelam644263d2016-02-22 20:45:49 +05302714 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05302715 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Komal Seelam644263d2016-02-22 20:45:49 +05302716
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002717 hif_buffer_cleanup(hif_state);
2718}
2719
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08002720static void hif_destroy_oom_work(struct hif_softc *scn)
2721{
2722 struct CE_state *ce_state;
2723 int ce_id;
2724
2725 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2726 ce_state = scn->ce_id_to_state[ce_id];
2727 if (ce_state)
2728 qdf_destroy_work(scn->qdf_dev,
2729 &ce_state->oom_allocation_work);
2730 }
2731}
2732
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05302733void hif_ce_stop(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002734{
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05302735 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002736 int pipe_num;
2737
Houston Hoffmana69581e2016-11-14 18:03:19 -08002738 /*
2739 * before cleaning up any memory, ensure irq &
2740 * bottom half contexts will not be re-entered
2741 */
Houston Hoffman7622cd32017-04-06 14:17:49 -07002742 hif_disable_isr(&scn->osc);
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08002743 hif_destroy_oom_work(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002744 scn->hif_init_done = false;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002745
2746 /*
2747 * At this point, asynchronous threads are stopped,
2748 * The Target should not DMA nor interrupt, Host code may
2749 * not initiate anything more. So we just need to clean
2750 * up Host-side state.
2751 */
2752
2753 if (scn->athdiag_procfs_inited) {
2754 athdiag_procfs_remove();
2755 scn->athdiag_procfs_inited = false;
2756 }
2757
2758 hif_buffer_cleanup(hif_state);
2759
2760 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2761 struct HIF_CE_pipe_info *pipe_info;
Poddar, Siddarth725e9f52017-07-19 15:18:28 +05302762 struct CE_attr attr;
2763 struct CE_handle *ce_diag = hif_state->ce_diag;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002764
2765 pipe_info = &hif_state->pipe_info[pipe_num];
2766 if (pipe_info->ce_hdl) {
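			/*
			 * completion_freeq_lock is only initialised for
			 * non-diag CEs with source entries, so destroy
			 * it only in that case.
			 */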
Poddar, Siddarth725e9f52017-07-19 15:18:28 +05302767 if (pipe_info->ce_hdl != ce_diag) {
2768 attr = hif_state->host_ce_config[pipe_num];
2769 if (attr.src_nentries)
2770 qdf_spinlock_destroy(&pipe_info->
2771 completion_freeq_lock);
2772 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002773 ce_fini(pipe_info->ce_hdl);
2774 pipe_info->ce_hdl = NULL;
2775 pipe_info->buf_sz = 0;
Poddar, Siddarth725e9f52017-07-19 15:18:28 +05302776 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002777 }
2778 }
2779
2780 if (hif_state->sleep_timer_init) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302781 qdf_timer_stop(&hif_state->sleep_timer);
2782 qdf_timer_free(&hif_state->sleep_timer);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002783 hif_state->sleep_timer_init = false;
2784 }
2785
2786 hif_state->started = false;
2787}
2788
Nirav Shah4c8b78a2018-06-12 11:49:35 +05302789static void hif_get_shadow_reg_cfg(struct hif_softc *scn,
2790 struct shadow_reg_cfg
2791 **target_shadow_reg_cfg_ret,
2792 uint32_t *shadow_cfg_sz_ret)
2793{
Nirav Shah3e6e04b2018-07-20 12:00:34 +05302794 if (target_shadow_reg_cfg_ret)
2795 *target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
2796 if (shadow_cfg_sz_ret)
2797 *shadow_cfg_sz_ret = shadow_cfg_sz;
Nirav Shah4c8b78a2018-06-12 11:49:35 +05302798}
Houston Hoffman748e1a62017-03-30 17:20:42 -07002799
Houston Hoffman854e67f2016-03-14 21:11:39 -07002800/**
2801 * hif_get_target_ce_config() - get copy engine configuration
2802 * @target_ce_config_ret: basic copy engine configuration
2803 * @target_ce_config_sz_ret: size of the basic configuration in bytes
2804 * @target_service_to_ce_map_ret: service mapping for the copy engines
2805 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
2806 * @target_shadow_reg_cfg_ret: shadow register configuration
2807 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
2808 *
2809 * Provides an accessor to these values outside of this file.
2810 * Currently these are stored in static pointers to const sections.
2811 * There are multiple configurations that are selected from at compile time.
2812 * Runtime selection would need to consider mode, target type and bus type.
2813 *
2814 * Return: return by parameter.
2815 */
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302816void hif_get_target_ce_config(struct hif_softc *scn,
2817 struct CE_pipe_config **target_ce_config_ret,
Houston Hoffman748e1a62017-03-30 17:20:42 -07002818 uint32_t *target_ce_config_sz_ret,
Houston Hoffman854e67f2016-03-14 21:11:39 -07002819 struct service_to_pipe **target_service_to_ce_map_ret,
Houston Hoffman748e1a62017-03-30 17:20:42 -07002820 uint32_t *target_service_to_ce_map_sz_ret,
Houston Hoffman854e67f2016-03-14 21:11:39 -07002821 struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
Houston Hoffman748e1a62017-03-30 17:20:42 -07002822 uint32_t *shadow_cfg_sz_ret)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002823{
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302824 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2825
2826 *target_ce_config_ret = hif_state->target_ce_config;
2827 *target_ce_config_sz_ret = hif_state->target_ce_config_sz;
Houston Hoffman748e1a62017-03-30 17:20:42 -07002828
2829 hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
2830 target_service_to_ce_map_sz_ret);
Nirav Shah4c8b78a2018-06-12 11:49:35 +05302831 hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret,
2832 shadow_cfg_sz_ret);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002833}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002834
Houston Hoffmanf60a3482017-01-31 10:45:07 -08002835#ifdef CONFIG_SHADOW_V2
Houston Hoffman403c2df2017-01-27 12:51:15 -08002836static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
Houston Hoffman5141f9d2017-01-05 10:49:17 -08002837{
2838 int i;
2839 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathish648ce112018-07-02 16:41:39 +05302840 "%s: num_config %d", __func__, cfg->num_shadow_reg_v2_cfg);
Houston Hoffman5141f9d2017-01-05 10:49:17 -08002841
2842 for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
2843 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
Aditya Sathish648ce112018-07-02 16:41:39 +05302844 "%s: i %d, val %x", __func__, i,
Houston Hoffman5141f9d2017-01-05 10:49:17 -08002845 cfg->shadow_reg_v2_cfg[i].addr);
2846 }
2847}
2848
Houston Hoffmanf60a3482017-01-31 10:45:07 -08002849#else
2850static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2851{
2852 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathish648ce112018-07-02 16:41:39 +05302853 "%s: CONFIG_SHADOW_V2 not defined", __func__);
Houston Hoffmanf60a3482017-01-31 10:45:07 -08002854}
2855#endif
2856
Nirav Shahbc8daa42018-07-09 16:27:42 +05302857#ifdef ADRASTEA_RRI_ON_DDR
2858/**
2859 * hif_get_src_ring_read_index(): Called to get the SRRI
2860 *
2861 * @scn: hif_softc pointer
2862 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2863 *
2864 * This function returns the SRRI to the caller. For CEs that
2865 * don't have interrupts enabled, we look at the DDR-based SRRI
2866 *
2867 * Return: SRRI
2868 */
2869inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
2870 uint32_t CE_ctrl_addr)
2871{
2872 struct CE_attr attr;
2873 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2874
2875 attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2876 if (attr.flags & CE_ATTR_DISABLE_INTR) {
2877 return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2878 } else {
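		/*
		 * Interrupt-enabled CE: read the live register when
		 * target register access is currently allowed, otherwise
		 * fall back to the DDR copy of the read index.
		 */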
2879 if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
2880 return A_TARGET_READ(scn,
2881 (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
2882 else
2883 return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
2884 CE_ctrl_addr);
2885 }
2886}
2887
2888/**
2889 * hif_get_dst_ring_read_index(): Called to get the DRRI
2890 *
2891 * @scn: hif_softc pointer
2892 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2893 *
2894 * This function returns the DRRI to the caller. For CEs that
2895 * don't have interrupts enabled, we look at the DDR-based DRRI
2896 *
2897 * Return: DRRI
2898 */
2899inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
2900 uint32_t CE_ctrl_addr)
2901{
2902 struct CE_attr attr;
2903 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2904
2905 attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2906
2907 if (attr.flags & CE_ATTR_DISABLE_INTR) {
2908 return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2909 } else {
2910 if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
2911 return A_TARGET_READ(scn,
2912 (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
2913 else
2914 return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
2915 CE_ctrl_addr);
2916 }
2917}
2918
2919/**
2920 * hif_alloc_rri_on_ddr() - Allocate memory for rri on ddr
2921 * @scn: hif_softc pointer
2922 *
2923 * Return: qdf status
2924 */
2925static inline QDF_STATUS hif_alloc_rri_on_ddr(struct hif_softc *scn)
2926{
2927 qdf_dma_addr_t paddr_rri_on_ddr = 0;
2928
2929 scn->vaddr_rri_on_ddr =
2930 (uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
2931 scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
2932 &paddr_rri_on_ddr);
2933
2934 if (!scn->vaddr_rri_on_ddr) {
2935 hif_err("dmaable page alloc fail");
2936 return QDF_STATUS_E_NOMEM;
2937 }
2938
2939 scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
2940
2941 qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));
2942
2943 return QDF_STATUS_SUCCESS;
2944}
2945#endif
2946
2947#if (!defined(QCN7605_SUPPORT)) && defined(ADRASTEA_RRI_ON_DDR)
2948/**
2949 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2950 *
2951 * @scn: hif_softc pointer
2952 *
2953 * This function allocates non cached memory on ddr and sends
2954 * the physical address of this memory to the CE hardware. The
2955 * hardware updates the RRI on this particular location.
2956 *
2957 * Return: None
2958 */
2959static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
2960{
2961 unsigned int i;
2962 uint32_t high_paddr, low_paddr;
2963
2964 if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
2965 return;
2966
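	/*
	 * The CE hardware takes the RRI base as a split 36-bit
	 * physical address: bits 0..31 go in the low register,
	 * bits 32..35 in the high register.
	 */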
2967 low_paddr = BITS0_TO_31(scn->paddr_rri_on_ddr);
2968 high_paddr = BITS32_TO_35(scn->paddr_rri_on_ddr);
2969
2970 HIF_DBG("%s using srri and drri from DDR", __func__);
2971
2972 WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
2973 WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
2974
2975 for (i = 0; i < CE_COUNT; i++)
2976 CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
2977}
2978#else
2979/**
2980 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2981 *
2982 * @scn: hif_softc pointer
2983 *
2984 * This is a dummy implementation for platforms that don't
2985 * support this functionality.
2986 *
2987 * Return: None
2988 */
2989static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
2990{
2991}
2992#endif
2993
2994/**
2995 * hif_update_rri_over_ddr_config() - update rri_over_ddr config for
2996 * QMI command
2997 * @scn: hif context
2998 * @cfg: wlan enable config
2999 *
3000 * In case of Genoa, rri_over_ddr memory configuration is passed
3001 * to firmware through QMI configure command.
3002 */
3003#if defined(QCN7605_SUPPORT) && defined(ADRASTEA_RRI_ON_DDR)
3004static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
3005 struct pld_wlan_enable_cfg *cfg)
3006{
3007 if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
3008 return;
3009
3010 cfg->rri_over_ddr_cfg_valid = true;
3011 cfg->rri_over_ddr_cfg.base_addr_low =
3012 BITS0_TO_31(scn->paddr_rri_on_ddr);
3013 cfg->rri_over_ddr_cfg.base_addr_high =
3014 BITS32_TO_35(scn->paddr_rri_on_ddr);
3015}
3016#else
3017static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
3018 struct pld_wlan_enable_cfg *cfg)
3019{
3020}
3021#endif
3022
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003023/**
3024 * hif_wlan_enable(): call the platform driver to enable wlan
Komal Seelambd7c51d2016-02-24 10:27:30 +05303025 * @scn: HIF Context
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003026 *
3027 * This function passes the con_mode and CE configuration to
3028 * the platform driver to enable wlan.
3029 *
Houston Hoffman108da402016-03-14 21:11:24 -07003030 * Return: linux error code
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003031 */
Houston Hoffman108da402016-03-14 21:11:24 -07003032int hif_wlan_enable(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003033{
Yuanyuan Liufd594c22016-04-25 13:59:19 -07003034 struct pld_wlan_enable_cfg cfg;
3035 enum pld_driver_mode mode;
Komal Seelambd7c51d2016-02-24 10:27:30 +05303036 uint32_t con_mode = hif_get_conparam(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003037
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05303038 hif_get_target_ce_config(scn,
3039 (struct CE_pipe_config **)&cfg.ce_tgt_cfg,
Houston Hoffman854e67f2016-03-14 21:11:39 -07003040 &cfg.num_ce_tgt_cfg,
3041 (struct service_to_pipe **)&cfg.ce_svc_cfg,
3042 &cfg.num_ce_svc_pipe_cfg,
3043 (struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
3044 &cfg.num_shadow_reg_cfg);
3045
3046 /* translate from structure size to array size */
3047 cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
3048 cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
3049 cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003050
Houston Hoffman5141f9d2017-01-05 10:49:17 -08003051 hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
3052 &cfg.num_shadow_reg_v2_cfg);
3053
3054 hif_print_hal_shadow_register_cfg(&cfg);
3055
Nirav Shahbc8daa42018-07-09 16:27:42 +05303056 hif_update_rri_over_ddr_config(scn, &cfg);
3057
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303058 if (QDF_GLOBAL_FTM_MODE == con_mode)
Yuanyuan Liufd594c22016-04-25 13:59:19 -07003059 mode = PLD_FTM;
Balamurugan Mahalingam1666dd32017-09-14 15:19:42 +05303060 else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
3061 mode = PLD_COLDBOOT_CALIBRATION;
Vignesh Viswanathan7c974c22019-07-24 15:24:03 +05303062 else if (QDF_GLOBAL_FTM_COLDBOOT_CALIB_MODE == con_mode)
3063 mode = PLD_FTM_COLDBOOT_CALIBRATION;
Houston Hoffman75ef5a52016-04-14 17:15:49 -07003064 else if (QDF_IS_EPPING_ENABLED(con_mode))
Yuanyuan Liufd594c22016-04-25 13:59:19 -07003065 mode = PLD_EPPING;
Peng Xu7b962532015-10-02 17:17:03 -07003066 else
Yuanyuan Liufd594c22016-04-25 13:59:19 -07003067 mode = PLD_MISSION;
Peng Xu7b962532015-10-02 17:17:03 -07003068
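	/* With BYPASS_QMI set, no enable request is sent to the
	 * platform driver; the target is assumed to be brought up
	 * out of band.
	 */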
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003069 if (BYPASS_QMI)
3070 return 0;
3071 else
Vevek Venkatesan0ac9aaf2019-06-28 17:17:22 +05303072 return pld_wlan_enable(scn->qdf_dev->dev, &cfg, mode);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003073}
3074
Nirav Shah0d0cce82018-01-17 17:00:31 +05303075#ifdef WLAN_FEATURE_EPPING
3076
Houston Hoffman75ef5a52016-04-14 17:15:49 -07003077#define CE_EPPING_USES_IRQ true
3078
Nirav Shah0d0cce82018-01-17 17:00:31 +05303079void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state)
3080{
3081 if (CE_EPPING_USES_IRQ)
3082 hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
3083 else
3084 hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
3085 hif_state->target_ce_config = target_ce_config_wlan_epping;
3086 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
3087 target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
3088 shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
3089}
3090#endif
3091
Nirav Shah4c8b78a2018-06-12 11:49:35 +05303092#ifdef QCN7605_SUPPORT
3093static inline
3094void hif_set_ce_config_qcn7605(struct hif_softc *scn,
3095 struct HIF_CE_state *hif_state)
3096{
3097 hif_state->host_ce_config = host_ce_config_wlan_qcn7605;
3098 hif_state->target_ce_config = target_ce_config_wlan_qcn7605;
3099 hif_state->target_ce_config_sz =
3100 sizeof(target_ce_config_wlan_qcn7605);
Nirav Shah3e6e04b2018-07-20 12:00:34 +05303101 target_shadow_reg_cfg = target_shadow_reg_cfg_map_qcn7605;
3102 shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map_qcn7605);
Nirav Shah4c8b78a2018-06-12 11:49:35 +05303103 scn->ce_count = QCN7605_CE_COUNT;
3104}
3105#else
3106static inline
3107void hif_set_ce_config_qcn7605(struct hif_softc *scn,
3108 struct HIF_CE_state *hif_state)
3109{
3110 HIF_ERROR("QCN7605 not supported");
3111}
3112#endif
3113
Sathish Kumar86876492018-08-27 13:39:20 +05303114#ifdef CE_SVC_CMN_INIT
3115#ifdef QCA_WIFI_SUPPORT_SRNG
3116static inline void hif_ce_service_init(void)
3117{
3118 ce_service_srng_init();
3119}
3120#else
3121static inline void hif_ce_service_init(void)
3122{
3123 ce_service_legacy_init();
3124}
3125#endif
3126#else
3127static inline void hif_ce_service_init(void)
3128{
3129}
3130#endif
3131
Houston Hoffman108da402016-03-14 21:11:24 -07003133/**
3134 * hif_ce_prepare_config() - load the correct static tables.
3135 * @scn: hif context
3136 *
3137 * Epping uses different static attribute tables than mission mode.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003138 */
Houston Hoffman108da402016-03-14 21:11:24 -07003139void hif_ce_prepare_config(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003140{
Komal Seelambd7c51d2016-02-24 10:27:30 +05303141 uint32_t mode = hif_get_conparam(scn);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003142 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3143 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05303144 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003145
Sathish Kumar86876492018-08-27 13:39:20 +05303146 hif_ce_service_init();
Houston Hoffman10fedfc2017-01-23 15:23:09 -08003147 hif_state->ce_services = ce_services_attach(scn);
3148
Houston Hoffman710af5a2016-11-22 21:59:03 -08003149 scn->ce_count = HOST_CE_COUNT;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003150 /* if epping is enabled we need to use the epping configuration. */
Houston Hoffman75ef5a52016-04-14 17:15:49 -07003151 if (QDF_IS_EPPING_ENABLED(mode)) {
Nirav Shah0d0cce82018-01-17 17:00:31 +05303152 hif_ce_prepare_epping_config(hif_state);
Nirav Shah3e6e04b2018-07-20 12:00:34 +05303153 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003154 }
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003155
3156 switch (tgt_info->target_type) {
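	/* The default case is listed first; any target type without
	 * a dedicated table below falls back to the generic wlan CE
	 * configuration.
	 */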
3157 default:
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05303158 hif_state->host_ce_config = host_ce_config_wlan;
3159 hif_state->target_ce_config = target_ce_config_wlan;
3160 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003161 break;
Nirav Shah4c8b78a2018-06-12 11:49:35 +05303162 case TARGET_TYPE_QCN7605:
3163 hif_set_ce_config_qcn7605(scn, hif_state);
3164 break;
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003165 case TARGET_TYPE_AR900B:
3166 case TARGET_TYPE_QCA9984:
3167 case TARGET_TYPE_IPQ4019:
3168 case TARGET_TYPE_QCA9888:
Venkateswara Swamy Bandaru5432c1b2016-10-12 19:00:40 +05303169 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
3170 hif_state->host_ce_config =
3171 host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
3172 } else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
3173 hif_state->host_ce_config =
3174 host_lowdesc_ce_cfg_wlan_ar900b;
3175 } else {
3176 hif_state->host_ce_config = host_ce_config_wlan_ar900b;
3177 }
3178
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05303179 hif_state->target_ce_config = target_ce_config_wlan_ar900b;
3180 hif_state->target_ce_config_sz =
3181 sizeof(target_ce_config_wlan_ar900b);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003182
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003183 break;
3184
3185 case TARGET_TYPE_AR9888:
3186 case TARGET_TYPE_AR9888V2:
Venkateswara Swamy Bandaru5432c1b2016-10-12 19:00:40 +05303187 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG))
3188 hif_state->host_ce_config =
3189 host_lowdesc_ce_cfg_wlan_ar9888;
3190 else
3191 hif_state->host_ce_config = host_ce_config_wlan_ar9888;
3192
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05303193 hif_state->target_ce_config = target_ce_config_wlan_ar9888;
3194 hif_state->target_ce_config_sz =
3195 sizeof(target_ce_config_wlan_ar9888);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003196
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003197 break;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07003198
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05303199 case TARGET_TYPE_QCA8074:
Venkateswara Swamy Bandarudbacd5e2018-08-07 13:01:50 +05303200 case TARGET_TYPE_QCA8074V2:
Basamma Yakkanahalli5f7cfd42018-11-02 15:52:37 +05303201 case TARGET_TYPE_QCA6018:
Karunakar Dasinenif61cb072016-09-29 11:50:45 -07003202 if (scn->bus_type == QDF_BUS_TYPE_PCI) {
3203 hif_state->host_ce_config =
3204 host_ce_config_wlan_qca8074_pci;
3205 hif_state->target_ce_config =
3206 target_ce_config_wlan_qca8074_pci;
3207 hif_state->target_ce_config_sz =
3208 sizeof(target_ce_config_wlan_qca8074_pci);
3209 } else {
3210 hif_state->host_ce_config = host_ce_config_wlan_qca8074;
3211 hif_state->target_ce_config =
3212 target_ce_config_wlan_qca8074;
3213 hif_state->target_ce_config_sz =
3214 sizeof(target_ce_config_wlan_qca8074);
3215 }
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05303216 break;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07003217 case TARGET_TYPE_QCA6290:
3218 hif_state->host_ce_config = host_ce_config_wlan_qca6290;
3219 hif_state->target_ce_config = target_ce_config_wlan_qca6290;
3220 hif_state->target_ce_config_sz =
3221 sizeof(target_ce_config_wlan_qca6290);
Houston Hoffman748e1a62017-03-30 17:20:42 -07003222
Houston Hoffman710af5a2016-11-22 21:59:03 -08003223 scn->ce_count = QCA_6290_CE_COUNT;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07003224 break;
Nandha Kishore Easwaran5d3475b2019-06-27 11:38:53 +05303225 case TARGET_TYPE_QCN9000:
3226 hif_state->host_ce_config = host_ce_config_wlan_qcn9000;
3227 hif_state->target_ce_config = target_ce_config_wlan_qcn9000;
3228 hif_state->target_ce_config_sz =
3229 sizeof(target_ce_config_wlan_qcn9000);
3230 scn->ce_count = QCN_9000_CE_COUNT;
Nandha Kishore Easwaran54532862019-12-27 11:26:03 +05303231 scn->disable_wake_irq = 1;
Nandha Kishore Easwaran5d3475b2019-06-27 11:38:53 +05303232 break;
Venkata Sharath Chandra Manchala79860aa2018-06-12 15:16:36 -07003233 case TARGET_TYPE_QCA6390:
3234 hif_state->host_ce_config = host_ce_config_wlan_qca6390;
3235 hif_state->target_ce_config = target_ce_config_wlan_qca6390;
3236 hif_state->target_ce_config_sz =
3237 sizeof(target_ce_config_wlan_qca6390);
3238
3239 scn->ce_count = QCA_6390_CE_COUNT;
3240 break;
Mohit Khanna973308a2019-05-13 18:31:33 -07003241 case TARGET_TYPE_QCA6490:
3242 hif_state->host_ce_config = host_ce_config_wlan_qca6490;
3243 hif_state->target_ce_config = target_ce_config_wlan_qca6490;
3244 hif_state->target_ce_config_sz =
3245 sizeof(target_ce_config_wlan_qca6490);
3246
3247 scn->ce_count = QCA_6490_CE_COUNT;
3248 break;
Alok Kumarffc116e2020-01-06 18:12:35 +05303249 case TARGET_TYPE_QCA6750:
3250 hif_state->host_ce_config = host_ce_config_wlan_qca6750;
3251 hif_state->target_ce_config = target_ce_config_wlan_qca6750;
3252 hif_state->target_ce_config_sz =
3253 sizeof(target_ce_config_wlan_qca6750);
3254
3255 scn->ce_count = QCA_6750_CE_COUNT;
3256 break;
hangtianc572f5f2019-04-10 11:19:59 +08003257 case TARGET_TYPE_ADRASTEA:
Surabhi Vishnoib30b9172019-07-05 12:24:13 +05303258 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
hangtianc572f5f2019-04-10 11:19:59 +08003259 hif_state->host_ce_config =
3260 host_lowdesc_ce_config_wlan_adrastea_nopktlog;
Surabhi Vishnoib30b9172019-07-05 12:24:13 +05303261 hif_state->target_ce_config =
3262 target_lowdesc_ce_config_wlan_adrastea_nopktlog;
3263 hif_state->target_ce_config_sz =
3264 sizeof(target_lowdesc_ce_config_wlan_adrastea_nopktlog);
3265 } else {
hangtianc572f5f2019-04-10 11:19:59 +08003266 hif_state->host_ce_config =
3267 host_ce_config_wlan_adrastea;
Surabhi Vishnoib30b9172019-07-05 12:24:13 +05303268 hif_state->target_ce_config =
3269 target_ce_config_wlan_adrastea;
3270 hif_state->target_ce_config_sz =
hangtianc572f5f2019-04-10 11:19:59 +08003271 sizeof(target_ce_config_wlan_adrastea);
Surabhi Vishnoib30b9172019-07-05 12:24:13 +05303272 }
hangtianc572f5f2019-04-10 11:19:59 +08003273 break;
3274
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003275 }
Yun parkc80eea72017-10-06 15:33:36 -07003276 QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
Houston Hoffman108da402016-03-14 21:11:24 -07003277}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003278
Houston Hoffman108da402016-03-14 21:11:24 -07003279/**
3280 * hif_ce_open() - do ce specific allocations
3281 * @hif_sc: pointer to hif context
3282 *
3283 * Return: QDF_STATUS_SUCCESS for success or QDF_STATUS_E_NOMEM
3284 */
3285QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
3286{
3287 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003288
Venkateswara Swamy Bandaru9fd9af02016-09-20 20:27:31 +05303289 qdf_spinlock_create(&hif_state->irq_reg_lock);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303290 qdf_spinlock_create(&hif_state->keep_awake_lock);
Houston Hoffman108da402016-03-14 21:11:24 -07003291 return QDF_STATUS_SUCCESS;
3292}
3293
3294/**
3295 * hif_ce_close() - do ce specific free
3296 * @hif_sc: pointer to hif context
3297 */
3298void hif_ce_close(struct hif_softc *hif_sc)
3299{
Venkateswara Swamy Bandaru9fd9af02016-09-20 20:27:31 +05303300 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3301
3302 qdf_spinlock_destroy(&hif_state->irq_reg_lock);
Poddar, Siddarth725e9f52017-07-19 15:18:28 +05303303 qdf_spinlock_destroy(&hif_state->keep_awake_lock);
Houston Hoffman108da402016-03-14 21:11:24 -07003304}
3305
3306/**
3307 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
3308 * @hif_sc: hif context
3309 *
3310 * uses state variables to support cleaning up when hif_config_ce fails.
3311 */
3312void hif_unconfig_ce(struct hif_softc *hif_sc)
3313{
3314 int pipe_num;
3315 struct HIF_CE_pipe_info *pipe_info;
3316 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
Manjunathappa Prakasha5a30862018-05-21 16:32:32 -07003317 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc);
Houston Hoffman108da402016-03-14 21:11:24 -07003318
3319 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
3320 pipe_info = &hif_state->pipe_info[pipe_num];
3321 if (pipe_info->ce_hdl)
3322 ce_unregister_irq(hif_state, (1 << pipe_num));
3324 }
3325 deinit_tasklet_workers(hif_hdl);
3326 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
3327 pipe_info = &hif_state->pipe_info[pipe_num];
3328 if (pipe_info->ce_hdl) {
Houston Hoffman108da402016-03-14 21:11:24 -07003329 ce_fini(pipe_info->ce_hdl);
3330 pipe_info->ce_hdl = NULL;
3331 pipe_info->buf_sz = 0;
Houston Hoffman03f46572016-12-12 12:53:56 -08003332 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
Houston Hoffman108da402016-03-14 21:11:24 -07003333 }
3334 }
Houston Hoffman108da402016-03-14 21:11:24 -07003335 if (hif_sc->athdiag_procfs_inited) {
3336 athdiag_procfs_remove();
3337 hif_sc->athdiag_procfs_inited = false;
3338 }
3339}
3340
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003341#ifdef CONFIG_BYPASS_QMI
Nirav Shah8e930272018-07-10 16:28:21 +05303342#ifdef QCN7605_SUPPORT
3343/**
3344 * hif_post_static_buf_to_target() - post static buffer to WLAN FW
3345 * @scn: pointer to HIF structure
3346 *
3347 * WLAN FW needs 2MB memory from DDR when QMI is disabled.
3348 *
3349 * Return: void
3350 */
3351static void hif_post_static_buf_to_target(struct hif_softc *scn)
3352{
3353 void *target_va;
3354 phys_addr_t target_pa;
3355 struct ce_info *ce_info_ptr;
3356 uint32_t msi_data_start;
3357 uint32_t msi_data_count;
3358 uint32_t msi_irq_start;
3359 uint32_t i = 0;
3360 int ret;
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003361
Nirav Shah8e930272018-07-10 16:28:21 +05303362 target_va = qdf_mem_alloc_consistent(scn->qdf_dev,
3363 scn->qdf_dev->dev,
3364 FW_SHARED_MEM +
3365 sizeof(struct ce_info),
3366 &target_pa);
3367 if (!target_va)
3368 return;
3369
3370 ce_info_ptr = (struct ce_info *)target_va;
3371
3372 if (scn->vaddr_rri_on_ddr) {
3373 ce_info_ptr->rri_over_ddr_low_paddr =
3374 BITS0_TO_31(scn->paddr_rri_on_ddr);
3375 ce_info_ptr->rri_over_ddr_high_paddr =
3376 BITS32_TO_35(scn->paddr_rri_on_ddr);
3377 }
3378
3379 ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3380 &msi_data_count, &msi_data_start,
3381 &msi_irq_start);
3382 if (ret) {
3383 hif_err("Failed to get CE msi config");
3384 return;
3385 }
3386
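	/* Distribute CEs round-robin over the MSI data vectors
	 * assigned by the platform, starting at msi_irq_start.
	 */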
3387 for (i = 0; i < CE_COUNT_MAX; i++) {
3388 ce_info_ptr->cfg[i].ce_id = i;
3389 ce_info_ptr->cfg[i].msi_vector =
3390 (i % msi_data_count) + msi_irq_start;
3391 }
3392
3393 hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
3394 hif_info("target va %pK target pa %pa", target_va, &target_pa);
3395}
3396#else
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003397/**
3398 * hif_post_static_buf_to_target() - post static buffer to WLAN FW
3399 * @scn: pointer to HIF structure
3400 *
3401 * WLAN FW needs 2MB memory from DDR when QMI is disabled.
3402 *
3403 * Return: void
3404 */
3405static void hif_post_static_buf_to_target(struct hif_softc *scn)
3406{
Hardik Kantilal Patelc5dc5f22016-04-21 14:11:33 -07003407 void *target_va;
3408 phys_addr_t target_pa;
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003409
Hardik Kantilal Patelc5dc5f22016-04-21 14:11:33 -07003410 target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
3411 FW_SHARED_MEM, &target_pa);
Jeff Johnson8d639a02019-03-18 09:51:11 -07003412 if (!target_va) {
Hardik Kantilal Patelc5dc5f22016-04-21 14:11:33 -07003413 HIF_TRACE("Memory allocation failed, could not post target buf");
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003414 return;
3415 }
Nirav Shahf1e3fb52018-06-12 14:39:34 +05303416 hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
Hardik Kantilal Patelc5dc5f22016-04-21 14:11:33 -07003417 HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003418}
Nirav Shah8e930272018-07-10 16:28:21 +05303419#endif
3420
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003421#else
3422static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
3423{
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003424}
3425#endif
3426
Houston Hoffman579c02f2017-08-02 01:57:38 -07003427static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
3428 bool wait_for_it)
3429{
3430 /* todo */
3431 return 0;
3432}
3433
Houston Hoffman108da402016-03-14 21:11:24 -07003434/**
3435 * hif_config_ce() - configure copy engines
3436 * @scn: hif context
3437 *
3438 * Prepares fw, copy engine hardware and host sw according
3439 * to the attributes selected by hif_ce_prepare_config.
3440 *
3441 * also calls athdiag_procfs_init
3442 *
3443 * Return: 0 for success, nonzero for failure.
3444 */
3445int hif_config_ce(struct hif_softc *scn)
3446{
3447 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3448 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3449 struct HIF_CE_pipe_info *pipe_info;
3450 int pipe_num;
Aditya Sathish61f7fa32018-03-27 17:16:33 +05303451 struct CE_state *ce_state = NULL;
c_cgodavfda96ad2017-09-07 16:16:00 +05303452
Houston Hoffman108da402016-03-14 21:11:24 -07003453#ifdef ADRASTEA_SHADOW_REGISTERS
3454 int i;
3455#endif
3456 QDF_STATUS rv = QDF_STATUS_SUCCESS;
3457
3458 scn->notice_send = true;
Poddar, Siddarth1ea82922017-06-28 14:39:33 +05303459 scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003460
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003461 hif_post_static_buf_to_target(scn);
3462
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003463 hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
Houston Hoffman108da402016-03-14 21:11:24 -07003464
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003465 hif_config_rri_on_ddr(scn);
3466
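	/* SRNG-based targets install their own sleep-state hook;
	 * hif_srng_sleep_state_adjust() above is currently a no-op
	 * stub.
	 */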
Houston Hoffman579c02f2017-08-02 01:57:38 -07003467 if (ce_srng_based(scn))
3468 scn->bus_ops.hif_target_sleep_state_adjust =
3469 &hif_srng_sleep_state_adjust;
3470
c_cgodavfda96ad2017-09-07 16:16:00 +05303471 /* Initialise the CE debug history sysfs interface inputs (ce_id and
3472 * index) and disable data storing.
3473 */
3474 reset_ce_debug_history(scn);
3475
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003476 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3477 struct CE_attr *attr;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07003478
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003479 pipe_info = &hif_state->pipe_info[pipe_num];
3480 pipe_info->pipe_num = pipe_num;
3481 pipe_info->HIF_CE_state = hif_state;
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05303482 attr = &hif_state->host_ce_config[pipe_num];
Karunakar Dasinenif61cb072016-09-29 11:50:45 -07003483
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003484 pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
Houston Hoffman85925072016-05-06 17:02:18 -07003485 ce_state = scn->ce_id_to_state[pipe_num];
Aditya Sathish61f7fa32018-03-27 17:16:33 +05303486 if (!ce_state) {
3487 A_TARGET_ACCESS_UNLIKELY(scn);
3488 goto err;
3489 }
Houston Hoffman03f46572016-12-12 12:53:56 -08003490 qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
Jeff Johnson8d639a02019-03-18 09:51:11 -07003491 QDF_ASSERT(pipe_info->ce_hdl);
3492 if (!pipe_info->ce_hdl) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303493 rv = QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003494 A_TARGET_ACCESS_UNLIKELY(scn);
3495 goto err;
3496 }
3497
Dhanashri Atre991ee4d2017-05-03 19:03:10 -07003498 ce_state->lro_data = qdf_lro_init();
3499
Kiran Venkatappae17e3b62017-02-10 16:31:49 +05303500 if (attr->flags & CE_ATTR_DIAG) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003501 /* Reserve the ultimate CE for
Manikandan Mohanafd6e882017-04-07 17:46:41 -07003502 * Diagnostic Window support
3503 */
Houston Hoffmanc1d9a412016-03-30 21:07:57 -07003504 hif_state->ce_diag = pipe_info->ce_hdl;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003505 continue;
3506 }
3507
Houston Hoffman85925072016-05-06 17:02:18 -07003508 if (hif_is_nss_wifi_enabled(scn) && ce_state &&
3509 (ce_state->htt_rx_data))
3510 continue;
3511
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303512 pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003513 if (attr->dest_nentries > 0) {
3514 atomic_set(&pipe_info->recv_bufs_needed,
3515 init_buffer_count(attr->dest_nentries - 1));
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05303516 /* SRNG-based CE has one entry less */
3517 if (ce_srng_based(scn))
3518 atomic_dec(&pipe_info->recv_bufs_needed);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003519 } else {
3520 atomic_set(&pipe_info->recv_bufs_needed, 0);
3521 }
3522 ce_tasklet_init(hif_state, (1 << pipe_num));
3523 ce_register_irq(hif_state, (1 << pipe_num));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003524 }
3525
3526 if (athdiag_procfs_init(scn) != 0) {
3527 A_TARGET_ACCESS_UNLIKELY(scn);
3528 goto err;
3529 }
3530 scn->athdiag_procfs_inited = true;
3531
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -08003532 HIF_DBG("%s: ce_init done", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003533
Houston Hoffman108da402016-03-14 21:11:24 -07003534 init_tasklet_workers(hif_hdl);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003535
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -08003536 HIF_DBG("%s: X, ret = %d", __func__, rv);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003537
3538#ifdef ADRASTEA_SHADOW_REGISTERS
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -08003539 HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003540 for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -08003541 HIF_DBG("%s Shadow Register%d is mapped to address %x",
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003542 __func__, i,
3543 (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
3544 }
3545#endif
3546
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303547 return rv != QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003548
3549err:
3550 /* Failure, so clean up */
Houston Hoffman108da402016-03-14 21:11:24 -07003551 hif_unconfig_ce(scn);
Houston Hoffmanc50572b2016-06-08 19:49:46 -07003552 HIF_TRACE("%s: X, ret = %d", __func__, rv);
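	/* The comparison below always evaluates to true (1), so the
	 * error path reliably returns a nonzero value to the caller.
	 */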
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303553 return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003554}
3555
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003556#ifdef IPA_OFFLOAD
Leo Changd85f78d2015-11-13 10:55:34 -08003557/**
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05303558 * hif_ce_ipa_get_ce_resource() - get uc resource on hif
Leo Changd85f78d2015-11-13 10:55:34 -08003559 * @scn: bus context
3560 * @ce_sr: copyengine source ring shared memory info
3561 * @ce_sr_ring_size: copyengine source ring size
3562 * @ce_reg_paddr: copyengine register physical address
3563 *
3564 * IPA micro controller data path offload feature enabled,
3565 * HIF should release copy engine related resource information to IPA UC
3566 * IPA UC will access hardware resource with released information
3567 *
3568 * Return: None
3569 */
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05303570void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
Sravan Kumar Kairam58e0adf2018-02-27 18:37:40 +05303571 qdf_shared_mem_t **ce_sr,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003572 uint32_t *ce_sr_ring_size,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303573 qdf_dma_addr_t *ce_reg_paddr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003574{
Komal Seelam02cf2f82016-02-22 20:44:25 +05303575 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003576 struct HIF_CE_pipe_info *pipe_info =
3577 &(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
3578 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3579
Sravan Kumar Kairam58e0adf2018-02-27 18:37:40 +05303580 ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003581 ce_reg_paddr);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003582}
3583#endif /* IPA_OFFLOAD */
3584
3585
3586#ifdef ADRASTEA_SHADOW_REGISTERS
3587
3588/*
Manikandan Mohanafd6e882017-04-07 17:46:41 -07003589 * Current shadow register config
3590 *
3591 * -----------------------------------------------------------
3592 * Shadow Register | CE | src/dst write index
3593 * -----------------------------------------------------------
3594 * 0 | 0 | src
3595 * 1 No Config - Doesn't point to anything
3596 * 2 No Config - Doesn't point to anything
3597 * 3 | 3 | src
3598 * 4 | 4 | src
3599 * 5 | 5 | src
3600 * 6 No Config - Doesn't point to anything
3601 * 7 | 7 | src
3602 * 8 No Config - Doesn't point to anything
3603 * 9 No Config - Doesn't point to anything
3604 * 10 No Config - Doesn't point to anything
3605 * 11 No Config - Doesn't point to anything
3606 * -----------------------------------------------------------
3607 * 12 No Config - Doesn't point to anything
3608 * 13 | 1 | dst
3609 * 14 | 2 | dst
3610 * 15 No Config - Doesn't point to anything
3611 * 16 No Config - Doesn't point to anything
3612 * 17 No Config - Doesn't point to anything
3613 * 18 No Config - Doesn't point to anything
3614 * 19 | 7 | dst
3615 * 20 | 8 | dst
3616 * 21 No Config - Doesn't point to anything
3617 * 22 No Config - Doesn't point to anything
3618 * 23 No Config - Doesn't point to anything
3619 * -----------------------------------------------------------
3620 *
3621 *
3622 * ToDo - Move shadow register config to following in the future
3623 * This helps free up a block of shadow registers towards the end.
3624 * Can be used for other purposes
3625 *
3626 * -----------------------------------------------------------
3627 * Shadow Register | CE | src/dst write index
3628 * -----------------------------------------------------------
3629 * 0 | 0 | src
3630 * 1 | 3 | src
3631 * 2 | 4 | src
3632 * 3 | 5 | src
3633 * 4 | 7 | src
3634 * -----------------------------------------------------------
3635 * 5 | 1 | dst
3636 * 6 | 2 | dst
3637 * 7 | 7 | dst
3638 * 8 | 8 | dst
3639 * -----------------------------------------------------------
3640 * 9 No Config - Doesn't point to anything
3641 * 12 No Config - Doesn't point to anything
3642 * 13 No Config - Doesn't point to anything
3643 * 14 No Config - Doesn't point to anything
3644 * 15 No Config - Doesn't point to anything
3645 * 16 No Config - Doesn't point to anything
3646 * 17 No Config - Doesn't point to anything
3647 * 18 No Config - Doesn't point to anything
3648 * 19 No Config - Doesn't point to anything
3649 * 20 No Config - Doesn't point to anything
3650 * 21 No Config - Doesn't point to anything
3651 * 22 No Config - Doesn't point to anything
3652 * 23 No Config - Doesn't point to anything
3653 * -----------------------------------------------------------
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003654 */
Nirav Shah4c8b78a2018-06-12 11:49:35 +05303655#ifndef QCN7605_SUPPORT
Komal Seelam644263d2016-02-22 20:45:49 +05303656u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003657{
3658 u32 addr = 0;
Houston Hoffmane6330442016-02-26 12:19:11 -08003659 u32 ce = COPY_ENGINE_ID(ctrl_addr);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003660
Houston Hoffmane6330442016-02-26 12:19:11 -08003661 switch (ce) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003662 case 0:
3663 addr = SHADOW_VALUE0;
3664 break;
3665 case 3:
3666 addr = SHADOW_VALUE3;
3667 break;
3668 case 4:
3669 addr = SHADOW_VALUE4;
3670 break;
3671 case 5:
3672 addr = SHADOW_VALUE5;
3673 break;
3674 case 7:
3675 addr = SHADOW_VALUE7;
3676 break;
3677 default:
Houston Hoffmane6330442016-02-26 12:19:11 -08003678 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303679 QDF_ASSERT(0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003680 }
3681 return addr;
3683}
3684
Komal Seelam644263d2016-02-22 20:45:49 +05303685u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003686{
3687 u32 addr = 0;
Houston Hoffmane6330442016-02-26 12:19:11 -08003688 u32 ce = COPY_ENGINE_ID(ctrl_addr);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003689
Houston Hoffmane6330442016-02-26 12:19:11 -08003690 switch (ce) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003691 case 1:
3692 addr = SHADOW_VALUE13;
3693 break;
3694 case 2:
3695 addr = SHADOW_VALUE14;
3696 break;
Vishwajith Upendra70efc752016-04-18 11:23:49 -07003697 case 5:
3698 addr = SHADOW_VALUE17;
3699 break;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003700 case 7:
3701 addr = SHADOW_VALUE19;
3702 break;
3703 case 8:
3704 addr = SHADOW_VALUE20;
3705 break;
Houston Hoffmane6330442016-02-26 12:19:11 -08003706 case 9:
3707 addr = SHADOW_VALUE21;
3708 break;
3709 case 10:
3710 addr = SHADOW_VALUE22;
3711 break;
Nirav Shah75cc5c82016-05-25 10:52:38 +05303712 case 11:
3713 addr = SHADOW_VALUE23;
3714 break;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003715 default:
Houston Hoffmane6330442016-02-26 12:19:11 -08003716 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303717 QDF_ASSERT(0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003718 }
3719
3720 return addr;
3722}
Nirav Shah4c8b78a2018-06-12 11:49:35 +05303723#else
3724u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3725{
3726 u32 addr = 0;
3727 u32 ce = COPY_ENGINE_ID(ctrl_addr);
3728
3729 switch (ce) {
3730 case 0:
3731 addr = SHADOW_VALUE0;
3732 break;
3733 case 4:
3734 addr = SHADOW_VALUE4;
3735 break;
3736 case 5:
3737 addr = SHADOW_VALUE5;
3738 break;
3739 default:
3740 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3741 QDF_ASSERT(0);
3742 }
3743 return addr;
3744}
3745
3746u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3747{
3748 u32 addr = 0;
3749 u32 ce = COPY_ENGINE_ID(ctrl_addr);
3750
3751 switch (ce) {
3752 case 1:
3753 addr = SHADOW_VALUE13;
3754 break;
3755 case 2:
3756 addr = SHADOW_VALUE14;
3757 break;
3758 case 3:
3759 addr = SHADOW_VALUE15;
3760 break;
3761 case 5:
3762 addr = SHADOW_VALUE17;
3763 break;
3764 case 7:
3765 addr = SHADOW_VALUE19;
3766 break;
3767 case 8:
3768 addr = SHADOW_VALUE20;
3769 break;
3770 case 9:
3771 addr = SHADOW_VALUE21;
3772 break;
3773 case 10:
3774 addr = SHADOW_VALUE22;
3775 break;
3776 case 11:
3777 addr = SHADOW_VALUE23;
3778 break;
3779 default:
3780 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3781 QDF_ASSERT(0);
3782 }
3783
3784 return addr;
3785}
3786#endif
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003787#endif
3788
Dhanashri Atre65b674f2015-10-30 15:12:03 -07003789#if defined(FEATURE_LRO)
Manjunathappa Prakash2146da32016-10-13 14:47:47 -07003790void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
3791{
3792 struct CE_state *ce_state;
3793 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
3794
Manjunathappa Prakash2146da32016-10-13 14:47:47 -07003795 ce_state = scn->ce_id_to_state[ctx_id];
3796
3797 return ce_state->lro_data;
3798}
Dhanashri Atre65b674f2015-10-30 15:12:03 -07003799#endif
Sanjay Devnanic319c822015-11-06 16:44:28 -08003800
3801/**
3802 * hif_map_service_to_pipe() - returns the ce ids pertaining to
3803 * this service
Komal Seelam644263d2016-02-22 20:45:49 +05303804 * @scn: hif_softc pointer.
Sanjay Devnanic319c822015-11-06 16:44:28 -08003805 * @svc_id: Service ID for which the mapping is needed.
3806 * @ul_pipe: address of the container in which ul pipe is returned.
3807 * @dl_pipe: address of the container in which dl pipe is returned.
3808 * @ul_is_polled: address of the container in which a bool
3809 * indicating if the UL CE for this service
3810 * is polled is returned.
3811 * @dl_is_polled: address of the container in which a bool
3812 * indicating if the DL CE for this service
3813 * is polled is returned.
3814 *
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003815 * Return: Indicates whether the service has been found in the table.
3816 * Upon return, ul_is_polled is updated only if ul_pipe is updated.
3817 * A debug log is emitted if either leg has not been updated
3818 * because its entry was missing from the table (this is not an error).
Sanjay Devnanic319c822015-11-06 16:44:28 -08003819 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05303820int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
Sanjay Devnanic319c822015-11-06 16:44:28 -08003821 uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
3822 int *dl_is_polled)
3823{
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003824 int status = QDF_STATUS_E_INVAL;
Sanjay Devnanic319c822015-11-06 16:44:28 -08003825 unsigned int i;
3826 struct service_to_pipe element;
Sanjay Devnanic319c822015-11-06 16:44:28 -08003827 struct service_to_pipe *tgt_svc_map_to_use;
Houston Hoffman748e1a62017-03-30 17:20:42 -07003828 uint32_t sz_tgt_svc_map_to_use;
Komal Seelambd7c51d2016-02-24 10:27:30 +05303829 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
Houston Hoffman748e1a62017-03-30 17:20:42 -07003830 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003831 bool dl_updated = false;
3832 bool ul_updated = false;
Sanjay Devnanic319c822015-11-06 16:44:28 -08003833
Houston Hoffman748e1a62017-03-30 17:20:42 -07003834 hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
3835 &sz_tgt_svc_map_to_use);
Sanjay Devnanic319c822015-11-06 16:44:28 -08003836
3837 *dl_is_polled = 0; /* polling for received messages not supported */
3838
3839 for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
3840
3841 memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
3842 if (element.service_id == svc_id) {
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003843 if (element.pipedir == PIPEDIR_OUT) {
Sanjay Devnanic319c822015-11-06 16:44:28 -08003844 *ul_pipe = element.pipenum;
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003845 *ul_is_polled =
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05303846 (hif_state->host_ce_config[*ul_pipe].flags &
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003847 CE_ATTR_DISABLE_INTR) != 0;
3848 ul_updated = true;
3849 } else if (element.pipedir == PIPEDIR_IN) {
Sanjay Devnanic319c822015-11-06 16:44:28 -08003850 *dl_pipe = element.pipenum;
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003851 dl_updated = true;
3852 }
3853 status = QDF_STATUS_SUCCESS;
Sanjay Devnanic319c822015-11-06 16:44:28 -08003854 }
3855 }
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003856 if (!ul_updated)
Dustin Brown1ec15102018-08-01 00:43:43 -07003857 HIF_DBG("ul pipe is NOT updated for service %d", svc_id);
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003858 if (!dl_updated)
Dustin Brown1ec15102018-08-01 00:43:43 -07003859 HIF_DBG("dl pipe is NOT updated for service %d", svc_id);
Sanjay Devnanic319c822015-11-06 16:44:28 -08003860
3861 return status;
3862}
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003863
3864#ifdef SHADOW_REG_DEBUG
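/*
 * Debug helpers: read the ring read index from both the live
 * register and the DDR shadow copy, assert that the two agree,
 * and return the DDR value. The SRRI and DRRI variants below
 * follow the same pattern.
 */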
Komal Seelam644263d2016-02-22 20:45:49 +05303865inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003866 uint32_t CE_ctrl_addr)
3867{
3868 uint32_t read_from_hw, srri_from_ddr = 0;
3869
3870 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
3871
3872 srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3873
3874 if (read_from_hw != srri_from_ddr) {
Houston Hoffmanc50572b2016-06-08 19:49:46 -07003875 HIF_ERROR("%s: error: read from ddr = %d, actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
3876 __func__, srri_from_ddr, read_from_hw,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003877 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303878 QDF_ASSERT(0);
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003879 }
3880 return srri_from_ddr;
3881}
3882
3883
Komal Seelam644263d2016-02-22 20:45:49 +05303884inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003885 uint32_t CE_ctrl_addr)
3886{
3887 uint32_t read_from_hw, drri_from_ddr = 0;
3888
3889 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
3890
3891 drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3892
3893 if (read_from_hw != drri_from_ddr) {
Houston Hoffmanc50572b2016-06-08 19:49:46 -07003894 HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003895 drri_from_ddr, read_from_hw,
3896 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303897 QDF_ASSERT(0);
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003898 }
3899 return drri_from_ddr;
3900}
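
/*
 * With SHADOW_REG_DEBUG enabled, the CE read-index macros are assumed
 * (based on ce_reg.h conventions) to resolve to the two debug helpers
 * above, so every index read cross-checks the DDR shadow copy against
 * the hardware register and asserts on divergence, e.g.:
 *
 *	sw_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
 */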

#endif

/**
 * hif_dump_ce_registers() - dump ce registers
 * @scn: hif_softc pointer.
 *
 * Output the copy engine registers
 *
 * Return: 0 for success or error code
 */
int hif_dump_ce_registers(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
	uint16_t i;
	QDF_STATUS status;

	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
		if (!scn->ce_id_to_state[i]) {
			HIF_DBG("CE%d not used.", i);
			continue;
		}

		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
					   (uint8_t *)&ce_reg_values[0],
					   ce_reg_word_size * sizeof(uint32_t));
		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("Dumping CE register failed!");
			return -EACCES;
		}
		HIF_ERROR("CE%d=>\n", i);
		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
				   (uint8_t *)&ce_reg_values[0],
				   ce_reg_word_size * sizeof(uint32_t));
		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d",
			  (ce_reg_address + SR_WR_INDEX_ADDRESS),
			  ce_reg_values[SR_WR_INDEX_ADDRESS / 4]);
		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d",
			  (ce_reg_address + CURRENT_SRRI_ADDRESS),
			  ce_reg_values[CURRENT_SRRI_ADDRESS / 4]);
		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d",
			  (ce_reg_address + DST_WR_INDEX_ADDRESS),
			  ce_reg_values[DST_WR_INDEX_ADDRESS / 4]);
		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d",
			  (ce_reg_address + CURRENT_DRRI_ADDRESS),
			  ce_reg_values[CURRENT_DRRI_ADDRESS / 4]);
		qdf_print("---");
	}
	return 0;
}
qdf_export_symbol(hif_dump_ce_registers);
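
/*
 * Debug usage sketch (hypothetical call site): typically invoked from
 * error-recovery paths once the copy engines are attached, e.g.
 *
 *	if (hif_dump_ce_registers(scn))
 *		HIF_ERROR("%s: CE register dump failed", __func__);
 */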

#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct CE_ring_state *dest_ring = ce_state->dest_ring;

	if (src_ring) {
		hif_info->ul_pipe.nentries = src_ring->nentries;
		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
		hif_info->ul_pipe.sw_index = src_ring->sw_index;
		hif_info->ul_pipe.write_index = src_ring->write_index;
		hif_info->ul_pipe.hw_index = src_ring->hw_index;
		hif_info->ul_pipe.base_addr_CE_space =
			src_ring->base_addr_CE_space;
		hif_info->ul_pipe.base_addr_owner_space =
			src_ring->base_addr_owner_space;
	}

	if (dest_ring) {
		hif_info->dl_pipe.nentries = dest_ring->nentries;
		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
		hif_info->dl_pipe.write_index = dest_ring->write_index;
		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
		hif_info->dl_pipe.base_addr_CE_space =
			dest_ring->base_addr_CE_space;
		hif_info->dl_pipe.base_addr_owner_space =
			dest_ring->base_addr_owner_space;
	}

	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
	hif_info->ctrl_addr = ce_state->ctrl_addr;

	return hif_info;
}
qdf_export_symbol(hif_get_addl_pipe_info);
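
/*
 * Usage sketch (illustrative, names hypothetical): the caller owns the
 * info structure, which is filled in and returned for convenience.
 *
 *	struct hif_pipe_addl_info info = {0};
 *
 *	hif_get_addl_pipe_info(osc, &info, CE_ID_1);
 *	HIF_DBG("pipe sw_index %d write_index %d",
 *		info.ul_pipe.sw_index, info.ul_pipe.write_index);
 */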

uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->nss_wifi_ol_mode = mode;
	return 0;
}
qdf_export_symbol(hif_set_nss_wifiol_mode);
#endif

void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->hif_attribute = hif_attrib;
}

/* Disable interrupts (only applicable to legacy copy engines currently) */
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
	uint32_t ctrl_addr = CE_state->ctrl_addr;

	Q_TARGET_ACCESS_BEGIN(scn);
	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
	Q_TARGET_ACCESS_END(scn);
}
qdf_export_symbol(hif_disable_interrupt);
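
/*
 * Usage sketch (illustrative): masks copy-complete interrupts for one
 * legacy CE pipe. No matching enable helper exists in this file, so
 * callers are assumed to re-arm interrupts via the CE tasklet path.
 *
 *	hif_disable_interrupt(osc, CE_ID_2);
 */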

/**
 * hif_fw_event_handler() - hif fw event handler
 * @hif_state: pointer to hif ce state structure
 *
 * Raise the registered HTC callback to process firmware events.
 *
 * Return: none
 */
static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
{
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	if (!msg_callbacks->fwEventHandler)
		return;

	msg_callbacks->fwEventHandler(msg_callbacks->Context,
				      QDF_STATUS_E_FAILURE);
}

#ifndef QCA_WIFI_3_0
/**
 * hif_fw_interrupt_handler() - FW interrupt handler
 * @irq: irq number
 * @arg: the user pointer
 *
 * Called from the PCI interrupt handler when the firmware raises an
 * interrupt to the Host.
 *
 * Only registered for legacy copy engine devices.
 *
 * Return: status of handled irq
 */
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	struct hif_softc *scn = arg;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	uint32_t fw_indicator_address, fw_indicator;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return ATH_ISR_NOSCHED;

	fw_indicator_address = hif_state->fw_indicator_address;
	/* For sudden unplug this will return ~0 */
	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);

	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
		/* ACK: clear Target-side pending event */
		A_TARGET_WRITE(scn, fw_indicator_address,
			       fw_indicator & ~FW_IND_EVENT_PENDING);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;

		if (hif_state->started) {
			hif_fw_event_handler(hif_state);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 * fw_indicator is used as a bitmap, defined as:
			 * FW_IND_EVENT_PENDING 0x1
			 * FW_IND_INITIALIZED   0x2
			 * FW_IND_NEEDRECOVER   0x4
			 */
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
					("%s: Early firmware event indicated 0x%x\n",
					 __func__, fw_indicator));
		}
	} else {
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;
	}

	return ATH_ISR_SCHED;
}
#else
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	return ATH_ISR_SCHED;
}
#endif /* #ifndef QCA_WIFI_3_0 */
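
/*
 * Wiring sketch (an assumption, for illustration only): the actual
 * registration lives in the PCI/tasklet glue elsewhere in the driver,
 * but a legacy ISR like this is typically hooked up with the standard
 * kernel API:
 *
 *	ret = request_irq(irq, hif_fw_interrupt_handler,
 *			  IRQF_SHARED, "wlan_firmware", scn);
 *	if (ret)
 *		HIF_ERROR("%s: request_irq failed: %d", __func__, ret);
 */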

/**
 * hif_wlan_disable(): call the platform driver to disable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode to the platform driver to disable
 * wlan.
 *
 * Return: void
 */
void hif_wlan_disable(struct hif_softc *scn)
{
	enum pld_driver_mode mode;
	uint32_t con_mode = hif_get_conparam(scn);

	if (scn->target_status == TARGET_STATUS_RESET)
		return;

	if (QDF_GLOBAL_FTM_MODE == con_mode)
		mode = PLD_FTM;
	else if (QDF_IS_EPPING_ENABLED(con_mode))
		mode = PLD_EPPING;
	else
		mode = PLD_MISSION;

	pld_wlan_disable(scn->qdf_dev->dev, mode);
}

int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
{
	QDF_STATUS status;
	uint8_t ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;

	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
					 HTC_CTRL_RSVD_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
		return qdf_status_to_os_return(status);
	}

	*ce_id = dl_pipe;

	return 0;
}
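
/*
 * Usage sketch (illustrative): resolve the wake CE once at init and
 * cache it, e.g. for wake-interrupt bookkeeping.
 *
 *	uint8_t wake_ce_id;
 *
 *	if (!hif_get_wake_ce_id(scn, &wake_ce_id))
 *		HIF_DBG("wake CE is %d", wake_ce_id);
 */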

#ifdef HIF_CE_LOG_INFO
/**
 * ce_get_index_info(): Get CE index info
 * @scn: HIF Context
 * @ce_state: CE opaque handle
 * @info: CE info
 *
 * Return: 0 for success and non-zero for failure
 */
static
int ce_get_index_info(struct hif_softc *scn, void *ce_state,
		      struct ce_index *info)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_get_index_info(scn, ce_state, info);
}

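/**
 * hif_log_ce_info() - copy CE index info into the hang event buffer
 * @scn: HIF Context
 * @data: hang event buffer to append to
 * @offset: in/out byte offset into @data, advanced by the bytes written
 *
 * Snapshot the tasklet counters and the read/write indices of the
 * tracked copy engines as a HANG_EVT_TAG_CE_INFO TLV.
 *
 * Return: none
 */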
void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
		     unsigned int *offset)
{
	struct hang_event_info info = {0};
	static uint32_t tracked_ce = BIT(CE_ID_1) | BIT(CE_ID_2) |
		BIT(CE_ID_3) | BIT(CE_ID_4) | BIT(CE_ID_9) | BIT(CE_ID_10);
	uint8_t curr_index = 0;
	uint8_t i;
	uint16_t size;

	info.active_tasklet_count = qdf_atomic_read(&scn->active_tasklet_cnt);
	info.active_grp_tasklet_cnt =
		qdf_atomic_read(&scn->active_grp_tasklet_cnt);

	for (i = 0; i < scn->ce_count; i++) {
		if (!(tracked_ce & BIT(i)) || !scn->ce_id_to_state[i])
			continue;

		if (ce_get_index_info(scn, scn->ce_id_to_state[i],
				      &info.ce_info[curr_index]))
			continue;

		curr_index++;
	}

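	/*
	 * Trim the TLV to the entries actually populated: drop the
	 * unused tail of the fixed-size ce_info[CE_COUNT_MAX] array
	 * from the copy.
	 */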
	info.ce_count = curr_index;
	size = sizeof(info) -
		(CE_COUNT_MAX - info.ce_count) * sizeof(struct ce_index);

	QDF_HANG_EVT_SET_HDR(&info.tlv_header, HANG_EVT_TAG_CE_INFO,
			     size - QDF_HANG_EVENT_TLV_HDR_SIZE);

	qdf_mem_copy(data + *offset, &info, size);
	*offset += size;
}
#endif