/*
 * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include <target_type.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#include "qdf_module.h"

#define CE_POLL_TIMEOUT 10      /* ms */

#define AGC_DUMP         1
#define CHANINFO_DUMP    2
#define BB_WATCHDOG_DUMP 3
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
#define PCIE_ACCESS_DUMP 4
#endif
#include "mp_dev.h"

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290) || \
	defined(QCA_WIFI_QCA6018)) && !defined(QCA_WIFI_SUPPORT_SRNG)
#define QCA_WIFI_SUPPORT_SRNG
#endif

/* Forward references */
QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix EV118783: poll to check whether a BMI response has arrived,
 * rather than waiting for the interrupt, which may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC  1000

#ifdef CONFIG_BYPASS_QMI
#define BYPASS_QMI 1
#else
#define BYPASS_QMI 0
#endif

#ifdef ENABLE_10_4_FW_HDR
#if (ENABLE_10_4_FW_HDR == 1)
#define WDI_IPA_SERVICE_GROUP 5
#define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
#define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
#define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
#endif /* ENABLE_10_4_FW_HDR == 1 */
#endif /* ENABLE_10_4_FW_HDR */

QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn);
static void hif_config_rri_on_ddr(struct hif_softc *scn);

/**
 * hif_target_access_log_dump() - dump the target access log
 *
 * Return: n/a
 */
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
static void hif_target_access_log_dump(void)
{
	hif_target_dump_access_log();
}
#endif

void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
		      uint8_t cmd_id, bool start)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	switch (cmd_id) {
	case AGC_DUMP:
		if (start)
			priv_start_agc(scn);
		else
			priv_dump_agc(scn);
		break;
	case CHANINFO_DUMP:
		if (start)
			priv_start_cap_chaninfo(scn);
		else
			priv_dump_chaninfo(scn);
		break;
	case BB_WATCHDOG_DUMP:
		priv_dump_bbwatchdog(scn);
		break;
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	case PCIE_ACCESS_DUMP:
		hif_target_access_log_dump();
		break;
#endif
	default:
		HIF_ERROR("%s: Invalid htc dump command", __func__);
		break;
	}
}

static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;

	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}

static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	QDF_ASSERT(0); /* n too large */
	return 0;
}

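/*
 * Illustrative only (not driver logic): a few worked values of
 * roundup_pwr2(), given the implementation above:
 *   roundup_pwr2(1)    == 1    (1 & 0 == 0, already a power of 2)
 *   roundup_pwr2(100)  == 128  (first power of 2 above 100)
 *   roundup_pwr2(1024) == 1024 (already a power of 2)
 * Non-power-of-2 values above 2^30 exhaust the loop, trip the
 * QDF_ASSERT and return 0; CE ring sizes are far below that bound.
 */
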
#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40

static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
#ifdef QCA_WIFI_3_0_ADRASTEA
	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
#endif
};

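/*
 * Descriptive note (not upstream documentation): each entry above pairs
 * a CE id with the register offset whose write index is shadowed for
 * that CE; source (host->target) CEs use the SRC offset and destination
 * (target->host) CEs use the DST offset. CE 7 appears in both lists
 * because the diag CE is bidirectional (see the CE_PCI table below).
 */
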
#ifdef QCN7605_SUPPORT
static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

#ifdef WLAN_FEATURE_EPPING
static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
 * mapping of HTC services to HIF pipes.
 */
/*
 * This authoritative table defines Copy Engine configuration and the mapping
 * of services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
 ============================================================================
 Purpose     | Service / Endpoint   | CE   | Direction | Xfer     | Xfer
             |                      |      |           | Size     | Frequency
 ============================================================================
 tx          | HTT_DATA (downlink)  | CE 0 | h->t      | medium - | very frequent
 descriptor  |                      |      |           | O(100B)  | and regular
 download    |                      |      |           |          |
 ----------------------------------------------------------------------------
 rx          | HTT_DATA (uplink)    | CE 1 | t->h      | small -  | frequent and
 indication  |                      |      |           | O(10B)   | regular
 upload      |                      |      |           |          |
 ----------------------------------------------------------------------------
 MSDU        | DATA_BK (uplink)     | CE 2 | t->h      | large -  | rare
 upload      |                      |      |           | O(1000B) | (frequent
 e.g. noise  |                      |      |           |          | during IP1.0
 packets     |                      |      |           |          | testing)
 ----------------------------------------------------------------------------
 MSDU        | DATA_BK (downlink)   | CE 3 | h->t      | large -  | very rare
 download    |                      |      |           | O(1000B) | (frequent
 e.g.        |                      |      |           |          | during IP1.0
 misdirected |                      |      |           |          | testing)
 EAPOL       |                      |      |           |          |
 packets     |                      |      |           |          |
 ----------------------------------------------------------------------------
 n/a         | DATA_BE, DATA_VI     | CE 2 | t->h      |          | never(?)
             | DATA_VO (uplink)     |      |           |          |
 ----------------------------------------------------------------------------
 n/a         | DATA_BE, DATA_VI     | CE 3 | h->t      |          | never(?)
             | DATA_VO (downlink)   |      |           |          |
 ----------------------------------------------------------------------------
 WMI events  | WMI_CONTROL (uplink) | CE 4 | t->h      | medium - | infrequent
             |                      |      |           | O(100B)  |
 ----------------------------------------------------------------------------
 WMI         | WMI_CONTROL          | CE 5 | h->t      | medium - | infrequent
 messages    | (downlink)           |      |           | O(100B)  |
 ----------------------------------------------------------------------------
 n/a         | HTC_CTRL_RSVD,       | CE 1 | t->h      |          | never(?)
             | HTC_RAW_STREAMS      |      |           |          |
             | (uplink)             |      |           |          |
 ----------------------------------------------------------------------------
 n/a         | HTC_CTRL_RSVD,       | CE 0 | h->t      |          | never(?)
             | HTC_RAW_STREAMS      |      |           |          |
             | (downlink)           |      |           |          |
 ----------------------------------------------------------------------------
 diag        | none (raw CE)        | CE 7 | t<>h      | 4        | Diag Window
             |                      |      |           |          | infrequent
 ============================================================================
 */

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		5,
	},
#if defined(QCA_WIFI_3_0_ADRASTEA)
	{
		HTT_DATA2_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		9,
	},
	{
		HTT_DATA3_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		10,
	},
	{
		PACKET_LOG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		11,
	},
#endif
	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

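/*
 * Descriptive note (not upstream documentation): each entry reads
 * { service, direction, pipe }. For example, { WMI_CONTROL_SVC,
 * PIPEDIR_OUT, 3 } routes WMI commands host -> target over CE 3, and
 * { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1 } routes HTT rx indications
 * target -> host over CE 1.
 */
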
/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN  = TARGET to HOST */
#if (defined(QCA_WIFI_QCA8074))
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
};
#endif

#if (defined(QCA_WIFI_QCA8074V2))
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9, },
	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6018))
static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
};
#endif

#if (defined(QCA_WIFI_QCN9000))
static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
};
#endif

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN  = TARGET to HOST */
#ifdef QCN7605_SUPPORT
static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
#ifdef IPA_OFFLOAD
	{ WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
#else
	{ HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
#endif
	{ PACKET_LOG_SVC, PIPEDIR_IN, 7, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif

#if (defined(QCA_WIFI_QCA6290))
#ifdef QCA_6290_AP_MODE
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6390))
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
};
#endif

static struct service_to_pipe target_service_to_ce_map_qca6490[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};

static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
#ifdef WLAN_FEATURE_FASTPATH
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		5,
	},
#else /* WLAN_FEATURE_FASTPATH */
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
#endif /* WLAN_FEATURE_FASTPATH */

	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

#ifdef WLAN_FEATURE_EPPING
static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
	{0, 0, 0,},                             /* Must be last */
};

void hif_select_epping_service_to_pipe_map(struct service_to_pipe
					   **tgt_svc_map_to_use,
					   uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
	*sz_tgt_svc_map_to_use =
		sizeof(target_service_to_ce_map_wlan_epping);
}
#endif

#ifdef QCN7605_SUPPORT
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
}
#else
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	HIF_ERROR("%s: QCN7605 not supported", __func__);
}
#endif

static void hif_select_service_to_pipe_map(struct hif_softc *scn,
			struct service_to_pipe **tgt_svc_map_to_use,
			uint32_t *sz_tgt_svc_map_to_use)
{
	uint32_t mode = hif_get_conparam(scn);
	struct hif_target_info *tgt_info = &scn->target_info;

	if (QDF_IS_EPPING_ENABLED(mode)) {
		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
						      sz_tgt_svc_map_to_use);
	} else {
		switch (tgt_info->target_type) {
		default:
			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_wlan);
			break;
		case TARGET_TYPE_QCN7605:
			hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
						  sz_tgt_svc_map_to_use);
			break;
		case TARGET_TYPE_AR900B:
		case TARGET_TYPE_QCA9984:
		case TARGET_TYPE_IPQ4019:
		case TARGET_TYPE_QCA9888:
		case TARGET_TYPE_AR9888:
		case TARGET_TYPE_AR9888V2:
			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_ar900b);
			break;
		case TARGET_TYPE_QCA6290:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6290);
			break;
		case TARGET_TYPE_QCA6390:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6390);
			break;
		case TARGET_TYPE_QCA6490:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6490;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6490);
			break;
		case TARGET_TYPE_QCA8074:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074);
			break;
		case TARGET_TYPE_QCA8074V2:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca8074_v2;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074_v2);
			break;
		case TARGET_TYPE_QCA6018:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca6018;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6018);
			break;
		case TARGET_TYPE_QCN9000:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qcn9000;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qcn9000);
			break;
		}
	}
}

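/*
 * Usage sketch (illustrative only): callers resolve the map once and
 * then walk it entry by entry, as ce_mark_datapath() does below:
 *
 *	struct service_to_pipe *svc_map;
 *	uint32_t map_sz, nentries;
 *
 *	hif_select_service_to_pipe_map(scn, &svc_map, &map_sz);
 *	nentries = map_sz / sizeof(struct service_to_pipe);
 */
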
/**
 * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
 * @ce_state: pointer to the state context of the CE
 *
 * Description:
 *   Sets the htt_rx_data/htt_tx_data attribute of the state structure
 *   if the CE serves one of the HTT DATA services.
 *
 * Return:
 *  false (attribute set to false)
 *  true  (attribute set to true);
 */
static bool ce_mark_datapath(struct CE_state *ce_state)
{
	struct service_to_pipe *svc_map;
	uint32_t map_sz, map_len;
	int i;
	bool rc = false;

	if (ce_state) {
		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
					       &map_sz);

		map_len = map_sz / sizeof(struct service_to_pipe);
		for (i = 0; i < map_len; i++) {
			if ((svc_map[i].pipenum == ce_state->id) &&
			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
				/* HTT CEs are unidirectional */
				if (svc_map[i].pipedir == PIPEDIR_IN)
					ce_state->htt_rx_data = true;
				else
					ce_state->htt_tx_data = true;
				rc = true;
			}
		}
	}
	return rc;
}

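/*
 * Illustrative example (derived from the tables above): with
 * target_service_to_ce_map_wlan, { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1 }
 * makes ce_mark_datapath() set htt_rx_data on CE 1, while
 * { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4 } sets htt_tx_data on CE 4; a CE
 * that carries no HTT DATA service leaves both flags unset and the
 * function returns false.
 */
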
/**
 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
 * @ce_id: ce in question
 * @ring: ring state being examined
 * @type: "src_ring" or "dest_ring" string for identifying the ring
 *
 * Warns on non-zero index values.
 * Causes a kernel panic if the ring is not empty during initialization.
 */
static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
					 char *type)
{
	if (ring->write_index != 0 || ring->sw_index != 0)
		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
			  ce_id, type, ring->sw_index, ring->write_index);
	if (ring->write_index != ring->sw_index)
		QDF_BUG(0);
}

#ifdef IPA_OFFLOAD
/**
 * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @base_addr: pointer to copyengine ring base address
 * @ce_ring: copyengine instance
 * @nentries: number of entries to allocate
 * @desc_size: ce desc size
 *
 * Return: QDF_STATUS_SUCCESS - for success
 */
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
	    !ce_srng_based(scn)) {
		if (!scn->ipa_ce_ring) {
			scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(
				scn->qdf_dev,
				nentries * desc_size + CE_DESC_RING_ALIGN);
			if (!scn->ipa_ce_ring) {
				HIF_ERROR(
				"%s: Failed to allocate memory for IPA ce ring",
				__func__);
				return QDF_STATUS_E_NOMEM;
			}
		}
		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
						  &scn->ipa_ce_ring->mem_info);
		ce_ring->base_addr_owner_space_unaligned =
						scn->ipa_ce_ring->vaddr;
	} else {
		ce_ring->base_addr_owner_space_unaligned =
			qdf_mem_alloc_consistent(scn->qdf_dev,
						 scn->qdf_dev->dev,
						 (nentries * desc_size +
						  CE_DESC_RING_ALIGN),
						 base_addr);
		if (!ce_ring->base_addr_owner_space_unaligned) {
			HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
				  __func__, CE_id);
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * ce_free_desc_ring() - Frees copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @ce_ring: copyengine instance
 * @desc_size: ce desc size
 *
 * Return: None
 */
static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
	    !ce_srng_based(scn)) {
		if (scn->ipa_ce_ring) {
			qdf_mem_shared_mem_free(scn->qdf_dev,
						scn->ipa_ce_ring);
			scn->ipa_ce_ring = NULL;
		}
		ce_ring->base_addr_owner_space_unaligned = NULL;
	} else {
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
			ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
			ce_ring->base_addr_owner_space_unaligned,
			ce_ring->base_addr_CE_space, 0);
		ce_ring->base_addr_owner_space_unaligned = NULL;
	}
}
#else
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	ce_ring->base_addr_owner_space_unaligned =
		qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					 (nentries * desc_size +
					  CE_DESC_RING_ALIGN), base_addr);
	if (!ce_ring->base_addr_owner_space_unaligned) {
		HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
			  __func__, CE_id);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
		ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
		ce_ring->base_addr_owner_space_unaligned,
		ce_ring->base_addr_CE_space, 0);
	ce_ring->base_addr_owner_space_unaligned = NULL;
}
#endif /* IPA_OFFLOAD */

/*
 * TODO: Need to explore the possibility of having this as part of a
 * target context instead of a global array.
 */
static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void);

void ce_service_register_module(enum ce_target_type target_type,
				struct ce_ops* (*ce_attach)(void))
{
	if (target_type < CE_MAX_TARGET_TYPE)
		ce_attach_register[target_type] = ce_attach;
}

qdf_export_symbol(ce_service_register_module);

/**
 * ce_srng_based() - Does this target use srng
 * @scn: pointer to the hif context
 *
 * Description:
 *   returns true if the target is SRNG based
 *
 * Return:
 *  false (target is not SRNG based)
 *  true  (target is SRNG based)
 */
bool ce_srng_based(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);

	switch (tgt_info->target_type) {
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6290:
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6490:
	case TARGET_TYPE_QCA6018:
	case TARGET_TYPE_QCN9000:
		return true;
	default:
		return false;
	}
	return false;
}
qdf_export_symbol(ce_srng_based);

#ifdef QCA_WIFI_SUPPORT_SRNG
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	struct ce_ops *ops = NULL;

	if (ce_srng_based(scn)) {
		if (ce_attach_register[CE_SVC_SRNG])
			ops = ce_attach_register[CE_SVC_SRNG]();
	} else if (ce_attach_register[CE_SVC_LEGACY]) {
		ops = ce_attach_register[CE_SVC_LEGACY]();
	}

	return ops;
}
#else /* QCA_WIFI_SUPPORT_SRNG */
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	if (ce_attach_register[CE_SVC_LEGACY])
		return ce_attach_register[CE_SVC_LEGACY]();

	return NULL;
}
#endif /* QCA_WIFI_SUPPORT_SRNG */

static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
		struct pld_shadow_reg_v2_cfg **shadow_config,
		int *num_shadow_registers_configured)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
			scn, shadow_config, num_shadow_registers_configured);
}

static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
					uint8_t ring_type)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_get_desc_size(ring_type);
}

static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
						 uint8_t ring_type,
						 uint32_t nentries)
{
	uint32_t ce_nbytes;
	char *ptr;
	qdf_dma_addr_t base_addr;
	struct CE_ring_state *ce_ring;
	uint32_t desc_size;
	struct hif_softc *scn = CE_state->scn;

	ce_nbytes = sizeof(struct CE_ring_state)
		+ (nentries * sizeof(void *));
	ptr = qdf_mem_malloc(ce_nbytes);
	if (!ptr)
		return NULL;

	ce_ring = (struct CE_ring_state *)ptr;
	ptr += sizeof(struct CE_ring_state);
	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	ce_ring->low_water_mark_nentries = 0;
	ce_ring->high_water_mark_nentries = nentries;
	ce_ring->per_transfer_context = (void **)ptr;

	desc_size = ce_get_desc_size(scn, ring_type);

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
			       ce_ring, nentries,
			       desc_size) !=
	    QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: ring has no DMA mem",
			  __func__);
		qdf_mem_free(ce_ring);
		return NULL;
	}
	ce_ring->base_addr_CE_space_unaligned = base_addr;

	/* Correctly initialize memory to 0 to
	 * prevent garbage data from crashing the system
	 * when downloading firmware
	 */
	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
		     nentries * desc_size +
		     CE_DESC_RING_ALIGN);

	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {

		ce_ring->base_addr_CE_space =
			(ce_ring->base_addr_CE_space_unaligned +
			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);

		ce_ring->base_addr_owner_space = (void *)
			(((size_t) ce_ring->base_addr_owner_space_unaligned +
			  CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
	} else {
		ce_ring->base_addr_CE_space =
				ce_ring->base_addr_CE_space_unaligned;
		ce_ring->base_addr_owner_space =
				ce_ring->base_addr_owner_space_unaligned;
	}

	return ce_ring;
}

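/*
 * Alignment arithmetic, worked through (illustrative; assumes
 * CE_DESC_RING_ALIGN is a power of 2, e.g. 8):
 *   (0x1004 + 8 - 1) & ~(8 - 1)  ==  0x100b & ~0x7  ==  0x1008
 * i.e. an unaligned base is rounded up to the next aligned boundary,
 * which is why ce_alloc_desc_ring() over-allocates by
 * CE_DESC_RING_ALIGN bytes.
 */
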
static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
			 uint32_t ce_id, struct CE_ring_state *ring,
			 struct CE_attr *attr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
						     ring, attr);
}

int hif_ce_bus_early_suspend(struct hif_softc *scn)
{
	uint8_t ul_pipe, dl_pipe;
	int ce_id, status, ul_is_polled, dl_is_polled;
	struct CE_state *ce_state;

	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		HIF_ERROR("%s: pipe_mapping failure", __func__);
		return status;
	}

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (ce_id == ul_pipe)
			continue;
		if (ce_id == dl_pipe)
			continue;

		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_RUNNING)
			ce_state->state = CE_PAUSED;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
	}

	return status;
}

int hif_ce_bus_late_resume(struct hif_softc *scn)
{
	int ce_id;
	struct CE_state *ce_state;
	int write_index = 0;
	bool index_updated;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_PENDING) {
			write_index = ce_state->src_ring->write_index;
			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
						  write_index);
			ce_state->state = CE_RUNNING;
			index_updated = true;
		} else {
			index_updated = false;
		}

		if (ce_state->state == CE_PAUSED)
			ce_state->state = CE_RUNNING;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);

		if (index_updated)
			hif_record_ce_desc_event(scn, ce_id,
						 RESUME_WRITE_INDEX_UPDATE,
						 NULL, NULL, write_index, 0);
	}

	return 0;
}

/**
 * ce_oom_recovery() - try to recover rx ce from oom condition
 * @context: CE_state of the CE with oom rx ring
 *
 * The executing work will continue to be rescheduled until
 * at least 1 descriptor is successfully posted to the rx ring.
 *
 * Return: none
 */
static void ce_oom_recovery(void *context)
{
	struct CE_state *ce_state = context;
	struct hif_softc *scn = ce_state->scn;
	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&ce_softc->pipe_info[ce_state->id];

	hif_post_recv_buffers_for_pipe(pipe_info);
}

#ifdef HIF_CE_DEBUG_DATA_BUF
/**
 * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed by
 * the CE descriptors.
 * Allocates HIF_CE_HISTORY_MAX records of CE_DEBUG_MAX_DATA_BUF_SIZE each.
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev =
	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return QDF_STATUS_E_NOMEM;

	scn->hif_ce_desc_hist.data_enable[ce_id] = true;
	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		event->data =
			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
		if (!event->data) {
			hif_err_rl("ce debug data alloc failed");
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * free_mem_ce_debug_hist_data() - Free mem of the data pointed by
 * the CE descriptors.
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: None
 */
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev =
	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return;

	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		if (event->data)
			qdf_mem_free(event->data);
		event->data = NULL;
		event = NULL;
	}
}
#endif /* HIF_CE_DEBUG_DATA_BUF */

#ifndef HIF_CE_DEBUG_DATA_DYNAMIC_BUF
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];

/**
 * alloc_mem_ce_debug_history() - Allocate CE descriptor history
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 * @src_nentries: source ce ring entries
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id,
			   uint32_t src_nentries)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

	ce_hist->hist_ev[ce_id] = hif_ce_desc_history[ce_id];
	ce_hist->enable[ce_id] = 1;

	if (src_nentries)
		alloc_mem_ce_debug_hist_data(scn, ce_id);
	else
		ce_hist->data_enable[ce_id] = false;

	return QDF_STATUS_SUCCESS;
}

/**
 * free_mem_ce_debug_history() - Free CE descriptor history
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: None
 */
static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

	ce_hist->enable[ce_id] = 0;
	if (ce_hist->data_enable[ce_id]) {
		ce_hist->data_enable[ce_id] = false;
		free_mem_ce_debug_hist_data(scn, ce_id);
	}
	ce_hist->hist_ev[ce_id] = NULL;
}
#else
static inline QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
			   uint32_t src_nentries)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
#endif /* (HIF_CONFIG_SLUB_DEBUG_ON) || (HIF_CE_DEBUG_DATA_BUF) */
#else
#if defined(HIF_CE_DEBUG_DATA_BUF)

static QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
			   uint32_t src_nentries)
{
	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));

	if (!scn->hif_ce_desc_hist.hist_ev[CE_id]) {
		scn->hif_ce_desc_hist.enable[CE_id] = 0;
		return QDF_STATUS_E_NOMEM;
	} else {
		scn->hif_ce_desc_hist.enable[CE_id] = 1;
		return QDF_STATUS_SUCCESS;
	}
}

static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	struct hif_ce_desc_event *hist_ev = ce_hist->hist_ev[CE_id];

	if (!hist_ev)
		return;

	if (ce_hist->data_enable[CE_id]) {
		ce_hist->data_enable[CE_id] = false;
		free_mem_ce_debug_hist_data(scn, CE_id);
	}

	ce_hist->enable[CE_id] = 0;
	qdf_mem_free(ce_hist->hist_ev[CE_id]);
	ce_hist->hist_ev[CE_id] = NULL;
}

#else

static inline QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
			   uint32_t src_nentries)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
#endif /* HIF_CE_DEBUG_DATA_BUF */
#endif /* HIF_CE_DEBUG_DATA_DYNAMIC_BUF */

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
/**
 * reset_ce_debug_history() - reset the index and ce id used for dumping the
 * CE records on the console using sysfs.
 * @scn: hif scn handle
 *
 * Return: None
 */
static inline void reset_ce_debug_history(struct hif_softc *scn)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	/* Initialise the CE debug history sysfs interface inputs ce_id and
	 * index. Disable data storing
	 */
	ce_hist->hist_index = 0;
	ce_hist->hist_id = 0;
}
#else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
static inline void reset_ce_debug_history(struct hif_softc *scn) { }
#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */

void ce_enable_polling(void *cestate)
{
	struct CE_state *CE_state = (struct CE_state *)cestate;

	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
		CE_state->timer_inited = true;
}

void ce_disable_polling(void *cestate)
{
	struct CE_state *CE_state = (struct CE_state *)cestate;

	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
		CE_state->timer_inited = false;
}

Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001487/*
1488 * Initialize a Copy Engine based on caller-supplied attributes.
1489 * This may be called once to initialize both source and destination
1490 * rings or it may be called twice for separate source and destination
1491 * initialization. It may be that only one side or the other is
1492 * initialized by software/firmware.
Houston Hoffman233e9092015-09-02 13:37:21 -07001493 *
 * This should be called during the initialization sequence before
 * interrupts are enabled, so we don't have to worry about thread safety.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001496 */
Komal Seelam644263d2016-02-22 20:45:49 +05301497struct CE_handle *ce_init(struct hif_softc *scn,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001498 unsigned int CE_id, struct CE_attr *attr)
1499{
1500 struct CE_state *CE_state;
1501 uint32_t ctrl_addr;
1502 unsigned int nentries;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001503 bool malloc_CE_state = false;
1504 bool malloc_src_ring = false;
Yun Park3fb36442017-08-17 17:37:53 -07001505 int status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001506
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301507 QDF_ASSERT(CE_id < scn->ce_count);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001508 ctrl_addr = CE_BASE_ADDRESS(CE_id);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001509 CE_state = scn->ce_id_to_state[CE_id];
1510
1511 if (!CE_state) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001512 CE_state =
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301513 (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
Madhvapathi Srirambfb01122019-01-07 09:17:29 +05301514 if (!CE_state)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001515 return NULL;
Madhvapathi Srirambfb01122019-01-07 09:17:29 +05301516
Houston Hoffman233e9092015-09-02 13:37:21 -07001517 malloc_CE_state = true;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301518 qdf_spinlock_create(&CE_state->ce_index_lock);
Houston Hoffman233e9092015-09-02 13:37:21 -07001519
1520 CE_state->id = CE_id;
1521 CE_state->ctrl_addr = ctrl_addr;
1522 CE_state->state = CE_RUNNING;
1523 CE_state->attr_flags = attr->flags;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001524 }
1525 CE_state->scn = scn;
Aditya Sathish80bbaef2018-10-25 10:02:05 +05301526 CE_state->service = ce_engine_service_reg;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001527
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301528 qdf_atomic_init(&CE_state->rx_pending);
Jeff Johnson8d639a02019-03-18 09:51:11 -07001529 if (!attr) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001530 /* Already initialized; caller wants the handle */
1531 return (struct CE_handle *)CE_state;
1532 }
1533
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001534 if (CE_state->src_sz_max)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301535 QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001536 else
1537 CE_state->src_sz_max = attr->src_sz_max;
1538
c_cgodavfda96ad2017-09-07 16:16:00 +05301539 ce_init_ce_desc_event_log(scn, CE_id,
1540 attr->src_nentries + attr->dest_nentries);
Houston Hoffman68e837e2015-12-04 12:57:24 -08001541
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001542 /* source ring setup */
1543 nentries = attr->src_nentries;
1544 if (nentries) {
1545 struct CE_ring_state *src_ring;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001546
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001547 nentries = roundup_pwr2(nentries);
1548 if (CE_state->src_ring) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301549 QDF_ASSERT(CE_state->src_ring->nentries == nentries);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001550 } else {
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301551 src_ring = CE_state->src_ring =
1552 ce_alloc_ring_state(CE_state,
1553 CE_RING_SRC,
1554 nentries);
1555 if (!src_ring) {
				/* cannot allocate src ring. If the
				 * CE_state was allocated locally, free
				 * CE_state and return an error.
				 */
1560 HIF_ERROR("%s: src ring has no mem", __func__);
1561 if (malloc_CE_state) {
1562 /* allocated CE_state locally */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301563 qdf_mem_free(CE_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001564 malloc_CE_state = false;
1565 }
1566 return NULL;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001567 }
			/* src ring allocated successfully. Mark that the src
			 * ring is allocated locally
1570 */
1571 malloc_src_ring = true;
1572
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001573 /*
1574 * Also allocate a shadow src ring in
1575 * regular mem to use for faster access.
1576 */
1577 src_ring->shadow_base_unaligned =
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301578 qdf_mem_malloc(nentries *
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001579 sizeof(struct CE_src_desc) +
1580 CE_DESC_RING_ALIGN);
Madhvapathi Srirambfb01122019-01-07 09:17:29 +05301581 if (!src_ring->shadow_base_unaligned)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001582 goto error_no_dma_mem;
Madhvapathi Srirambfb01122019-01-07 09:17:29 +05301583
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001584 src_ring->shadow_base = (struct CE_src_desc *)
1585 (((size_t) src_ring->shadow_base_unaligned +
1586 CE_DESC_RING_ALIGN - 1) &
1587 ~(CE_DESC_RING_ALIGN - 1));
1588
Yun Park3fb36442017-08-17 17:37:53 -07001589 status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
1590 src_ring, attr);
1591 if (status < 0)
Houston Hoffman4411ad42016-03-14 21:12:04 -07001592 goto error_target_access;
Houston Hoffmanf789c662016-04-12 15:39:04 -07001593
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301594 ce_ring_test_initial_indexes(CE_id, src_ring,
1595 "src_ring");
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001596 }
1597 }
1598
1599 /* destination ring setup */
1600 nentries = attr->dest_nentries;
1601 if (nentries) {
1602 struct CE_ring_state *dest_ring;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001603
1604 nentries = roundup_pwr2(nentries);
1605 if (CE_state->dest_ring) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301606 QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001607 } else {
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301608 dest_ring = CE_state->dest_ring =
1609 ce_alloc_ring_state(CE_state,
1610 CE_RING_DEST,
1611 nentries);
1612 if (!dest_ring) {
				/* cannot allocate dst ring. If the CE_state
				 * or src ring was allocated locally, free
				 * CE_state and the src ring, and return an
				 * error.
				 */
1617 HIF_ERROR("%s: dest ring has no mem",
1618 __func__);
Poddar, Siddarth55d6da02017-03-31 18:42:54 +05301619 goto error_no_dma_mem;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001620 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001621
Yun Park3fb36442017-08-17 17:37:53 -07001622 status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001623 dest_ring, attr);
Yun Park3fb36442017-08-17 17:37:53 -07001624 if (status < 0)
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301625 goto error_target_access;
Houston Hoffman47808172016-05-06 10:04:21 -07001626
1627 ce_ring_test_initial_indexes(CE_id, dest_ring,
1628 "dest_ring");
1629
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301630 /* For srng based target, init status ring here */
1631 if (ce_srng_based(CE_state->scn)) {
1632 CE_state->status_ring =
1633 ce_alloc_ring_state(CE_state,
1634 CE_RING_STATUS,
1635 nentries);
Jeff Johnson8d639a02019-03-18 09:51:11 -07001636 if (!CE_state->status_ring) {
					/* Allocation failed. Clean up. */
1638 qdf_mem_free(CE_state->dest_ring);
1639 if (malloc_src_ring) {
1640 qdf_mem_free
1641 (CE_state->src_ring);
1642 CE_state->src_ring = NULL;
1643 malloc_src_ring = false;
1644 }
1645 if (malloc_CE_state) {
1646 /* allocated CE_state locally */
1647 scn->ce_id_to_state[CE_id] =
1648 NULL;
1649 qdf_mem_free(CE_state);
1650 malloc_CE_state = false;
1651 }
Houston Hoffman4411ad42016-03-14 21:12:04 -07001652
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301653 return NULL;
1654 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001655
Yun Park3fb36442017-08-17 17:37:53 -07001656 status = ce_ring_setup(scn, CE_RING_STATUS,
1657 CE_id, CE_state->status_ring,
1658 attr);
1659 if (status < 0)
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301660 goto error_target_access;
1661
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001662 }
Houston Hoffman31b25ec2016-09-19 13:12:30 -07001663
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001664 /* epping */
1665 /* poll timer */
Nandha Kishore Easwaran7cdaae22018-07-30 15:02:51 +05301666 if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301667 qdf_timer_init(scn->qdf_dev,
Nandha Kishore Easwaran7cdaae22018-07-30 15:02:51 +05301668 &CE_state->poll_timer,
1669 ce_poll_timeout,
1670 CE_state,
1671 QDF_TIMER_TYPE_WAKE_APPS);
1672 ce_enable_polling(CE_state);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301673 qdf_timer_mod(&CE_state->poll_timer,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001674 CE_POLL_TIMEOUT);
1675 }
1676 }
1677 }
1678
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301679 if (!ce_srng_based(scn)) {
1680 /* Enable CE error interrupts */
1681 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1682 goto error_target_access;
1683 CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
1684 if (Q_TARGET_ACCESS_END(scn) < 0)
1685 goto error_target_access;
1686 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001687
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08001688 qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
1689 ce_oom_recovery, CE_state);
1690
Houston Hoffmanc7d54292016-04-13 18:55:37 -07001691 /* update the htt_data attribute */
1692 ce_mark_datapath(CE_state);
Houston Hoffmanb01db182017-03-13 14:38:09 -07001693 scn->ce_id_to_state[CE_id] = CE_state;
Houston Hoffmanc7d54292016-04-13 18:55:37 -07001694
Venkata Sharath Chandra Manchalaec01bbc2019-04-25 13:31:34 -07001695 alloc_mem_ce_debug_history(scn, CE_id, attr->src_nentries);
c_cgodavfda96ad2017-09-07 16:16:00 +05301696
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001697 return (struct CE_handle *)CE_state;
1698
Houston Hoffman4411ad42016-03-14 21:12:04 -07001699error_target_access:
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001700error_no_dma_mem:
1701 ce_fini((struct CE_handle *)CE_state);
1702 return NULL;
1703}
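
/*
 * Example (not compiled): a minimal sketch of bringing up one copy engine
 * with ce_init() and pairing it with ce_fini(). The attribute values are
 * illustrative only; real attributes come from the host CE configuration
 * tables, and "example_" names are hypothetical.
 */
#if 0
static struct CE_handle *example_bringup_ce(struct hif_softc *scn,
					    unsigned int ce_id)
{
	struct CE_attr attr;

	qdf_mem_zero(&attr, sizeof(attr));
	attr.flags = 0;
	attr.src_nentries = 16;		/* rounded up to a power of 2 */
	attr.dest_nentries = 32;
	attr.src_sz_max = 2048;

	/* Call before interrupts are enabled; ce_init() is not
	 * thread safe
	 */
	return ce_init(scn, ce_id, &attr);
}

static void example_teardown_ce(struct CE_handle *ce_hdl)
{
	if (ce_hdl)
		ce_fini(ce_hdl);
}
#endif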
1704
Aditya Sathish80bbaef2018-10-25 10:02:05 +05301705/**
1706 * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs
1707 * @hif_ctx: HIF Context
1708 *
1709 * API to check if polling is enabled on all CEs. Returns true when polling
1710 * is enabled on all CEs.
1711 *
1712 * Return: bool
1713 */
1714bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
1715{
1716 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1717 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1718 struct CE_attr *attr;
1719 int id;
1720
1721 for (id = 0; id < scn->ce_count; id++) {
1722 attr = &hif_state->host_ce_config[id];
1723 if (attr && (attr->dest_nentries) &&
1724 !(attr->flags & CE_ATTR_ENABLE_POLL))
1725 return false;
1726 }
1727 return true;
1728}
1729qdf_export_symbol(hif_is_polled_mode_enabled);
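
/*
 * Example (not compiled): a sketch of how a client might branch on polled
 * mode, e.g. to skip CE interrupt setup entirely.
 * "example_configure_irqs" is a hypothetical helper, not part of this file.
 */
#if 0
static void example_setup_delivery(struct hif_opaque_softc *hif_ctx)
{
	if (hif_is_polled_mode_enabled(hif_ctx))
		return;	/* all CEs are polled; no interrupts to configure */

	example_configure_irqs(hif_ctx);
}
#endif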
1730
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001731#ifdef WLAN_FEATURE_FASTPATH
1732/**
 * hif_enable_fastpath() - update that we have enabled fastpath mode
 * @hif_ctx: HIF context
 *
 * For use in data path
 *
 * Return: void
1739 */
1740void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
1741{
1742 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1743
Houston Hoffmand63cd742016-12-05 11:59:56 -08001744 if (ce_srng_based(scn)) {
1745 HIF_INFO("%s, srng rings do not support fastpath", __func__);
1746 return;
1747 }
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -08001748 HIF_DBG("%s, Enabling fastpath mode", __func__);
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001749 scn->fastpath_mode_on = true;
1750}
1751
1752/**
 * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
1754 * @hif_ctx: HIF Context
1755 *
1756 * For use in data path to skip HTC
1757 *
1758 * Return: bool
1759 */
1760bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
1761{
1762 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1763
1764 return scn->fastpath_mode_on;
1765}
1766
Nandha Kishore Easwaran7cdaae22018-07-30 15:02:51 +05301767/**
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001768 * hif_get_ce_handle - API to get CE handle for FastPath mode
1769 * @hif_ctx: HIF Context
1770 * @id: CopyEngine Id
1771 *
1772 * API to return CE handle for fastpath mode
1773 *
 * Return: CE handle
1775 */
1776void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
1777{
1778 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1779
1780 return scn->ce_id_to_state[id];
1781}
Aditya Sathish80bbaef2018-10-25 10:02:05 +05301782qdf_export_symbol(hif_get_ce_handle);
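
/*
 * Example (not compiled): fastpath clients bypass HTC and talk to the copy
 * engine directly; a sketch of fetching the handle for a datapath CE. The
 * CE id 4 is illustrative only, and "example_" names are hypothetical.
 */
#if 0
static struct CE_handle *example_get_fastpath_ce(
				struct hif_opaque_softc *hif_ctx)
{
	if (!hif_is_fastpath_mode_enabled(hif_ctx))
		return NULL;

	return (struct CE_handle *)hif_get_ce_handle(hif_ctx, 4);
}
#endif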
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001783
1784/**
 * ce_h2t_tx_ce_cleanup() - placeholder function for H2T CE cleanup.
 * No processing is required inside this function.
 * @ce_hdl: Copy engine handle
 * Using an assert, this function makes sure that
 * the TX CE has been processed completely.
 *
 * This is called while dismantling CE structures. No other thread
 * should be using these structures while dismantling is occurring,
 * therefore no locking is needed.
1794 *
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001795 * Return: none
1796 */
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001797void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001798{
1799 struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1800 struct CE_ring_state *src_ring = ce_state->src_ring;
Komal Seelam644263d2016-02-22 20:45:49 +05301801 struct hif_softc *sc = ce_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001802 uint32_t sw_index, write_index;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001803
Houston Hoffman85925072016-05-06 17:02:18 -07001804 if (hif_is_nss_wifi_enabled(sc))
1805 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001806
Houston Hoffmanc7d54292016-04-13 18:55:37 -07001807 if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -08001808 HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
Houston Hoffman85925072016-05-06 17:02:18 -07001809 __func__, __LINE__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001810 sw_index = src_ring->sw_index;
1811 write_index = src_ring->sw_index;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001812
1813 /* At this point Tx CE should be clean */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301814 qdf_assert_always(sw_index == write_index);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001815 }
1816}
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001817
1818/**
1819 * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
1820 * @ce_hdl: Handle to CE
1821 *
1822 * These buffers are never allocated on the fly, but
1823 * are allocated only once during HIF start and freed
1824 * only once during HIF stop.
1825 * NOTE:
1826 * The assumption here is there is no in-flight DMA in progress
1827 * currently, so that buffers can be freed up safely.
1828 *
1829 * Return: NONE
1830 */
1831void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
1832{
1833 struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1834 struct CE_ring_state *dst_ring = ce_state->dest_ring;
1835 qdf_nbuf_t nbuf;
1836 int i;
1837
Houston Hoffman7fe51b12016-11-14 18:01:05 -08001838 if (ce_state->scn->fastpath_mode_on == false)
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001839 return;
Houston Hoffman7fe51b12016-11-14 18:01:05 -08001840
1841 if (!ce_state->htt_rx_data)
1842 return;
1843
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001844 /*
	 * When fastpath_mode is on, for datapath CEs: unlike other CEs,
	 * this CE is completely full and does not leave one blank space
	 * to distinguish between an empty queue and a full queue, so free
	 * all the entries.
1849 */
1850 for (i = 0; i < dst_ring->nentries; i++) {
1851 nbuf = dst_ring->per_transfer_context[i];
1852
1853 /*
1854 * The reasons for doing this check are:
1855 * 1) Protect against calling cleanup before allocating buffers
1856 * 2) In a corner case, FASTPATH_mode_on may be set, but we
1857 * could have a partially filled ring, because of a memory
1858 * allocation failure in the middle of allocating ring.
1859 * This check accounts for that case, checking
1860 * fastpath_mode_on flag or started flag would not have
1861 * covered that case. This is not in performance path,
1862 * so OK to do this.
1863 */
Houston Hoffman1c728302017-03-10 16:58:49 -08001864 if (nbuf) {
1865 qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
1866 QDF_DMA_FROM_DEVICE);
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001867 qdf_nbuf_free(nbuf);
Houston Hoffman1c728302017-03-10 16:58:49 -08001868 }
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001869 }
1870}
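
/*
 * Worked example of the "completely full" property described above, using
 * an illustrative ring with nentries = 8: a normal CE rx ring keeps one
 * slot empty, so at most 7 buffers are posted and sw_index == write_index
 * means "empty". A fastpath datapath rx ring posts all 8 entries and
 * reuses them in place, which is why the cleanup loop above walks every
 * index from 0 to nentries - 1 instead of walking from sw_index to
 * write_index.
 */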
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001871
1872/**
1873 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
1874 * @scn: HIF handle
1875 *
 * Datapath Rx CEs are a special case, where we reuse all the message buffers.
 * Hence we have to post all the entries in the pipe, even at the beginning,
 * unlike other CE pipes where one less than dest_nentries is filled at
 * the beginning.
1880 *
1881 * Return: None
1882 */
1883static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1884{
1885 int pipe_num;
1886 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1887
1888 if (scn->fastpath_mode_on == false)
1889 return;
1890
1891 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1892 struct HIF_CE_pipe_info *pipe_info =
1893 &hif_state->pipe_info[pipe_num];
1894 struct CE_state *ce_state =
1895 scn->ce_id_to_state[pipe_info->pipe_num];
1896
1897 if (ce_state->htt_rx_data)
1898 atomic_inc(&pipe_info->recv_bufs_needed);
1899 }
1900}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001901#else
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001902static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001903{
1904}
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001905
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001906static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001907{
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001908 return false;
1909}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001910#endif /* WLAN_FEATURE_FASTPATH */
1911
1912void ce_fini(struct CE_handle *copyeng)
1913{
1914 struct CE_state *CE_state = (struct CE_state *)copyeng;
1915 unsigned int CE_id = CE_state->id;
Komal Seelam644263d2016-02-22 20:45:49 +05301916 struct hif_softc *scn = CE_state->scn;
Kiran Venkatappaae1a3702017-12-29 21:08:10 +05301917 uint32_t desc_size;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001918
Balamurugan Mahalingamf6d30352018-01-31 16:17:24 +05301919 bool inited = CE_state->timer_inited;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001920 CE_state->state = CE_UNUSED;
1921 scn->ce_id_to_state[CE_id] = NULL;
Balamurugan Mahalingamf6d30352018-01-31 16:17:24 +05301922 /* Set the flag to false first to stop processing in ce_poll_timeout */
Nandha Kishore Easwaran7cdaae22018-07-30 15:02:51 +05301923 ce_disable_polling(CE_state);
1924
Dhanashri Atre991ee4d2017-05-03 19:03:10 -07001925 qdf_lro_deinit(CE_state->lro_data);
1926
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001927 if (CE_state->src_ring) {
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001928 /* Cleanup the datapath Tx ring */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001929 ce_h2t_tx_ce_cleanup(copyeng);
1930
Kiran Venkatappaae1a3702017-12-29 21:08:10 +05301931 desc_size = ce_get_desc_size(scn, CE_RING_SRC);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001932 if (CE_state->src_ring->shadow_base_unaligned)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301933 qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001934 if (CE_state->src_ring->base_addr_owner_space_unaligned)
Sravan Kumar Kairam58e0adf2018-02-27 18:37:40 +05301935 ce_free_desc_ring(scn, CE_state->id,
1936 CE_state->src_ring,
1937 desc_size);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301938 qdf_mem_free(CE_state->src_ring);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001939 }
1940 if (CE_state->dest_ring) {
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001941 /* Cleanup the datapath Rx ring */
1942 ce_t2h_msg_ce_cleanup(copyeng);
1943
Kiran Venkatappaae1a3702017-12-29 21:08:10 +05301944 desc_size = ce_get_desc_size(scn, CE_RING_DEST);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001945 if (CE_state->dest_ring->base_addr_owner_space_unaligned)
Sravan Kumar Kairam58e0adf2018-02-27 18:37:40 +05301946 ce_free_desc_ring(scn, CE_state->id,
1947 CE_state->dest_ring,
1948 desc_size);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301949 qdf_mem_free(CE_state->dest_ring);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001950
1951 /* epping */
Balamurugan Mahalingamf6d30352018-01-31 16:17:24 +05301952 if (inited) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301953 qdf_timer_free(&CE_state->poll_timer);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001954 }
1955 }
Houston Hoffman31b25ec2016-09-19 13:12:30 -07001956 if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301957 /* Cleanup the datapath Tx ring */
1958 ce_h2t_tx_ce_cleanup(copyeng);
1959
1960 if (CE_state->status_ring->shadow_base_unaligned)
1961 qdf_mem_free(
1962 CE_state->status_ring->shadow_base_unaligned);
1963
Kiran Venkatappaae1a3702017-12-29 21:08:10 +05301964 desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301965 if (CE_state->status_ring->base_addr_owner_space_unaligned)
Sravan Kumar Kairam58e0adf2018-02-27 18:37:40 +05301966 ce_free_desc_ring(scn, CE_state->id,
1967 CE_state->status_ring,
1968 desc_size);
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301969 qdf_mem_free(CE_state->status_ring);
1970 }
Houston Hoffman03f46572016-12-12 12:53:56 -08001971
c_cgodavfda96ad2017-09-07 16:16:00 +05301972 free_mem_ce_debug_history(scn, CE_id);
1973 reset_ce_debug_history(scn);
1974 ce_deinit_ce_desc_event_log(scn, CE_id);
1975
Houston Hoffman03f46572016-12-12 12:53:56 -08001976 qdf_spinlock_destroy(&CE_state->ce_index_lock);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301977 qdf_mem_free(CE_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001978}
1979
Komal Seelam5584a7c2016-02-24 19:22:48 +05301980void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001981{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301982 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001983
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301984 qdf_mem_zero(&hif_state->msg_callbacks_pending,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001985 sizeof(hif_state->msg_callbacks_pending));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301986 qdf_mem_zero(&hif_state->msg_callbacks_current,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001987 sizeof(hif_state->msg_callbacks_current));
1988}
1989
1990/* Send the first nbytes bytes of the buffer */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301991QDF_STATUS
Komal Seelam5584a7c2016-02-24 19:22:48 +05301992hif_send_head(struct hif_opaque_softc *hif_ctx,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001993 uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301994 qdf_nbuf_t nbuf, unsigned int data_attr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001995{
Komal Seelam644263d2016-02-22 20:45:49 +05301996 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05301997 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001998 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1999 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
2000 int bytes = nbytes, nfrags = 0;
2001 struct ce_sendlist sendlist;
2002 int status, i = 0;
2003 unsigned int mux_id = 0;
2004
Santosh Anbudbfae9b2018-07-12 15:40:49 +05302005 if (nbytes > qdf_nbuf_len(nbuf)) {
2006 HIF_ERROR("%s: nbytes:%d nbuf_len:%d", __func__, nbytes,
2007 (uint32_t)qdf_nbuf_len(nbuf));
2008 QDF_ASSERT(0);
2009 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002010
2011 transfer_id =
2012 (mux_id & MUX_ID_MASK) |
2013 (transfer_id & TRANSACTION_ID_MASK);
2014 data_attr &= DESC_DATA_FLAG_MASK;
2015 /*
2016 * The common case involves sending multiple fragments within a
2017 * single download (the tx descriptor and the tx frame header).
2018 * So, optimize for the case of multiple fragments by not even
2019 * checking whether it's necessary to use a sendlist.
2020 * The overhead of using a sendlist for a single buffer download
2021 * is not a big deal, since it happens rarely (for WMI messages).
2022 */
2023 ce_sendlist_init(&sendlist);
2024 do {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302025 qdf_dma_addr_t frag_paddr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002026 int frag_bytes;
2027
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302028 frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
2029 frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002030 /*
2031 * Clear the packet offset for all but the first CE desc.
2032 */
2033 if (i++ > 0)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302034 data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002035
2036 status = ce_sendlist_buf_add(&sendlist, frag_paddr,
2037 frag_bytes >
2038 bytes ? bytes : frag_bytes,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302039 qdf_nbuf_get_frag_is_wordstream
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002040 (nbuf,
2041 nfrags) ? 0 :
2042 CE_SEND_FLAG_SWAP_DISABLE,
2043 data_attr);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302044 if (status != QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002045 HIF_ERROR("%s: error, frag_num %d larger than limit",
2046 __func__, nfrags);
2047 return status;
2048 }
2049 bytes -= frag_bytes;
2050 nfrags++;
2051 } while (bytes > 0);
2052
2053 /* Make sure we have resources to handle this request */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302054 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002055 if (pipe_info->num_sends_allowed < nfrags) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302056 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002057 ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302058 return QDF_STATUS_E_RESOURCES;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002059 }
2060 pipe_info->num_sends_allowed -= nfrags;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302061 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002062
Jeff Johnson8d639a02019-03-18 09:51:11 -07002063 if (qdf_unlikely(!ce_hdl)) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002064 HIF_ERROR("%s: error CE handle is null", __func__);
2065 return A_ERROR;
2066 }
2067
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302068 QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302069 DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
Nandha Kishore Easwarane43583f2017-05-15 21:01:13 +05302070 QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
2071 sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002072 status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302073 QDF_ASSERT(status == QDF_STATUS_SUCCESS);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002074
2075 return status;
2076}
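
/*
 * Example (not compiled): a minimal sketch of the common two-fragment
 * download described above - one fragment for the tx descriptor, one for
 * the frame header - built directly with the sendlist API. The nbuf is
 * assumed to already have both fragments mapped; the transfer id of 0 and
 * the "example_" name are illustrative only.
 */
#if 0
static QDF_STATUS example_two_frag_send(struct CE_handle *ce_hdl,
					qdf_nbuf_t nbuf, uint32_t data_attr)
{
	struct ce_sendlist sendlist;
	QDF_STATUS status;
	int frag;

	ce_sendlist_init(&sendlist);
	for (frag = 0; frag < 2; frag++) {
		/* Only the first descriptor keeps the packet offset */
		status = ce_sendlist_buf_add(&sendlist,
				qdf_nbuf_get_frag_paddr(nbuf, frag),
				qdf_nbuf_get_frag_len(nbuf, frag),
				CE_SEND_FLAG_SWAP_DISABLE,
				frag ? (data_attr &
					~QDF_CE_TX_PKT_OFFSET_BIT_M)
				     : data_attr);
		if (status != QDF_STATUS_SUCCESS)
			return status;
	}

	return ce_sendlist_send(ce_hdl, nbuf, &sendlist, 0);
}
#endif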
2077
Komal Seelam5584a7c2016-02-24 19:22:48 +05302078void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
2079 int force)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002080{
Komal Seelam644263d2016-02-22 20:45:49 +05302081 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302082 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Komal Seelam644263d2016-02-22 20:45:49 +05302083
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002084 if (!force) {
2085 int resources;
2086 /*
2087 * Decide whether to actually poll for completions, or just
2088 * wait for a later chance. If there seem to be plenty of
2089 * resources left, then just wait, since checking involves
2090 * reading a CE register, which is a relatively expensive
2091 * operation.
2092 */
Komal Seelam644263d2016-02-22 20:45:49 +05302093 resources = hif_get_free_queue_number(hif_ctx, pipe);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002094 /*
2095 * If at least 50% of the total resources are still available,
2096 * don't bother checking again yet.
2097 */
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002098 if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
2099 1))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002100 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002101 }
Houston Hoffman56e0d702016-05-05 17:48:06 -07002102#if ATH_11AC_TXCOMPACT
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002103 ce_per_engine_servicereap(scn, pipe);
2104#else
2105 ce_per_engine_service(scn, pipe);
2106#endif
2107}
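
/*
 * Worked example of the 50% heuristic above, with an illustrative pipe
 * whose src_nentries is 512: the threshold is 512 >> 1 = 256, so as long
 * as hif_get_free_queue_number() reports more than 256 free slots, the
 * relatively expensive CE register read in the completion path is skipped.
 */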
2108
Komal Seelam5584a7c2016-02-24 19:22:48 +05302109uint16_t
2110hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002111{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302112 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002113 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
2114 uint16_t rv;
2115
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302116 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002117 rv = pipe_info->num_sends_allowed;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302118 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002119 return rv;
2120}
2121
2122/* Called by lower (CE) layer when a send to Target completes. */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002123static void
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002124hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302125 void *transfer_context, qdf_dma_addr_t CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002126 unsigned int nbytes, unsigned int transfer_id,
2127 unsigned int sw_index, unsigned int hw_index,
2128 unsigned int toeplitz_hash_result)
2129{
2130 struct HIF_CE_pipe_info *pipe_info =
2131 (struct HIF_CE_pipe_info *)ce_context;
2132 struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
Komal Seelam644263d2016-02-22 20:45:49 +05302133 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002134 unsigned int sw_idx = sw_index, hw_idx = hw_index;
Houston Hoffman85118512015-09-28 14:17:11 -07002135 struct hif_msg_callbacks *msg_callbacks =
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05302136 &pipe_info->pipe_callbacks;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002137
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002138 do {
2139 /*
		 * The upper layer callback will be triggered
		 * when the last fragment is completed.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002142 */
Houston Hoffman85118512015-09-28 14:17:11 -07002143 if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
Houston Hoffman1c728302017-03-10 16:58:49 -08002144 if (scn->target_status == TARGET_STATUS_RESET) {
2145
2146 qdf_nbuf_unmap_single(scn->qdf_dev,
2147 transfer_context,
2148 QDF_DMA_TO_DEVICE);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302149 qdf_nbuf_free(transfer_context);
Houston Hoffman1c728302017-03-10 16:58:49 -08002150 } else
Houston Hoffman49794a32015-12-21 12:14:56 -08002151 msg_callbacks->txCompletionHandler(
Houston Hoffman85118512015-09-28 14:17:11 -07002152 msg_callbacks->Context,
2153 transfer_context, transfer_id,
2154 toeplitz_hash_result);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002155 }
2156
Pavankumar Nandeshwar5bdd94b2018-09-05 18:16:21 +05302157 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
Houston Hoffman85118512015-09-28 14:17:11 -07002158 pipe_info->num_sends_allowed++;
Pavankumar Nandeshwar5bdd94b2018-09-05 18:16:21 +05302159 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002160 } while (ce_completed_send_next(copyeng,
2161 &ce_context, &transfer_context,
2162 &CE_data, &nbytes, &transfer_id,
2163 &sw_idx, &hw_idx,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302164 &toeplitz_hash_result) == QDF_STATUS_SUCCESS);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002165}
2166
Houston Hoffman910c6262015-09-28 12:56:25 -07002167/**
2168 * hif_ce_do_recv(): send message from copy engine to upper layers
2169 * @msg_callbacks: structure containing callback and callback context
2170 * @netbuff: skb containing message
2171 * @nbytes: number of bytes in the message
2172 * @pipe_info: used for the pipe_number info
2173 *
Jeff Johnsondc9c5592018-05-06 15:40:42 -07002174 * Checks the packet length, configures the length in the netbuff,
Houston Hoffman910c6262015-09-28 12:56:25 -07002175 * and calls the upper layer callback.
2176 *
2177 * return: None
2178 */
2179static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302180 qdf_nbuf_t netbuf, int nbytes,
Houston Hoffman910c6262015-09-28 12:56:25 -07002181 struct HIF_CE_pipe_info *pipe_info) {
2182 if (nbytes <= pipe_info->buf_sz) {
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302183 qdf_nbuf_set_pktlen(netbuf, nbytes);
Houston Hoffman910c6262015-09-28 12:56:25 -07002184 msg_callbacks->
2185 rxCompletionHandler(msg_callbacks->Context,
2186 netbuf, pipe_info->pipe_num);
2187 } else {
Jeff Johnsonb9450212017-09-18 10:12:38 -07002188 HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d",
Houston Hoffman910c6262015-09-28 12:56:25 -07002189 __func__, netbuf, nbytes);
Houston Hoffman1c728302017-03-10 16:58:49 -08002190
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302191 qdf_nbuf_free(netbuf);
Houston Hoffman910c6262015-09-28 12:56:25 -07002192 }
2193}
2194
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002195/* Called by lower (CE) layer when data is received from the Target. */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002196static void
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002197hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302198 void *transfer_context, qdf_dma_addr_t CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002199 unsigned int nbytes, unsigned int transfer_id,
2200 unsigned int flags)
2201{
2202 struct HIF_CE_pipe_info *pipe_info =
2203 (struct HIF_CE_pipe_info *)ce_context;
2204 struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
Houston Hoffman18c7fc52015-09-02 11:44:42 -07002205 struct CE_state *ce_state = (struct CE_state *) copyeng;
Komal Seelam644263d2016-02-22 20:45:49 +05302206 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Yue Maac6b2752019-05-08 17:17:12 -07002207 struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);
Houston Hoffman910c6262015-09-28 12:56:25 -07002208 struct hif_msg_callbacks *msg_callbacks =
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05302209 &pipe_info->pipe_callbacks;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002210
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002211 do {
Yue Maac6b2752019-05-08 17:17:12 -07002212 hif_pm_runtime_mark_last_busy(hif_ctx);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302213 qdf_nbuf_unmap_single(scn->qdf_dev,
2214 (qdf_nbuf_t) transfer_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302215 QDF_DMA_FROM_DEVICE);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002216
Houston Hoffman910c6262015-09-28 12:56:25 -07002217 atomic_inc(&pipe_info->recv_bufs_needed);
2218 hif_post_recv_buffers_for_pipe(pipe_info);
Komal Seelam6ee55902016-04-11 17:11:07 +05302219 if (scn->target_status == TARGET_STATUS_RESET)
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302220 qdf_nbuf_free(transfer_context);
Houston Hoffman49794a32015-12-21 12:14:56 -08002221 else
2222 hif_ce_do_recv(msg_callbacks, transfer_context,
Houston Hoffman9c0f80a2015-09-28 18:36:36 -07002223 nbytes, pipe_info);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002224
		/* Set up force_break flag if the number of receives reaches
		 * MAX_NUM_OF_RECEIVES
2227 */
Houston Hoffman5bf441a2015-09-02 11:52:10 -07002228 ce_state->receive_count++;
Houston Hoffman05652722016-04-29 16:58:59 -07002229 if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
Houston Hoffman18c7fc52015-09-02 11:44:42 -07002230 ce_state->force_break = 1;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002231 break;
2232 }
2233 } while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
2234 &CE_data, &nbytes, &transfer_id,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302235 &flags) == QDF_STATUS_SUCCESS);
Houston Hoffmanf4607852015-12-17 17:14:40 -08002236
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002237}
2238
2239/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
2240
2241void
Komal Seelam5584a7c2016-02-24 19:22:48 +05302242hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002243 struct hif_msg_callbacks *callbacks)
2244{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302245 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002246
2247#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
2248 spin_lock_init(&pcie_access_log_lock);
2249#endif
2250 /* Save callbacks for later installation */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302251 qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002252 sizeof(hif_state->msg_callbacks_pending));
2253
2254}
2255
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002256static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002257{
2258 struct CE_handle *ce_diag = hif_state->ce_diag;
2259 int pipe_num;
Komal Seelam644263d2016-02-22 20:45:49 +05302260 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07002261 struct hif_msg_callbacks *hif_msg_callbacks =
2262 &hif_state->msg_callbacks_current;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002263
2264 /* daemonize("hif_compl_thread"); */
2265
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002266 if (scn->ce_count == 0) {
Houston Hoffmanc50572b2016-06-08 19:49:46 -07002267 HIF_ERROR("%s: Invalid ce_count", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002268 return -EINVAL;
2269 }
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07002270
2271 if (!hif_msg_callbacks ||
2272 !hif_msg_callbacks->rxCompletionHandler ||
2273 !hif_msg_callbacks->txCompletionHandler) {
2274 HIF_ERROR("%s: no completion handler registered", __func__);
2275 return -EFAULT;
2276 }
2277
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002278 A_TARGET_ACCESS_LIKELY(scn);
2279 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2280 struct CE_attr attr;
2281 struct HIF_CE_pipe_info *pipe_info;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002282
2283 pipe_info = &hif_state->pipe_info[pipe_num];
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002284 if (pipe_info->ce_hdl == ce_diag)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002285 continue; /* Handle Diagnostic CE specially */
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302286 attr = hif_state->host_ce_config[pipe_num];
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002287 if (attr.src_nentries) {
2288 /* pipe used to send to target */
Jeff Johnsonb9450212017-09-18 10:12:38 -07002289 HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK",
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002290 __func__, pipe_num, pipe_info);
2291 ce_send_cb_register(pipe_info->ce_hdl,
2292 hif_pci_ce_send_done, pipe_info,
2293 attr.flags & CE_ATTR_DISABLE_INTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002294 pipe_info->num_sends_allowed = attr.src_nentries - 1;
2295 }
2296 if (attr.dest_nentries) {
2297 /* pipe used to receive from target */
2298 ce_recv_cb_register(pipe_info->ce_hdl,
2299 hif_pci_ce_recv_data, pipe_info,
2300 attr.flags & CE_ATTR_DISABLE_INTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002301 }
Houston Hoffman6666df72015-11-30 16:48:35 -08002302
2303 if (attr.src_nentries)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302304 qdf_spinlock_create(&pipe_info->completion_freeq_lock);
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05302305
2306 qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
2307 sizeof(pipe_info->pipe_callbacks));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002308 }
Houston Hoffman6666df72015-11-30 16:48:35 -08002309
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002310 A_TARGET_ACCESS_UNLIKELY(scn);
2311 return 0;
2312}
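
/*
 * Example (not compiled): a minimal sketch of the callback registration the
 * startup path above depends on. The upper layer fills hif_msg_callbacks
 * with at least rxCompletionHandler and txCompletionHandler and hands it to
 * hif_post_init() before hif_start(). "example_rx_done"/"example_tx_done"
 * are hypothetical handlers, not part of this file.
 */
#if 0
static void example_register_callbacks(struct hif_opaque_softc *hif_ctx,
				       void *upper_layer_ctx)
{
	struct hif_msg_callbacks cbs;

	qdf_mem_zero(&cbs, sizeof(cbs));
	cbs.Context = upper_layer_ctx;
	cbs.rxCompletionHandler = example_rx_done;
	cbs.txCompletionHandler = example_tx_done;

	/* Saved as msg_callbacks_pending; installed during hif_start() */
	hif_post_init(hif_ctx, NULL, &cbs);
}
#endif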
2313
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002314/*
2315 * Install pending msg callbacks.
2316 *
2317 * TBDXXX: This hack is needed because upper layers install msg callbacks
2318 * for use with HTC before BMI is done; yet this HIF implementation
2319 * needs to continue to use BMI msg callbacks. Really, upper layers
2320 * should not register HTC callbacks until AFTER BMI phase.
2321 */
Komal Seelam644263d2016-02-22 20:45:49 +05302322static void hif_msg_callbacks_install(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002323{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302324 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002325
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302326 qdf_mem_copy(&hif_state->msg_callbacks_current,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002327 &hif_state->msg_callbacks_pending,
2328 sizeof(hif_state->msg_callbacks_pending));
2329}
2330
Komal Seelam5584a7c2016-02-24 19:22:48 +05302331void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
2332 uint8_t *DLPipe)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002333{
2334 int ul_is_polled, dl_is_polled;
2335
Komal Seelam644263d2016-02-22 20:45:49 +05302336 (void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002337 ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
2338}
2339
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002340/**
2341 * hif_dump_pipe_debug_count() - Log error count
Komal Seelam644263d2016-02-22 20:45:49 +05302342 * @scn: hif_softc pointer.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002343 *
2344 * Output the pipe error counts of each pipe to log file
2345 *
2346 * Return: N/A
2347 */
Komal Seelam644263d2016-02-22 20:45:49 +05302348void hif_dump_pipe_debug_count(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002349{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302350 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002351 int pipe_num;
2352
Jeff Johnson8d639a02019-03-18 09:51:11 -07002353 if (!hif_state) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002354 HIF_ERROR("%s hif_state is NULL", __func__);
2355 return;
2356 }
2357 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2358 struct HIF_CE_pipe_info *pipe_info;
2359
2360 pipe_info = &hif_state->pipe_info[pipe_num];
2361
2362 if (pipe_info->nbuf_alloc_err_count > 0 ||
2363 pipe_info->nbuf_dma_err_count > 0 ||
2364 pipe_info->nbuf_ce_enqueue_err_count)
2365 HIF_ERROR(
2366 "%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
2367 __func__, pipe_info->pipe_num,
2368 atomic_read(&pipe_info->recv_bufs_needed),
2369 pipe_info->nbuf_alloc_err_count,
2370 pipe_info->nbuf_dma_err_count,
2371 pipe_info->nbuf_ce_enqueue_err_count);
2372 }
2373}
2374
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002375static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
2376 void *nbuf, uint32_t *error_cnt,
2377 enum hif_ce_event_type failure_type,
2378 const char *failure_type_string)
2379{
2380 int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
2381 struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
2382 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2383 int ce_id = CE_state->id;
2384 uint32_t error_cnt_tmp;
2385
2386 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2387 error_cnt_tmp = ++(*error_cnt);
2388 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Himanshu Agarwal38cea4a2017-03-30 19:02:52 +05302389 HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002390 __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
2391 failure_type_string);
2392 hif_record_ce_desc_event(scn, ce_id, failure_type,
c_cgodavfda96ad2017-09-07 16:16:00 +05302393 NULL, nbuf, bufs_needed_tmp, 0);
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002394 /* if we fail to allocate the last buffer for an rx pipe,
2395 * there is no trigger to refill the ce and we will
2396 * eventually crash
2397 */
Himanshu Agarwalbedeed92017-03-21 14:05:10 +05302398 if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08002399 qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
Himanshu Agarwalbedeed92017-03-21 14:05:10 +05302400
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002401}
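
/*
 * Worked example of the refill trigger above, for an illustrative rx ring
 * with dest_ring->nentries = 512: at most 511 buffers are ever posted, so
 * when bufs_needed_tmp reaches 511 every receive buffer has failed and no
 * future rx completion can replenish the ring; only then is
 * oom_allocation_work scheduled. Partial failures recover naturally via
 * the next hif_post_recv_buffers_for_pipe() call from the rx path.
 */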
2402
2403
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08002404
2405
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302406QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002407{
2408 struct CE_handle *ce_hdl;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302409 qdf_size_t buf_sz;
Komal Seelam644263d2016-02-22 20:45:49 +05302410 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302411 QDF_STATUS status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002412 uint32_t bufs_posted = 0;
2413
2414 buf_sz = pipe_info->buf_sz;
2415 if (buf_sz == 0) {
2416 /* Unused Copy Engine */
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302417 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002418 }
2419
2420 ce_hdl = pipe_info->ce_hdl;
2421
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302422 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002423 while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302424 qdf_dma_addr_t CE_data; /* CE space buffer address */
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302425 qdf_nbuf_t nbuf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002426
2427 atomic_dec(&pipe_info->recv_bufs_needed);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302428 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002429
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302430 nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002431 if (!nbuf) {
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002432 hif_post_recv_buffers_failure(pipe_info, nbuf,
2433 &pipe_info->nbuf_alloc_err_count,
2434 HIF_RX_NBUF_ALLOC_FAILURE,
2435 "HIF_RX_NBUF_ALLOC_FAILURE");
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302436 return QDF_STATUS_E_NOMEM;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002437 }
2438
2439 /*
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302440 * qdf_nbuf_peek_header(nbuf, &data, &unused);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002441 * CE_data = dma_map_single(dev, data, buf_sz, );
2442 * DMA_FROM_DEVICE);
2443 */
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302444 status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302445 QDF_DMA_FROM_DEVICE);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002446
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302447 if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002448 hif_post_recv_buffers_failure(pipe_info, nbuf,
2449 &pipe_info->nbuf_dma_err_count,
2450 HIF_RX_NBUF_MAP_FAILURE,
2451 "HIF_RX_NBUF_MAP_FAILURE");
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302452 qdf_nbuf_free(nbuf);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302453 return status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002454 }
2455
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302456 CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002457
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302458 qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002459 buf_sz, DMA_FROM_DEVICE);
2460 status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302461 if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002462 hif_post_recv_buffers_failure(pipe_info, nbuf,
2463 &pipe_info->nbuf_ce_enqueue_err_count,
2464 HIF_RX_NBUF_ENQUEUE_FAILURE,
2465 "HIF_RX_NBUF_ENQUEUE_FAILURE");
2466
Govind Singh4fcafd42016-08-08 12:37:31 +05302467 qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
2468 QDF_DMA_FROM_DEVICE);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302469 qdf_nbuf_free(nbuf);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302470 return status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002471 }
2472
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302473 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002474 bufs_posted++;
2475 }
2476 pipe_info->nbuf_alloc_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07002477 (pipe_info->nbuf_alloc_err_count > bufs_posted) ?
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002478 pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
2479 pipe_info->nbuf_dma_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07002480 (pipe_info->nbuf_dma_err_count > bufs_posted) ?
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002481 pipe_info->nbuf_dma_err_count - bufs_posted : 0;
2482 pipe_info->nbuf_ce_enqueue_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07002483 (pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002484 pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002485
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302486 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002487
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302488 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002489}
2490
2491/*
2492 * Try to post all desired receive buffers for all pipes.
 * Returns QDF_STATUS_SUCCESS for a non-fastpath rx copy engine, as
 * oom_allocation_work will be scheduled to recover any
 * failures, and an error status if unable to completely replenish
 * receive buffers for a fastpath rx copy engine.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002497 */
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302498QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002499{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302500 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302501 int pipe_num;
Aditya Sathish61f7fa32018-03-27 17:16:33 +05302502 struct CE_state *ce_state = NULL;
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302503 QDF_STATUS qdf_status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002504
2505 A_TARGET_ACCESS_LIKELY(scn);
2506 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2507 struct HIF_CE_pipe_info *pipe_info;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002508
Houston Hoffman85925072016-05-06 17:02:18 -07002509 ce_state = scn->ce_id_to_state[pipe_num];
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002510 pipe_info = &hif_state->pipe_info[pipe_num];
Houston Hoffman85925072016-05-06 17:02:18 -07002511
2512 if (hif_is_nss_wifi_enabled(scn) &&
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002513 ce_state && (ce_state->htt_rx_data))
Houston Hoffman85925072016-05-06 17:02:18 -07002514 continue;
Houston Hoffman85925072016-05-06 17:02:18 -07002515
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302516 qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
Aditya Sathish61f7fa32018-03-27 17:16:33 +05302517 if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state &&
Govind Singhcaa850e2017-04-20 16:41:36 +05302518 ce_state->htt_rx_data &&
2519 scn->fastpath_mode_on) {
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302520 A_TARGET_ACCESS_UNLIKELY(scn);
2521 return qdf_status;
Govind Singhcaa850e2017-04-20 16:41:36 +05302522 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002523 }
2524
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002525 A_TARGET_ACCESS_UNLIKELY(scn);
2526
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302527 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002528}
2529
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302530QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002531{
Komal Seelam644263d2016-02-22 20:45:49 +05302532 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05302533 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302534 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002535
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07002536 hif_update_fastpath_recv_bufs_cnt(scn);
2537
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07002538 hif_msg_callbacks_install(scn);
2539
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002540 if (hif_completion_thread_startup(hif_state))
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302541 return QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002542
Houston Hoffman271951f2016-11-12 15:24:27 -08002543 /* enable buffer cleanup */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002544 hif_state->started = true;
2545
Houston Hoffman271951f2016-11-12 15:24:27 -08002546 /* Post buffers once to start things off. */
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302547 qdf_status = hif_post_recv_buffers(scn);
2548 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
Houston Hoffman271951f2016-11-12 15:24:27 -08002549 /* cleanup is done in hif_ce_disable */
2550 HIF_ERROR("%s:failed to post buffers", __func__);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302551 return qdf_status;
Houston Hoffman271951f2016-11-12 15:24:27 -08002552 }
2553
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302554 return qdf_status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002555}
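
/*
 * Example (not compiled): a sketch of the bring-up ordering implied by
 * hif_start() - callbacks must be registered before starting, and receive
 * buffers are posted as part of start itself. "example_register_callbacks"
 * is the hypothetical helper sketched earlier in this file.
 */
#if 0
static QDF_STATUS example_hif_bringup(struct hif_opaque_softc *hif_ctx,
				      void *upper_layer_ctx)
{
	example_register_callbacks(hif_ctx, upper_layer_ctx);

	/* Installs the pending callbacks, marks the pipes started and
	 * posts initial receive buffers on every pipe
	 */
	return hif_start(hif_ctx);
}
#endif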

static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct hif_softc *scn;
	struct CE_handle *ce_hdl;
	uint32_t buf_sz;
	struct HIF_CE_state *hif_state;
	qdf_nbuf_t netbuf;
	qdf_dma_addr_t CE_data;
	void *per_CE_context;

	buf_sz = pipe_info->buf_sz;
	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started)
		return;

	scn = HIF_GET_SOFTC(hif_state);
	ce_hdl = pipe_info->ce_hdl;

	if (!scn->qdf_dev)
		return;
	while (ce_revoke_recv_next
		       (ce_hdl, &per_CE_context, (void **)&netbuf,
			&CE_data) == QDF_STATUS_SUCCESS) {
		if (netbuf) {
			qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
					      QDF_DMA_FROM_DEVICE);
			qdf_nbuf_free(netbuf);
		}
	}
}

static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	struct HIF_CE_state *hif_state;
	struct hif_softc *scn;
	qdf_nbuf_t netbuf;
	void *per_CE_context;
	qdf_dma_addr_t CE_data;
	unsigned int nbytes;
	unsigned int id;
	uint32_t buf_sz;
	uint32_t toeplitz_hash_result;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started)
		return;

	scn = HIF_GET_SOFTC(hif_state);

	ce_hdl = pipe_info->ce_hdl;

	while (ce_cancel_send_next
		       (ce_hdl, &per_CE_context,
			(void **)&netbuf, &CE_data, &nbytes,
			&id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
			/*
			 * Packets enqueued by htt_h2t_ver_req_msg() and
			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
			 * freed in htt_htc_misc_pkt_pool_free() in
			 * wlantl_close(), so do not free them here again;
			 * the endpoint they are queued on identifies them.
			 */
			if (id == scn->htc_htt_tx_endpoint)
				return;
			/* Indicate the completion to higher
			 * layer to free the buffer
			 */
			if (pipe_info->pipe_callbacks.txCompletionHandler)
				pipe_info->pipe_callbacks.
					txCompletionHandler(pipe_info->
						pipe_callbacks.Context,
						netbuf, id,
						toeplitz_hash_result);
		}
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
{
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	struct CE_state *ce_state;

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		ce_state = scn->ce_id_to_state[pipe_num];
		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
		    ((ce_state->htt_tx_data) ||
		     (ce_state->htt_rx_data))) {
			continue;
		}

		pipe_info = &hif_state->pipe_info[pipe_num];
		hif_recv_buffer_cleanup_on_pipe(pipe_info);
		hif_send_buffer_cleanup_on_pipe(pipe_info);
	}
}

void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_buffer_cleanup(hif_state);
}

static void hif_destroy_oom_work(struct hif_softc *scn)
{
	struct CE_state *ce_state;
	int ce_id;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		ce_state = scn->ce_id_to_state[ce_id];
		if (ce_state)
			qdf_destroy_work(scn->qdf_dev,
					 &ce_state->oom_allocation_work);
	}
}

void hif_ce_stop(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num;

	/*
	 * before cleaning up any memory, ensure irq &
	 * bottom half contexts will not be re-entered
	 */
	hif_disable_isr(&scn->osc);
	hif_destroy_oom_work(scn);
	scn->hif_init_done = false;

	/*
	 * At this point, asynchronous threads are stopped,
	 * The Target should not DMA nor interrupt, Host code may
	 * not initiate anything more. So we just need to clean
	 * up Host-side state.
	 */

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	hif_buffer_cleanup(hif_state);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;
		struct CE_attr attr;
		struct CE_handle *ce_diag = hif_state->ce_diag;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			if (pipe_info->ce_hdl != ce_diag) {
				attr = hif_state->host_ce_config[pipe_num];
				if (attr.src_nentries)
					qdf_spinlock_destroy(&pipe_info->
						completion_freeq_lock);
			}
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
		}
	}

	if (hif_state->sleep_timer_init) {
		qdf_timer_stop(&hif_state->sleep_timer);
		qdf_timer_free(&hif_state->sleep_timer);
		hif_state->sleep_timer_init = false;
	}

	hif_state->started = false;
}

static void hif_get_shadow_reg_cfg(struct hif_softc *scn,
				   struct shadow_reg_cfg
				   **target_shadow_reg_cfg_ret,
				   uint32_t *shadow_cfg_sz_ret)
{
	if (target_shadow_reg_cfg_ret)
		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
	if (shadow_cfg_sz_ret)
		*shadow_cfg_sz_ret = shadow_cfg_sz;
}

/**
 * hif_get_target_ce_config() - get copy engine configuration
 * @scn: HIF context
 * @target_ce_config_ret: basic copy engine configuration
 * @target_ce_config_sz_ret: size of the basic configuration in bytes
 * @target_service_to_ce_map_ret: service mapping for the copy engines
 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
 * @target_shadow_reg_cfg_ret: shadow register configuration
 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
 *
 * Provides an accessor to these values outside of this file.
 * Currently these are stored in static pointers to const sections.
 * There are multiple configurations that are selected from at compile time.
 * Runtime selection would need to consider mode, target type and bus type.
 *
 * Return: return by parameter.
 */
void hif_get_target_ce_config(struct hif_softc *scn,
		struct CE_pipe_config **target_ce_config_ret,
		uint32_t *target_ce_config_sz_ret,
		struct service_to_pipe **target_service_to_ce_map_ret,
		uint32_t *target_service_to_ce_map_sz_ret,
		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
		uint32_t *shadow_cfg_sz_ret)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	*target_ce_config_ret = hif_state->target_ce_config;
	*target_ce_config_sz_ret = hif_state->target_ce_config_sz;

	hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
				       target_service_to_ce_map_sz_ret);
	hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret,
			       shadow_cfg_sz_ret);
}
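
/*
 * Illustrative caller sketch (not compiled): how a bus layer might pull
 * the selected tables out of this accessor and derive entry counts from
 * the byte sizes, mirroring what hif_wlan_enable() does below. The
 * function and variable names are assumptions made for the example.
 */
#if 0
static void example_dump_ce_map(struct hif_softc *scn)
{
	struct CE_pipe_config *ce_cfg;
	struct service_to_pipe *svc_map;
	struct shadow_reg_cfg *shadow;
	uint32_t ce_cfg_sz, svc_map_sz, shadow_sz;
	uint32_t num_ce;

	hif_get_target_ce_config(scn, &ce_cfg, &ce_cfg_sz,
				 &svc_map, &svc_map_sz,
				 &shadow, &shadow_sz);

	/* sizes are reported in bytes; translate to entry counts */
	num_ce = ce_cfg_sz / sizeof(struct CE_pipe_config);
	HIF_DBG("%u target CE pipes configured", num_ce);
}
#endif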

#ifdef CONFIG_SHADOW_V2
static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
{
	int i;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "%s: num_config %d", __func__, cfg->num_shadow_reg_v2_cfg);

	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  "%s: i %d, val %x", __func__, i,
			  cfg->shadow_reg_v2_cfg[i].addr);
	}
}
#else
static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
{
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "%s: CONFIG_SHADOW_V2 not defined", __func__);
}
#endif

#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_get_src_ring_read_index(): Called to get the SRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the SRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based SRRI.
 *
 * Return: SRRI
 */
inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
	if (attr.flags & CE_ATTR_DISABLE_INTR) {
		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	} else {
		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
			return A_TARGET_READ(scn,
					(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
		else
			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
					CE_ctrl_addr);
	}
}

/**
 * hif_get_dst_ring_read_index(): Called to get the DRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the DRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based DRRI.
 *
 * Return: DRRI
 */
inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];

	if (attr.flags & CE_ATTR_DISABLE_INTR) {
		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	} else {
		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
			return A_TARGET_READ(scn,
					(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
		else
			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
					CE_ctrl_addr);
	}
}
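
/*
 * Illustrative sketch (not compiled, valid only under
 * ADRASTEA_RRI_ON_DDR): polling a source ring's read index to estimate
 * how many descriptors the target has consumed. The cached software
 * write index and mask parameters are assumptions for the example; real
 * callers use the CE_state ring bookkeeping.
 */
#if 0
static unsigned int example_src_entries_in_flight(struct hif_softc *scn,
						  unsigned int ce_id,
						  unsigned int sw_write_index,
						  unsigned int nentries_mask)
{
	unsigned int read_index;

	read_index = hif_get_src_ring_read_index(scn, CE_BASE_ADDRESS(ce_id));

	/* ring indices wrap; the mask keeps the difference in range */
	return (sw_write_index - read_index) & nentries_mask;
}
#endif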

/**
 * hif_alloc_rri_on_ddr() - Allocate memory for rri on ddr
 * @scn: hif_softc pointer
 *
 * Return: qdf status
 */
static inline QDF_STATUS hif_alloc_rri_on_ddr(struct hif_softc *scn)
{
	qdf_dma_addr_t paddr_rri_on_ddr = 0;

	scn->vaddr_rri_on_ddr =
		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
		scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
		&paddr_rri_on_ddr);

	if (!scn->vaddr_rri_on_ddr) {
		hif_err("dmaable page alloc fail");
		return QDF_STATUS_E_NOMEM;
	}

	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;

	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));

	return QDF_STATUS_SUCCESS;
}
#endif
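
/*
 * Worked note (illustrative): the allocation above reserves one uint32_t
 * slot per copy engine, so with a hypothetical CE_COUNT of 12 the DMA
 * region is 48 bytes. Once the hardware is pointed at it, SRRI/DRRI
 * reads become plain loads from this consistent allocation instead of
 * register accesses across the bus.
 */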

#if (!defined(QCN7605_SUPPORT)) && defined(ADRASTEA_RRI_ON_DDR)
/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This function allocates non cached memory on ddr and sends
 * the physical address of this memory to the CE hardware. The
 * hardware updates the RRI on this particular location.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
	unsigned int i;
	uint32_t high_paddr, low_paddr;

	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
		return;

	low_paddr = BITS0_TO_31(scn->paddr_rri_on_ddr);
	high_paddr = BITS32_TO_35(scn->paddr_rri_on_ddr);

	HIF_DBG("%s using srri and drri from DDR", __func__);

	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);

	for (i = 0; i < CE_COUNT; i++)
		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
}
#else
/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This is a dummy implementation for platforms that don't
 * support this functionality.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
}
#endif

/**
 * hif_update_rri_over_ddr_config() - update rri_over_ddr config for
 *                                    QMI command
 * @scn: hif context
 * @cfg: wlan enable config
 *
 * In case of Genoa, rri_over_ddr memory configuration is passed
 * to firmware through QMI configure command.
 */
#if defined(QCN7605_SUPPORT) && defined(ADRASTEA_RRI_ON_DDR)
static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
					   struct pld_wlan_enable_cfg *cfg)
{
	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
		return;

	cfg->rri_over_ddr_cfg_valid = true;
	cfg->rri_over_ddr_cfg.base_addr_low =
		BITS0_TO_31(scn->paddr_rri_on_ddr);
	cfg->rri_over_ddr_cfg.base_addr_high =
		BITS32_TO_35(scn->paddr_rri_on_ddr);
}
#else
static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
					   struct pld_wlan_enable_cfg *cfg)
{
}
#endif

/**
 * hif_wlan_enable(): call the platform driver to enable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode and CE configuration to
 * platform driver to enable wlan.
 *
 * Return: linux error code
 */
int hif_wlan_enable(struct hif_softc *scn)
{
	struct pld_wlan_enable_cfg cfg;
	enum pld_driver_mode mode;
	uint32_t con_mode = hif_get_conparam(scn);

	hif_get_target_ce_config(scn,
			(struct CE_pipe_config **)&cfg.ce_tgt_cfg,
			&cfg.num_ce_tgt_cfg,
			(struct service_to_pipe **)&cfg.ce_svc_cfg,
			&cfg.num_ce_svc_pipe_cfg,
			(struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
			&cfg.num_shadow_reg_cfg);

	/* translate from structure size to array size */
	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);

	hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
					    &cfg.num_shadow_reg_v2_cfg);

	hif_print_hal_shadow_register_cfg(&cfg);

	hif_update_rri_over_ddr_config(scn, &cfg);

	if (QDF_GLOBAL_FTM_MODE == con_mode)
		mode = PLD_FTM;
	else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
		mode = PLD_COLDBOOT_CALIBRATION;
	else if (QDF_GLOBAL_FTM_COLDBOOT_CALIB_MODE == con_mode)
		mode = PLD_FTM_COLDBOOT_CALIBRATION;
	else if (QDF_IS_EPPING_ENABLED(con_mode))
		mode = PLD_EPPING;
	else
		mode = PLD_MISSION;

	if (BYPASS_QMI)
		return 0;
	else
		return pld_wlan_enable(scn->qdf_dev->dev, &cfg, mode);
}
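
/*
 * Worked example (illustrative only): the divisions above convert the
 * byte sizes reported by hif_get_target_ce_config() into entry counts.
 * With a hypothetical 12-entry target pipe map, num_ce_tgt_cfg arrives
 * as 12 * sizeof(struct CE_pipe_config) bytes and leaves as 12, which
 * is the count pld_wlan_enable() expects.
 */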

#ifdef WLAN_FEATURE_EPPING

#define CE_EPPING_USES_IRQ true

void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state)
{
	if (CE_EPPING_USES_IRQ)
		hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
	else
		hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
	hif_state->target_ce_config = target_ce_config_wlan_epping;
	hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
	target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
}
#endif

#ifdef QCN7605_SUPPORT
static inline
void hif_set_ce_config_qcn7605(struct hif_softc *scn,
			       struct HIF_CE_state *hif_state)
{
	hif_state->host_ce_config = host_ce_config_wlan_qcn7605;
	hif_state->target_ce_config = target_ce_config_wlan_qcn7605;
	hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_qcn7605);
	target_shadow_reg_cfg = target_shadow_reg_cfg_map_qcn7605;
	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map_qcn7605);
	scn->ce_count = QCN7605_CE_COUNT;
}
#else
static inline
void hif_set_ce_config_qcn7605(struct hif_softc *scn,
			       struct HIF_CE_state *hif_state)
{
	HIF_ERROR("QCN7605 not supported");
}
#endif

#ifdef CE_SVC_CMN_INIT
#ifdef QCA_WIFI_SUPPORT_SRNG
static inline void hif_ce_service_init(void)
{
	ce_service_srng_init();
}
#else
static inline void hif_ce_service_init(void)
{
	ce_service_legacy_init();
}
#endif
#else
static inline void hif_ce_service_init(void)
{
}
#endif

/**
 * hif_ce_prepare_config() - load the correct static tables.
 * @scn: hif context
 *
 * Epping uses different static attribute tables than mission mode.
 */
void hif_ce_prepare_config(struct hif_softc *scn)
{
	uint32_t mode = hif_get_conparam(scn);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_ce_service_init();
	hif_state->ce_services = ce_services_attach(scn);

	scn->ce_count = HOST_CE_COUNT;
	/* if epping is enabled we need to use the epping configuration. */
	if (QDF_IS_EPPING_ENABLED(mode)) {
		hif_ce_prepare_epping_config(hif_state);
		return;
	}

	switch (tgt_info->target_type) {
	default:
		hif_state->host_ce_config = host_ce_config_wlan;
		hif_state->target_ce_config = target_ce_config_wlan;
		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
		break;
	case TARGET_TYPE_QCN7605:
		hif_set_ce_config_qcn7605(scn, hif_state);
		break;
	case TARGET_TYPE_AR900B:
	case TARGET_TYPE_QCA9984:
	case TARGET_TYPE_IPQ4019:
	case TARGET_TYPE_QCA9888:
		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
			hif_state->host_ce_config =
				host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
		} else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
			hif_state->host_ce_config =
				host_lowdesc_ce_cfg_wlan_ar900b;
		} else {
			hif_state->host_ce_config = host_ce_config_wlan_ar900b;
		}

		hif_state->target_ce_config = target_ce_config_wlan_ar900b;
		hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_ar900b);

		break;

	case TARGET_TYPE_AR9888:
	case TARGET_TYPE_AR9888V2:
		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
			hif_state->host_ce_config =
				host_lowdesc_ce_cfg_wlan_ar9888;
		} else {
			hif_state->host_ce_config = host_ce_config_wlan_ar9888;
		}

		hif_state->target_ce_config = target_ce_config_wlan_ar9888;
		hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_ar9888);

		break;

	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
		if (scn->bus_type == QDF_BUS_TYPE_PCI) {
			hif_state->host_ce_config =
				host_ce_config_wlan_qca8074_pci;
			hif_state->target_ce_config =
				target_ce_config_wlan_qca8074_pci;
			hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_qca8074_pci);
		} else {
			hif_state->host_ce_config = host_ce_config_wlan_qca8074;
			hif_state->target_ce_config =
				target_ce_config_wlan_qca8074;
			hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_qca8074);
		}
		break;
	case TARGET_TYPE_QCA6290:
		hif_state->host_ce_config = host_ce_config_wlan_qca6290;
		hif_state->target_ce_config = target_ce_config_wlan_qca6290;
		hif_state->target_ce_config_sz =
					sizeof(target_ce_config_wlan_qca6290);

		scn->ce_count = QCA_6290_CE_COUNT;
		break;
	case TARGET_TYPE_QCN9000:
		hif_state->host_ce_config = host_ce_config_wlan_qcn9000;
		hif_state->target_ce_config = target_ce_config_wlan_qcn9000;
		hif_state->target_ce_config_sz =
					sizeof(target_ce_config_wlan_qcn9000);
		scn->ce_count = QCN_9000_CE_COUNT;
		break;
	case TARGET_TYPE_QCA6390:
		hif_state->host_ce_config = host_ce_config_wlan_qca6390;
		hif_state->target_ce_config = target_ce_config_wlan_qca6390;
		hif_state->target_ce_config_sz =
					sizeof(target_ce_config_wlan_qca6390);

		scn->ce_count = QCA_6390_CE_COUNT;
		break;
	case TARGET_TYPE_QCA6490:
		hif_state->host_ce_config = host_ce_config_wlan_qca6490;
		hif_state->target_ce_config = target_ce_config_wlan_qca6490;
		hif_state->target_ce_config_sz =
					sizeof(target_ce_config_wlan_qca6490);

		scn->ce_count = QCA_6490_CE_COUNT;
		break;
	case TARGET_TYPE_ADRASTEA:
		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
			hif_state->host_ce_config =
				host_lowdesc_ce_config_wlan_adrastea_nopktlog;
			hif_state->target_ce_config =
			       target_lowdesc_ce_config_wlan_adrastea_nopktlog;
			hif_state->target_ce_config_sz =
			sizeof(target_lowdesc_ce_config_wlan_adrastea_nopktlog);
		} else {
			hif_state->host_ce_config =
				host_ce_config_wlan_adrastea;
			hif_state->target_ce_config =
				target_ce_config_wlan_adrastea;
			hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_adrastea);
		}
		break;
	}
	QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
}
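
/*
 * Illustrative fragment (not compiled): adding a hypothetical target
 * type to the switch above only requires pointing the three table
 * fields at its config arrays and, when the CE count differs from
 * HOST_CE_COUNT, overriding scn->ce_count. All names below are made up
 * for the example.
 */
#if 0
	case TARGET_TYPE_EXAMPLE:
		hif_state->host_ce_config = host_ce_config_wlan_example;
		hif_state->target_ce_config = target_ce_config_wlan_example;
		hif_state->target_ce_config_sz =
					sizeof(target_ce_config_wlan_example);
		scn->ce_count = EXAMPLE_CE_COUNT;
		break;
#endif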

/**
 * hif_ce_open() - do ce specific allocations
 * @hif_sc: pointer to hif context
 *
 * return: 0 for success or QDF_STATUS_E_NOMEM
 */
QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	qdf_spinlock_create(&hif_state->irq_reg_lock);
	qdf_spinlock_create(&hif_state->keep_awake_lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * hif_ce_close() - do ce specific free
 * @hif_sc: pointer to hif context
 */
void hif_ce_close(struct hif_softc *hif_sc)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
	qdf_spinlock_destroy(&hif_state->keep_awake_lock);
}

/**
 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
 * @hif_sc: hif context
 *
 * uses state variables to support cleaning up when hif_config_ce fails.
 */
void hif_unconfig_ce(struct hif_softc *hif_sc)
{
	int pipe_num;
	struct HIF_CE_pipe_info *pipe_info;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc);

	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl)
			ce_unregister_irq(hif_state, (1 << pipe_num));
	}
	deinit_tasklet_workers(hif_hdl);
	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
		}
	}
	if (hif_sc->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		hif_sc->athdiag_procfs_inited = false;
	}
}

#ifdef CONFIG_BYPASS_QMI
#ifdef QCN7605_SUPPORT
/**
 * hif_post_static_buf_to_target() - post static buffer to WLAN FW
 * @scn: pointer to HIF structure
 *
 * WLAN FW needs 2MB memory from DDR when QMI is disabled.
 *
 * Return: void
 */
static void hif_post_static_buf_to_target(struct hif_softc *scn)
{
	void *target_va;
	phys_addr_t target_pa;
	struct ce_info *ce_info_ptr;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	uint32_t i = 0;
	int ret;

	target_va = qdf_mem_alloc_consistent(scn->qdf_dev,
					     scn->qdf_dev->dev,
					     FW_SHARED_MEM +
					     sizeof(struct ce_info),
					     &target_pa);
	if (!target_va)
		return;

	ce_info_ptr = (struct ce_info *)target_va;

	if (scn->vaddr_rri_on_ddr) {
		ce_info_ptr->rri_over_ddr_low_paddr =
			BITS0_TO_31(scn->paddr_rri_on_ddr);
		ce_info_ptr->rri_over_ddr_high_paddr =
			BITS32_TO_35(scn->paddr_rri_on_ddr);
	}

	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);
	if (ret) {
		hif_err("Failed to get CE msi config");
		return;
	}

	for (i = 0; i < CE_COUNT_MAX; i++) {
		ce_info_ptr->cfg[i].ce_id = i;
		ce_info_ptr->cfg[i].msi_vector =
			(i % msi_data_count) + msi_irq_start;
	}

	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
	hif_info("target va %pK target pa %pa", target_va, &target_pa);
}
#else
/**
 * hif_post_static_buf_to_target() - post static buffer to WLAN FW
 * @scn: pointer to HIF structure
 *
 * WLAN FW needs 2MB memory from DDR when QMI is disabled.
 *
 * Return: void
 */
static void hif_post_static_buf_to_target(struct hif_softc *scn)
{
	void *target_va;
	phys_addr_t target_pa;

	target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					     FW_SHARED_MEM, &target_pa);
	if (!target_va) {
		HIF_TRACE("Memory allocation failed, could not post target buf");
		return;
	}
	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
	HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
}
#endif

#else
static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
{
}
#endif

static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
				       bool wait_for_it)
{
	/* todo */
	return 0;
}

/**
 * hif_config_ce() - configure copy engines
 * @scn: hif context
 *
 * Prepares fw, copy engine hardware and host sw according
 * to the attributes selected by hif_ce_prepare_config.
 *
 * also calls athdiag_procfs_init
 *
 * return: 0 for success nonzero for failure.
 */
int hif_config_ce(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct HIF_CE_pipe_info *pipe_info;
	int pipe_num;
	struct CE_state *ce_state = NULL;

#ifdef ADRASTEA_SHADOW_REGISTERS
	int i;
#endif
	QDF_STATUS rv = QDF_STATUS_SUCCESS;

	scn->notice_send = true;
	scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;

	hif_post_static_buf_to_target(scn);

	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;

	hif_config_rri_on_ddr(scn);

	if (ce_srng_based(scn))
		scn->bus_ops.hif_target_sleep_state_adjust =
			&hif_srng_sleep_state_adjust;

	/* Initialise the CE debug history sysfs interface inputs ce_id and
	 * index. Disable data storing
	 */
	reset_ce_debug_history(scn);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr *attr;

		pipe_info = &hif_state->pipe_info[pipe_num];
		pipe_info->pipe_num = pipe_num;
		pipe_info->HIF_CE_state = hif_state;
		attr = &hif_state->host_ce_config[pipe_num];

		pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
		ce_state = scn->ce_id_to_state[pipe_num];
		if (!ce_state) {
			A_TARGET_ACCESS_UNLIKELY(scn);
			goto err;
		}
		qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
		QDF_ASSERT(pipe_info->ce_hdl);
		if (!pipe_info->ce_hdl) {
			rv = QDF_STATUS_E_FAILURE;
			A_TARGET_ACCESS_UNLIKELY(scn);
			goto err;
		}

		ce_state->lro_data = qdf_lro_init();

		if (attr->flags & CE_ATTR_DIAG) {
			/* Reserve the ultimate CE for
			 * Diagnostic Window support
			 */
			hif_state->ce_diag = pipe_info->ce_hdl;
			continue;
		}

		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
		    (ce_state->htt_rx_data))
			continue;

		pipe_info->buf_sz = (qdf_size_t)(attr->src_sz_max);
		if (attr->dest_nentries > 0) {
			atomic_set(&pipe_info->recv_bufs_needed,
				   init_buffer_count(attr->dest_nentries - 1));
			/* SRNG based CE has one entry less */
			if (ce_srng_based(scn))
				atomic_dec(&pipe_info->recv_bufs_needed);
		} else {
			atomic_set(&pipe_info->recv_bufs_needed, 0);
		}
		ce_tasklet_init(hif_state, (1 << pipe_num));
		ce_register_irq(hif_state, (1 << pipe_num));
	}

	if (athdiag_procfs_init(scn) != 0) {
		A_TARGET_ACCESS_UNLIKELY(scn);
		goto err;
	}
	scn->athdiag_procfs_inited = true;

	HIF_DBG("%s: ce_init done", __func__);

	init_tasklet_workers(hif_hdl);

	HIF_DBG("%s: X, ret = %d", __func__, rv);

#ifdef ADRASTEA_SHADOW_REGISTERS
	HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__);
	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
		HIF_DBG("%s Shadow Register%d is mapped to address %x",
			__func__, i,
			(A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
	}
#endif

	return rv != QDF_STATUS_SUCCESS;

err:
	/* Failure, so clean up */
	hif_unconfig_ce(scn);
	HIF_TRACE("%s: X, ret = %d", __func__, rv);
	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
}
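
/*
 * Illustrative sketch (not compiled): the open/prepare/configure pairing
 * and its cleanup contract. hif_config_ce() already calls
 * hif_unconfig_ce() on its internal error path, so a caller only unwinds
 * what it set up itself. The wrapper and return codes below are
 * assumptions made for the example.
 */
#if 0
static int example_ce_bringup(struct hif_softc *scn)
{
	if (hif_ce_open(scn) != QDF_STATUS_SUCCESS)
		return -ENOMEM;

	hif_ce_prepare_config(scn);	/* pick the static tables */

	if (hif_config_ce(scn)) {
		/* CE state was already torn down by hif_unconfig_ce() */
		hif_ce_close(scn);
		return -EIO;
	}

	return 0;
}
#endif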

#ifdef IPA_OFFLOAD
/**
 * hif_ce_ipa_get_ce_resource() - get uc resource on hif
 * @scn: bus context
 * @ce_sr: copyengine source ring resource info
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * IPA micro controller data path offload feature enabled,
 * HIF should release copy engine related resource information to IPA UC
 * IPA UC will access hardware resource with released information
 *
 * Return: None
 */
void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
				qdf_shared_mem_t **ce_sr,
				uint32_t *ce_sr_ring_size,
				qdf_dma_addr_t *ce_reg_paddr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;

	ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
			    ce_reg_paddr);
}
#endif /* IPA_OFFLOAD */

#ifdef ADRASTEA_SHADOW_REGISTERS

/*
 * Current shadow register config
 *
 * -----------------------------------------------------------
 * Shadow Register      |     CE   |    src/dst write index
 * -----------------------------------------------------------
 *         0            |     0    |           src
 *         1     No Config - Doesn't point to anything
 *         2     No Config - Doesn't point to anything
 *         3            |     3    |           src
 *         4            |     4    |           src
 *         5            |     5    |           src
 *         6     No Config - Doesn't point to anything
 *         7            |     7    |           src
 *         8     No Config - Doesn't point to anything
 *         9     No Config - Doesn't point to anything
 *         10    No Config - Doesn't point to anything
 *         11    No Config - Doesn't point to anything
 * -----------------------------------------------------------
 *         12    No Config - Doesn't point to anything
 *         13           |     1    |           dst
 *         14           |     2    |           dst
 *         15    No Config - Doesn't point to anything
 *         16    No Config - Doesn't point to anything
 *         17    No Config - Doesn't point to anything
 *         18    No Config - Doesn't point to anything
 *         19           |     7    |           dst
 *         20           |     8    |           dst
 *         21    No Config - Doesn't point to anything
 *         22    No Config - Doesn't point to anything
 *         23    No Config - Doesn't point to anything
 * -----------------------------------------------------------
 *
 *
 * ToDo - Move shadow register config to following in the future
 * This helps free up a block of shadow registers towards the end.
 * Can be used for other purposes
 *
 * -----------------------------------------------------------
 * Shadow Register      |     CE   |    src/dst write index
 * -----------------------------------------------------------
 *         0            |     0    |           src
 *         1            |     3    |           src
 *         2            |     4    |           src
 *         3            |     5    |           src
 *         4            |     7    |           src
 * -----------------------------------------------------------
 *         5            |     1    |           dst
 *         6            |     2    |           dst
 *         7            |     7    |           dst
 *         8            |     8    |           dst
 * -----------------------------------------------------------
 *         9     No Config - Doesn't point to anything
 *         12    No Config - Doesn't point to anything
 *         13    No Config - Doesn't point to anything
 *         14    No Config - Doesn't point to anything
 *         15    No Config - Doesn't point to anything
 *         16    No Config - Doesn't point to anything
 *         17    No Config - Doesn't point to anything
 *         18    No Config - Doesn't point to anything
 *         19    No Config - Doesn't point to anything
 *         20    No Config - Doesn't point to anything
 *         21    No Config - Doesn't point to anything
 *         22    No Config - Doesn't point to anything
 *         23    No Config - Doesn't point to anything
 * -----------------------------------------------------------
 */
#ifndef QCN7605_SUPPORT
u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 0:
		addr = SHADOW_VALUE0;
		break;
	case 3:
		addr = SHADOW_VALUE3;
		break;
	case 4:
		addr = SHADOW_VALUE4;
		break;
	case 5:
		addr = SHADOW_VALUE5;
		break;
	case 7:
		addr = SHADOW_VALUE7;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}
	return addr;
}

u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 1:
		addr = SHADOW_VALUE13;
		break;
	case 2:
		addr = SHADOW_VALUE14;
		break;
	case 5:
		addr = SHADOW_VALUE17;
		break;
	case 7:
		addr = SHADOW_VALUE19;
		break;
	case 8:
		addr = SHADOW_VALUE20;
		break;
	case 9:
		addr = SHADOW_VALUE21;
		break;
	case 10:
		addr = SHADOW_VALUE22;
		break;
	case 11:
		addr = SHADOW_VALUE23;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}

	return addr;
}
#else
u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 0:
		addr = SHADOW_VALUE0;
		break;
	case 4:
		addr = SHADOW_VALUE4;
		break;
	case 5:
		addr = SHADOW_VALUE5;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}
	return addr;
}

u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 1:
		addr = SHADOW_VALUE13;
		break;
	case 2:
		addr = SHADOW_VALUE14;
		break;
	case 3:
		addr = SHADOW_VALUE15;
		break;
	case 5:
		addr = SHADOW_VALUE17;
		break;
	case 7:
		addr = SHADOW_VALUE19;
		break;
	case 8:
		addr = SHADOW_VALUE20;
		break;
	case 9:
		addr = SHADOW_VALUE21;
		break;
	case 10:
		addr = SHADOW_VALUE22;
		break;
	case 11:
		addr = SHADOW_VALUE23;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}

	return addr;
}
#endif
#endif
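
/*
 * Worked example (illustrative only), tying the table above to the
 * lookup functions: on the non-QCN7605 layout, CE 4's source write
 * index lives in shadow register 4, so
 * shadow_sr_wr_ind_addr(scn, CE_BASE_ADDRESS(4)) returns SHADOW_VALUE4,
 * while CE 2's destination write index lives in shadow register 14, so
 * shadow_dst_wr_ind_addr(scn, CE_BASE_ADDRESS(2)) returns SHADOW_VALUE14.
 * Any CE without a "No Config" row is rejected with QDF_ASSERT(0).
 */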

#if defined(FEATURE_LRO)
void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
{
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	ce_state = scn->ce_id_to_state[ctx_id];

	return ce_state->lro_data;
}
#endif

/**
 * hif_map_service_to_pipe() - returns the ce ids pertaining to
 * this service
 * @hif_hdl: hif_opaque_softc pointer.
 * @svc_id: Service ID for which the mapping is needed.
 * @ul_pipe: address of the container in which ul pipe is returned.
 * @dl_pipe: address of the container in which dl pipe is returned.
 * @ul_is_polled: address of the container in which a bool
 *			indicating if the UL CE for this service
 *			is polled is returned.
 * @dl_is_polled: address of the container in which a bool
 *			indicating if the DL CE for this service
 *			is polled is returned.
 *
 * Return: Indicates whether the service has been found in the table.
 *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
 *         There will be warning logs if either leg has not been updated
 *         because it missed the entry in the table (but this is not an err).
 */
int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
			    uint8_t *ul_pipe, uint8_t *dl_pipe,
			    int *ul_is_polled, int *dl_is_polled)
{
	int status = QDF_STATUS_E_INVAL;
	unsigned int i;
	struct service_to_pipe element;
	struct service_to_pipe *tgt_svc_map_to_use;
	uint32_t sz_tgt_svc_map_to_use;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	bool dl_updated = false;
	bool ul_updated = false;

	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
				       &sz_tgt_svc_map_to_use);

	*dl_is_polled = 0;  /* polling for received messages not supported */

	for (i = 0; i < (sz_tgt_svc_map_to_use / sizeof(element)); i++) {
		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
		if (element.service_id == svc_id) {
			if (element.pipedir == PIPEDIR_OUT) {
				*ul_pipe = element.pipenum;
				*ul_is_polled =
					(hif_state->host_ce_config[*ul_pipe].
					 flags & CE_ATTR_DISABLE_INTR) != 0;
				ul_updated = true;
			} else if (element.pipedir == PIPEDIR_IN) {
				*dl_pipe = element.pipenum;
				dl_updated = true;
			}
			status = QDF_STATUS_SUCCESS;
		}
	}
	if (ul_updated == false)
		HIF_DBG("ul pipe is NOT updated for service %d", svc_id);
	if (dl_updated == false)
		HIF_DBG("dl pipe is NOT updated for service %d", svc_id);

	return status;
}
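
/*
 * Illustrative caller sketch (not compiled): resolving the pipes for one
 * service, the way an HTC-style client would before sending. The service
 * ID macro and the error handling are assumptions made for the example.
 */
#if 0
static void example_resolve_pipes(struct hif_opaque_softc *hif_hdl)
{
	uint8_t ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;

	if (hif_map_service_to_pipe(hif_hdl, HTT_DATA_MSG_SVC,
				    &ul_pipe, &dl_pipe,
				    &ul_is_polled, &dl_is_polled) !=
	    QDF_STATUS_SUCCESS) {
		HIF_ERROR("no pipe mapping for service");
		return;
	}

	HIF_DBG("service uses ul pipe %d (polled=%d), dl pipe %d",
		ul_pipe, ul_is_polled, dl_pipe);
}
#endif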

#ifdef SHADOW_REG_DEBUG
inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, srri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);

	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != srri_from_ddr) {
		HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
			  __func__, srri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return srri_from_ddr;
}

inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, drri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);

	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != drri_from_ddr) {
		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
			  drri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return drri_from_ddr;
}
#endif

Govind Singh2443fb32016-01-13 17:44:48 +05303861/**
3862 * hif_dump_ce_registers() - dump ce registers
Komal Seelam5584a7c2016-02-24 19:22:48 +05303863 * @scn: hif_opaque_softc pointer.
Govind Singh2443fb32016-01-13 17:44:48 +05303864 *
3865 * Output the copy engine registers
3866 *
3867 * Return: 0 for success or error code
3868 */
int hif_dump_ce_registers(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
	uint16_t i;
	QDF_STATUS status;

	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
		if (!scn->ce_id_to_state[i]) {
			HIF_DBG("CE%d not used.", i);
			continue;
		}

		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
					   (uint8_t *) &ce_reg_values[0],
					   ce_reg_word_size * sizeof(uint32_t));

		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("Dumping CE register failed!");
			return -EACCES;
		}
		HIF_ERROR("CE%d=>\n", i);
		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
				   (uint8_t *) &ce_reg_values[0],
				   ce_reg_word_size * sizeof(uint32_t));
		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address
			  + SR_WR_INDEX_ADDRESS),
			  ce_reg_values[SR_WR_INDEX_ADDRESS / 4]);
		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address
			  + CURRENT_SRRI_ADDRESS),
			  ce_reg_values[CURRENT_SRRI_ADDRESS / 4]);
		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address
			  + DST_WR_INDEX_ADDRESS),
			  ce_reg_values[DST_WR_INDEX_ADDRESS / 4]);
		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address
			  + CURRENT_DRRI_ADDRESS),
			  ce_reg_values[CURRENT_DRRI_ADDRESS / 4]);
		qdf_print("---");
	}
	return 0;
}
qdf_export_symbol(hif_dump_ce_registers);

#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
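/**
 * hif_get_addl_pipe_info() - collect additional info about a pipe's CE rings
 * @osc: hif context
 * @hif_info: structure to fill with the ring state
 * @pipe: pipe number
 *
 * Snapshots the source and destination ring state (entries, indices, base
 * addresses) of the copy engine backing @pipe, along with the PCI memory
 * base and the CE control address.
 *
 * Return: @hif_info, filled in
 */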
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct CE_ring_state *dest_ring = ce_state->dest_ring;

	if (src_ring) {
		hif_info->ul_pipe.nentries = src_ring->nentries;
		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
		hif_info->ul_pipe.sw_index = src_ring->sw_index;
		hif_info->ul_pipe.write_index = src_ring->write_index;
		hif_info->ul_pipe.hw_index = src_ring->hw_index;
		hif_info->ul_pipe.base_addr_CE_space =
			src_ring->base_addr_CE_space;
		hif_info->ul_pipe.base_addr_owner_space =
			src_ring->base_addr_owner_space;
	}

	if (dest_ring) {
		hif_info->dl_pipe.nentries = dest_ring->nentries;
		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
		hif_info->dl_pipe.write_index = dest_ring->write_index;
		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
		hif_info->dl_pipe.base_addr_CE_space =
			dest_ring->base_addr_CE_space;
		hif_info->dl_pipe.base_addr_owner_space =
			dest_ring->base_addr_owner_space;
	}

	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
	hif_info->ctrl_addr = ce_state->ctrl_addr;

	return hif_info;
}
qdf_export_symbol(hif_get_addl_pipe_info);

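/**
 * hif_set_nss_wifiol_mode() - set the NSS wifi offload mode
 * @osc: hif context
 * @mode: NSS wifi offload mode to store in the hif context
 *
 * Return: 0 (always succeeds)
 */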
uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->nss_wifi_ol_mode = mode;
	return 0;
}
qdf_export_symbol(hif_set_nss_wifiol_mode);
#endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */
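/**
 * hif_set_attribute() - store the hif attribute flags
 * @osc: hif context
 * @hif_attrib: attribute flags to record in the hif context
 *
 * Return: none
 */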
void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->hif_attribute = hif_attrib;
}


/**
 * hif_disable_interrupt() - disable the copy-complete interrupt of a pipe
 * @osc: hif context
 * @pipe_num: pipe (copy engine) number whose interrupt should be disabled
 *
 * Only applicable to legacy copy engines currently.
 *
 * Return: none
 */
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
	uint32_t ctrl_addr = CE_state->ctrl_addr;

	Q_TARGET_ACCESS_BEGIN(scn);
	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
	Q_TARGET_ACCESS_END(scn);
}
qdf_export_symbol(hif_disable_interrupt);
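
/*
 * Direct CE register accesses such as the one above follow a common
 * pattern on legacy targets: bracket the access with
 * Q_TARGET_ACCESS_BEGIN()/Q_TARGET_ACCESS_END() so the target stays awake
 * while its registers are touched. A minimal sketch of the pattern
 * (illustrative only; error handling is caller-specific):
 *
 *	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
 *		return;
 *	...read or write CE registers...
 *	if (Q_TARGET_ACCESS_END(scn) < 0)
 *		return;
 */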

/**
 * hif_fw_event_handler() - hif fw event handler
 * @hif_state: pointer to hif ce state structure
 *
 * Raise the HTC callback registered for firmware events.
 *
 * Return: none
 */
static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
{
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	if (!msg_callbacks->fwEventHandler)
		return;

	msg_callbacks->fwEventHandler(msg_callbacks->Context,
				      QDF_STATUS_E_FAILURE);
}

#ifndef QCA_WIFI_3_0
/**
 * hif_fw_interrupt_handler() - FW interrupt handler
 * @irq: irq number
 * @arg: the user pointer
 *
 * Called from the PCI interrupt handler when the firmware raises an
 * interrupt to the Host.
 *
 * Only registered for legacy CE devices.
 *
 * Return: status of handled irq
 */
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	struct hif_softc *scn = arg;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	uint32_t fw_indicator_address, fw_indicator;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return ATH_ISR_NOSCHED;

	fw_indicator_address = hif_state->fw_indicator_address;
	/* For sudden unplug this will return ~0 */
	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);

	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
		/* ACK: clear Target-side pending event */
		A_TARGET_WRITE(scn, fw_indicator_address,
			       fw_indicator & ~FW_IND_EVENT_PENDING);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;

		if (hif_state->started) {
			hif_fw_event_handler(hif_state);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 * fw_indicator is used as a bitmap, defined as:
			 * FW_IND_EVENT_PENDING 0x1
			 * FW_IND_INITIALIZED 0x2
			 * FW_IND_NEEDRECOVER 0x4
			 */
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
					("%s: Early firmware event indicated 0x%x\n",
					 __func__, fw_indicator));
		}
	} else {
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;
	}

	return ATH_ISR_SCHED;
}
#else
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	return ATH_ISR_SCHED;
}
#endif /* #ifndef QCA_WIFI_3_0 */

/**
 * hif_wlan_disable() - call the platform driver to disable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode to the platform driver to disable
 * wlan.
 *
 * Return: void
 */
void hif_wlan_disable(struct hif_softc *scn)
{
	enum pld_driver_mode mode;
	uint32_t con_mode = hif_get_conparam(scn);

	if (scn->target_status == TARGET_STATUS_RESET)
		return;

	if (QDF_GLOBAL_FTM_MODE == con_mode)
		mode = PLD_FTM;
	else if (QDF_IS_EPPING_ENABLED(con_mode))
		mode = PLD_EPPING;
	else
		mode = PLD_MISSION;

	pld_wlan_disable(scn->qdf_dev->dev, mode);
}
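/**
 * hif_get_wake_ce_id() - find the copy engine servicing wake interrupts
 * @scn: hif context
 * @ce_id: output parameter for the wake CE id
 *
 * The DL pipe of HTC_CTRL_RSVD_SVC is expected to map to the wake CE,
 * so the lookup goes through hif_map_service_to_pipe().
 *
 * Return: 0 on success, negative errno on failure
 */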
int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
{
	QDF_STATUS status;
	uint8_t ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;

	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
					 HTC_CTRL_RSVD_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
		return qdf_status_to_os_return(status);
	}

	*ce_id = dl_pipe;

	return 0;
}
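
/*
 * Example (hypothetical caller, for illustration only): a bus layer that
 * wants to arm a wake interrupt might resolve the wake CE first and then
 * look up that CE's IRQ line:
 *
 *	uint8_t wake_ce_id;
 *	int ret = hif_get_wake_ce_id(scn, &wake_ce_id);
 *
 *	if (ret)
 *		return ret;
 *	(map wake_ce_id to its IRQ and call enable_irq_wake() on it)
 */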