/*
 * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include <target_type.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#include "qdf_module.h"
#include "wlan_cfg.h"

#define CE_POLL_TIMEOUT 10      /* ms */

#define AGC_DUMP         1
#define CHANINFO_DUMP    2
#define BB_WATCHDOG_DUMP 3
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
#define PCIE_ACCESS_DUMP 4
#endif
#include "mp_dev.h"
#ifdef HIF_CE_LOG_INFO
#include "qdf_hang_event_notifier.h"
#endif

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290) || \
     defined(QCA_WIFI_QCA6018)) && !defined(QCA_WIFI_SUPPORT_SRNG)
#define QCA_WIFI_SUPPORT_SRNG
#endif

/* Forward references */
QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix EV118783: poll to check whether a BMI response has arrived,
 * rather than waiting only for an interrupt that may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC  1000

#ifdef CONFIG_BYPASS_QMI
#define BYPASS_QMI 1
#else
#define BYPASS_QMI 0
#endif

#ifdef ENABLE_10_4_FW_HDR
#if (ENABLE_10_4_FW_HDR == 1)
#define WDI_IPA_SERVICE_GROUP 5
#define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
#define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
#define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
#endif /* ENABLE_10_4_FW_HDR == 1 */
#endif /* ENABLE_10_4_FW_HDR */

QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn);
static void hif_config_rri_on_ddr(struct hif_softc *scn);

/**
 * hif_target_access_log_dump() - dump the target access log
 *
 * Return: n/a
 */
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
static void hif_target_access_log_dump(void)
{
        hif_target_dump_access_log();
}
#endif

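/**
 * hif_trigger_dump() - trigger various dump commands on the target
 * @hif_ctx: opaque HIF handle
 * @cmd_id: AGC_DUMP, CHANINFO_DUMP, BB_WATCHDOG_DUMP or PCIE_ACCESS_DUMP
 * @start: true to start a capture, false to dump the collected data
 *         (ignored by the watchdog and PCIe access dumps)
 *
 * Return: n/a
 */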
void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
                      uint8_t cmd_id, bool start)
{
        struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

        switch (cmd_id) {
        case AGC_DUMP:
                if (start)
                        priv_start_agc(scn);
                else
                        priv_dump_agc(scn);
                break;
        case CHANINFO_DUMP:
                if (start)
                        priv_start_cap_chaninfo(scn);
                else
                        priv_dump_chaninfo(scn);
                break;
        case BB_WATCHDOG_DUMP:
                priv_dump_bbwatchdog(scn);
                break;
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
        case PCIE_ACCESS_DUMP:
                hif_target_access_log_dump();
                break;
#endif
        default:
                HIF_ERROR("%s: Invalid htc dump command", __func__);
                break;
        }
}

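/**
 * ce_poll_timeout() - service a polled copy engine from its timer
 * @arg: CE_state of the copy engine being polled
 *
 * Services the CE and, while the poll timer is still marked as
 * initialized, re-arms it for another CE_POLL_TIMEOUT ms.
 *
 * Return: n/a
 */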
static void ce_poll_timeout(void *arg)
{
        struct CE_state *CE_state = (struct CE_state *)arg;

        if (CE_state->timer_inited) {
                ce_per_engine_service(CE_state->scn, CE_state->id);
                qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
        }
}

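/**
 * roundup_pwr2() - round a value up to the next power of two
 * @n: value to round up
 *
 * A value that is already a power of two is returned unchanged;
 * e.g. roundup_pwr2(64) returns 64 while roundup_pwr2(1000) returns 1024.
 *
 * Return: the smallest power of two >= n, or 0 (after asserting)
 * if n is too large
 */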
static unsigned int roundup_pwr2(unsigned int n)
{
        int i;
        unsigned int test_pwr2;

        if (!(n & (n - 1)))
                return n; /* already a power of 2 */

        test_pwr2 = 4;
        for (i = 0; i < 29; i++) {
                if (test_pwr2 > n)
                        return test_pwr2;
                test_pwr2 = test_pwr2 << 1;
        }

        QDF_ASSERT(0); /* n too large */
        return 0;
}

#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40

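/*
 * Per-CE shadow register offsets used for ring write-index updates:
 * ADRASTEA_SRC_WR_INDEX_OFFSET for source (host->target) rings and
 * ADRASTEA_DST_WR_INDEX_OFFSET for destination (target->host) rings.
 */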
static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
        { 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
        { 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
        { 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
        { 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
        { 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
        { 1, ADRASTEA_DST_WR_INDEX_OFFSET},
        { 2, ADRASTEA_DST_WR_INDEX_OFFSET},
        { 7, ADRASTEA_DST_WR_INDEX_OFFSET},
        { 8, ADRASTEA_DST_WR_INDEX_OFFSET},
#ifdef QCA_WIFI_3_0_ADRASTEA
        { 9, ADRASTEA_DST_WR_INDEX_OFFSET},
        { 10, ADRASTEA_DST_WR_INDEX_OFFSET},
        { 11, ADRASTEA_DST_WR_INDEX_OFFSET},
#endif
};

#ifdef QCN7605_SUPPORT
static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
        { 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
        { 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
        { 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
        { 3, ADRASTEA_DST_WR_INDEX_OFFSET},
        { 1, ADRASTEA_DST_WR_INDEX_OFFSET},
        { 2, ADRASTEA_DST_WR_INDEX_OFFSET},
        { 7, ADRASTEA_DST_WR_INDEX_OFFSET},
        { 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

#ifdef WLAN_FEATURE_EPPING
static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
        { 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
        { 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
        { 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
        { 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
        { 1, ADRASTEA_DST_WR_INDEX_OFFSET},
        { 2, ADRASTEA_DST_WR_INDEX_OFFSET},
        { 5, ADRASTEA_DST_WR_INDEX_OFFSET},
        { 7, ADRASTEA_DST_WR_INDEX_OFFSET},
        { 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
 * mapping of HTC services to HIF pipes.
 */
/*
 * This authoritative table defines Copy Engine configuration and the mapping
 * of services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
 ============================================================================
   Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
              |                      |      | ctio | Size     | Frequency
              |                      |      | n    |          |
 ============================================================================
   tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
   descriptor |                      |      |      | O(100B)  | and regular
   download   |                      |      |      |          |
 ----------------------------------------------------------------------------
   rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
   indication |                      |      |      | O(10B)   | regular
   upload     |                      |      |      |          |
 ----------------------------------------------------------------------------
   MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
   upload     |                      |      |      | O(1000B) | (frequent
   e.g. noise |                      |      |      |          | during IP1.0
   packets    |                      |      |      |          | testing)
 ----------------------------------------------------------------------------
   MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
   download   |                      |      |      | O(1000B) | (frequent
   e.g.       |                      |      |      |          | during IP1.0
   misdirecte |                      |      |      |          | testing)
   d EAPOL    |                      |      |      |          |
   packets    |                      |      |      |          |
 ----------------------------------------------------------------------------
   n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
              | DATA_VO (uplink)     |      |      |          |
 ----------------------------------------------------------------------------
   n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
              | DATA_VO (downlink)   |      |      |          |
 ----------------------------------------------------------------------------
   WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
              |                      |      |      | O(100B)  |
 ----------------------------------------------------------------------------
   WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
   messages   | (downlink)           |      |      | O(100B)  |
              |                      |      |      |          |
 ----------------------------------------------------------------------------
   n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
              | HTC_RAW_STREAMS      |      |      |          |
              | (uplink)             |      |      |          |
 ----------------------------------------------------------------------------
   n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
              | HTC_RAW_STREAMS      |      |      |          |
              | (downlink)           |      |      |          |
 ----------------------------------------------------------------------------
   diag       | none (raw CE)        | CE 7 | t<>h |    4     | Diag Window
              |                      |      |      |          | infrequent
 ============================================================================
 */

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
        {
                WMI_DATA_VO_SVC,
                PIPEDIR_OUT,    /* out = UL = host -> target */
                3,
        },
        {
                WMI_DATA_VO_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                2,
        },
        {
                WMI_DATA_BK_SVC,
                PIPEDIR_OUT,    /* out = UL = host -> target */
                3,
        },
        {
                WMI_DATA_BK_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                2,
        },
        {
                WMI_DATA_BE_SVC,
                PIPEDIR_OUT,    /* out = UL = host -> target */
                3,
        },
        {
                WMI_DATA_BE_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                2,
        },
        {
                WMI_DATA_VI_SVC,
                PIPEDIR_OUT,    /* out = UL = host -> target */
                3,
        },
        {
                WMI_DATA_VI_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                2,
        },
        {
                WMI_CONTROL_SVC,
                PIPEDIR_OUT,    /* out = UL = host -> target */
                3,
        },
        {
                WMI_CONTROL_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                2,
        },
        {
                HTC_CTRL_RSVD_SVC,
                PIPEDIR_OUT,    /* out = UL = host -> target */
                0,              /* could be moved to 3 (share with WMI) */
        },
        {
                HTC_CTRL_RSVD_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                2,
        },
        {
                HTC_RAW_STREAMS_SVC, /* not currently used */
                PIPEDIR_OUT,    /* out = UL = host -> target */
                0,
        },
        {
                HTC_RAW_STREAMS_SVC, /* not currently used */
                PIPEDIR_IN,     /* in = DL = target -> host */
                2,
        },
        {
                HTT_DATA_MSG_SVC,
                PIPEDIR_OUT,    /* out = UL = host -> target */
                4,
        },
        {
                HTT_DATA_MSG_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                1,
        },
        {
                WDI_IPA_TX_SVC,
                PIPEDIR_OUT,    /* out = UL = host -> target */
                5,
        },
#if defined(QCA_WIFI_3_0_ADRASTEA)
        {
                HTT_DATA2_MSG_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                9,
        },
        {
                HTT_DATA3_MSG_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                10,
        },
        {
                PACKET_LOG_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                11,
        },
#endif
        /* (Additions here) */

        { /* Must be last */
                0,
                0,
                0,
        },
};

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN = TARGET to HOST */
#if (defined(QCA_WIFI_QCA8074))
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
        { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
        { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
        { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
        { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
        { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
        { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
        { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
        { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
        { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
        { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
        { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
        { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
        /* (Additions here) */
        { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
};
#endif

#if (defined(QCA_WIFI_QCA8074V2))
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
        { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
        { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
        { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
        { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
        { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
        { WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9, },
        { WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2, },
        { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
        { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
        { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
        { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
        { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
        { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
        { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
        /* (Additions here) */
        { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6018))
static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
        { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
        { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
        { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
        { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
        { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
        { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
        { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
        { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
        { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
        { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
        { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
        { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
        /* (Additions here) */
        { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
};
#endif

#if (defined(QCA_WIFI_QCN9000))
static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
        { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
        { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
        { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
        { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
        { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
        { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
        { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
        { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
        { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
        { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
        /* (Additions here) */
        { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
};
#endif

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN = TARGET to HOST */
#ifdef QCN7605_SUPPORT
static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
        { WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
        { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
        { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
        { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
        { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
        { WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
        { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
        { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
        { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
        { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
        { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
        { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
        { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
        { HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
#ifdef IPA_OFFLOAD
        { WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
#else
        { HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
#endif
        { PACKET_LOG_SVC, PIPEDIR_IN, 7, },
        /* (Additions here) */
        { 0, 0, 0, },
};
#endif

#if (defined(QCA_WIFI_QCA6290))
#ifdef QCA_6290_AP_MODE
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
        { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
        { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
        { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
        { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
        { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
        { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
        { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
        { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
        { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
        { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
        /* (Additions here) */
        { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
        { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
        { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
        { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
        { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
        { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
        { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
        { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
        /* (Additions here) */
        { 0, 0, 0, },
};
#endif
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6390))
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
        { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
        { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
        { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
        { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
        { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
        { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
        { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
        { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
        /* (Additions here) */
        { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
};
#endif

static struct service_to_pipe target_service_to_ce_map_qca6490[] = {
        { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
        { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
        { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
        { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
        { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
        { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
        { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
        { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
        /* (Additions here) */
        { 0, 0, 0, },
};

#if (defined(QCA_WIFI_QCA6750))
static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
        { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
        { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
        { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
        { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
        { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
        { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
        { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
        { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
        { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
        { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
        /* (Additions here) */
        { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
};
#endif

static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
        {
                WMI_DATA_VO_SVC,
                PIPEDIR_OUT,    /* out = UL = host -> target */
                3,
        },
        {
                WMI_DATA_VO_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                2,
        },
        {
                WMI_DATA_BK_SVC,
                PIPEDIR_OUT,    /* out = UL = host -> target */
                3,
        },
        {
                WMI_DATA_BK_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                2,
        },
        {
                WMI_DATA_BE_SVC,
                PIPEDIR_OUT,    /* out = UL = host -> target */
                3,
        },
        {
                WMI_DATA_BE_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                2,
        },
        {
                WMI_DATA_VI_SVC,
                PIPEDIR_OUT,    /* out = UL = host -> target */
                3,
        },
        {
                WMI_DATA_VI_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                2,
        },
        {
                WMI_CONTROL_SVC,
                PIPEDIR_OUT,    /* out = UL = host -> target */
                3,
        },
        {
                WMI_CONTROL_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                2,
        },
        {
                HTC_CTRL_RSVD_SVC,
                PIPEDIR_OUT,    /* out = UL = host -> target */
                0,              /* could be moved to 3 (share with WMI) */
        },
        {
                HTC_CTRL_RSVD_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                1,
        },
        {
                HTC_RAW_STREAMS_SVC, /* not currently used */
                PIPEDIR_OUT,    /* out = UL = host -> target */
                0,
        },
        {
                HTC_RAW_STREAMS_SVC, /* not currently used */
                PIPEDIR_IN,     /* in = DL = target -> host */
                1,
        },
        {
                HTT_DATA_MSG_SVC,
                PIPEDIR_OUT,    /* out = UL = host -> target */
                4,
        },
#ifdef WLAN_FEATURE_FASTPATH
        {
                HTT_DATA_MSG_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                5,
        },
#else /* WLAN_FEATURE_FASTPATH */
        {
                HTT_DATA_MSG_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                1,
        },
#endif /* WLAN_FEATURE_FASTPATH */

        /* (Additions here) */

        { /* Must be last */
                0,
                0,
                0,
        },
};

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

#ifdef WLAN_FEATURE_EPPING
static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
        {WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
        {WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
        {WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
        {WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
        {WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
        {WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
        {WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
        {WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
        {WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
        {WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
        {HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
        {HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
        {HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
        {HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
        {HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
        {HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
        {0, 0, 0,},                             /* Must be last */
};

void hif_select_epping_service_to_pipe_map(struct service_to_pipe
                                           **tgt_svc_map_to_use,
                                           uint32_t *sz_tgt_svc_map_to_use)
{
        *tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
        *sz_tgt_svc_map_to_use =
                        sizeof(target_service_to_ce_map_wlan_epping);
}
#endif

#ifdef QCN7605_SUPPORT
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
                               uint32_t *sz_tgt_svc_map_to_use)
{
        *tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
        *sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
}
#else
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
                               uint32_t *sz_tgt_svc_map_to_use)
{
        HIF_ERROR("%s: QCN7605 not supported", __func__);
}
#endif

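/**
 * hif_select_service_to_pipe_map() - select the service-to-pipe map for
 *      the current target
 * @scn: HIF context
 * @tgt_svc_map_to_use: returned pointer to the selected map
 * @sz_tgt_svc_map_to_use: returned size of the selected map in bytes
 *
 * Picks the epping map when epping is enabled; otherwise selects the map
 * matching the target type, falling back to the generic wlan map for
 * unknown targets.
 *
 * Return: none
 */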
static void hif_select_service_to_pipe_map(struct hif_softc *scn,
                                           struct service_to_pipe
                                           **tgt_svc_map_to_use,
                                           uint32_t *sz_tgt_svc_map_to_use)
{
        uint32_t mode = hif_get_conparam(scn);
        struct hif_target_info *tgt_info = &scn->target_info;

        if (QDF_IS_EPPING_ENABLED(mode)) {
                hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
                                                      sz_tgt_svc_map_to_use);
        } else {
                switch (tgt_info->target_type) {
                default:
                        *tgt_svc_map_to_use = target_service_to_ce_map_wlan;
                        *sz_tgt_svc_map_to_use =
                                sizeof(target_service_to_ce_map_wlan);
                        break;
                case TARGET_TYPE_QCN7605:
                        hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
                                                  sz_tgt_svc_map_to_use);
                        break;
                case TARGET_TYPE_AR900B:
                case TARGET_TYPE_QCA9984:
                case TARGET_TYPE_IPQ4019:
                case TARGET_TYPE_QCA9888:
                case TARGET_TYPE_AR9888:
                case TARGET_TYPE_AR9888V2:
                        *tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
                        *sz_tgt_svc_map_to_use =
                                sizeof(target_service_to_ce_map_ar900b);
                        break;
                case TARGET_TYPE_QCA6290:
                        *tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
                        *sz_tgt_svc_map_to_use =
                                sizeof(target_service_to_ce_map_qca6290);
                        break;
                case TARGET_TYPE_QCA6390:
                        *tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
                        *sz_tgt_svc_map_to_use =
                                sizeof(target_service_to_ce_map_qca6390);
                        break;
                case TARGET_TYPE_QCA6490:
                        *tgt_svc_map_to_use = target_service_to_ce_map_qca6490;
                        *sz_tgt_svc_map_to_use =
                                sizeof(target_service_to_ce_map_qca6490);
                        break;
                case TARGET_TYPE_QCA6750:
                        *tgt_svc_map_to_use = target_service_to_ce_map_qca6750;
                        *sz_tgt_svc_map_to_use =
                                sizeof(target_service_to_ce_map_qca6750);
                        break;
                case TARGET_TYPE_QCA8074:
                        *tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
                        *sz_tgt_svc_map_to_use =
                                sizeof(target_service_to_ce_map_qca8074);
                        break;
                case TARGET_TYPE_QCA8074V2:
                        *tgt_svc_map_to_use =
                                target_service_to_ce_map_qca8074_v2;
                        *sz_tgt_svc_map_to_use =
                                sizeof(target_service_to_ce_map_qca8074_v2);
                        break;
                case TARGET_TYPE_QCA6018:
                        *tgt_svc_map_to_use =
                                target_service_to_ce_map_qca6018;
                        *sz_tgt_svc_map_to_use =
                                sizeof(target_service_to_ce_map_qca6018);
                        break;
                case TARGET_TYPE_QCN9000:
                        *tgt_svc_map_to_use =
                                target_service_to_ce_map_qcn9000;
                        *sz_tgt_svc_map_to_use =
                                sizeof(target_service_to_ce_map_qcn9000);
                        break;
                }
        }
}

/**
 * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
 * @ce_state : pointer to the state context of the CE
 *
 * Description:
 * Sets the htt_rx_data or htt_tx_data attribute of the state structure
 * if the CE serves one of the HTT DATA services.
 *
 * Return:
 * true if the CE serves an HTT DATA service, false otherwise
 */
static bool ce_mark_datapath(struct CE_state *ce_state)
{
        struct service_to_pipe *svc_map;
        uint32_t map_sz, map_len;
        int i;
        bool rc = false;

        if (ce_state) {
                hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
                                               &map_sz);

                map_len = map_sz / sizeof(struct service_to_pipe);
                for (i = 0; i < map_len; i++) {
                        if ((svc_map[i].pipenum == ce_state->id) &&
                            ((svc_map[i].service_id == HTT_DATA_MSG_SVC) ||
                             (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
                             (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
                                /* HTT CEs are unidirectional */
                                if (svc_map[i].pipedir == PIPEDIR_IN)
                                        ce_state->htt_rx_data = true;
                                else
                                        ce_state->htt_tx_data = true;
                                rc = true;
                        }
                }
        }
        return rc;
}

/**
 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
 * @ce_id: ce in question
 * @ring: ring state being examined
 * @type: "src_ring" or "dest_ring" string for identifying the ring
 *
 * Warns on non-zero index values.
 * Causes a kernel panic if the ring is not empty during initialization.
 */
static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
                                         char *type)
{
        if (ring->write_index != 0 || ring->sw_index != 0)
                HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
                          ce_id, type, ring->sw_index, ring->write_index);
        if (ring->write_index != ring->sw_index)
                QDF_BUG(0);
}

#ifdef IPA_OFFLOAD
/**
 * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @base_addr: pointer to copyengine ring base address
 * @ce_ring: copyengine instance
 * @nentries: number of entries to be allocated
 * @desc_size: ce desc size
 *
 * Return: QDF_STATUS_SUCCESS - for success
 */
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
                                     qdf_dma_addr_t *base_addr,
                                     struct CE_ring_state *ce_ring,
                                     unsigned int nentries, uint32_t desc_size)
{
        if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
            !ce_srng_based(scn)) {
                if (!scn->ipa_ce_ring) {
                        scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(
                                scn->qdf_dev,
                                nentries * desc_size + CE_DESC_RING_ALIGN);
                        if (!scn->ipa_ce_ring) {
                                HIF_ERROR(
                                "%s: Failed to allocate memory for IPA ce ring",
                                __func__);
                                return QDF_STATUS_E_NOMEM;
                        }
                }
                *base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
                                                  &scn->ipa_ce_ring->mem_info);
                ce_ring->base_addr_owner_space_unaligned =
                                                scn->ipa_ce_ring->vaddr;
        } else {
                ce_ring->base_addr_owner_space_unaligned =
                        qdf_mem_alloc_consistent(scn->qdf_dev,
                                                 scn->qdf_dev->dev,
                                                 (nentries * desc_size +
                                                 CE_DESC_RING_ALIGN),
                                                 base_addr);
                if (!ce_ring->base_addr_owner_space_unaligned) {
                        HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
                                  __func__, CE_id);
                        return QDF_STATUS_E_NOMEM;
                }
        }
        return QDF_STATUS_SUCCESS;
}

/**
 * ce_free_desc_ring() - Frees copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @ce_ring: copyengine instance
 * @desc_size: ce desc size
 *
 * Return: None
 */
static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
                              struct CE_ring_state *ce_ring, uint32_t desc_size)
{
        if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
            !ce_srng_based(scn)) {
                if (scn->ipa_ce_ring) {
                        qdf_mem_shared_mem_free(scn->qdf_dev,
                                                scn->ipa_ce_ring);
                        scn->ipa_ce_ring = NULL;
                }
                ce_ring->base_addr_owner_space_unaligned = NULL;
        } else {
                qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
                        ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
                        ce_ring->base_addr_owner_space_unaligned,
                        ce_ring->base_addr_CE_space, 0);
                ce_ring->base_addr_owner_space_unaligned = NULL;
        }
}
#else
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
                                     qdf_dma_addr_t *base_addr,
                                     struct CE_ring_state *ce_ring,
                                     unsigned int nentries, uint32_t desc_size)
{
        ce_ring->base_addr_owner_space_unaligned =
                qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
                                         (nentries * desc_size +
                                         CE_DESC_RING_ALIGN), base_addr);
        if (!ce_ring->base_addr_owner_space_unaligned) {
                HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
                          __func__, CE_id);
                return QDF_STATUS_E_NOMEM;
        }
        return QDF_STATUS_SUCCESS;
}

static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
                              struct CE_ring_state *ce_ring, uint32_t desc_size)
{
        qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
                ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
                ce_ring->base_addr_owner_space_unaligned,
                ce_ring->base_addr_CE_space, 0);
        ce_ring->base_addr_owner_space_unaligned = NULL;
}
#endif /* IPA_OFFLOAD */

/*
 * TODO: Need to explore the possibility of having this as part of a
 * target context instead of a global array.
 */
static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void);

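/**
 * ce_service_register_module() - register a CE service implementation
 * @target_type: CE service type being registered (e.g. CE_SVC_LEGACY or
 *      CE_SVC_SRNG)
 * @ce_attach: constructor returning the service's ce_ops
 *
 * Allows the legacy and SRNG CE service modules to make their ce_ops
 * available to ce_services_attach().
 *
 * Return: none
 */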
void ce_service_register_module(enum ce_target_type target_type,
                                struct ce_ops* (*ce_attach)(void))
{
        if (target_type < CE_MAX_TARGET_TYPE)
                ce_attach_register[target_type] = ce_attach;
}

qdf_export_symbol(ce_service_register_module);

/**
 * ce_srng_based() - Does this target use srng
 * @scn: pointer to the HIF context
 *
 * Description:
 * returns true if the target is SRNG based
 *
 * Return:
 * true if the target is SRNG based, false otherwise
 */
bool ce_srng_based(struct hif_softc *scn)
{
        struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
        struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);

        switch (tgt_info->target_type) {
        case TARGET_TYPE_QCA8074:
        case TARGET_TYPE_QCA8074V2:
        case TARGET_TYPE_QCA6290:
        case TARGET_TYPE_QCA6390:
        case TARGET_TYPE_QCA6490:
        case TARGET_TYPE_QCA6750:
        case TARGET_TYPE_QCA6018:
        case TARGET_TYPE_QCN9000:
                return true;
        default:
                return false;
        }
        return false;
}
qdf_export_symbol(ce_srng_based);

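/**
 * ce_services_attach() - attach the CE service matching this target
 * @scn: HIF context
 *
 * Selects the SRNG based ce_ops for SRNG targets and the legacy ce_ops
 * otherwise, provided the corresponding service module has registered
 * itself via ce_service_register_module().
 *
 * Return: ce_ops of the selected CE service, or NULL if none registered
 */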
#ifdef QCA_WIFI_SUPPORT_SRNG
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
        struct ce_ops *ops = NULL;

        if (ce_srng_based(scn)) {
                if (ce_attach_register[CE_SVC_SRNG])
                        ops = ce_attach_register[CE_SVC_SRNG]();
        } else if (ce_attach_register[CE_SVC_LEGACY]) {
                ops = ce_attach_register[CE_SVC_LEGACY]();
        }

        return ops;
}
#else /* QCA_WIFI_SUPPORT_SRNG */
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
        if (ce_attach_register[CE_SVC_LEGACY])
                return ce_attach_register[CE_SVC_LEGACY]();

        return NULL;
}
#endif /* QCA_WIFI_SUPPORT_SRNG */

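/**
 * hif_prepare_hal_shadow_register_cfg() - fetch the shadow register v2
 *      config from the attached CE service
 * @scn: HIF context
 * @shadow_config: returned pointer to the shadow register config
 * @num_shadow_registers_configured: returned number of shadow registers
 *      configured
 *
 * Return: none
 */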
static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
                struct pld_shadow_reg_v2_cfg **shadow_config,
                int *num_shadow_registers_configured)
{
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

        return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
                        scn, shadow_config, num_shadow_registers_configured);
}

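/**
 * ce_get_desc_size() - fetch the CE descriptor size for a ring type
 * @scn: HIF context
 * @ring_type: source, destination or status ring
 *
 * Return: descriptor size in bytes, as reported by the CE service
 */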
static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
                                        uint8_t ring_type)
{
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

        return hif_state->ce_services->ce_get_desc_size(ring_type);
}

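/**
 * ce_alloc_ring_state() - allocate and initialize a CE ring state
 * @CE_state: CE whose ring is being allocated
 * @ring_type: source, destination or status ring
 * @nentries: number of ring entries; expected to be a power of two,
 *      since the ring index mask is computed as nentries - 1
 *
 * Allocates the ring bookkeeping structure and per-transfer context
 * array, plus the DMA-coherent descriptor ring itself, and aligns the
 * ring base to CE_DESC_RING_ALIGN.
 *
 * Return: pointer to the new ring state, or NULL on allocation failure
 */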
static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
                                                 uint8_t ring_type,
                                                 uint32_t nentries)
{
        uint32_t ce_nbytes;
        char *ptr;
        qdf_dma_addr_t base_addr;
        struct CE_ring_state *ce_ring;
        uint32_t desc_size;
        struct hif_softc *scn = CE_state->scn;

        ce_nbytes = sizeof(struct CE_ring_state)
                + (nentries * sizeof(void *));
        ptr = qdf_mem_malloc(ce_nbytes);
        if (!ptr)
                return NULL;

        ce_ring = (struct CE_ring_state *)ptr;
        ptr += sizeof(struct CE_ring_state);
        ce_ring->nentries = nentries;
        ce_ring->nentries_mask = nentries - 1;

        ce_ring->low_water_mark_nentries = 0;
        ce_ring->high_water_mark_nentries = nentries;
        ce_ring->per_transfer_context = (void **)ptr;

        desc_size = ce_get_desc_size(scn, ring_type);

        /* Legacy platforms that do not support cache
         * coherent DMA are unsupported
         */
        if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
                               ce_ring, nentries,
                               desc_size) !=
            QDF_STATUS_SUCCESS) {
                HIF_ERROR("%s: ring has no DMA mem",
                          __func__);
                qdf_mem_free(ce_ring);
                return NULL;
        }
        ce_ring->base_addr_CE_space_unaligned = base_addr;

        /* Correctly initialize memory to 0 to
         * prevent garbage data from crashing the system
         * when downloading firmware
         */
        qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
                     nentries * desc_size +
                     CE_DESC_RING_ALIGN);

        if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {

                ce_ring->base_addr_CE_space =
                        (ce_ring->base_addr_CE_space_unaligned +
                         CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);

                ce_ring->base_addr_owner_space = (void *)
                        (((size_t) ce_ring->base_addr_owner_space_unaligned +
                         CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
        } else {
                ce_ring->base_addr_CE_space =
                                ce_ring->base_addr_CE_space_unaligned;
                ce_ring->base_addr_owner_space =
                                ce_ring->base_addr_owner_space_unaligned;
        }

        return ce_ring;
}

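/**
 * ce_ring_setup() - hand a ring over to the CE service for HW setup
 * @scn: HIF context
 * @ring_type: source, destination or status ring
 * @ce_id: CE in question
 * @ring: ring state being configured
 * @attr: CE attributes of this copy engine
 *
 * Return: status from the CE service's ce_ring_setup handler
 */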
static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
                         uint32_t ce_id, struct CE_ring_state *ring,
                         struct CE_attr *attr)
{
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

        return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
                                                     ring, attr);
}

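/**
 * hif_ce_bus_early_suspend() - pause non-WMI copy engines for suspend
 * @scn: HIF context
 *
 * Moves every running CE, except the WMI control pipes, into the
 * CE_PAUSED state so no further traffic is serviced while the bus is
 * suspending.
 *
 * Return: 0 on success, error code from hif_map_service_to_pipe otherwise
 */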
int hif_ce_bus_early_suspend(struct hif_softc *scn)
{
        uint8_t ul_pipe, dl_pipe;
        int ce_id, status, ul_is_polled, dl_is_polled;
        struct CE_state *ce_state;

        status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
                                         &ul_pipe, &dl_pipe,
                                         &ul_is_polled, &dl_is_polled);
        if (status) {
                HIF_ERROR("%s: pipe_mapping failure", __func__);
                return status;
        }

        for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
                if (ce_id == ul_pipe)
                        continue;
                if (ce_id == dl_pipe)
                        continue;

                ce_state = scn->ce_id_to_state[ce_id];
                qdf_spin_lock_bh(&ce_state->ce_index_lock);
                if (ce_state->state == CE_RUNNING)
                        ce_state->state = CE_PAUSED;
                qdf_spin_unlock_bh(&ce_state->ce_index_lock);
        }

        return status;
}

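/**
 * hif_ce_bus_late_resume() - restart copy engines after bus resume
 * @scn: HIF context
 *
 * Returns paused CEs to the CE_RUNNING state and, for CEs left in
 * CE_PENDING, flushes the source ring write index to hardware.
 *
 * Return: 0
 */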
int hif_ce_bus_late_resume(struct hif_softc *scn)
{
        int ce_id;
        struct CE_state *ce_state;
        int write_index = 0;
        bool index_updated;

        for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
                ce_state = scn->ce_id_to_state[ce_id];
                qdf_spin_lock_bh(&ce_state->ce_index_lock);
                if (ce_state->state == CE_PENDING) {
                        write_index = ce_state->src_ring->write_index;
                        CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
                                                  write_index);
                        ce_state->state = CE_RUNNING;
                        index_updated = true;
                } else {
                        index_updated = false;
                }

                if (ce_state->state == CE_PAUSED)
                        ce_state->state = CE_RUNNING;
                qdf_spin_unlock_bh(&ce_state->ce_index_lock);

                if (index_updated)
                        hif_record_ce_desc_event(scn, ce_id,
                                                 RESUME_WRITE_INDEX_UPDATE,
                                                 NULL, NULL, write_index, 0);
        }

        return 0;
}

/**
 * ce_oom_recovery() - try to recover rx ce from oom condition
 * @context: CE_state of the CE with oom rx ring
 *
 * The executing work will continue to be rescheduled until
 * at least 1 descriptor is successfully posted to the rx ring.
 *
 * Return: none
 */
static void ce_oom_recovery(void *context)
{
        struct CE_state *ce_state = context;
        struct hif_softc *scn = ce_state->scn;
        struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
        struct HIF_CE_pipe_info *pipe_info =
                &ce_softc->pipe_info[ce_state->id];

        hif_post_recv_buffers_for_pipe(pipe_info);
}

#ifdef HIF_CE_DEBUG_DATA_BUF
/**
 * alloc_mem_ce_debug_hist_data() - Allocate memory for the data pointed
 *      to by the CE descriptors.
 *      Allocates HIF_CE_HISTORY_MAX records of CE_DEBUG_MAX_DATA_BUF_SIZE
 *      bytes each.
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
        struct hif_ce_desc_event *event = NULL;
        struct hif_ce_desc_event *hist_ev = NULL;
        uint32_t index = 0;

        hist_ev =
        (struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

        if (!hist_ev)
                return QDF_STATUS_E_NOMEM;

        scn->hif_ce_desc_hist.data_enable[ce_id] = true;
        for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
                event = &hist_ev[index];
                event->data =
                        (uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
                if (!event->data) {
                        hif_err_rl("ce debug data alloc failed");
                        return QDF_STATUS_E_NOMEM;
                }
        }
        return QDF_STATUS_SUCCESS;
}

/**
 * free_mem_ce_debug_hist_data() - Free the memory for the data pointed
 *      to by the CE descriptors.
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: None
 */
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
        struct hif_ce_desc_event *event = NULL;
        struct hif_ce_desc_event *hist_ev = NULL;
        uint32_t index = 0;

        hist_ev =
        (struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

        if (!hist_ev)
                return;

        for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
                event = &hist_ev[index];
                if (event->data)
                        qdf_mem_free(event->data);
                event->data = NULL;
                event = NULL;
        }
}
#endif /* HIF_CE_DEBUG_DATA_BUF */

#ifndef HIF_CE_DEBUG_DATA_DYNAMIC_BUF
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];

/**
 * alloc_mem_ce_debug_history() - Allocate CE descriptor history
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 * @src_nentries: source ce ring entries
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id,
                           uint32_t src_nentries)
{
        struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

        ce_hist->hist_ev[ce_id] = hif_ce_desc_history[ce_id];
        ce_hist->enable[ce_id] = 1;

        if (src_nentries)
                alloc_mem_ce_debug_hist_data(scn, ce_id);
        else
                ce_hist->data_enable[ce_id] = false;

        return QDF_STATUS_SUCCESS;
}

/**
 * free_mem_ce_debug_history() - Free CE descriptor history
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: None
 */
static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
{
        struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

        ce_hist->enable[ce_id] = 0;
        if (ce_hist->data_enable[ce_id]) {
                ce_hist->data_enable[ce_id] = false;
                free_mem_ce_debug_hist_data(scn, ce_id);
        }
        ce_hist->hist_ev[ce_id] = NULL;
}
#else
static inline QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
                           uint32_t src_nentries)
{
        return QDF_STATUS_SUCCESS;
}

static inline void
free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
#endif /* (HIF_CONFIG_SLUB_DEBUG_ON) || (HIF_CE_DEBUG_DATA_BUF) */
#else
#if defined(HIF_CE_DEBUG_DATA_BUF)

static QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
                           uint32_t src_nentries)
{
        scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
        qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));

        if (!scn->hif_ce_desc_hist.hist_ev[CE_id]) {
                scn->hif_ce_desc_hist.enable[CE_id] = 0;
                return QDF_STATUS_E_NOMEM;
        } else {
                scn->hif_ce_desc_hist.enable[CE_id] = 1;
                return QDF_STATUS_SUCCESS;
        }
}

static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
{
        struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
        struct hif_ce_desc_event *hist_ev = ce_hist->hist_ev[CE_id];

        if (!hist_ev)
                return;

        if (ce_hist->data_enable[CE_id]) {
                ce_hist->data_enable[CE_id] = false;
                free_mem_ce_debug_hist_data(scn, CE_id);
        }

        ce_hist->enable[CE_id] = 0;
        qdf_mem_free(ce_hist->hist_ev[CE_id]);
        ce_hist->hist_ev[CE_id] = NULL;
}

#else

static inline QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
                           uint32_t src_nentries)
{
        return QDF_STATUS_SUCCESS;
}

static inline void
free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
#endif /* HIF_CE_DEBUG_DATA_BUF */
#endif /* HIF_CE_DEBUG_DATA_DYNAMIC_BUF */

Pavankumar Nandeshwar7eddedd2018-10-25 16:57:08 +05301485#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
c_cgodavfda96ad2017-09-07 16:16:00 +05301486/**
1487 * reset_ce_debug_history() - reset the index and ce id used for dumping the
1488 * CE records on the console using sysfs.
1489 * @scn: hif scn handle
1490 *
1491 * Return: none
1492 */
1493static inline void reset_ce_debug_history(struct hif_softc *scn)
1494{
1495 struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1496 /* Initialise the CE debug history sysfs interface inputs ce_id and
1497 * index. Disable data storing
1498 */
1499 ce_hist->hist_index = 0;
1500 ce_hist->hist_id = 0;
1501}
Pavankumar Nandeshwar7eddedd2018-10-25 16:57:08 +05301502#else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
Dustin Brown2f750872018-10-17 12:16:20 -07001503static inline void reset_ce_debug_history(struct hif_softc *scn) { }
Pavankumar Nandeshwar7eddedd2018-10-25 16:57:08 +05301504#endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
c_cgodavfda96ad2017-09-07 16:16:00 +05301505
Nandha Kishore Easwaran7cdaae22018-07-30 15:02:51 +05301506void ce_enable_polling(void *cestate)
1507{
1508 struct CE_state *CE_state = (struct CE_state *)cestate;
1509
1510 if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
1511 CE_state->timer_inited = true;
1512}
1513
1514void ce_disable_polling(void *cestate)
1515{
1516 struct CE_state *CE_state = (struct CE_state *)cestate;
1517
1518 if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
1519 CE_state->timer_inited = false;
1520}
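
/*
 * Illustrative sketch only (not part of the driver): one way a CE owner
 * could pair the polling gates above with the poll timer across a
 * quiesce/restart cycle. my_ce_quiesce()/my_ce_restart() are hypothetical
 * names; the sketch assumes qdf_timer_stop() from qdf_timer.h.
 */
#if 0
static void my_ce_quiesce(struct CE_state *ce)
{
	/* clear timer_inited so ce_poll_timeout() stops re-arming itself */
	ce_disable_polling(ce);
	qdf_timer_stop(&ce->poll_timer);
}

static void my_ce_restart(struct CE_state *ce)
{
	/* takes effect only for CEs created with CE_ATTR_ENABLE_POLL */
	ce_enable_polling(ce);
	if (ce->timer_inited)
		qdf_timer_mod(&ce->poll_timer, CE_POLL_TIMEOUT);
}
#endif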
1521
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001522/*
1523 * Initialize a Copy Engine based on caller-supplied attributes.
1524 * This may be called once to initialize both source and destination
1525 * rings or it may be called twice for separate source and destination
1526 * initialization. It may be that only one side or the other is
1527 * initialized by software/firmware.
Houston Hoffman233e9092015-09-02 13:37:21 -07001528 *
1529 * This should be called during the initialization sequence before
1530 * interrupts are enabled, so we don't have to worry about thread safety.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001531 */
Komal Seelam644263d2016-02-22 20:45:49 +05301532struct CE_handle *ce_init(struct hif_softc *scn,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001533 unsigned int CE_id, struct CE_attr *attr)
1534{
1535 struct CE_state *CE_state;
1536 uint32_t ctrl_addr;
1537 unsigned int nentries;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001538 bool malloc_CE_state = false;
1539 bool malloc_src_ring = false;
Yun Park3fb36442017-08-17 17:37:53 -07001540 int status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001541
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301542 QDF_ASSERT(CE_id < scn->ce_count);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001543 ctrl_addr = CE_BASE_ADDRESS(CE_id);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001544 CE_state = scn->ce_id_to_state[CE_id];
1545
1546 if (!CE_state) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001547 CE_state =
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301548 (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
Madhvapathi Srirambfb01122019-01-07 09:17:29 +05301549 if (!CE_state)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001550 return NULL;
Madhvapathi Srirambfb01122019-01-07 09:17:29 +05301551
Houston Hoffman233e9092015-09-02 13:37:21 -07001552 malloc_CE_state = true;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301553 qdf_spinlock_create(&CE_state->ce_index_lock);
Houston Hoffman233e9092015-09-02 13:37:21 -07001554
1555 CE_state->id = CE_id;
1556 CE_state->ctrl_addr = ctrl_addr;
1557 CE_state->state = CE_RUNNING;
1558 CE_state->attr_flags = attr->flags;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001559 }
1560 CE_state->scn = scn;
Aditya Sathish80bbaef2018-10-25 10:02:05 +05301561 CE_state->service = ce_engine_service_reg;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001562
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301563 qdf_atomic_init(&CE_state->rx_pending);
Jeff Johnson8d639a02019-03-18 09:51:11 -07001564 if (!attr) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001565 /* Already initialized; caller wants the handle */
1566 return (struct CE_handle *)CE_state;
1567 }
1568
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001569 if (CE_state->src_sz_max)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301570 QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001571 else
1572 CE_state->src_sz_max = attr->src_sz_max;
1573
c_cgodavfda96ad2017-09-07 16:16:00 +05301574 ce_init_ce_desc_event_log(scn, CE_id,
1575 attr->src_nentries + attr->dest_nentries);
Houston Hoffman68e837e2015-12-04 12:57:24 -08001576
Manjunathappa Prakash1ec17ab2020-03-02 16:23:55 -08001577 wlan_set_ce_srng_cfg(&scn->wlan_ce_srng_cfg);
1578
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001579 /* source ring setup */
1580 nentries = attr->src_nentries;
1581 if (nentries) {
1582 struct CE_ring_state *src_ring;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001583
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001584 nentries = roundup_pwr2(nentries);
1585 if (CE_state->src_ring) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301586 QDF_ASSERT(CE_state->src_ring->nentries == nentries);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001587 } else {
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301588 src_ring = CE_state->src_ring =
1589 ce_alloc_ring_state(CE_state,
1590 CE_RING_SRC,
1591 nentries);
1592 if (!src_ring) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001593 /* cannot allocate src ring. If the
1594 * CE_state is allocated locally free
1595 * CE_State and return error.
1596 */
1597 HIF_ERROR("%s: src ring has no mem", __func__);
1598 if (malloc_CE_state) {
1599 /* allocated CE_state locally */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301600 qdf_mem_free(CE_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001601 malloc_CE_state = false;
1602 }
1603 return NULL;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001604 }
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001605 /* we can allocate src ring. Mark that the src ring is
1606 * allocated locally
1607 */
1608 malloc_src_ring = true;
1609
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001610 /*
1611 * Also allocate a shadow src ring in
1612 * regular mem to use for faster access.
1613 */
1614 src_ring->shadow_base_unaligned =
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301615 qdf_mem_malloc(nentries *
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001616 sizeof(struct CE_src_desc) +
1617 CE_DESC_RING_ALIGN);
Madhvapathi Srirambfb01122019-01-07 09:17:29 +05301618 if (!src_ring->shadow_base_unaligned)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001619 goto error_no_dma_mem;
Madhvapathi Srirambfb01122019-01-07 09:17:29 +05301620
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001621 src_ring->shadow_base = (struct CE_src_desc *)
1622 (((size_t) src_ring->shadow_base_unaligned +
1623 CE_DESC_RING_ALIGN - 1) &
1624 ~(CE_DESC_RING_ALIGN - 1));
1625
Yun Park3fb36442017-08-17 17:37:53 -07001626 status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
1627 src_ring, attr);
1628 if (status < 0)
Houston Hoffman4411ad42016-03-14 21:12:04 -07001629 goto error_target_access;
Houston Hoffmanf789c662016-04-12 15:39:04 -07001630
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301631 ce_ring_test_initial_indexes(CE_id, src_ring,
1632 "src_ring");
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001633 }
1634 }
1635
1636 /* destination ring setup */
1637 nentries = attr->dest_nentries;
1638 if (nentries) {
1639 struct CE_ring_state *dest_ring;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001640
1641 nentries = roundup_pwr2(nentries);
1642 if (CE_state->dest_ring) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301643 QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001644 } else {
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301645 dest_ring = CE_state->dest_ring =
1646 ce_alloc_ring_state(CE_state,
1647 CE_RING_DEST,
1648 nentries);
1649 if (!dest_ring) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001650 /* cannot allocate dst ring. If the CE_state
1651 * or src ring is allocated locally free
1652 * CE_State and src ring and return error.
1653 */
1654 HIF_ERROR("%s: dest ring has no mem",
1655 __func__);
Poddar, Siddarth55d6da02017-03-31 18:42:54 +05301656 goto error_no_dma_mem;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001657 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001658
Yun Park3fb36442017-08-17 17:37:53 -07001659 status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001660 dest_ring, attr);
Yun Park3fb36442017-08-17 17:37:53 -07001661 if (status < 0)
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301662 goto error_target_access;
Houston Hoffman47808172016-05-06 10:04:21 -07001663
1664 ce_ring_test_initial_indexes(CE_id, dest_ring,
1665 "dest_ring");
1666
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301667 /* For srng based target, init status ring here */
1668 if (ce_srng_based(CE_state->scn)) {
1669 CE_state->status_ring =
1670 ce_alloc_ring_state(CE_state,
1671 CE_RING_STATUS,
1672 nentries);
Jeff Johnson8d639a02019-03-18 09:51:11 -07001673 if (!CE_state->status_ring) {
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301674 /*Allocation failed. Cleanup*/
1675 qdf_mem_free(CE_state->dest_ring);
1676 if (malloc_src_ring) {
1677 qdf_mem_free
1678 (CE_state->src_ring);
1679 CE_state->src_ring = NULL;
1680 malloc_src_ring = false;
1681 }
1682 if (malloc_CE_state) {
1683 /* allocated CE_state locally */
1684 scn->ce_id_to_state[CE_id] =
1685 NULL;
1686 qdf_mem_free(CE_state);
1687 malloc_CE_state = false;
1688 }
Houston Hoffman4411ad42016-03-14 21:12:04 -07001689
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301690 return NULL;
1691 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001692
Yun Park3fb36442017-08-17 17:37:53 -07001693 status = ce_ring_setup(scn, CE_RING_STATUS,
1694 CE_id, CE_state->status_ring,
1695 attr);
1696 if (status < 0)
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301697 goto error_target_access;
1698
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001699 }
Houston Hoffman31b25ec2016-09-19 13:12:30 -07001700
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001701 /* epping */
1702 /* poll timer */
Nandha Kishore Easwaran7cdaae22018-07-30 15:02:51 +05301703 if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301704 qdf_timer_init(scn->qdf_dev,
Nandha Kishore Easwaran7cdaae22018-07-30 15:02:51 +05301705 &CE_state->poll_timer,
1706 ce_poll_timeout,
1707 CE_state,
1708 QDF_TIMER_TYPE_WAKE_APPS);
1709 ce_enable_polling(CE_state);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301710 qdf_timer_mod(&CE_state->poll_timer,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001711 CE_POLL_TIMEOUT);
1712 }
1713 }
1714 }
1715
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301716 if (!ce_srng_based(scn)) {
1717 /* Enable CE error interrupts */
1718 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1719 goto error_target_access;
1720 CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
1721 if (Q_TARGET_ACCESS_END(scn) < 0)
1722 goto error_target_access;
1723 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001724
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08001725 qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
1726 ce_oom_recovery, CE_state);
1727
Houston Hoffmanc7d54292016-04-13 18:55:37 -07001728 /* update the htt_data attribute */
1729 ce_mark_datapath(CE_state);
Houston Hoffmanb01db182017-03-13 14:38:09 -07001730 scn->ce_id_to_state[CE_id] = CE_state;
Houston Hoffmanc7d54292016-04-13 18:55:37 -07001731
Venkata Sharath Chandra Manchalaec01bbc2019-04-25 13:31:34 -07001732 alloc_mem_ce_debug_history(scn, CE_id, attr->src_nentries);
c_cgodavfda96ad2017-09-07 16:16:00 +05301733
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001734 return (struct CE_handle *)CE_state;
1735
Houston Hoffman4411ad42016-03-14 21:12:04 -07001736error_target_access:
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001737error_no_dma_mem:
1738 ce_fini((struct CE_handle *)CE_state);
1739 return NULL;
1740}
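
/*
 * Illustrative sketch only: minimal bring-up of a host-to-target copy
 * engine through ce_init() above, with teardown left to ce_fini().
 * my_bringup_ce() and the attribute values are assumptions chosen for
 * illustration, not the configuration of any real target.
 */
#if 0
static struct CE_handle *my_bringup_ce(struct hif_softc *scn,
				       unsigned int ce_id)
{
	struct CE_attr attr = {0};

	attr.flags = 0;			/* interrupt driven, no polling */
	attr.src_nentries = 32;		/* rounded up to a power of 2 */
	attr.src_sz_max = 2048;		/* max bytes per source buffer */
	attr.dest_nentries = 0;		/* send-only pipe, no dest ring */

	/* on failure ce_init() frees any partially allocated state */
	return ce_init(scn, ce_id, &attr);
}
#endif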
1741
Aditya Sathish80bbaef2018-10-25 10:02:05 +05301742/**
1743 * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs
1744 * @hif_ctx: HIF Context
1745 *
1746 * API to check if polling is enabled on all CEs. Returns true when polling
1747 * is enabled on all CEs.
1748 *
1749 * Return: bool
1750 */
1751bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
1752{
1753 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1754 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1755 struct CE_attr *attr;
1756 int id;
1757
1758 for (id = 0; id < scn->ce_count; id++) {
1759 attr = &hif_state->host_ce_config[id];
1760 if (attr && (attr->dest_nentries) &&
1761 !(attr->flags & CE_ATTR_ENABLE_POLL))
1762 return false;
1763 }
1764 return true;
1765}
1766qdf_export_symbol(hif_is_polled_mode_enabled);
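
/*
 * Illustrative sketch only: a caller picking a servicing strategy from
 * the query above. my_choose_service_mode() is a hypothetical name.
 */
#if 0
static void my_choose_service_mode(struct hif_opaque_softc *hif_ctx)
{
	if (hif_is_polled_mode_enabled(hif_ctx))
		HIF_INFO("all CEs polled; rely on the CE poll timer");
	else
		HIF_INFO("at least one CE is interrupt driven");
}
#endif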
1767
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001768#ifdef WLAN_FEATURE_FASTPATH
1769/**
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001770 * hif_enable_fastpath() - Update that we have enabled fastpath mode
1771 * @hif_ctx: HIF context
1772 *
1773 * For use in data path
1774 *
1775 * Return: void
1776 */
1777void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
1778{
1779 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1780
Houston Hoffmand63cd742016-12-05 11:59:56 -08001781 if (ce_srng_based(scn)) {
1782 HIF_INFO("%s, srng rings do not support fastpath", __func__);
1783 return;
1784 }
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -08001785 HIF_DBG("%s, Enabling fastpath mode", __func__);
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001786 scn->fastpath_mode_on = true;
1787}
1788
1789/**
1790 * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
1791 * @hif_ctx: HIF Context
1792 *
1793 * For use in data path to skip HTC
1794 *
1795 * Return: bool
1796 */
1797bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
1798{
1799 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1800
1801 return scn->fastpath_mode_on;
1802}
1803
Nandha Kishore Easwaran7cdaae22018-07-30 15:02:51 +05301804/**
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001805 * hif_get_ce_handle - API to get CE handle for FastPath mode
1806 * @hif_ctx: HIF Context
1807 * @id: CopyEngine Id
1808 *
1809 * API to return CE handle for fastpath mode
1810 *
1811 * Return: opaque CE handle
1812 */
1813void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
1814{
1815 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1816
1817 return scn->ce_id_to_state[id];
1818}
Aditya Sathish80bbaef2018-10-25 10:02:05 +05301819qdf_export_symbol(hif_get_ce_handle);
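
/*
 * Illustrative sketch only: a datapath client caching the raw CE handle
 * once fastpath mode is confirmed. The CE id 4 is an assumption for
 * illustration; the real id comes from the service-to-pipe mapping.
 */
#if 0
static void *my_cache_htt_tx_ce(struct hif_opaque_softc *hif_ctx)
{
	if (!hif_is_fastpath_mode_enabled(hif_ctx))
		return NULL;

	/* opaque pointer to the CE state, for use by the fastpath Tx path */
	return hif_get_ce_handle(hif_ctx, 4 /* assumed HTT tx CE id */);
}
#endif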
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001820
1821/**
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001822 * ce_h2t_tx_ce_cleanup() - Placeholder function for H2T CE cleanup.
1823 * No processing is required inside this function.
1824 * @ce_hdl: Copy engine handle
1825 * Using an assert, this function makes sure that
1826 * the TX CE has been processed completely.
Houston Hoffman9a831ef2015-09-03 14:42:40 -07001827 *
1828 * This is called while dismantling CE structures. No other thread
Jeff Johnson1002ca52018-05-12 11:29:24 -07001829 * should be using these structures while dismantling is occurring,
Houston Hoffman9a831ef2015-09-03 14:42:40 -07001830 * therefore no locking is needed.
1831 *
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001832 * Return: none
1833 */
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001834void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001835{
1836 struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1837 struct CE_ring_state *src_ring = ce_state->src_ring;
Komal Seelam644263d2016-02-22 20:45:49 +05301838 struct hif_softc *sc = ce_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001839 uint32_t sw_index, write_index;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001840
Houston Hoffman85925072016-05-06 17:02:18 -07001841 if (hif_is_nss_wifi_enabled(sc))
1842 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001843
Houston Hoffmanc7d54292016-04-13 18:55:37 -07001844 if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -08001845 HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
Houston Hoffman85925072016-05-06 17:02:18 -07001846 __func__, __LINE__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001847 sw_index = src_ring->sw_index;
1848 write_index = src_ring->sw_index;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001849
1850 /* At this point Tx CE should be clean */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301851 qdf_assert_always(sw_index == write_index);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001852 }
1853}
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001854
1855/**
1856 * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
1857 * @ce_hdl: Handle to CE
1858 *
1859 * These buffers are never allocated on the fly, but
1860 * are allocated only once during HIF start and freed
1861 * only once during HIF stop.
1862 * NOTE:
1863 * The assumption here is there is no in-flight DMA in progress
1864 * currently, so that buffers can be freed up safely.
1865 *
1866 * Return: NONE
1867 */
1868void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
1869{
1870 struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1871 struct CE_ring_state *dst_ring = ce_state->dest_ring;
1872 qdf_nbuf_t nbuf;
1873 int i;
1874
Houston Hoffman7fe51b12016-11-14 18:01:05 -08001875 if (ce_state->scn->fastpath_mode_on == false)
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001876 return;
Houston Hoffman7fe51b12016-11-14 18:01:05 -08001877
1878 if (!ce_state->htt_rx_data)
1879 return;
1880
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001881 /*
1882 * when fastpath_mode is on and for datapath CEs. Unlike other CE's,
1883 * this CE is completely full: does not leave one blank space, to
1884 * distinguish between empty queue & full queue. So free all the
1885 * entries.
1886 */
1887 for (i = 0; i < dst_ring->nentries; i++) {
1888 nbuf = dst_ring->per_transfer_context[i];
1889
1890 /*
1891 * The reasons for doing this check are:
1892 * 1) Protect against calling cleanup before allocating buffers
1893 * 2) In a corner case, FASTPATH_mode_on may be set, but we
1894 * could have a partially filled ring, because of a memory
1895 * allocation failure in the middle of allocating ring.
1896 * This check accounts for that case, checking
1897 * fastpath_mode_on flag or started flag would not have
1898 * covered that case. This is not in performance path,
1899 * so OK to do this.
1900 */
Houston Hoffman1c728302017-03-10 16:58:49 -08001901 if (nbuf) {
1902 qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
1903 QDF_DMA_FROM_DEVICE);
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001904 qdf_nbuf_free(nbuf);
Houston Hoffman1c728302017-03-10 16:58:49 -08001905 }
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001906 }
1907}
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001908
1909/**
1910 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
1911 * @scn: HIF handle
1912 *
1913 * Datapath Rx CEs are a special case, where we reuse all the message buffers.
1914 * Hence we have to post all the entries in the pipe, even in the beginning,
1915 * unlike for other CE pipes where one less than dest_nentries are filled in
1916 * the beginning.
1917 *
1918 * Return: None
1919 */
1920static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1921{
1922 int pipe_num;
1923 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1924
1925 if (scn->fastpath_mode_on == false)
1926 return;
1927
1928 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1929 struct HIF_CE_pipe_info *pipe_info =
1930 &hif_state->pipe_info[pipe_num];
1931 struct CE_state *ce_state =
1932 scn->ce_id_to_state[pipe_info->pipe_num];
1933
1934 if (ce_state->htt_rx_data)
1935 atomic_inc(&pipe_info->recv_bufs_needed);
1936 }
1937}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001938#else
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001939static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001940{
1941}
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001942
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001943static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001944{
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001945 return false;
1946}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001947#endif /* WLAN_FEATURE_FASTPATH */
1948
1949void ce_fini(struct CE_handle *copyeng)
1950{
1951 struct CE_state *CE_state = (struct CE_state *)copyeng;
1952 unsigned int CE_id = CE_state->id;
Komal Seelam644263d2016-02-22 20:45:49 +05301953 struct hif_softc *scn = CE_state->scn;
Kiran Venkatappaae1a3702017-12-29 21:08:10 +05301954 uint32_t desc_size;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001955
Balamurugan Mahalingamf6d30352018-01-31 16:17:24 +05301956 bool inited = CE_state->timer_inited;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001957 CE_state->state = CE_UNUSED;
1958 scn->ce_id_to_state[CE_id] = NULL;
Balamurugan Mahalingamf6d30352018-01-31 16:17:24 +05301959 /* Set the flag to false first to stop processing in ce_poll_timeout */
Nandha Kishore Easwaran7cdaae22018-07-30 15:02:51 +05301960 ce_disable_polling(CE_state);
1961
Dhanashri Atre991ee4d2017-05-03 19:03:10 -07001962 qdf_lro_deinit(CE_state->lro_data);
1963
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001964 if (CE_state->src_ring) {
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001965 /* Cleanup the datapath Tx ring */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001966 ce_h2t_tx_ce_cleanup(copyeng);
1967
Kiran Venkatappaae1a3702017-12-29 21:08:10 +05301968 desc_size = ce_get_desc_size(scn, CE_RING_SRC);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001969 if (CE_state->src_ring->shadow_base_unaligned)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301970 qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001971 if (CE_state->src_ring->base_addr_owner_space_unaligned)
Sravan Kumar Kairam58e0adf2018-02-27 18:37:40 +05301972 ce_free_desc_ring(scn, CE_state->id,
1973 CE_state->src_ring,
1974 desc_size);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301975 qdf_mem_free(CE_state->src_ring);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001976 }
1977 if (CE_state->dest_ring) {
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001978 /* Cleanup the datapath Rx ring */
1979 ce_t2h_msg_ce_cleanup(copyeng);
1980
Kiran Venkatappaae1a3702017-12-29 21:08:10 +05301981 desc_size = ce_get_desc_size(scn, CE_RING_DEST);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001982 if (CE_state->dest_ring->base_addr_owner_space_unaligned)
Sravan Kumar Kairam58e0adf2018-02-27 18:37:40 +05301983 ce_free_desc_ring(scn, CE_state->id,
1984 CE_state->dest_ring,
1985 desc_size);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301986 qdf_mem_free(CE_state->dest_ring);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001987
1988 /* epping */
Balamurugan Mahalingamf6d30352018-01-31 16:17:24 +05301989 if (inited) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301990 qdf_timer_free(&CE_state->poll_timer);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001991 }
1992 }
Houston Hoffman31b25ec2016-09-19 13:12:30 -07001993 if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301994 /* Cleanup the datapath Tx ring */
1995 ce_h2t_tx_ce_cleanup(copyeng);
1996
1997 if (CE_state->status_ring->shadow_base_unaligned)
1998 qdf_mem_free(
1999 CE_state->status_ring->shadow_base_unaligned);
2000
Kiran Venkatappaae1a3702017-12-29 21:08:10 +05302001 desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05302002 if (CE_state->status_ring->base_addr_owner_space_unaligned)
Sravan Kumar Kairam58e0adf2018-02-27 18:37:40 +05302003 ce_free_desc_ring(scn, CE_state->id,
2004 CE_state->status_ring,
2005 desc_size);
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05302006 qdf_mem_free(CE_state->status_ring);
2007 }
Houston Hoffman03f46572016-12-12 12:53:56 -08002008
c_cgodavfda96ad2017-09-07 16:16:00 +05302009 free_mem_ce_debug_history(scn, CE_id);
2010 reset_ce_debug_history(scn);
2011 ce_deinit_ce_desc_event_log(scn, CE_id);
2012
Houston Hoffman03f46572016-12-12 12:53:56 -08002013 qdf_spinlock_destroy(&CE_state->ce_index_lock);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302014 qdf_mem_free(CE_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002015}
2016
Komal Seelam5584a7c2016-02-24 19:22:48 +05302017void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002018{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302019 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002020
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302021 qdf_mem_zero(&hif_state->msg_callbacks_pending,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002022 sizeof(hif_state->msg_callbacks_pending));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302023 qdf_mem_zero(&hif_state->msg_callbacks_current,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002024 sizeof(hif_state->msg_callbacks_current));
2025}
2026
2027/* Send the first nbytes bytes of the buffer */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302028QDF_STATUS
Komal Seelam5584a7c2016-02-24 19:22:48 +05302029hif_send_head(struct hif_opaque_softc *hif_ctx,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002030 uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302031 qdf_nbuf_t nbuf, unsigned int data_attr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002032{
Komal Seelam644263d2016-02-22 20:45:49 +05302033 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05302034 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002035 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
2036 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
2037 int bytes = nbytes, nfrags = 0;
2038 struct ce_sendlist sendlist;
2039 int status, i = 0;
2040 unsigned int mux_id = 0;
2041
Santosh Anbudbfae9b2018-07-12 15:40:49 +05302042 if (nbytes > qdf_nbuf_len(nbuf)) {
2043 HIF_ERROR("%s: nbytes:%d nbuf_len:%d", __func__, nbytes,
2044 (uint32_t)qdf_nbuf_len(nbuf));
2045 QDF_ASSERT(0);
2046 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002047
2048 transfer_id =
2049 (mux_id & MUX_ID_MASK) |
2050 (transfer_id & TRANSACTION_ID_MASK);
2051 data_attr &= DESC_DATA_FLAG_MASK;
2052 /*
2053 * The common case involves sending multiple fragments within a
2054 * single download (the tx descriptor and the tx frame header).
2055 * So, optimize for the case of multiple fragments by not even
2056 * checking whether it's necessary to use a sendlist.
2057 * The overhead of using a sendlist for a single buffer download
2058 * is not a big deal, since it happens rarely (for WMI messages).
2059 */
2060 ce_sendlist_init(&sendlist);
2061 do {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302062 qdf_dma_addr_t frag_paddr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002063 int frag_bytes;
2064
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302065 frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
2066 frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002067 /*
2068 * Clear the packet offset for all but the first CE desc.
2069 */
2070 if (i++ > 0)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302071 data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002072
2073 status = ce_sendlist_buf_add(&sendlist, frag_paddr,
2074 frag_bytes >
2075 bytes ? bytes : frag_bytes,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302076 qdf_nbuf_get_frag_is_wordstream
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002077 (nbuf,
2078 nfrags) ? 0 :
2079 CE_SEND_FLAG_SWAP_DISABLE,
2080 data_attr);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302081 if (status != QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002082 HIF_ERROR("%s: error, frag_num %d larger than limit",
2083 __func__, nfrags);
2084 return status;
2085 }
2086 bytes -= frag_bytes;
2087 nfrags++;
2088 } while (bytes > 0);
2089
2090 /* Make sure we have resources to handle this request */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302091 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002092 if (pipe_info->num_sends_allowed < nfrags) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302093 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002094 ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302095 return QDF_STATUS_E_RESOURCES;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002096 }
2097 pipe_info->num_sends_allowed -= nfrags;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302098 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002099
Jeff Johnson8d639a02019-03-18 09:51:11 -07002100 if (qdf_unlikely(!ce_hdl)) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002101 HIF_ERROR("%s: error CE handle is null", __func__);
2102 return A_ERROR;
2103 }
2104
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302105 QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302106 DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
Nandha Kishore Easwarane43583f2017-05-15 21:01:13 +05302107 QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
2108 sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002109 status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302110 QDF_ASSERT(status == QDF_STATUS_SUCCESS);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002111
2112 return status;
2113}
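
/*
 * Illustrative sketch only: a single-fragment send through hif_send_head()
 * above. It assumes the nbuf is already DMA mapped by the caller (as the
 * HTC layer does), so qdf_nbuf_get_frag_paddr() yields a valid address.
 * my_send_msg() is a hypothetical name.
 */
#if 0
static QDF_STATUS my_send_msg(struct hif_opaque_softc *hif_ctx,
			      uint8_t pipe, qdf_nbuf_t nbuf)
{
	/* transfer_id 0, full mapped length, no per-descriptor data attrs */
	return hif_send_head(hif_ctx, pipe, 0, qdf_nbuf_len(nbuf), nbuf, 0);
}
#endif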
2114
Komal Seelam5584a7c2016-02-24 19:22:48 +05302115void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
2116 int force)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002117{
Komal Seelam644263d2016-02-22 20:45:49 +05302118 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302119 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Komal Seelam644263d2016-02-22 20:45:49 +05302120
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002121 if (!force) {
2122 int resources;
2123 /*
2124 * Decide whether to actually poll for completions, or just
2125 * wait for a later chance. If there seem to be plenty of
2126 * resources left, then just wait, since checking involves
2127 * reading a CE register, which is a relatively expensive
2128 * operation.
2129 */
Komal Seelam644263d2016-02-22 20:45:49 +05302130 resources = hif_get_free_queue_number(hif_ctx, pipe);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002131 /*
2132 * If at least 50% of the total resources are still available,
2133 * don't bother checking again yet.
2134 */
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002135 if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
2136 1))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002137 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002138 }
Houston Hoffman56e0d702016-05-05 17:48:06 -07002139#if ATH_11AC_TXCOMPACT
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002140 ce_per_engine_servicereap(scn, pipe);
2141#else
2142 ce_per_engine_service(scn, pipe);
2143#endif
2144}
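
/*
 * Illustrative sketch only: nudging completion processing after queueing
 * a frame and letting the 50%-resources heuristic above decide whether
 * the CE register read is worthwhile. my_after_send() is a hypothetical
 * name.
 */
#if 0
static void my_after_send(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
{
	hif_send_complete_check(hif_ctx, pipe, 0 /* do not force */);
}
#endif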
2145
Komal Seelam5584a7c2016-02-24 19:22:48 +05302146uint16_t
2147hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002148{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302149 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002150 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
2151 uint16_t rv;
2152
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302153 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002154 rv = pipe_info->num_sends_allowed;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302155 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002156 return rv;
2157}
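
/*
 * Illustrative sketch only: caller-side flow control built on the query
 * above. Each fragment handed to hif_send_head() consumes one send slot,
 * so a multi-fragment frame needs that many free slots. my_can_send()
 * is a hypothetical name.
 */
#if 0
static bool my_can_send(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
			uint16_t nfrags)
{
	return hif_get_free_queue_number(hif_ctx, pipe) >= nfrags;
}
#endif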
2158
2159/* Called by lower (CE) layer when a send to Target completes. */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002160static void
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002161hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302162 void *transfer_context, qdf_dma_addr_t CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002163 unsigned int nbytes, unsigned int transfer_id,
2164 unsigned int sw_index, unsigned int hw_index,
2165 unsigned int toeplitz_hash_result)
2166{
2167 struct HIF_CE_pipe_info *pipe_info =
2168 (struct HIF_CE_pipe_info *)ce_context;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002169 unsigned int sw_idx = sw_index, hw_idx = hw_index;
Houston Hoffman85118512015-09-28 14:17:11 -07002170 struct hif_msg_callbacks *msg_callbacks =
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05302171 &pipe_info->pipe_callbacks;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002172
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002173 do {
2174 /*
Houston Hoffman85118512015-09-28 14:17:11 -07002175 * The upper layer callback will be triggered
2176 * when the last fragment is completed.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002177 */
Rakesh Pillai48633522019-11-15 15:07:53 +05302178 if (transfer_context != CE_SENDLIST_ITEM_CTXT)
2179 msg_callbacks->txCompletionHandler(
2180 msg_callbacks->Context,
2181 transfer_context, transfer_id,
2182 toeplitz_hash_result);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002183
Pavankumar Nandeshwar5bdd94b2018-09-05 18:16:21 +05302184 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
Houston Hoffman85118512015-09-28 14:17:11 -07002185 pipe_info->num_sends_allowed++;
Pavankumar Nandeshwar5bdd94b2018-09-05 18:16:21 +05302186 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002187 } while (ce_completed_send_next(copyeng,
2188 &ce_context, &transfer_context,
2189 &CE_data, &nbytes, &transfer_id,
2190 &sw_idx, &hw_idx,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302191 &toeplitz_hash_result) == QDF_STATUS_SUCCESS);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002192}
2193
Houston Hoffman910c6262015-09-28 12:56:25 -07002194/**
2195 * hif_ce_do_recv(): send message from copy engine to upper layers
2196 * @msg_callbacks: structure containing callback and callback context
2197 * @netbuff: skb containing message
2198 * @nbytes: number of bytes in the message
2199 * @pipe_info: used for the pipe_number info
2200 *
Jeff Johnsondc9c5592018-05-06 15:40:42 -07002201 * Checks the packet length, configures the length in the netbuff,
Houston Hoffman910c6262015-09-28 12:56:25 -07002202 * and calls the upper layer callback.
2203 *
2204 * return: None
2205 */
2206static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302207 qdf_nbuf_t netbuf, int nbytes,
Houston Hoffman910c6262015-09-28 12:56:25 -07002208 struct HIF_CE_pipe_info *pipe_info) {
2209 if (nbytes <= pipe_info->buf_sz) {
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302210 qdf_nbuf_set_pktlen(netbuf, nbytes);
Houston Hoffman910c6262015-09-28 12:56:25 -07002211 msg_callbacks->
2212 rxCompletionHandler(msg_callbacks->Context,
2213 netbuf, pipe_info->pipe_num);
2214 } else {
Jeff Johnsonb9450212017-09-18 10:12:38 -07002215 HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d",
Houston Hoffman910c6262015-09-28 12:56:25 -07002216 __func__, netbuf, nbytes);
Houston Hoffman1c728302017-03-10 16:58:49 -08002217
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302218 qdf_nbuf_free(netbuf);
Houston Hoffman910c6262015-09-28 12:56:25 -07002219 }
2220}
2221
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002222/* Called by lower (CE) layer when data is received from the Target. */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002223static void
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002224hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302225 void *transfer_context, qdf_dma_addr_t CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002226 unsigned int nbytes, unsigned int transfer_id,
2227 unsigned int flags)
2228{
2229 struct HIF_CE_pipe_info *pipe_info =
2230 (struct HIF_CE_pipe_info *)ce_context;
2231 struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
Houston Hoffman18c7fc52015-09-02 11:44:42 -07002232 struct CE_state *ce_state = (struct CE_state *) copyeng;
Komal Seelam644263d2016-02-22 20:45:49 +05302233 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Yue Maac6b2752019-05-08 17:17:12 -07002234 struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);
Houston Hoffman910c6262015-09-28 12:56:25 -07002235 struct hif_msg_callbacks *msg_callbacks =
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05302236 &pipe_info->pipe_callbacks;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002237
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002238 do {
Yue Maac6b2752019-05-08 17:17:12 -07002239 hif_pm_runtime_mark_last_busy(hif_ctx);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302240 qdf_nbuf_unmap_single(scn->qdf_dev,
2241 (qdf_nbuf_t) transfer_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302242 QDF_DMA_FROM_DEVICE);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002243
Houston Hoffman910c6262015-09-28 12:56:25 -07002244 atomic_inc(&pipe_info->recv_bufs_needed);
2245 hif_post_recv_buffers_for_pipe(pipe_info);
Komal Seelam6ee55902016-04-11 17:11:07 +05302246 if (scn->target_status == TARGET_STATUS_RESET)
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302247 qdf_nbuf_free(transfer_context);
Houston Hoffman49794a32015-12-21 12:14:56 -08002248 else
2249 hif_ce_do_recv(msg_callbacks, transfer_context,
Houston Hoffman9c0f80a2015-09-28 18:36:36 -07002250 nbytes, pipe_info);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002251
2252 /* Set up force_break flag if num of receives reaches
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002253 * MAX_NUM_OF_RECEIVES
2254 */
Houston Hoffman5bf441a2015-09-02 11:52:10 -07002255 ce_state->receive_count++;
Houston Hoffman05652722016-04-29 16:58:59 -07002256 if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
Houston Hoffman18c7fc52015-09-02 11:44:42 -07002257 ce_state->force_break = 1;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002258 break;
2259 }
2260 } while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
2261 &CE_data, &nbytes, &transfer_id,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302262 &flags) == QDF_STATUS_SUCCESS);
Houston Hoffmanf4607852015-12-17 17:14:40 -08002263
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002264}
2265
2266/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
2267
2268void
Komal Seelam5584a7c2016-02-24 19:22:48 +05302269hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002270 struct hif_msg_callbacks *callbacks)
2271{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302272 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002273
2274#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
2275 spin_lock_init(&pcie_access_log_lock);
2276#endif
2277 /* Save callbacks for later installation */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302278 qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002279 sizeof(hif_state->msg_callbacks_pending));
2280
2281}
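
/*
 * Illustrative sketch only: an HTC-like client installing its completion
 * handlers ahead of hif_start(). my_tx_done() and my_rx_done() are
 * hypothetical handlers assumed to match the hif_msg_callbacks
 * prototypes; they are not defined here.
 */
#if 0
static void my_register_callbacks(struct hif_opaque_softc *hif_ctx,
				  void *my_context)
{
	struct hif_msg_callbacks cbs = {0};

	cbs.Context = my_context;
	cbs.txCompletionHandler = my_tx_done;
	cbs.rxCompletionHandler = my_rx_done;

	/* saved as "pending"; installed per pipe once hif_start() runs */
	hif_post_init(hif_ctx, NULL, &cbs);
}
#endif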
2282
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002283static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002284{
2285 struct CE_handle *ce_diag = hif_state->ce_diag;
2286 int pipe_num;
Komal Seelam644263d2016-02-22 20:45:49 +05302287 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07002288 struct hif_msg_callbacks *hif_msg_callbacks =
2289 &hif_state->msg_callbacks_current;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002290
2291 /* daemonize("hif_compl_thread"); */
2292
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002293 if (scn->ce_count == 0) {
Houston Hoffmanc50572b2016-06-08 19:49:46 -07002294 HIF_ERROR("%s: Invalid ce_count", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002295 return -EINVAL;
2296 }
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07002297
2298 if (!hif_msg_callbacks ||
2299 !hif_msg_callbacks->rxCompletionHandler ||
2300 !hif_msg_callbacks->txCompletionHandler) {
2301 HIF_ERROR("%s: no completion handler registered", __func__);
2302 return -EFAULT;
2303 }
2304
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002305 A_TARGET_ACCESS_LIKELY(scn);
2306 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2307 struct CE_attr attr;
2308 struct HIF_CE_pipe_info *pipe_info;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002309
2310 pipe_info = &hif_state->pipe_info[pipe_num];
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002311 if (pipe_info->ce_hdl == ce_diag)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002312 continue; /* Handle Diagnostic CE specially */
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302313 attr = hif_state->host_ce_config[pipe_num];
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002314 if (attr.src_nentries) {
2315 /* pipe used to send to target */
Jeff Johnsonb9450212017-09-18 10:12:38 -07002316 HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK",
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002317 __func__, pipe_num, pipe_info);
2318 ce_send_cb_register(pipe_info->ce_hdl,
2319 hif_pci_ce_send_done, pipe_info,
2320 attr.flags & CE_ATTR_DISABLE_INTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002321 pipe_info->num_sends_allowed = attr.src_nentries - 1;
2322 }
2323 if (attr.dest_nentries) {
2324 /* pipe used to receive from target */
2325 ce_recv_cb_register(pipe_info->ce_hdl,
2326 hif_pci_ce_recv_data, pipe_info,
2327 attr.flags & CE_ATTR_DISABLE_INTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002328 }
Houston Hoffman6666df72015-11-30 16:48:35 -08002329
2330 if (attr.src_nentries)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302331 qdf_spinlock_create(&pipe_info->completion_freeq_lock);
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05302332
2333 qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
2334 sizeof(pipe_info->pipe_callbacks));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002335 }
Houston Hoffman6666df72015-11-30 16:48:35 -08002336
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002337 A_TARGET_ACCESS_UNLIKELY(scn);
2338 return 0;
2339}
2340
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002341/*
2342 * Install pending msg callbacks.
2343 *
2344 * TBDXXX: This hack is needed because upper layers install msg callbacks
2345 * for use with HTC before BMI is done; yet this HIF implementation
2346 * needs to continue to use BMI msg callbacks. Really, upper layers
2347 * should not register HTC callbacks until AFTER BMI phase.
2348 */
Komal Seelam644263d2016-02-22 20:45:49 +05302349static void hif_msg_callbacks_install(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002350{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302351 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002352
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302353 qdf_mem_copy(&hif_state->msg_callbacks_current,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002354 &hif_state->msg_callbacks_pending,
2355 sizeof(hif_state->msg_callbacks_pending));
2356}
2357
Komal Seelam5584a7c2016-02-24 19:22:48 +05302358void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
2359 uint8_t *DLPipe)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002360{
2361 int ul_is_polled, dl_is_polled;
2362
Komal Seelam644263d2016-02-22 20:45:49 +05302363 (void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002364 ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
2365}
2366
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002367/**
2368 * hif_dump_pipe_debug_count() - Log error count
Komal Seelam644263d2016-02-22 20:45:49 +05302369 * @scn: hif_softc pointer.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002370 *
2371 * Output the pipe error counts of each pipe to log file
2372 *
2373 * Return: N/A
2374 */
Komal Seelam644263d2016-02-22 20:45:49 +05302375void hif_dump_pipe_debug_count(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002376{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302377 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002378 int pipe_num;
2379
Jeff Johnson8d639a02019-03-18 09:51:11 -07002380 if (!hif_state) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002381 HIF_ERROR("%s hif_state is NULL", __func__);
2382 return;
2383 }
2384 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2385 struct HIF_CE_pipe_info *pipe_info;
2386
2387 pipe_info = &hif_state->pipe_info[pipe_num];
2388
2389 if (pipe_info->nbuf_alloc_err_count > 0 ||
2390 pipe_info->nbuf_dma_err_count > 0 ||
2391 pipe_info->nbuf_ce_enqueue_err_count)
2392 HIF_ERROR(
2393 "%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
2394 __func__, pipe_info->pipe_num,
2395 atomic_read(&pipe_info->recv_bufs_needed),
2396 pipe_info->nbuf_alloc_err_count,
2397 pipe_info->nbuf_dma_err_count,
2398 pipe_info->nbuf_ce_enqueue_err_count);
2399 }
2400}
2401
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002402static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
2403 void *nbuf, uint32_t *error_cnt,
2404 enum hif_ce_event_type failure_type,
2405 const char *failure_type_string)
2406{
2407 int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
2408 struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
2409 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2410 int ce_id = CE_state->id;
2411 uint32_t error_cnt_tmp;
2412
2413 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2414 error_cnt_tmp = ++(*error_cnt);
2415 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Himanshu Agarwal38cea4a2017-03-30 19:02:52 +05302416 HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002417 __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
2418 failure_type_string);
2419 hif_record_ce_desc_event(scn, ce_id, failure_type,
c_cgodavfda96ad2017-09-07 16:16:00 +05302420 NULL, nbuf, bufs_needed_tmp, 0);
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002421 /* if we fail to allocate the last buffer for an rx pipe,
2422 * there is no trigger to refill the ce and we will
2423 * eventually crash
2424 */
Himanshu Agarwalbedeed92017-03-21 14:05:10 +05302425 if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08002426 qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
Himanshu Agarwalbedeed92017-03-21 14:05:10 +05302427
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002428}
2429
2430
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08002431
2432
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302433QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002434{
2435 struct CE_handle *ce_hdl;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302436 qdf_size_t buf_sz;
Komal Seelam644263d2016-02-22 20:45:49 +05302437 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302438 QDF_STATUS status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002439 uint32_t bufs_posted = 0;
Yeshwanth Sriram Guntuka2a7ed0a2019-10-16 15:29:28 +05302440 unsigned int ce_id;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002441
2442 buf_sz = pipe_info->buf_sz;
2443 if (buf_sz == 0) {
2444 /* Unused Copy Engine */
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302445 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002446 }
2447
2448 ce_hdl = pipe_info->ce_hdl;
Yeshwanth Sriram Guntuka2a7ed0a2019-10-16 15:29:28 +05302449 ce_id = ((struct CE_state *)ce_hdl)->id;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002450
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302451 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002452 while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302453 qdf_dma_addr_t CE_data; /* CE space buffer address */
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302454 qdf_nbuf_t nbuf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002455
2456 atomic_dec(&pipe_info->recv_bufs_needed);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302457 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002458
Yeshwanth Sriram Guntuka2a7ed0a2019-10-16 15:29:28 +05302459 hif_record_ce_desc_event(scn, ce_id,
2460 HIF_RX_DESC_PRE_NBUF_ALLOC, NULL, NULL,
2461 0, 0);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302462 nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002463 if (!nbuf) {
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002464 hif_post_recv_buffers_failure(pipe_info, nbuf,
2465 &pipe_info->nbuf_alloc_err_count,
2466 HIF_RX_NBUF_ALLOC_FAILURE,
2467 "HIF_RX_NBUF_ALLOC_FAILURE");
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302468 return QDF_STATUS_E_NOMEM;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002469 }
2470
Yeshwanth Sriram Guntuka2a7ed0a2019-10-16 15:29:28 +05302471 hif_record_ce_desc_event(scn, ce_id,
2472 HIF_RX_DESC_PRE_NBUF_MAP, NULL, nbuf,
2473 0, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002474 /*
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302475 * qdf_nbuf_peek_header(nbuf, &data, &unused);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002476 * CE_data = dma_map_single(dev, data, buf_sz, );
2477 * DMA_FROM_DEVICE);
2478 */
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302479 status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302480 QDF_DMA_FROM_DEVICE);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002481
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302482 if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002483 hif_post_recv_buffers_failure(pipe_info, nbuf,
2484 &pipe_info->nbuf_dma_err_count,
2485 HIF_RX_NBUF_MAP_FAILURE,
2486 "HIF_RX_NBUF_MAP_FAILURE");
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302487 qdf_nbuf_free(nbuf);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302488 return status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002489 }
2490
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302491 CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
Yeshwanth Sriram Guntuka2a7ed0a2019-10-16 15:29:28 +05302492 hif_record_ce_desc_event(scn, ce_id,
2493 HIF_RX_DESC_POST_NBUF_MAP, NULL, nbuf,
2494 0, 0);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302495 qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002496 buf_sz, DMA_FROM_DEVICE);
2497 status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302498 if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002499 hif_post_recv_buffers_failure(pipe_info, nbuf,
2500 &pipe_info->nbuf_ce_enqueue_err_count,
2501 HIF_RX_NBUF_ENQUEUE_FAILURE,
2502 "HIF_RX_NBUF_ENQUEUE_FAILURE");
2503
Govind Singh4fcafd42016-08-08 12:37:31 +05302504 qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
2505 QDF_DMA_FROM_DEVICE);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302506 qdf_nbuf_free(nbuf);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302507 return status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002508 }
2509
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302510 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002511 bufs_posted++;
2512 }
2513 pipe_info->nbuf_alloc_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07002514 (pipe_info->nbuf_alloc_err_count > bufs_posted) ?
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002515 pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
2516 pipe_info->nbuf_dma_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07002517 (pipe_info->nbuf_dma_err_count > bufs_posted) ?
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002518 pipe_info->nbuf_dma_err_count - bufs_posted : 0;
2519 pipe_info->nbuf_ce_enqueue_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07002520 (pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002521 pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002522
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302523 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002524
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302525 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002526}
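
/*
 * Illustrative sketch only: the per-buffer recipe used above, shown in
 * isolation - allocate an nbuf, DMA map it for device-to-host traffic,
 * then hand it to the copy engine. my_post_one_rx_buf() is a hypothetical
 * name; the error counters and history events are omitted.
 */
#if 0
static QDF_STATUS my_post_one_rx_buf(struct hif_softc *scn,
				     struct CE_handle *ce_hdl,
				     qdf_size_t buf_sz)
{
	qdf_nbuf_t nbuf;
	QDF_STATUS status;

	nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
	if (!nbuf)
		return QDF_STATUS_E_NOMEM;

	status = qdf_nbuf_map_single(scn->qdf_dev, nbuf, QDF_DMA_FROM_DEVICE);
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(nbuf);
		return status;
	}

	status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf,
				     qdf_nbuf_get_frag_paddr(nbuf, 0));
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
				      QDF_DMA_FROM_DEVICE);
		qdf_nbuf_free(nbuf);
	}

	return status;
}
#endif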
2527
2528/*
2529 * Try to post all desired receive buffers for all pipes.
Govind Singhcaa850e2017-04-20 16:41:36 +05302530 * Returns success for non-fastpath rx copy engines, since
2531 * oom_allocation_work will be scheduled to recover any
2532 * failures, and an error status if receive buffers for the
2533 * fastpath rx copy engine could not be completely replenished.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002534 */
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302535QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002536{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302537 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302538 int pipe_num;
Aditya Sathish61f7fa32018-03-27 17:16:33 +05302539 struct CE_state *ce_state = NULL;
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302540 QDF_STATUS qdf_status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002541
2542 A_TARGET_ACCESS_LIKELY(scn);
2543 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2544 struct HIF_CE_pipe_info *pipe_info;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002545
Houston Hoffman85925072016-05-06 17:02:18 -07002546 ce_state = scn->ce_id_to_state[pipe_num];
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002547 pipe_info = &hif_state->pipe_info[pipe_num];
Houston Hoffman85925072016-05-06 17:02:18 -07002548
2549 if (hif_is_nss_wifi_enabled(scn) &&
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002550 ce_state && (ce_state->htt_rx_data))
Houston Hoffman85925072016-05-06 17:02:18 -07002551 continue;
Houston Hoffman85925072016-05-06 17:02:18 -07002552
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302553 qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
Aditya Sathish61f7fa32018-03-27 17:16:33 +05302554 if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state &&
Govind Singhcaa850e2017-04-20 16:41:36 +05302555 ce_state->htt_rx_data &&
2556 scn->fastpath_mode_on) {
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302557 A_TARGET_ACCESS_UNLIKELY(scn);
2558 return qdf_status;
Govind Singhcaa850e2017-04-20 16:41:36 +05302559 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002560 }
2561
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002562 A_TARGET_ACCESS_UNLIKELY(scn);
2563
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302564 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002565}
2566
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302567QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002568{
Komal Seelam644263d2016-02-22 20:45:49 +05302569 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05302570 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302571 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002572
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07002573 hif_update_fastpath_recv_bufs_cnt(scn);
2574
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07002575 hif_msg_callbacks_install(scn);
2576
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002577 if (hif_completion_thread_startup(hif_state))
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302578 return QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002579
Houston Hoffman271951f2016-11-12 15:24:27 -08002580 /* enable buffer cleanup */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002581 hif_state->started = true;
2582
Houston Hoffman271951f2016-11-12 15:24:27 -08002583 /* Post buffers once to start things off. */
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302584 qdf_status = hif_post_recv_buffers(scn);
2585 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
Houston Hoffman271951f2016-11-12 15:24:27 -08002586 /* cleanup is done in hif_ce_disable */
 2587 HIF_ERROR("%s: failed to post buffers", __func__);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302588 return qdf_status;
Houston Hoffman271951f2016-11-12 15:24:27 -08002589 }
2590
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302591 return qdf_status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002592}
2593
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002594static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002595{
Komal Seelam644263d2016-02-22 20:45:49 +05302596 struct hif_softc *scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002597 struct CE_handle *ce_hdl;
2598 uint32_t buf_sz;
2599 struct HIF_CE_state *hif_state;
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302600 qdf_nbuf_t netbuf;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302601 qdf_dma_addr_t CE_data;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002602 void *per_CE_context;
2603
2604 buf_sz = pipe_info->buf_sz;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002605 /* Unused Copy Engine */
2606 if (buf_sz == 0)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002607 return;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002608
2610 hif_state = pipe_info->HIF_CE_state;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002611 if (!hif_state->started)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002612 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002613
Komal Seelam02cf2f82016-02-22 20:44:25 +05302614 scn = HIF_GET_SOFTC(hif_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002615 ce_hdl = pipe_info->ce_hdl;
2616
Jeff Johnson8d639a02019-03-18 09:51:11 -07002617 if (!scn->qdf_dev)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002618 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002619 while (ce_revoke_recv_next
2620 (ce_hdl, &per_CE_context, (void **)&netbuf,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302621 &CE_data) == QDF_STATUS_SUCCESS) {
Govind Singhcaa850e2017-04-20 16:41:36 +05302622 if (netbuf) {
2623 qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
2624 QDF_DMA_FROM_DEVICE);
2625 qdf_nbuf_free(netbuf);
2626 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002627 }
2628}
2629
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002630static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002631{
2632 struct CE_handle *ce_hdl;
2633 struct HIF_CE_state *hif_state;
Komal Seelam644263d2016-02-22 20:45:49 +05302634 struct hif_softc *scn;
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302635 qdf_nbuf_t netbuf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002636 void *per_CE_context;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302637 qdf_dma_addr_t CE_data;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002638 unsigned int nbytes;
2639 unsigned int id;
2640 uint32_t buf_sz;
2641 uint32_t toeplitz_hash_result;
2642
2643 buf_sz = pipe_info->buf_sz;
2644 if (buf_sz == 0) {
2645 /* Unused Copy Engine */
2646 return;
2647 }
2648
2649 hif_state = pipe_info->HIF_CE_state;
2650 if (!hif_state->started) {
2651 return;
2652 }
2653
Komal Seelam02cf2f82016-02-22 20:44:25 +05302654 scn = HIF_GET_SOFTC(hif_state);
2655
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002656 ce_hdl = pipe_info->ce_hdl;
2657
2658 while (ce_cancel_send_next
2659 (ce_hdl, &per_CE_context,
2660 (void **)&netbuf, &CE_data, &nbytes,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302661 &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002662 if (netbuf != CE_SENDLIST_ITEM_CTXT) {
2663 /*
2664 * Packets enqueued by htt_h2t_ver_req_msg() and
2665 * htt_h2t_rx_ring_cfg_msg_ll() have already been
2666 * freed in htt_htc_misc_pkt_pool_free() in
 2667 * wlantl_close(); do not free them again here.
Houston Hoffman29573d92015-10-20 17:49:44 -07002668 * Identify them by checking the endpoint
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002669 * on which they were queued.
2670 */
Nirav Shahd7f91592016-04-21 14:18:43 +05302671 if (id == scn->htc_htt_tx_endpoint)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002672 return;
Nirav Shahd7f91592016-04-21 14:18:43 +05302673 /* Indicate the completion to the higher
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002674 * layer so that it can free the buffer
2675 */
2676 if (pipe_info->pipe_callbacks.txCompletionHandler)
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05302677 pipe_info->pipe_callbacks.
2678 txCompletionHandler(pipe_info->
2679 pipe_callbacks.Context,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002680 netbuf, id, toeplitz_hash_result);
2681 }
2682 }
2683}
2684
2685/*
2686 * Cleanup residual buffers for device shutdown:
2687 * buffers that were enqueued for receive
2688 * buffers that were to be sent
2689 * Note: Buffers that had completed but which were
2690 * not yet processed are on a completion queue. They
2691 * are handled when the completion thread shuts down.
2692 */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002693static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002694{
2695 int pipe_num;
Komal Seelam644263d2016-02-22 20:45:49 +05302696 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffman85925072016-05-06 17:02:18 -07002697 struct CE_state *ce_state;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002698
Komal Seelam02cf2f82016-02-22 20:44:25 +05302699 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002700 struct HIF_CE_pipe_info *pipe_info;
2701
Houston Hoffman85925072016-05-06 17:02:18 -07002702 ce_state = scn->ce_id_to_state[pipe_num];
2703 if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2704 ((ce_state->htt_tx_data) ||
2705 (ce_state->htt_rx_data))) {
2706 continue;
2707 }
2708
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002709 pipe_info = &hif_state->pipe_info[pipe_num];
2710 hif_recv_buffer_cleanup_on_pipe(pipe_info);
2711 hif_send_buffer_cleanup_on_pipe(pipe_info);
2712 }
2713}
2714
Komal Seelam5584a7c2016-02-24 19:22:48 +05302715void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002716{
Komal Seelam644263d2016-02-22 20:45:49 +05302717 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05302718 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Komal Seelam644263d2016-02-22 20:45:49 +05302719
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002720 hif_buffer_cleanup(hif_state);
2721}
2722
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08002723static void hif_destroy_oom_work(struct hif_softc *scn)
2724{
2725 struct CE_state *ce_state;
2726 int ce_id;
2727
2728 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2729 ce_state = scn->ce_id_to_state[ce_id];
2730 if (ce_state)
2731 qdf_destroy_work(scn->qdf_dev,
2732 &ce_state->oom_allocation_work);
2733 }
2734}
2735
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05302736void hif_ce_stop(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002737{
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05302738 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002739 int pipe_num;
2740
Houston Hoffmana69581e2016-11-14 18:03:19 -08002741 /*
2742 * before cleaning up any memory, ensure irq &
2743 * bottom half contexts will not be re-entered
2744 */
Houston Hoffman7622cd32017-04-06 14:17:49 -07002745 hif_disable_isr(&scn->osc);
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08002746 hif_destroy_oom_work(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002747 scn->hif_init_done = false;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002748
2749 /*
2750 * At this point, asynchronous threads are stopped,
2751 * The Target should not DMA nor interrupt, Host code may
2752 * not initiate anything more. So we just need to clean
2753 * up Host-side state.
2754 */
2755
2756 if (scn->athdiag_procfs_inited) {
2757 athdiag_procfs_remove();
2758 scn->athdiag_procfs_inited = false;
2759 }
2760
2761 hif_buffer_cleanup(hif_state);
2762
2763 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2764 struct HIF_CE_pipe_info *pipe_info;
Poddar, Siddarth725e9f52017-07-19 15:18:28 +05302765 struct CE_attr attr;
2766 struct CE_handle *ce_diag = hif_state->ce_diag;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002767
2768 pipe_info = &hif_state->pipe_info[pipe_num];
2769 if (pipe_info->ce_hdl) {
Poddar, Siddarth725e9f52017-07-19 15:18:28 +05302770 if (pipe_info->ce_hdl != ce_diag) {
2771 attr = hif_state->host_ce_config[pipe_num];
2772 if (attr.src_nentries)
2773 qdf_spinlock_destroy(&pipe_info->
2774 completion_freeq_lock);
2775 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002776 ce_fini(pipe_info->ce_hdl);
2777 pipe_info->ce_hdl = NULL;
2778 pipe_info->buf_sz = 0;
Poddar, Siddarth725e9f52017-07-19 15:18:28 +05302779 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002780 }
2781 }
2782
2783 if (hif_state->sleep_timer_init) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302784 qdf_timer_stop(&hif_state->sleep_timer);
2785 qdf_timer_free(&hif_state->sleep_timer);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002786 hif_state->sleep_timer_init = false;
2787 }
2788
2789 hif_state->started = false;
2790}
2791
Nirav Shah4c8b78a2018-06-12 11:49:35 +05302792static void hif_get_shadow_reg_cfg(struct hif_softc *scn,
2793 struct shadow_reg_cfg
2794 **target_shadow_reg_cfg_ret,
2795 uint32_t *shadow_cfg_sz_ret)
2796{
Nirav Shah3e6e04b2018-07-20 12:00:34 +05302797 if (target_shadow_reg_cfg_ret)
2798 *target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
2799 if (shadow_cfg_sz_ret)
2800 *shadow_cfg_sz_ret = shadow_cfg_sz;
Nirav Shah4c8b78a2018-06-12 11:49:35 +05302801}
Houston Hoffman748e1a62017-03-30 17:20:42 -07002802
Houston Hoffman854e67f2016-03-14 21:11:39 -07002803/**
 2804 * hif_get_target_ce_config() - get copy engine configuration
 * @scn: hif context
2805 * @target_ce_config_ret: basic copy engine configuration
2806 * @target_ce_config_sz_ret: size of the basic configuration in bytes
2807 * @target_service_to_ce_map_ret: service mapping for the copy engines
2808 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
2809 * @target_shadow_reg_cfg_ret: shadow register configuration
2810 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
2811 *
2812 * providing accessor to these values outside of this file.
2813 * currently these are stored in static pointers to const sections.
2814 * there are multiple configurations that are selected from at compile time.
2815 * Runtime selection would need to consider mode, target type and bus type.
2816 *
2817 * Return: return by parameter.
2818 */
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302819void hif_get_target_ce_config(struct hif_softc *scn,
2820 struct CE_pipe_config **target_ce_config_ret,
Houston Hoffman748e1a62017-03-30 17:20:42 -07002821 uint32_t *target_ce_config_sz_ret,
Houston Hoffman854e67f2016-03-14 21:11:39 -07002822 struct service_to_pipe **target_service_to_ce_map_ret,
Houston Hoffman748e1a62017-03-30 17:20:42 -07002823 uint32_t *target_service_to_ce_map_sz_ret,
Houston Hoffman854e67f2016-03-14 21:11:39 -07002824 struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
Houston Hoffman748e1a62017-03-30 17:20:42 -07002825 uint32_t *shadow_cfg_sz_ret)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002826{
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302827 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2828
2829 *target_ce_config_ret = hif_state->target_ce_config;
2830 *target_ce_config_sz_ret = hif_state->target_ce_config_sz;
Houston Hoffman748e1a62017-03-30 17:20:42 -07002831
2832 hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
2833 target_service_to_ce_map_sz_ret);
Nirav Shah4c8b78a2018-06-12 11:49:35 +05302834 hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret,
2835 shadow_cfg_sz_ret);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002836}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002837
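/*
 * Illustrative sketch (assumption, not driver code): a caller
 * retrieving the selected tables through the out-parameters of
 * hif_get_target_ce_config(). Note the returned sizes are in bytes,
 * which is why hif_wlan_enable() below divides by the entry size.
 *
 *	struct CE_pipe_config *ce_cfg;
 *	struct service_to_pipe *svc_map;
 *	struct shadow_reg_cfg *shadow_cfg;
 *	uint32_t ce_cfg_sz, svc_map_sz, shadow_sz;
 *
 *	hif_get_target_ce_config(scn, &ce_cfg, &ce_cfg_sz,
 *				 &svc_map, &svc_map_sz,
 *				 &shadow_cfg, &shadow_sz);
 */
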
Houston Hoffmanf60a3482017-01-31 10:45:07 -08002838#ifdef CONFIG_SHADOW_V2
Houston Hoffman403c2df2017-01-27 12:51:15 -08002839static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
Houston Hoffman5141f9d2017-01-05 10:49:17 -08002840{
2841 int i;
2842 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathish648ce112018-07-02 16:41:39 +05302843 "%s: num_config %d", __func__, cfg->num_shadow_reg_v2_cfg);
Houston Hoffman5141f9d2017-01-05 10:49:17 -08002844
2845 for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
2846 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
Aditya Sathish648ce112018-07-02 16:41:39 +05302847 "%s: i %d, val %x", __func__, i,
Houston Hoffman5141f9d2017-01-05 10:49:17 -08002848 cfg->shadow_reg_v2_cfg[i].addr);
2849 }
2850}
2851
Houston Hoffmanf60a3482017-01-31 10:45:07 -08002852#else
2853static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2854{
2855 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathish648ce112018-07-02 16:41:39 +05302856 "%s: CONFIG_SHADOW_V2 not defined", __func__);
Houston Hoffmanf60a3482017-01-31 10:45:07 -08002857}
2858#endif
2859
Nirav Shahbc8daa42018-07-09 16:27:42 +05302860#ifdef ADRASTEA_RRI_ON_DDR
2861/**
2862 * hif_get_src_ring_read_index(): Called to get the SRRI
2863 *
2864 * @scn: hif_softc pointer
2865 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2866 *
2867 * This function returns the SRRI to the caller. For CEs that
 2868 * don't have interrupts enabled, we look at the DDR-based SRRI
2869 *
2870 * Return: SRRI
2871 */
2872inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
2873 uint32_t CE_ctrl_addr)
2874{
2875 struct CE_attr attr;
2876 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2877
2878 attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2879 if (attr.flags & CE_ATTR_DISABLE_INTR) {
2880 return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2881 } else {
2882 if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
2883 return A_TARGET_READ(scn,
2884 (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
2885 else
2886 return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
2887 CE_ctrl_addr);
2888 }
2889}
2890
2891/**
2892 * hif_get_dst_ring_read_index(): Called to get the DRRI
2893 *
2894 * @scn: hif_softc pointer
2895 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2896 *
2897 * This function returns the DRRI to the caller. For CEs that
 2898 * don't have interrupts enabled, we look at the DDR-based DRRI
2899 *
2900 * Return: DRRI
2901 */
2902inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
2903 uint32_t CE_ctrl_addr)
2904{
2905 struct CE_attr attr;
2906 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2907
2908 attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2909
2910 if (attr.flags & CE_ATTR_DISABLE_INTR) {
2911 return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2912 } else {
2913 if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
2914 return A_TARGET_READ(scn,
2915 (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
2916 else
2917 return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
2918 CE_ctrl_addr);
2919 }
2920}
2921
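/*
 * Illustrative sketch (assumption, not driver code): with the
 * accessors above, ring occupancy can be estimated without an
 * expensive register read when the CE has interrupts disabled,
 * e.g. using the CE_RING_DELTA() ring-arithmetic helper assumed
 * to come from ce_internal.h:
 *
 *	sw_index = hif_get_src_ring_read_index(scn, ctrl_addr);
 *	delta = CE_RING_DELTA(nentries_mask, sw_index, write_index);
 */
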
2922/**
2923 * hif_alloc_rri_on_ddr() - Allocate memory for rri on ddr
2924 * @scn: hif_softc pointer
2925 *
2926 * Return: qdf status
2927 */
2928static inline QDF_STATUS hif_alloc_rri_on_ddr(struct hif_softc *scn)
2929{
2930 qdf_dma_addr_t paddr_rri_on_ddr = 0;
2931
2932 scn->vaddr_rri_on_ddr =
2933 (uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
2934 scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
2935 &paddr_rri_on_ddr);
2936
2937 if (!scn->vaddr_rri_on_ddr) {
2938 hif_err("dmaable page alloc fail");
2939 return QDF_STATUS_E_NOMEM;
2940 }
2941
2942 scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
2943
2944 qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));
2945
2946 return QDF_STATUS_SUCCESS;
2947}
2948#endif
2949
2950#if (!defined(QCN7605_SUPPORT)) && defined(ADRASTEA_RRI_ON_DDR)
2951/**
2952 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2953 *
2954 * @scn: hif_softc pointer
2955 *
 2956 * This function allocates non-cached memory on DDR and sends
2957 * the physical address of this memory to the CE hardware. The
2958 * hardware updates the RRI on this particular location.
2959 *
2960 * Return: None
2961 */
2962static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
2963{
2964 unsigned int i;
2965 uint32_t high_paddr, low_paddr;
2966
2967 if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
2968 return;
2969
2970 low_paddr = BITS0_TO_31(scn->paddr_rri_on_ddr);
2971 high_paddr = BITS32_TO_35(scn->paddr_rri_on_ddr);
2972
2973 HIF_DBG("%s using srri and drri from DDR", __func__);
2974
2975 WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
2976 WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
2977
2978 for (i = 0; i < CE_COUNT; i++)
2979 CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
2980}
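
/*
 * Worked example of the split above (illustrative value): for a
 * 36-bit paddr_rri_on_ddr of 0x812345678, BITS0_TO_31() yields
 * low_paddr = 0x12345678 and BITS32_TO_35() yields high_paddr = 0x8,
 * matching the LOW/HIGH register writes.
 */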
2981#else
2982/**
2983 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2984 *
2985 * @scn: hif_softc pointer
2986 *
2987 * This is a dummy implementation for platforms that don't
2988 * support this functionality.
2989 *
2990 * Return: None
2991 */
2992static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
2993{
2994}
2995#endif
2996
2997/**
2998 * hif_update_rri_over_ddr_config() - update rri_over_ddr config for
2999 * QMI command
3000 * @scn: hif context
3001 * @cfg: wlan enable config
3002 *
3003 * In case of Genoa, rri_over_ddr memory configuration is passed
3004 * to firmware through QMI configure command.
3005 */
3006#if defined(QCN7605_SUPPORT) && defined(ADRASTEA_RRI_ON_DDR)
3007static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
3008 struct pld_wlan_enable_cfg *cfg)
3009{
3010 if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
3011 return;
3012
3013 cfg->rri_over_ddr_cfg_valid = true;
3014 cfg->rri_over_ddr_cfg.base_addr_low =
3015 BITS0_TO_31(scn->paddr_rri_on_ddr);
3016 cfg->rri_over_ddr_cfg.base_addr_high =
3017 BITS32_TO_35(scn->paddr_rri_on_ddr);
3018}
3019#else
3020static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
3021 struct pld_wlan_enable_cfg *cfg)
3022{
3023}
3024#endif
3025
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003026/**
3027 * hif_wlan_enable(): call the platform driver to enable wlan
Komal Seelambd7c51d2016-02-24 10:27:30 +05303028 * @scn: HIF Context
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003029 *
3030 * This function passes the con_mode and CE configuration to
 3031 * the platform driver to enable wlan.
3032 *
Houston Hoffman108da402016-03-14 21:11:24 -07003033 * Return: Linux error code
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003034 */
Houston Hoffman108da402016-03-14 21:11:24 -07003035int hif_wlan_enable(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003036{
Yuanyuan Liufd594c22016-04-25 13:59:19 -07003037 struct pld_wlan_enable_cfg cfg;
3038 enum pld_driver_mode mode;
Komal Seelambd7c51d2016-02-24 10:27:30 +05303039 uint32_t con_mode = hif_get_conparam(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003040
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05303041 hif_get_target_ce_config(scn,
3042 (struct CE_pipe_config **)&cfg.ce_tgt_cfg,
Houston Hoffman854e67f2016-03-14 21:11:39 -07003043 &cfg.num_ce_tgt_cfg,
3044 (struct service_to_pipe **)&cfg.ce_svc_cfg,
3045 &cfg.num_ce_svc_pipe_cfg,
3046 (struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
3047 &cfg.num_shadow_reg_cfg);
3048
3049 /* translate from structure size to array size */
3050 cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
3051 cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
3052 cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003053
Houston Hoffman5141f9d2017-01-05 10:49:17 -08003054 hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
3055 &cfg.num_shadow_reg_v2_cfg);
3056
3057 hif_print_hal_shadow_register_cfg(&cfg);
3058
Nirav Shahbc8daa42018-07-09 16:27:42 +05303059 hif_update_rri_over_ddr_config(scn, &cfg);
3060
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303061 if (QDF_GLOBAL_FTM_MODE == con_mode)
Yuanyuan Liufd594c22016-04-25 13:59:19 -07003062 mode = PLD_FTM;
Balamurugan Mahalingam1666dd32017-09-14 15:19:42 +05303063 else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
3064 mode = PLD_COLDBOOT_CALIBRATION;
Vignesh Viswanathan7c974c22019-07-24 15:24:03 +05303065 else if (QDF_GLOBAL_FTM_COLDBOOT_CALIB_MODE == con_mode)
3066 mode = PLD_FTM_COLDBOOT_CALIBRATION;
Houston Hoffman75ef5a52016-04-14 17:15:49 -07003067 else if (QDF_IS_EPPING_ENABLED(con_mode))
Yuanyuan Liufd594c22016-04-25 13:59:19 -07003068 mode = PLD_EPPING;
Peng Xu7b962532015-10-02 17:17:03 -07003069 else
Yuanyuan Liufd594c22016-04-25 13:59:19 -07003070 mode = PLD_MISSION;
Peng Xu7b962532015-10-02 17:17:03 -07003071
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003072 if (BYPASS_QMI)
3073 return 0;
3074 else
Vevek Venkatesan0ac9aaf2019-06-28 17:17:22 +05303075 return pld_wlan_enable(scn->qdf_dev->dev, &cfg, mode);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003076}
3077
Nirav Shah0d0cce82018-01-17 17:00:31 +05303078#ifdef WLAN_FEATURE_EPPING
3079
Houston Hoffman75ef5a52016-04-14 17:15:49 -07003080#define CE_EPPING_USES_IRQ true
3081
Nirav Shah0d0cce82018-01-17 17:00:31 +05303082void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state)
3083{
3084 if (CE_EPPING_USES_IRQ)
3085 hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
3086 else
3087 hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
3088 hif_state->target_ce_config = target_ce_config_wlan_epping;
3089 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
3090 target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
3091 shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
3092}
3093#endif
3094
Nirav Shah4c8b78a2018-06-12 11:49:35 +05303095#ifdef QCN7605_SUPPORT
3096static inline
3097void hif_set_ce_config_qcn7605(struct hif_softc *scn,
3098 struct HIF_CE_state *hif_state)
3099{
3100 hif_state->host_ce_config = host_ce_config_wlan_qcn7605;
3101 hif_state->target_ce_config = target_ce_config_wlan_qcn7605;
3102 hif_state->target_ce_config_sz =
3103 sizeof(target_ce_config_wlan_qcn7605);
Nirav Shah3e6e04b2018-07-20 12:00:34 +05303104 target_shadow_reg_cfg = target_shadow_reg_cfg_map_qcn7605;
3105 shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map_qcn7605);
Nirav Shah4c8b78a2018-06-12 11:49:35 +05303106 scn->ce_count = QCN7605_CE_COUNT;
3107}
3108#else
3109static inline
3110void hif_set_ce_config_qcn7605(struct hif_softc *scn,
3111 struct HIF_CE_state *hif_state)
3112{
3113 HIF_ERROR("QCN7605 not supported");
3114}
3115#endif
3116
Sathish Kumar86876492018-08-27 13:39:20 +05303117#ifdef CE_SVC_CMN_INIT
3118#ifdef QCA_WIFI_SUPPORT_SRNG
3119static inline void hif_ce_service_init(void)
3120{
3121 ce_service_srng_init();
3122}
3123#else
3124static inline void hif_ce_service_init(void)
3125{
3126 ce_service_legacy_init();
3127}
3128#endif
3129#else
3130static inline void hif_ce_service_init(void)
3131{
3132}
3133#endif
3134
3135
Houston Hoffman108da402016-03-14 21:11:24 -07003136/**
3137 * hif_ce_prepare_config() - load the correct static tables.
3138 * @scn: hif context
3139 *
3140 * Epping uses different static attribute tables than mission mode.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003141 */
Houston Hoffman108da402016-03-14 21:11:24 -07003142void hif_ce_prepare_config(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003143{
Komal Seelambd7c51d2016-02-24 10:27:30 +05303144 uint32_t mode = hif_get_conparam(scn);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003145 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3146 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05303147 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003148
Sathish Kumar86876492018-08-27 13:39:20 +05303149 hif_ce_service_init();
Houston Hoffman10fedfc2017-01-23 15:23:09 -08003150 hif_state->ce_services = ce_services_attach(scn);
3151
Houston Hoffman710af5a2016-11-22 21:59:03 -08003152 scn->ce_count = HOST_CE_COUNT;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003153 /* if epping is enabled we need to use the epping configuration. */
Houston Hoffman75ef5a52016-04-14 17:15:49 -07003154 if (QDF_IS_EPPING_ENABLED(mode)) {
Nirav Shah0d0cce82018-01-17 17:00:31 +05303155 hif_ce_prepare_epping_config(hif_state);
Nirav Shah3e6e04b2018-07-20 12:00:34 +05303156 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003157 }
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003158
3159 switch (tgt_info->target_type) {
3160 default:
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05303161 hif_state->host_ce_config = host_ce_config_wlan;
3162 hif_state->target_ce_config = target_ce_config_wlan;
3163 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003164 break;
Nirav Shah4c8b78a2018-06-12 11:49:35 +05303165 case TARGET_TYPE_QCN7605:
3166 hif_set_ce_config_qcn7605(scn, hif_state);
3167 break;
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003168 case TARGET_TYPE_AR900B:
3169 case TARGET_TYPE_QCA9984:
3170 case TARGET_TYPE_IPQ4019:
3171 case TARGET_TYPE_QCA9888:
Venkateswara Swamy Bandaru5432c1b2016-10-12 19:00:40 +05303172 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
3173 hif_state->host_ce_config =
3174 host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
3175 } else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
3176 hif_state->host_ce_config =
3177 host_lowdesc_ce_cfg_wlan_ar900b;
3178 } else {
3179 hif_state->host_ce_config = host_ce_config_wlan_ar900b;
3180 }
3181
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05303182 hif_state->target_ce_config = target_ce_config_wlan_ar900b;
3183 hif_state->target_ce_config_sz =
3184 sizeof(target_ce_config_wlan_ar900b);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003185
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003186 break;
3187
3188 case TARGET_TYPE_AR9888:
3189 case TARGET_TYPE_AR9888V2:
Venkateswara Swamy Bandaru5432c1b2016-10-12 19:00:40 +05303190 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG))
 3191 hif_state->host_ce_config =
 3192 host_lowdesc_ce_cfg_wlan_ar9888;
 3193 else
 3194 hif_state->host_ce_config = host_ce_config_wlan_ar9888;
3195
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05303196 hif_state->target_ce_config = target_ce_config_wlan_ar9888;
3197 hif_state->target_ce_config_sz =
3198 sizeof(target_ce_config_wlan_ar9888);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003199
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003200 break;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07003201
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05303202 case TARGET_TYPE_QCA8074:
Venkateswara Swamy Bandarudbacd5e2018-08-07 13:01:50 +05303203 case TARGET_TYPE_QCA8074V2:
Basamma Yakkanahalli5f7cfd42018-11-02 15:52:37 +05303204 case TARGET_TYPE_QCA6018:
Karunakar Dasinenif61cb072016-09-29 11:50:45 -07003205 if (scn->bus_type == QDF_BUS_TYPE_PCI) {
3206 hif_state->host_ce_config =
3207 host_ce_config_wlan_qca8074_pci;
3208 hif_state->target_ce_config =
3209 target_ce_config_wlan_qca8074_pci;
3210 hif_state->target_ce_config_sz =
3211 sizeof(target_ce_config_wlan_qca8074_pci);
3212 } else {
3213 hif_state->host_ce_config = host_ce_config_wlan_qca8074;
3214 hif_state->target_ce_config =
3215 target_ce_config_wlan_qca8074;
3216 hif_state->target_ce_config_sz =
3217 sizeof(target_ce_config_wlan_qca8074);
3218 }
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05303219 break;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07003220 case TARGET_TYPE_QCA6290:
3221 hif_state->host_ce_config = host_ce_config_wlan_qca6290;
3222 hif_state->target_ce_config = target_ce_config_wlan_qca6290;
3223 hif_state->target_ce_config_sz =
3224 sizeof(target_ce_config_wlan_qca6290);
Houston Hoffman748e1a62017-03-30 17:20:42 -07003225
Houston Hoffman710af5a2016-11-22 21:59:03 -08003226 scn->ce_count = QCA_6290_CE_COUNT;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07003227 break;
Nandha Kishore Easwaran5d3475b2019-06-27 11:38:53 +05303228 case TARGET_TYPE_QCN9000:
3229 hif_state->host_ce_config = host_ce_config_wlan_qcn9000;
3230 hif_state->target_ce_config = target_ce_config_wlan_qcn9000;
3231 hif_state->target_ce_config_sz =
3232 sizeof(target_ce_config_wlan_qcn9000);
3233 scn->ce_count = QCN_9000_CE_COUNT;
Nandha Kishore Easwaran54532862019-12-27 11:26:03 +05303234 scn->disable_wake_irq = 1;
Nandha Kishore Easwaran5d3475b2019-06-27 11:38:53 +05303235 break;
Venkata Sharath Chandra Manchala79860aa2018-06-12 15:16:36 -07003236 case TARGET_TYPE_QCA6390:
3237 hif_state->host_ce_config = host_ce_config_wlan_qca6390;
3238 hif_state->target_ce_config = target_ce_config_wlan_qca6390;
3239 hif_state->target_ce_config_sz =
3240 sizeof(target_ce_config_wlan_qca6390);
3241
3242 scn->ce_count = QCA_6390_CE_COUNT;
3243 break;
Mohit Khanna973308a2019-05-13 18:31:33 -07003244 case TARGET_TYPE_QCA6490:
3245 hif_state->host_ce_config = host_ce_config_wlan_qca6490;
3246 hif_state->target_ce_config = target_ce_config_wlan_qca6490;
3247 hif_state->target_ce_config_sz =
3248 sizeof(target_ce_config_wlan_qca6490);
3249
3250 scn->ce_count = QCA_6490_CE_COUNT;
3251 break;
Alok Kumarffc116e2020-01-06 18:12:35 +05303252 case TARGET_TYPE_QCA6750:
3253 hif_state->host_ce_config = host_ce_config_wlan_qca6750;
3254 hif_state->target_ce_config = target_ce_config_wlan_qca6750;
3255 hif_state->target_ce_config_sz =
3256 sizeof(target_ce_config_wlan_qca6750);
3257
3258 scn->ce_count = QCA_6750_CE_COUNT;
3259 break;
hangtianc572f5f2019-04-10 11:19:59 +08003260 case TARGET_TYPE_ADRASTEA:
Surabhi Vishnoib30b9172019-07-05 12:24:13 +05303261 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
hangtianc572f5f2019-04-10 11:19:59 +08003262 hif_state->host_ce_config =
3263 host_lowdesc_ce_config_wlan_adrastea_nopktlog;
Surabhi Vishnoib30b9172019-07-05 12:24:13 +05303264 hif_state->target_ce_config =
3265 target_lowdesc_ce_config_wlan_adrastea_nopktlog;
3266 hif_state->target_ce_config_sz =
3267 sizeof(target_lowdesc_ce_config_wlan_adrastea_nopktlog);
3268 } else {
hangtianc572f5f2019-04-10 11:19:59 +08003269 hif_state->host_ce_config =
3270 host_ce_config_wlan_adrastea;
Surabhi Vishnoib30b9172019-07-05 12:24:13 +05303271 hif_state->target_ce_config =
3272 target_ce_config_wlan_adrastea;
3273 hif_state->target_ce_config_sz =
hangtianc572f5f2019-04-10 11:19:59 +08003274 sizeof(target_ce_config_wlan_adrastea);
Surabhi Vishnoib30b9172019-07-05 12:24:13 +05303275 }
hangtianc572f5f2019-04-10 11:19:59 +08003276 break;
3277
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003278 }
Yun parkc80eea72017-10-06 15:33:36 -07003279 QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
Houston Hoffman108da402016-03-14 21:11:24 -07003280}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003281
Houston Hoffman108da402016-03-14 21:11:24 -07003282/**
3283 * hif_ce_open() - do ce specific allocations
3284 * @hif_sc: pointer to hif context
3285 *
 3286 * Return: QDF_STATUS_SUCCESS for success or QDF_STATUS_E_NOMEM
3287 */
3288QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
3289{
3290 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003291
Venkateswara Swamy Bandaru9fd9af02016-09-20 20:27:31 +05303292 qdf_spinlock_create(&hif_state->irq_reg_lock);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303293 qdf_spinlock_create(&hif_state->keep_awake_lock);
Houston Hoffman108da402016-03-14 21:11:24 -07003294 return QDF_STATUS_SUCCESS;
3295}
3296
3297/**
3298 * hif_ce_close() - do ce specific free
3299 * @hif_sc: pointer to hif context
3300 */
3301void hif_ce_close(struct hif_softc *hif_sc)
3302{
Venkateswara Swamy Bandaru9fd9af02016-09-20 20:27:31 +05303303 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3304
3305 qdf_spinlock_destroy(&hif_state->irq_reg_lock);
Poddar, Siddarth725e9f52017-07-19 15:18:28 +05303306 qdf_spinlock_destroy(&hif_state->keep_awake_lock);
Houston Hoffman108da402016-03-14 21:11:24 -07003307}
3308
3309/**
3310 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
3311 * @hif_sc: hif context
3312 *
 3313 * Uses state variables to support cleanup when hif_config_ce fails.
3314 */
3315void hif_unconfig_ce(struct hif_softc *hif_sc)
3316{
3317 int pipe_num;
3318 struct HIF_CE_pipe_info *pipe_info;
3319 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
Manjunathappa Prakasha5a30862018-05-21 16:32:32 -07003320 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc);
Houston Hoffman108da402016-03-14 21:11:24 -07003321
3322 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
3323 pipe_info = &hif_state->pipe_info[pipe_num];
3324 if (pipe_info->ce_hdl) {
3325 ce_unregister_irq(hif_state, (1 << pipe_num));
jitiphile393cf42018-07-30 14:14:48 +05303326 }
3327 }
3328 deinit_tasklet_workers(hif_hdl);
3329 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
3330 pipe_info = &hif_state->pipe_info[pipe_num];
3331 if (pipe_info->ce_hdl) {
Houston Hoffman108da402016-03-14 21:11:24 -07003332 ce_fini(pipe_info->ce_hdl);
3333 pipe_info->ce_hdl = NULL;
3334 pipe_info->buf_sz = 0;
Houston Hoffman03f46572016-12-12 12:53:56 -08003335 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
Houston Hoffman108da402016-03-14 21:11:24 -07003336 }
3337 }
Houston Hoffman108da402016-03-14 21:11:24 -07003338 if (hif_sc->athdiag_procfs_inited) {
3339 athdiag_procfs_remove();
3340 hif_sc->athdiag_procfs_inited = false;
3341 }
3342}
3343
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003344#ifdef CONFIG_BYPASS_QMI
Nirav Shah8e930272018-07-10 16:28:21 +05303345#ifdef QCN7605_SUPPORT
3346/**
3347 * hif_post_static_buf_to_target() - post static buffer to WLAN FW
3348 * @scn: pointer to HIF structure
3349 *
3350 * WLAN FW needs 2MB memory from DDR when QMI is disabled.
3351 *
3352 * Return: void
3353 */
3354static void hif_post_static_buf_to_target(struct hif_softc *scn)
3355{
3356 void *target_va;
3357 phys_addr_t target_pa;
3358 struct ce_info *ce_info_ptr;
3359 uint32_t msi_data_start;
3360 uint32_t msi_data_count;
3361 uint32_t msi_irq_start;
3362 uint32_t i = 0;
3363 int ret;
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003364
Nirav Shah8e930272018-07-10 16:28:21 +05303365 target_va = qdf_mem_alloc_consistent(scn->qdf_dev,
3366 scn->qdf_dev->dev,
3367 FW_SHARED_MEM +
3368 sizeof(struct ce_info),
3369 &target_pa);
3370 if (!target_va)
3371 return;
3372
3373 ce_info_ptr = (struct ce_info *)target_va;
3374
3375 if (scn->vaddr_rri_on_ddr) {
3376 ce_info_ptr->rri_over_ddr_low_paddr =
3377 BITS0_TO_31(scn->paddr_rri_on_ddr);
3378 ce_info_ptr->rri_over_ddr_high_paddr =
3379 BITS32_TO_35(scn->paddr_rri_on_ddr);
3380 }
3381
3382 ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3383 &msi_data_count, &msi_data_start,
3384 &msi_irq_start);
3385 if (ret) {
3386 hif_err("Failed to get CE msi config");
3387 return;
3388 }
3389
3390 for (i = 0; i < CE_COUNT_MAX; i++) {
3391 ce_info_ptr->cfg[i].ce_id = i;
3392 ce_info_ptr->cfg[i].msi_vector =
3393 (i % msi_data_count) + msi_irq_start;
3394 }
3395
3396 hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
3397 hif_info("target va %pK target pa %pa", target_va, &target_pa);
3398}
3399#else
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003400/**
3401 * hif_post_static_buf_to_target() - post static buffer to WLAN FW
3402 * @scn: pointer to HIF structure
3403 *
3404 * WLAN FW needs 2MB memory from DDR when QMI is disabled.
3405 *
3406 * Return: void
3407 */
3408static void hif_post_static_buf_to_target(struct hif_softc *scn)
3409{
Hardik Kantilal Patelc5dc5f22016-04-21 14:11:33 -07003410 void *target_va;
3411 phys_addr_t target_pa;
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003412
Hardik Kantilal Patelc5dc5f22016-04-21 14:11:33 -07003413 target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
3414 FW_SHARED_MEM, &target_pa);
Jeff Johnson8d639a02019-03-18 09:51:11 -07003415 if (!target_va) {
Hardik Kantilal Patelc5dc5f22016-04-21 14:11:33 -07003416 HIF_TRACE("Memory allocation failed, could not post target buf");
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003417 return;
3418 }
Nirav Shahf1e3fb52018-06-12 14:39:34 +05303419 hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
Hardik Kantilal Patelc5dc5f22016-04-21 14:11:33 -07003420 HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003421}
Nirav Shah8e930272018-07-10 16:28:21 +05303422#endif
3423
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003424#else
3425static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
3426{
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003427}
3428#endif
3429
Houston Hoffman579c02f2017-08-02 01:57:38 -07003430static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
3431 bool wait_for_it)
3432{
3433 /* todo */
3434 return 0;
3435}
3436
Houston Hoffman108da402016-03-14 21:11:24 -07003437/**
3438 * hif_config_ce() - configure copy engines
3439 * @scn: hif context
3440 *
3441 * Prepares fw, copy engine hardware and host sw according
3442 * to the attributes selected by hif_ce_prepare_config.
3443 *
3444 * also calls athdiag_procfs_init
3445 *
 3446 * Return: 0 for success, nonzero for failure.
3447 */
3448int hif_config_ce(struct hif_softc *scn)
3449{
3450 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3451 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3452 struct HIF_CE_pipe_info *pipe_info;
3453 int pipe_num;
Aditya Sathish61f7fa32018-03-27 17:16:33 +05303454 struct CE_state *ce_state = NULL;
c_cgodavfda96ad2017-09-07 16:16:00 +05303455
Houston Hoffman108da402016-03-14 21:11:24 -07003456#ifdef ADRASTEA_SHADOW_REGISTERS
3457 int i;
3458#endif
3459 QDF_STATUS rv = QDF_STATUS_SUCCESS;
3460
3461 scn->notice_send = true;
Poddar, Siddarth1ea82922017-06-28 14:39:33 +05303462 scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003463
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003464 hif_post_static_buf_to_target(scn);
3465
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003466 hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
Houston Hoffman108da402016-03-14 21:11:24 -07003467
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003468 hif_config_rri_on_ddr(scn);
3469
Houston Hoffman579c02f2017-08-02 01:57:38 -07003470 if (ce_srng_based(scn))
3471 scn->bus_ops.hif_target_sleep_state_adjust =
3472 &hif_srng_sleep_state_adjust;
3473
c_cgodavfda96ad2017-09-07 16:16:00 +05303474 /* Initialise the CE debug history sysfs interface inputs ce_id and
 3475 * index. Disable data storing.
3476 */
3477 reset_ce_debug_history(scn);
3478
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003479 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3480 struct CE_attr *attr;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07003481
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003482 pipe_info = &hif_state->pipe_info[pipe_num];
3483 pipe_info->pipe_num = pipe_num;
3484 pipe_info->HIF_CE_state = hif_state;
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05303485 attr = &hif_state->host_ce_config[pipe_num];
Karunakar Dasinenif61cb072016-09-29 11:50:45 -07003486
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003487 pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
Houston Hoffman85925072016-05-06 17:02:18 -07003488 ce_state = scn->ce_id_to_state[pipe_num];
Aditya Sathish61f7fa32018-03-27 17:16:33 +05303489 if (!ce_state) {
3490 A_TARGET_ACCESS_UNLIKELY(scn);
3491 goto err;
3492 }
Houston Hoffman03f46572016-12-12 12:53:56 -08003493 qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
Jeff Johnson8d639a02019-03-18 09:51:11 -07003494 QDF_ASSERT(pipe_info->ce_hdl);
3495 if (!pipe_info->ce_hdl) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303496 rv = QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003497 A_TARGET_ACCESS_UNLIKELY(scn);
3498 goto err;
3499 }
3500
Dhanashri Atre991ee4d2017-05-03 19:03:10 -07003501 ce_state->lro_data = qdf_lro_init();
3502
Kiran Venkatappae17e3b62017-02-10 16:31:49 +05303503 if (attr->flags & CE_ATTR_DIAG) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003504 /* Reserve the ultimate CE for
Manikandan Mohanafd6e882017-04-07 17:46:41 -07003505 * Diagnostic Window support
3506 */
Houston Hoffmanc1d9a412016-03-30 21:07:57 -07003507 hif_state->ce_diag = pipe_info->ce_hdl;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003508 continue;
3509 }
3510
Houston Hoffman85925072016-05-06 17:02:18 -07003511 if (hif_is_nss_wifi_enabled(scn) && ce_state &&
3512 (ce_state->htt_rx_data))
3513 continue;
3514
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303515 pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003516 if (attr->dest_nentries > 0) {
3517 atomic_set(&pipe_info->recv_bufs_needed,
3518 init_buffer_count(attr->dest_nentries - 1));
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05303519 /*SRNG based CE has one entry less */
3520 if (ce_srng_based(scn))
3521 atomic_dec(&pipe_info->recv_bufs_needed);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003522 } else {
3523 atomic_set(&pipe_info->recv_bufs_needed, 0);
3524 }
3525 ce_tasklet_init(hif_state, (1 << pipe_num));
3526 ce_register_irq(hif_state, (1 << pipe_num));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003527 }
3528
3529 if (athdiag_procfs_init(scn) != 0) {
3530 A_TARGET_ACCESS_UNLIKELY(scn);
3531 goto err;
3532 }
3533 scn->athdiag_procfs_inited = true;
3534
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -08003535 HIF_DBG("%s: ce_init done", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003536
Houston Hoffman108da402016-03-14 21:11:24 -07003537 init_tasklet_workers(hif_hdl);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003538
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -08003539 HIF_DBG("%s: X, ret = %d", __func__, rv);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003540
3541#ifdef ADRASTEA_SHADOW_REGISTERS
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -08003542 HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003543 for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -08003544 HIF_DBG("%s Shadow Register%d is mapped to address %x",
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003545 __func__, i,
3546 (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
3547 }
3548#endif
3549
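	/* rv is QDF_STATUS_SUCCESS on this path, so this converts to
	 * the 0-for-success convention documented above
	 */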
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303550 return rv != QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003551
3552err:
3553 /* Failure, so clean up */
Houston Hoffman108da402016-03-14 21:11:24 -07003554 hif_unconfig_ce(scn);
Houston Hoffmanc50572b2016-06-08 19:49:46 -07003555 HIF_TRACE("%s: X, ret = %d", __func__, rv);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303556 return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003557}
3558
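/*
 * Illustrative sketch (assumption, not driver code): the CE setup
 * entry points in this file are used roughly in this order by the
 * bus layer. hif_config_ce() cleans up after itself via
 * hif_unconfig_ce() on its error path, so the caller only needs
 * hif_ce_close():
 *
 *	if (hif_ce_open(hif_sc) != QDF_STATUS_SUCCESS)
 *		return -ENOMEM;
 *	hif_ce_prepare_config(hif_sc);
 *	if (hif_config_ce(hif_sc)) {
 *		hif_ce_close(hif_sc);
 *		return -EIO;
 *	}
 */
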
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003559#ifdef IPA_OFFLOAD
Leo Changd85f78d2015-11-13 10:55:34 -08003560/**
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05303561 * hif_ce_ipa_get_ce_resource() - get uc resource on hif
Leo Changd85f78d2015-11-13 10:55:34 -08003562 * @scn: bus context
 3563 * @ce_sr: copyengine source ring shared memory info
3564 * @ce_sr_ring_size: copyengine source ring size
3565 * @ce_reg_paddr: copyengine register physical address
3566 *
3567 * IPA micro controller data path offload feature enabled,
3568 * HIF should release copy engine related resource information to IPA UC
3569 * IPA UC will access hardware resource with released information
3570 *
3571 * Return: None
3572 */
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05303573void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
Sravan Kumar Kairam58e0adf2018-02-27 18:37:40 +05303574 qdf_shared_mem_t **ce_sr,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003575 uint32_t *ce_sr_ring_size,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303576 qdf_dma_addr_t *ce_reg_paddr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003577{
Komal Seelam02cf2f82016-02-22 20:44:25 +05303578 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003579 struct HIF_CE_pipe_info *pipe_info =
3580 &(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
3581 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3582
Sravan Kumar Kairam58e0adf2018-02-27 18:37:40 +05303583 ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003584 ce_reg_paddr);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003585}
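
/*
 * Illustrative sketch (assumption, not driver code): an IPA uC
 * client would typically fetch the CE resources once at init and
 * hand them to the IPA driver:
 *
 *	qdf_shared_mem_t *ce_sr;
 *	uint32_t ce_sr_ring_size;
 *	qdf_dma_addr_t ce_reg_paddr;
 *
 *	hif_ce_ipa_get_ce_resource(scn, &ce_sr, &ce_sr_ring_size,
 *				   &ce_reg_paddr);
 */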
3586#endif /* IPA_OFFLOAD */
3587
3588
3589#ifdef ADRASTEA_SHADOW_REGISTERS
3590
3591/*
Manikandan Mohanafd6e882017-04-07 17:46:41 -07003592 * Current shadow register config
3593 *
3594 * -----------------------------------------------------------
3595 * Shadow Register | CE | src/dst write index
3596 * -----------------------------------------------------------
3597 * 0 | 0 | src
3598 * 1 No Config - Doesn't point to anything
3599 * 2 No Config - Doesn't point to anything
3600 * 3 | 3 | src
3601 * 4 | 4 | src
3602 * 5 | 5 | src
3603 * 6 No Config - Doesn't point to anything
3604 * 7 | 7 | src
3605 * 8 No Config - Doesn't point to anything
3606 * 9 No Config - Doesn't point to anything
3607 * 10 No Config - Doesn't point to anything
3608 * 11 No Config - Doesn't point to anything
3609 * -----------------------------------------------------------
3610 * 12 No Config - Doesn't point to anything
3611 * 13 | 1 | dst
3612 * 14 | 2 | dst
3613 * 15 No Config - Doesn't point to anything
3614 * 16 No Config - Doesn't point to anything
 3615 * 17 | 5 | dst
3616 * 18 No Config - Doesn't point to anything
3617 * 19 | 7 | dst
3618 * 20 | 8 | dst
 3619 * 21 | 9 | dst
 3620 * 22 | 10 | dst
 3621 * 23 | 11 | dst
3622 * -----------------------------------------------------------
3623 *
3624 *
3625 * ToDo - Move shadow register config to following in the future
3626 * This helps free up a block of shadow registers towards the end.
3627 * Can be used for other purposes
3628 *
3629 * -----------------------------------------------------------
3630 * Shadow Register | CE | src/dst write index
3631 * -----------------------------------------------------------
3632 * 0 | 0 | src
3633 * 1 | 3 | src
3634 * 2 | 4 | src
3635 * 3 | 5 | src
3636 * 4 | 7 | src
3637 * -----------------------------------------------------------
3638 * 5 | 1 | dst
3639 * 6 | 2 | dst
3640 * 7 | 7 | dst
3641 * 8 | 8 | dst
3642 * -----------------------------------------------------------
3643 * 9 No Config - Doesn't point to anything
3644 * 12 No Config - Doesn't point to anything
3645 * 13 No Config - Doesn't point to anything
3646 * 14 No Config - Doesn't point to anything
3647 * 15 No Config - Doesn't point to anything
3648 * 16 No Config - Doesn't point to anything
3649 * 17 No Config - Doesn't point to anything
3650 * 18 No Config - Doesn't point to anything
3651 * 19 No Config - Doesn't point to anything
3652 * 20 No Config - Doesn't point to anything
3653 * 21 No Config - Doesn't point to anything
3654 * 22 No Config - Doesn't point to anything
3655 * 23 No Config - Doesn't point to anything
3656 * -----------------------------------------------------------
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003657 */
Nirav Shah4c8b78a2018-06-12 11:49:35 +05303658#ifndef QCN7605_SUPPORT
Komal Seelam644263d2016-02-22 20:45:49 +05303659u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003660{
3661 u32 addr = 0;
Houston Hoffmane6330442016-02-26 12:19:11 -08003662 u32 ce = COPY_ENGINE_ID(ctrl_addr);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003663
Houston Hoffmane6330442016-02-26 12:19:11 -08003664 switch (ce) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003665 case 0:
3666 addr = SHADOW_VALUE0;
3667 break;
3668 case 3:
3669 addr = SHADOW_VALUE3;
3670 break;
3671 case 4:
3672 addr = SHADOW_VALUE4;
3673 break;
3674 case 5:
3675 addr = SHADOW_VALUE5;
3676 break;
3677 case 7:
3678 addr = SHADOW_VALUE7;
3679 break;
3680 default:
Houston Hoffmane6330442016-02-26 12:19:11 -08003681 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303682 QDF_ASSERT(0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003683 }
3684 return addr;
3685
3686}
3687
Komal Seelam644263d2016-02-22 20:45:49 +05303688u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003689{
3690 u32 addr = 0;
Houston Hoffmane6330442016-02-26 12:19:11 -08003691 u32 ce = COPY_ENGINE_ID(ctrl_addr);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003692
Houston Hoffmane6330442016-02-26 12:19:11 -08003693 switch (ce) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003694 case 1:
3695 addr = SHADOW_VALUE13;
3696 break;
3697 case 2:
3698 addr = SHADOW_VALUE14;
3699 break;
Vishwajith Upendra70efc752016-04-18 11:23:49 -07003700 case 5:
3701 addr = SHADOW_VALUE17;
3702 break;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003703 case 7:
3704 addr = SHADOW_VALUE19;
3705 break;
3706 case 8:
3707 addr = SHADOW_VALUE20;
3708 break;
Houston Hoffmane6330442016-02-26 12:19:11 -08003709 case 9:
3710 addr = SHADOW_VALUE21;
3711 break;
3712 case 10:
3713 addr = SHADOW_VALUE22;
3714 break;
Nirav Shah75cc5c82016-05-25 10:52:38 +05303715 case 11:
3716 addr = SHADOW_VALUE23;
3717 break;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003718 default:
Houston Hoffmane6330442016-02-26 12:19:11 -08003719 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303720 QDF_ASSERT(0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003721 }
3722
3723 return addr;
3724
3725}
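
/*
 * Illustrative sketch (assumption, not driver code): per the shadow
 * register table above, looking up CE 0's source write index shadow
 * register resolves to SHADOW_VALUE0:
 *
 *	u32 addr = shadow_sr_wr_ind_addr(scn, CE_BASE_ADDRESS(0));
 */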
Nirav Shah4c8b78a2018-06-12 11:49:35 +05303726#else
3727u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3728{
3729 u32 addr = 0;
3730 u32 ce = COPY_ENGINE_ID(ctrl_addr);
3731
3732 switch (ce) {
3733 case 0:
3734 addr = SHADOW_VALUE0;
3735 break;
3736 case 4:
3737 addr = SHADOW_VALUE4;
3738 break;
3739 case 5:
3740 addr = SHADOW_VALUE5;
3741 break;
3742 default:
3743 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3744 QDF_ASSERT(0);
3745 }
3746 return addr;
3747}
3748
3749u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3750{
3751 u32 addr = 0;
3752 u32 ce = COPY_ENGINE_ID(ctrl_addr);
3753
3754 switch (ce) {
3755 case 1:
3756 addr = SHADOW_VALUE13;
3757 break;
3758 case 2:
3759 addr = SHADOW_VALUE14;
3760 break;
3761 case 3:
3762 addr = SHADOW_VALUE15;
3763 break;
3764 case 5:
3765 addr = SHADOW_VALUE17;
3766 break;
3767 case 7:
3768 addr = SHADOW_VALUE19;
3769 break;
3770 case 8:
3771 addr = SHADOW_VALUE20;
3772 break;
3773 case 9:
3774 addr = SHADOW_VALUE21;
3775 break;
3776 case 10:
3777 addr = SHADOW_VALUE22;
3778 break;
3779 case 11:
3780 addr = SHADOW_VALUE23;
3781 break;
3782 default:
3783 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3784 QDF_ASSERT(0);
3785 }
3786
3787 return addr;
3788}
3789#endif
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003790#endif
3791
Dhanashri Atre65b674f2015-10-30 15:12:03 -07003792#if defined(FEATURE_LRO)
Manjunathappa Prakash2146da32016-10-13 14:47:47 -07003793void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
3794{
3795 struct CE_state *ce_state;
3796 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
3797
Manjunathappa Prakash2146da32016-10-13 14:47:47 -07003798 ce_state = scn->ce_id_to_state[ctx_id];
3799
3800 return ce_state->lro_data;
3801}
Dhanashri Atre65b674f2015-10-30 15:12:03 -07003802#endif
Sanjay Devnanic319c822015-11-06 16:44:28 -08003803
3804/**
3805 * hif_map_service_to_pipe() - returns the ce ids pertaining to
3806 * this service
Komal Seelam644263d2016-02-22 20:45:49 +05303807 * @hif_hdl: hif_opaque_softc pointer.
Sanjay Devnanic319c822015-11-06 16:44:28 -08003808 * @svc_id: Service ID for which the mapping is needed.
3809 * @ul_pipe: address of the container in which ul pipe is returned.
3810 * @dl_pipe: address of the container in which dl pipe is returned.
3811 * @ul_is_polled: address of the container in which a bool
3812 * indicating if the UL CE for this service
3813 * is polled is returned.
3814 * @dl_is_polled: address of the container in which a bool
3815 * indicating if the DL CE for this service
3816 * is polled is returned.
3817 *
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003818 * Return: Indicates whether the service has been found in the table.
3819 * Upon return, ul_is_polled is updated only if ul_pipe is updated.
 3820 * There will be debug logs if either leg has not been updated
 3821 * because it missed the entry in the table (but this is not an error).
Sanjay Devnanic319c822015-11-06 16:44:28 -08003822 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05303823int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
Sanjay Devnanic319c822015-11-06 16:44:28 -08003824 uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
3825 int *dl_is_polled)
3826{
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003827 int status = QDF_STATUS_E_INVAL;
Sanjay Devnanic319c822015-11-06 16:44:28 -08003828 unsigned int i;
3829 struct service_to_pipe element;
Sanjay Devnanic319c822015-11-06 16:44:28 -08003830 struct service_to_pipe *tgt_svc_map_to_use;
Houston Hoffman748e1a62017-03-30 17:20:42 -07003831 uint32_t sz_tgt_svc_map_to_use;
Komal Seelambd7c51d2016-02-24 10:27:30 +05303832 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
Houston Hoffman748e1a62017-03-30 17:20:42 -07003833 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003834 bool dl_updated = false;
3835 bool ul_updated = false;
Sanjay Devnanic319c822015-11-06 16:44:28 -08003836
Houston Hoffman748e1a62017-03-30 17:20:42 -07003837 hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
3838 &sz_tgt_svc_map_to_use);
Sanjay Devnanic319c822015-11-06 16:44:28 -08003839
3840 *dl_is_polled = 0; /* polling for received messages not supported */
3841
3842 for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
3843
3844 memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
3845 if (element.service_id == svc_id) {
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003846 if (element.pipedir == PIPEDIR_OUT) {
Sanjay Devnanic319c822015-11-06 16:44:28 -08003847 *ul_pipe = element.pipenum;
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003848 *ul_is_polled =
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05303849 (hif_state->host_ce_config[*ul_pipe].flags &
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003850 CE_ATTR_DISABLE_INTR) != 0;
3851 ul_updated = true;
3852 } else if (element.pipedir == PIPEDIR_IN) {
Sanjay Devnanic319c822015-11-06 16:44:28 -08003853 *dl_pipe = element.pipenum;
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003854 dl_updated = true;
3855 }
3856 status = QDF_STATUS_SUCCESS;
Sanjay Devnanic319c822015-11-06 16:44:28 -08003857 }
3858 }
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003859 if (!ul_updated)
Dustin Brown1ec15102018-08-01 00:43:43 -07003860 HIF_DBG("ul pipe is NOT updated for service %d", svc_id);
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003861 if (!dl_updated)
Dustin Brown1ec15102018-08-01 00:43:43 -07003862 HIF_DBG("dl pipe is NOT updated for service %d", svc_id);
Sanjay Devnanic319c822015-11-06 16:44:28 -08003863
3864 return status;
3865}
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003866
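/*
 * Illustrative sketch (assumption, not driver code): resolving the
 * pipes for a service, per the contract documented above; svc_id
 * would be one of the HTC service IDs, e.g. HTT_DATA_MSG_SVC.
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	if (hif_map_service_to_pipe(hif_hdl, svc_id, &ul_pipe, &dl_pipe,
 *				    &ul_polled, &dl_polled) !=
 *	    QDF_STATUS_SUCCESS)
 *		return -EINVAL;
 */
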
3867#ifdef SHADOW_REG_DEBUG
Komal Seelam644263d2016-02-22 20:45:49 +05303868inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003869 uint32_t CE_ctrl_addr)
3870{
3871 uint32_t read_from_hw, srri_from_ddr = 0;
3872
3873 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
3874
3875 srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3876
3877 if (read_from_hw != srri_from_ddr) {
Houston Hoffmanc50572b2016-06-08 19:49:46 -07003878 HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
3879 __func__, srri_from_ddr, read_from_hw,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003880 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303881 QDF_ASSERT(0);
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003882 }
3883 return srri_from_ddr;
3884}
3885
3886
Komal Seelam644263d2016-02-22 20:45:49 +05303887inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003888 uint32_t CE_ctrl_addr)
3889{
3890 uint32_t read_from_hw, drri_from_ddr = 0;
3891
3892 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
3893
3894 drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3895
3896 if (read_from_hw != drri_from_ddr) {
Houston Hoffmanc50572b2016-06-08 19:49:46 -07003897 HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003898 drri_from_ddr, read_from_hw,
3899 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303900 QDF_ASSERT(0);
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003901 }
3902 return drri_from_ddr;
3903}
3904
3905#endif
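
/*
 * How the SHADOW_REG_DEBUG accessors above are meant to be consumed (a
 * minimal sketch; the call site is an assumption): ring read indices
 * are fetched through the DEBUG_* variants so that a stale DDR shadow
 * copy of SRRI/DRRI is detected, and asserted on, at the point of use.
 *
 *	sw_index = DEBUG_CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
 */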

/**
 * hif_dump_ce_registers() - dump ce registers
 * @scn: hif_softc pointer
 *
 * Output the copy engine registers
 *
 * Return: 0 for success or error code
 */
int hif_dump_ce_registers(struct hif_softc *scn)
{
    struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
    uint32_t ce_reg_address = CE0_BASE_ADDRESS;
    uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
    uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
    uint16_t i;
    QDF_STATUS status;

    for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
        if (!scn->ce_id_to_state[i]) {
            HIF_DBG("CE%d not used.", i);
            continue;
        }

        status = hif_diag_read_mem(hif_hdl, ce_reg_address,
                                   (uint8_t *) &ce_reg_values[0],
                                   ce_reg_word_size * sizeof(uint32_t));

        if (status != QDF_STATUS_SUCCESS) {
            HIF_ERROR("Dumping CE register failed!");
            return -EACCES;
        }
        HIF_ERROR("CE%d=>\n", i);
        qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
                           (uint8_t *) &ce_reg_values[0],
                           ce_reg_word_size * sizeof(uint32_t));
        qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address
                  + SR_WR_INDEX_ADDRESS),
                  ce_reg_values[SR_WR_INDEX_ADDRESS / 4]);
        qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address
                  + CURRENT_SRRI_ADDRESS),
                  ce_reg_values[CURRENT_SRRI_ADDRESS / 4]);
        qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address
                  + DST_WR_INDEX_ADDRESS),
                  ce_reg_values[DST_WR_INDEX_ADDRESS / 4]);
        qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address
                  + CURRENT_DRRI_ADDRESS),
                  ce_reg_values[CURRENT_DRRI_ADDRESS / 4]);
        qdf_print("---");
    }
    return 0;
}
qdf_export_symbol(hif_dump_ce_registers);
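
/*
 * Illustrative call site (an assumption, not from this file): the dump
 * is typically invoked from an error/recovery path, with the return
 * value checked since the diag read can fail on a wedged target.
 *
 *	if (hif_dump_ce_registers(scn))
 *		HIF_ERROR("%s: CE register dump failed", __func__);
 */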
#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
        struct hif_pipe_addl_info *hif_info, uint32_t pipe)
{
    struct hif_softc *scn = HIF_GET_SOFTC(osc);
    struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
    struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
    struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
    struct CE_handle *ce_hdl = pipe_info->ce_hdl;
    struct CE_state *ce_state = (struct CE_state *)ce_hdl;
    struct CE_ring_state *src_ring = ce_state->src_ring;
    struct CE_ring_state *dest_ring = ce_state->dest_ring;

    if (src_ring) {
        hif_info->ul_pipe.nentries = src_ring->nentries;
        hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
        hif_info->ul_pipe.sw_index = src_ring->sw_index;
        hif_info->ul_pipe.write_index = src_ring->write_index;
        hif_info->ul_pipe.hw_index = src_ring->hw_index;
        hif_info->ul_pipe.base_addr_CE_space =
            src_ring->base_addr_CE_space;
        hif_info->ul_pipe.base_addr_owner_space =
            src_ring->base_addr_owner_space;
    }

    if (dest_ring) {
        hif_info->dl_pipe.nentries = dest_ring->nentries;
        hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
        hif_info->dl_pipe.sw_index = dest_ring->sw_index;
        hif_info->dl_pipe.write_index = dest_ring->write_index;
        hif_info->dl_pipe.hw_index = dest_ring->hw_index;
        hif_info->dl_pipe.base_addr_CE_space =
            dest_ring->base_addr_CE_space;
        hif_info->dl_pipe.base_addr_owner_space =
            dest_ring->base_addr_owner_space;
    }

    hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
    hif_info->ctrl_addr = ce_state->ctrl_addr;

    return hif_info;
}
qdf_export_symbol(hif_get_addl_pipe_info);
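
/*
 * Usage sketch (the pipe number and the print are assumptions added
 * for illustration): the caller owns the hif_pipe_addl_info storage,
 * and a single call fills both ring snapshots.
 *
 *	struct hif_pipe_addl_info info = {0};
 *
 *	hif_get_addl_pipe_info(osc, &info, CE_ID_1);
 *	qdf_print("ul wr=%d dl wr=%d", info.ul_pipe.write_index,
 *		  info.dl_pipe.write_index);
 */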

uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
{
    struct hif_softc *scn = HIF_GET_SOFTC(osc);

    scn->nss_wifi_ol_mode = mode;
    return 0;
}
qdf_export_symbol(hif_set_nss_wifiol_mode);
#endif

void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
{
    struct hif_softc *scn = HIF_GET_SOFTC(osc);

    scn->hif_attribute = hif_attrib;
}

/* disable interrupts (only applicable for legacy copy engines currently) */
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
{
    struct hif_softc *scn = HIF_GET_SOFTC(osc);
    struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
    uint32_t ctrl_addr = CE_state->ctrl_addr;

    Q_TARGET_ACCESS_BEGIN(scn);
    CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
    Q_TARGET_ACCESS_END(scn);
}
qdf_export_symbol(hif_disable_interrupt);

/**
 * hif_fw_event_handler() - hif fw event handler
 * @hif_state: pointer to hif ce state structure
 *
 * Process fw events and raise HTC callback to process fw events.
 *
 * Return: none
 */
static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
{
    struct hif_msg_callbacks *msg_callbacks =
        &hif_state->msg_callbacks_current;

    if (!msg_callbacks->fwEventHandler)
        return;

    msg_callbacks->fwEventHandler(msg_callbacks->Context,
                                  QDF_STATUS_E_FAILURE);
}

#ifndef QCA_WIFI_3_0
/**
 * hif_fw_interrupt_handler() - FW interrupt handler
 * @irq: irq number
 * @arg: the user pointer
 *
 * Called from the PCI interrupt handler when the Target raises a
 * firmware-generated interrupt to the Host.
 *
 * Only registered for legacy CE devices.
 *
 * Return: status of handled irq
 */
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
    struct hif_softc *scn = arg;
    struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
    uint32_t fw_indicator_address, fw_indicator;

    if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
        return ATH_ISR_NOSCHED;

    fw_indicator_address = hif_state->fw_indicator_address;
    /* For sudden unplug this will return ~0 */
    fw_indicator = A_TARGET_READ(scn, fw_indicator_address);

    if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
        /* ACK: clear Target-side pending event */
        A_TARGET_WRITE(scn, fw_indicator_address,
                       fw_indicator & ~FW_IND_EVENT_PENDING);
        if (Q_TARGET_ACCESS_END(scn) < 0)
            return ATH_ISR_SCHED;

        if (hif_state->started) {
            hif_fw_event_handler(hif_state);
        } else {
            /*
             * Probable Target failure before we're prepared
             * to handle it. Generally unexpected.
             * fw_indicator is used as a bitmap, defined as below:
             * FW_IND_EVENT_PENDING 0x1
             * FW_IND_INITIALIZED   0x2
             * FW_IND_NEEDRECOVER   0x4
             */
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                            ("%s: Early firmware event indicated 0x%x\n",
                             __func__, fw_indicator));
        }
    } else {
        if (Q_TARGET_ACCESS_END(scn) < 0)
            return ATH_ISR_SCHED;
    }

    return ATH_ISR_SCHED;
}
#else
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
    return ATH_ISR_SCHED;
}
#endif /* #ifndef QCA_WIFI_3_0 */
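
/*
 * For reference, a minimal decoding sketch of the fw_indicator bitmap
 * described in the handler above (illustrative only):
 *
 *	fw_indicator & FW_IND_EVENT_PENDING	-> 0x1, event to ack
 *	fw_indicator & FW_IND_INITIALIZED	-> 0x2, firmware is up
 *	fw_indicator & FW_IND_NEEDRECOVER	-> 0x4, recovery required
 *
 * A read of ~0 indicates the device has been removed, so the handler
 * checks for that before trusting any bit.
 */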

/**
 * hif_wlan_disable(): call the platform driver to disable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode to the platform driver to disable
 * wlan.
 *
 * Return: void
 */
void hif_wlan_disable(struct hif_softc *scn)
{
    enum pld_driver_mode mode;
    uint32_t con_mode = hif_get_conparam(scn);

    if (scn->target_status == TARGET_STATUS_RESET)
        return;

    if (QDF_GLOBAL_FTM_MODE == con_mode)
        mode = PLD_FTM;
    else if (QDF_IS_EPPING_ENABLED(con_mode))
        mode = PLD_EPPING;
    else
        mode = PLD_MISSION;

    pld_wlan_disable(scn->qdf_dev->dev, mode);
}

int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
{
    QDF_STATUS status;
    uint8_t ul_pipe, dl_pipe;
    int ul_is_polled, dl_is_polled;

    /* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
    status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
                                     HTC_CTRL_RSVD_SVC,
                                     &ul_pipe, &dl_pipe,
                                     &ul_is_polled, &dl_is_polled);
    if (status) {
        HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
        return qdf_status_to_os_return(status);
    }

    *ce_id = dl_pipe;

    return 0;
}
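
/*
 * Usage sketch (the wake-IRQ follow-up is an assumption): callers use
 * the returned CE id to configure the interrupt that may wake the host
 * from suspend.
 *
 *	uint8_t wake_ce_id;
 *
 *	if (!hif_get_wake_ce_id(scn, &wake_ce_id))
 *		... configure the wake interrupt for wake_ce_id ...
 */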

#ifdef HIF_CE_LOG_INFO
/**
 * ce_get_index_info(): Get CE index info
 * @scn: HIF Context
 * @ce_state: CE opaque handle
 * @info: CE info
 *
 * Return: 0 for success and non zero for failure
 */
static
int ce_get_index_info(struct hif_softc *scn, void *ce_state,
                      struct ce_index *info)
{
    struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

    return hif_state->ce_services->ce_get_index_info(scn, ce_state, info);
}

void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
                     unsigned int *offset)
{
    struct hang_event_info info = {0};
    static uint32_t tracked_ce = BIT(CE_ID_1) | BIT(CE_ID_2) |
        BIT(CE_ID_3) | BIT(CE_ID_4) | BIT(CE_ID_9) | BIT(CE_ID_10);
    uint8_t curr_index = 0;
    uint8_t i;
    uint16_t size;

    info.active_tasklet_count = qdf_atomic_read(&scn->active_tasklet_cnt);
    info.active_grp_tasklet_cnt =
        qdf_atomic_read(&scn->active_grp_tasklet_cnt);

    for (i = 0; i < scn->ce_count; i++) {
        if (!(tracked_ce & BIT(i)) || !scn->ce_id_to_state[i])
            continue;

        if (ce_get_index_info(scn, scn->ce_id_to_state[i],
                              &info.ce_info[curr_index]))
            continue;

        curr_index++;
    }

    info.ce_count = curr_index;
    size = sizeof(info) -
        (CE_COUNT_MAX - info.ce_count) * sizeof(struct ce_index);

    QDF_HANG_EVT_SET_HDR(&info.tlv_header, HANG_EVT_TAG_CE_INFO,
                         size - QDF_HANG_EVENT_TLV_HDR_SIZE);

    qdf_mem_copy(data + *offset, &info, size);
    *offset = *offset + size;
}
#endif
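
/*
 * Worked example of the size trimming in hif_log_ce_info() above
 * (numbers are illustrative, not taken from any particular target):
 * with CE_COUNT_MAX == 12 and six tracked CEs reporting valid index
 * info,
 *
 *	size = sizeof(info) - (12 - 6) * sizeof(struct ce_index);
 *
 * so the hang-event TLV carries only the populated ce_index slots.
 */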