/*
 * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include <target_type.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#ifndef CONFIG_WIN
#include "qwlan_version.h"
#endif
#include "qdf_module.h"

#define CE_POLL_TIMEOUT 10      /* ms */

#define AGC_DUMP         1
#define CHANINFO_DUMP    2
#define BB_WATCHDOG_DUMP 3
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
#define PCIE_ACCESS_DUMP 4
#endif
#include "mp_dev.h"

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290) || \
	defined(QCA_WIFI_QCA6018)) && !defined(QCA_WIFI_SUPPORT_SRNG)
#define QCA_WIFI_SUPPORT_SRNG
#endif

/* Forward references */
QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix EV118783: poll to check whether a BMI response has arrived, rather
 * than only waiting for the interrupt, which may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC  1000

#ifdef CONFIG_BYPASS_QMI
#define BYPASS_QMI 1
#else
#define BYPASS_QMI 0
#endif

#ifdef ENABLE_10_4_FW_HDR
#if (ENABLE_10_4_FW_HDR == 1)
#define WDI_IPA_SERVICE_GROUP 5
#define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
#define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
#define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
#endif /* ENABLE_10_4_FW_HDR == 1 */
#endif /* ENABLE_10_4_FW_HDR */

QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn);
static void hif_config_rri_on_ddr(struct hif_softc *scn);

/**
 * hif_target_access_log_dump() - dump the target access log
 *
 * Return: n/a
 */
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
static void hif_target_access_log_dump(void)
{
	hif_target_dump_access_log();
}
#endif

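/**
 * hif_trigger_dump() - trigger a debug dump command
 * @hif_ctx: hif opaque handle
 * @cmd_id: one of AGC_DUMP, CHANINFO_DUMP, BB_WATCHDOG_DUMP or
 *	PCIE_ACCESS_DUMP
 * @start: true to start a capture, false to dump the collected data
 *
 * Return: None
 */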
void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
		      uint8_t cmd_id, bool start)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	switch (cmd_id) {
	case AGC_DUMP:
		if (start)
			priv_start_agc(scn);
		else
			priv_dump_agc(scn);
		break;
	case CHANINFO_DUMP:
		if (start)
			priv_start_cap_chaninfo(scn);
		else
			priv_dump_chaninfo(scn);
		break;
	case BB_WATCHDOG_DUMP:
		priv_dump_bbwatchdog(scn);
		break;
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	case PCIE_ACCESS_DUMP:
		hif_target_access_log_dump();
		break;
#endif
	default:
		HIF_ERROR("%s: Invalid htc dump command", __func__);
		break;
	}
}

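/**
 * ce_poll_timeout() - timer callback that services a polled copy engine
 * @arg: the struct CE_state being polled
 *
 * Services the CE once and re-arms the poll timer for as long as polling
 * stays enabled (timer_inited) for this CE.
 *
 * Return: None
 */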
static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;

	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}

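/**
 * roundup_pwr2() - round a value up to the next power of 2
 * @n: value to round up
 *
 * For example, roundup_pwr2(5) returns 8, while roundup_pwr2(8) returns 8
 * unchanged (values already a power of 2 are returned as-is).
 *
 * Return: the smallest power of 2 >= @n, or 0 (after asserting) if @n is
 * too large to round up within 32 bits
 */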
static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	QDF_ASSERT(0); /* n too large */
	return 0;
}

#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40

static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
#ifdef QCA_WIFI_3_0_ADRASTEA
	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
#endif
};

#ifdef QCN7605_SUPPORT
static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

#ifdef WLAN_FEATURE_EPPING
static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
 * mapping of HTC services to HIF pipes.
 */
/*
 * This authoritative table defines Copy Engine configuration and the mapping
 * of services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
 ============================================================================
 Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
            |                      |      | ctio | Size     | Frequency
            |                      |      | n    |          |
 ============================================================================
 tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
 descriptor |                      |      |      | O(100B)  | and regular
 download   |                      |      |      |          |
 ----------------------------------------------------------------------------
 rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
 indication |                      |      |      | O(10B)   | regular
 upload     |                      |      |      |          |
 ----------------------------------------------------------------------------
 MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
 upload     |                      |      |      | O(1000B) | (frequent
 e.g. noise |                      |      |      |          | during IP1.0
 packets    |                      |      |      |          | testing)
 ----------------------------------------------------------------------------
 MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
 download   |                      |      |      | O(1000B) | (frequent
 e.g.       |                      |      |      |          | during IP1.0
 misdirected|                      |      |      |          | testing)
 EAPOL      |                      |      |      |          |
 packets    |                      |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
            | DATA_VO (uplink)     |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
            | DATA_VO (downlink)   |      |      |          |
 ----------------------------------------------------------------------------
 WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
            |                      |      |      | O(100B)  |
 ----------------------------------------------------------------------------
 WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
 messages   | (downlink)           |      |      | O(100B)  |
            |                      |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
            | HTC_RAW_STREAMS      |      |      |          |
            | (uplink)             |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
            | HTC_RAW_STREAMS      |      |      |          |
            | (downlink)           |      |      |          |
 ----------------------------------------------------------------------------
 diag       | none (raw CE)        | CE 7 | t<>h | 4        | Diag Window
            |                      |      |      |          | infrequent
 ============================================================================
 */

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		5,
	},
#if defined(QCA_WIFI_3_0_ADRASTEA)
	{
		HTT_DATA2_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		9,
	},
	{
		HTT_DATA3_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		10,
	},
	{
		PACKET_LOG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		11,
	},
#endif
	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN = TARGET to HOST */
#if (defined(QCA_WIFI_QCA8074))
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
};
#endif

#if (defined(QCA_WIFI_QCA8074V2))
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9},
	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2},
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6018))
static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
};
#endif

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN = TARGET to HOST */
#ifdef QCN7605_SUPPORT
static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
#ifdef IPA_OFFLOAD
	{ WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
#else
	{ HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
#endif
	{ PACKET_LOG_SVC, PIPEDIR_IN, 7, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif

#if (defined(QCA_WIFI_QCA6290))
#ifdef QCA_6290_AP_MODE
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6390))
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
};
#endif

static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
#ifdef WLAN_FEATURE_FASTPATH
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		5,
	},
#else /* WLAN_FEATURE_FASTPATH */
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
#endif /* WLAN_FEATURE_FASTPATH */

	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

#ifdef WLAN_FEATURE_EPPING
static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
	{0, 0, 0,},                             /* Must be last */
};

void hif_select_epping_service_to_pipe_map(struct service_to_pipe
					   **tgt_svc_map_to_use,
					   uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
	*sz_tgt_svc_map_to_use =
		sizeof(target_service_to_ce_map_wlan_epping);
}
#endif

#ifdef QCN7605_SUPPORT
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
}
#else
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	HIF_ERROR("%s: QCN7605 not supported", __func__);
}
#endif

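/**
 * hif_select_service_to_pipe_map() - select the service-to-pipe map
 * @scn: hif context
 * @tgt_svc_map_to_use: returned pointer to the map for this target
 * @sz_tgt_svc_map_to_use: returned size of that map, in bytes
 *
 * Picks the epping map when epping mode is enabled, otherwise selects
 * the map matching the attached target type.
 *
 * Return: None
 */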
static void hif_select_service_to_pipe_map(struct hif_softc *scn,
					   struct service_to_pipe
					   **tgt_svc_map_to_use,
					   uint32_t *sz_tgt_svc_map_to_use)
{
	uint32_t mode = hif_get_conparam(scn);
	struct hif_target_info *tgt_info = &scn->target_info;

	if (QDF_IS_EPPING_ENABLED(mode)) {
		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
						      sz_tgt_svc_map_to_use);
	} else {
		switch (tgt_info->target_type) {
		default:
			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_wlan);
			break;
		case TARGET_TYPE_QCN7605:
			hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
						  sz_tgt_svc_map_to_use);
			break;
		case TARGET_TYPE_AR900B:
		case TARGET_TYPE_QCA9984:
		case TARGET_TYPE_IPQ4019:
		case TARGET_TYPE_QCA9888:
		case TARGET_TYPE_AR9888:
		case TARGET_TYPE_AR9888V2:
			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_ar900b);
			break;
		case TARGET_TYPE_QCA6290:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6290);
			break;
		case TARGET_TYPE_QCA6390:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6390);
			break;
		case TARGET_TYPE_QCA8074:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074);
			break;
		case TARGET_TYPE_QCA8074V2:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca8074_v2;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074_v2);
			break;
		case TARGET_TYPE_QCA6018:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca6018;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6018);
			break;
		}
	}
}

/**
 * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
 * @ce_state: pointer to the state context of the CE
 *
 * Description:
 * Sets the htt_rx_data/htt_tx_data attribute of the state structure if
 * the CE serves one of the HTT DATA services.
 *
 * Return: true if the CE serves an HTT DATA service, false otherwise
 */
static bool ce_mark_datapath(struct CE_state *ce_state)
{
	struct service_to_pipe *svc_map;
	uint32_t map_sz, map_len;
	int i;
	bool rc = false;

	if (ce_state) {
		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
					       &map_sz);

		map_len = map_sz / sizeof(struct service_to_pipe);
		for (i = 0; i < map_len; i++) {
			if ((svc_map[i].pipenum == ce_state->id) &&
			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
				/* HTT CEs are unidirectional */
				if (svc_map[i].pipedir == PIPEDIR_IN)
					ce_state->htt_rx_data = true;
				else
					ce_state->htt_tx_data = true;
				rc = true;
			}
		}
	}
	return rc;
}

/**
 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
 * @ce_id: ce in question
 * @ring: ring state being examined
 * @type: "src_ring" or "dest_ring" string for identifying the ring
 *
 * Warns on non-zero index values.
 * Causes a kernel panic if the ring is not empty during initialization.
 */
static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
					 char *type)
{
	if (ring->write_index != 0 || ring->sw_index != 0)
		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
			  ce_id, type, ring->sw_index, ring->write_index);
	if (ring->write_index != ring->sw_index)
		QDF_BUG(0);
}

#ifdef IPA_OFFLOAD
/**
 * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @base_addr: pointer to copyengine ring base address
 * @ce_ring: copyengine instance
 * @nentries: number of entries to allocate
 * @desc_size: ce desc size
 *
 * Return: QDF_STATUS_SUCCESS - for success
 */
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
	    !ce_srng_based(scn)) {
		if (!scn->ipa_ce_ring) {
			scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(
				scn->qdf_dev,
				nentries * desc_size + CE_DESC_RING_ALIGN);
			if (!scn->ipa_ce_ring) {
				HIF_ERROR(
				"%s: Failed to allocate memory for IPA ce ring",
				__func__);
				return QDF_STATUS_E_NOMEM;
			}
		}
		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
						  &scn->ipa_ce_ring->mem_info);
		ce_ring->base_addr_owner_space_unaligned =
						scn->ipa_ce_ring->vaddr;
	} else {
		ce_ring->base_addr_owner_space_unaligned =
			qdf_mem_alloc_consistent(scn->qdf_dev,
						 scn->qdf_dev->dev,
						 (nentries * desc_size +
						 CE_DESC_RING_ALIGN),
						 base_addr);
		if (!ce_ring->base_addr_owner_space_unaligned) {
			HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
				  __func__, CE_id);
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * ce_free_desc_ring() - Frees copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @ce_ring: copyengine instance
 * @desc_size: ce desc size
 *
 * Return: None
 */
static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
	    !ce_srng_based(scn)) {
		if (scn->ipa_ce_ring) {
			qdf_mem_shared_mem_free(scn->qdf_dev,
						scn->ipa_ce_ring);
			scn->ipa_ce_ring = NULL;
		}
		ce_ring->base_addr_owner_space_unaligned = NULL;
	} else {
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
			ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
			ce_ring->base_addr_owner_space_unaligned,
			ce_ring->base_addr_CE_space, 0);
		ce_ring->base_addr_owner_space_unaligned = NULL;
	}
}
#else
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	ce_ring->base_addr_owner_space_unaligned =
		qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					 (nentries * desc_size +
					 CE_DESC_RING_ALIGN), base_addr);
	if (!ce_ring->base_addr_owner_space_unaligned) {
		HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
			  __func__, CE_id);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
		ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
		ce_ring->base_addr_owner_space_unaligned,
		ce_ring->base_addr_CE_space, 0);
	ce_ring->base_addr_owner_space_unaligned = NULL;
}
#endif /* IPA_OFFLOAD */

/*
 * TODO: Need to explore the possibility of having this as part of a
 * target context instead of a global array.
 */
static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void);

void ce_service_register_module(enum ce_target_type target_type,
				struct ce_ops* (*ce_attach)(void))
{
	if (target_type < CE_MAX_TARGET_TYPE)
		ce_attach_register[target_type] = ce_attach;
}

qdf_export_symbol(ce_service_register_module);

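/*
 * Usage sketch (illustrative, not part of this file): each CE backend
 * registers its attach callback once at module init time, e.g.
 *
 *	ce_service_register_module(CE_SVC_LEGACY, ce_services_legacy);
 *
 * so that ce_services_attach() below can select the matching struct
 * ce_ops for the target. The callback name above is an assumption for
 * illustration; the real name is whatever the backend module exports.
 */
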
/**
 * ce_srng_based() - Does this target use srng
 * @scn: pointer to the hif context
 *
 * Description:
 * returns true if the target is SRNG based
 *
 * Return: true if the target is SRNG based, false otherwise
 */
bool ce_srng_based(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);

	switch (tgt_info->target_type) {
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6290:
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6018:
		return true;
	default:
		return false;
	}
	return false;
}
qdf_export_symbol(ce_srng_based);

#ifdef QCA_WIFI_SUPPORT_SRNG
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	struct ce_ops *ops = NULL;

	if (ce_srng_based(scn)) {
		if (ce_attach_register[CE_SVC_SRNG])
			ops = ce_attach_register[CE_SVC_SRNG]();
	} else if (ce_attach_register[CE_SVC_LEGACY]) {
		ops = ce_attach_register[CE_SVC_LEGACY]();
	}

	return ops;
}

#else /* QCA_LITHIUM */
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	if (ce_attach_register[CE_SVC_LEGACY])
		return ce_attach_register[CE_SVC_LEGACY]();

	return NULL;
}
#endif /* QCA_LITHIUM */

static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
		struct pld_shadow_reg_v2_cfg **shadow_config,
		int *num_shadow_registers_configured) {
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
			scn, shadow_config, num_shadow_registers_configured);
}

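/**
 * ce_get_desc_size() - fetch the descriptor size for a ring type
 * @scn: hif context
 * @ring_type: CE_RING_SRC, CE_RING_DEST or CE_RING_STATUS
 *
 * Return: size of one CE descriptor, in bytes, for the attached service
 */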
static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
					uint8_t ring_type)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_get_desc_size(ring_type);
}

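/**
 * ce_alloc_ring_state() - allocate and initialize a CE ring state
 * @CE_state: CE being set up
 * @ring_type: CE_RING_SRC, CE_RING_DEST or CE_RING_STATUS
 * @nentries: ring depth; expected to be a power of 2
 *
 * Allocates the ring bookkeeping structure, the per-transfer context
 * array and the DMA descriptor memory, then aligns the base addresses
 * to CE_DESC_RING_ALIGN.
 *
 * Return: the new ring state, or NULL on allocation failure
 */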
static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
		uint8_t ring_type, uint32_t nentries)
{
	uint32_t ce_nbytes;
	char *ptr;
	qdf_dma_addr_t base_addr;
	struct CE_ring_state *ce_ring;
	uint32_t desc_size;
	struct hif_softc *scn = CE_state->scn;

	ce_nbytes = sizeof(struct CE_ring_state)
		+ (nentries * sizeof(void *));
	ptr = qdf_mem_malloc(ce_nbytes);
	if (!ptr)
		return NULL;

	ce_ring = (struct CE_ring_state *)ptr;
	ptr += sizeof(struct CE_ring_state);
	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	ce_ring->low_water_mark_nentries = 0;
	ce_ring->high_water_mark_nentries = nentries;
	ce_ring->per_transfer_context = (void **)ptr;

	desc_size = ce_get_desc_size(scn, ring_type);

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
			       ce_ring, nentries,
			       desc_size) !=
	    QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: ring has no DMA mem",
			  __func__);
		qdf_mem_free(ce_ring);
		return NULL;
	}
	ce_ring->base_addr_CE_space_unaligned = base_addr;

	/* Correctly initialize memory to 0 to
	 * prevent garbage data from crashing the system
	 * when downloading firmware
	 */
	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
		     nentries * desc_size +
		     CE_DESC_RING_ALIGN);

	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {

		ce_ring->base_addr_CE_space =
			(ce_ring->base_addr_CE_space_unaligned +
			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);

		ce_ring->base_addr_owner_space = (void *)
			(((size_t) ce_ring->base_addr_owner_space_unaligned +
			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
	} else {
		ce_ring->base_addr_CE_space =
				ce_ring->base_addr_CE_space_unaligned;
		ce_ring->base_addr_owner_space =
				ce_ring->base_addr_owner_space_unaligned;
	}

	return ce_ring;
}

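/**
 * ce_ring_setup() - hand a ring over to the attached CE service
 * @scn: hif context
 * @ring_type: CE_RING_SRC, CE_RING_DEST or CE_RING_STATUS
 * @ce_id: CE being set up
 * @ring: ring state to program
 * @attr: CE attributes for this engine
 *
 * Return: 0 on success, negative value on failure
 */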
static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
			 uint32_t ce_id, struct CE_ring_state *ring,
			 struct CE_attr *attr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
						     ring, attr);
}

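/**
 * hif_ce_bus_early_suspend() - pause non-WMI copy engines for bus suspend
 * @scn: hif context
 *
 * Moves every CE except the WMI control pipes from CE_RUNNING to
 * CE_PAUSED so that only control traffic flows while suspending.
 *
 * Return: 0 on success, error code from hif_map_service_to_pipe otherwise
 */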
int hif_ce_bus_early_suspend(struct hif_softc *scn)
{
	uint8_t ul_pipe, dl_pipe;
	int ce_id, status, ul_is_polled, dl_is_polled;
	struct CE_state *ce_state;

	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		HIF_ERROR("%s: pipe_mapping failure", __func__);
		return status;
	}

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (ce_id == ul_pipe)
			continue;
		if (ce_id == dl_pipe)
			continue;

		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_RUNNING)
			ce_state->state = CE_PAUSED;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
	}

	return status;
}

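/**
 * hif_ce_bus_late_resume() - resume copy engines after bus resume
 * @scn: hif context
 *
 * Restarts paused CEs and, for CEs left in CE_PENDING, replays the
 * source-ring write index to the hardware before marking them running.
 *
 * Return: 0
 */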
int hif_ce_bus_late_resume(struct hif_softc *scn)
{
	int ce_id;
	struct CE_state *ce_state;
	int write_index = 0;
	bool index_updated;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_PENDING) {
			write_index = ce_state->src_ring->write_index;
			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
						  write_index);
			ce_state->state = CE_RUNNING;
			index_updated = true;
		} else {
			index_updated = false;
		}

		if (ce_state->state == CE_PAUSED)
			ce_state->state = CE_RUNNING;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);

		if (index_updated)
			hif_record_ce_desc_event(scn, ce_id,
						 RESUME_WRITE_INDEX_UPDATE,
						 NULL, NULL, write_index, 0);
	}

	return 0;
}

/**
 * ce_oom_recovery() - try to recover rx ce from oom condition
 * @context: CE_state of the CE with oom rx ring
 *
 * The executing work will continue to be rescheduled until
 * at least 1 descriptor is successfully posted to the rx ring.
 *
 * Return: none
 */
static void ce_oom_recovery(void *context)
{
	struct CE_state *ce_state = context;
	struct hif_softc *scn = ce_state->scn;
	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&ce_softc->pipe_info[ce_state->id];

	hif_post_recv_buffers_for_pipe(pipe_info);
}

#ifdef HIF_CE_DEBUG_DATA_BUF
/**
 * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed by
 * the CE descriptors.
 * Allocates HIF_CE_HISTORY_MAX records of CE_DEBUG_MAX_DATA_BUF_SIZE each.
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev =
	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return QDF_STATUS_E_NOMEM;

	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		event->data =
			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
		if (!event->data)
			return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * free_mem_ce_debug_hist_data() - Free mem of the data pointed by
 * the CE descriptors.
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: None
 */
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev =
	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return;

	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		if (event->data)
			qdf_mem_free(event->data);
		event->data = NULL;
		event = NULL;
	}
}
#endif /* HIF_CE_DEBUG_DATA_BUF */

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) /* MCL */
struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];

/**
 * alloc_mem_ce_debug_history() - Allocate CE descriptor history
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

	ce_hist->hist_ev[ce_id] = hif_ce_desc_history[ce_id];
	ce_hist->enable[ce_id] = 1;

	return QDF_STATUS_SUCCESS;
}

/**
 * free_mem_ce_debug_history() - Free CE descriptor history
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: None
 */
static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

	ce_hist->enable[ce_id] = 0;
	ce_hist->hist_ev[ce_id] = NULL;
}

#elif defined(HIF_CE_DEBUG_DATA_BUF) /* WIN */

static QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
{
	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));

	if (!scn->hif_ce_desc_hist.hist_ev[CE_id]) {
		scn->hif_ce_desc_hist.enable[CE_id] = 0;
		return QDF_STATUS_E_NOMEM;
	} else {
		scn->hif_ce_desc_hist.enable[CE_id] = 1;
		return QDF_STATUS_SUCCESS;
	}
}

static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	struct hif_ce_desc_event *hist_ev = ce_hist->hist_ev[CE_id];

	if (!hist_ev)
		return;

	if (ce_hist->data_enable[CE_id] == 1) {
		ce_hist->data_enable[CE_id] = 0;
		free_mem_ce_debug_hist_data(scn, CE_id);
	}

	ce_hist->enable[CE_id] = 0;
	qdf_mem_free(ce_hist->hist_ev[CE_id]);
	ce_hist->hist_ev[CE_id] = NULL;
}

#else /* Disabled */

static inline QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
#endif

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
/**
 * reset_ce_debug_history() - reset the index and ce id used for dumping the
 * CE records on the console using sysfs.
 * @scn: hif scn handle
 *
 * Return: None
 */
static inline void reset_ce_debug_history(struct hif_softc *scn)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	/* Initialise the CE debug history sysfs interface inputs ce_id and
	 * index. Disable data storing
	 */
	ce_hist->hist_index = 0;
	ce_hist->hist_id = 0;
}
#else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
static inline void reset_ce_debug_history(struct hif_softc *scn) { }
#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */

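/**
 * ce_enable_polling() - enable the poll timer for a copy engine
 * @cestate: CE_state to operate on
 *
 * Only takes effect for CEs configured with CE_ATTR_ENABLE_POLL.
 *
 * Return: None
 */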
void ce_enable_polling(void *cestate)
{
	struct CE_state *CE_state = (struct CE_state *)cestate;

	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
		CE_state->timer_inited = true;
}

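/**
 * ce_disable_polling() - disable the poll timer for a copy engine
 * @cestate: CE_state to operate on
 *
 * Only takes effect for CEs configured with CE_ATTR_ENABLE_POLL.
 *
 * Return: None
 */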
void ce_disable_polling(void *cestate)
{
	struct CE_state *CE_state = (struct CE_state *)cestate;

	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
		CE_state->timer_inited = false;
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 *
 * This should be called during the initialization sequence before
 * interrupts are enabled, so we don't have to worry about thread safety.
 */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr)
{
	struct CE_state *CE_state;
	uint32_t ctrl_addr;
	unsigned int nentries;
	bool malloc_CE_state = false;
	bool malloc_src_ring = false;
	int status;

	QDF_ASSERT(CE_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(CE_id);
	CE_state = scn->ce_id_to_state[CE_id];

	if (!CE_state) {
		CE_state =
		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
		if (!CE_state)
			return NULL;

		malloc_CE_state = true;
		qdf_spinlock_create(&CE_state->ce_index_lock);

		CE_state->id = CE_id;
		CE_state->ctrl_addr = ctrl_addr;
		CE_state->state = CE_RUNNING;
		CE_state->attr_flags = attr->flags;
	}
	CE_state->scn = scn;
	CE_state->service = ce_engine_service_reg;

	qdf_atomic_init(&CE_state->rx_pending);
	if (!attr) {
		/* Already initialized; caller wants the handle */
		return (struct CE_handle *)CE_state;
	}

	if (CE_state->src_sz_max)
		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
	else
		CE_state->src_sz_max = attr->src_sz_max;

	ce_init_ce_desc_event_log(scn, CE_id,
				  attr->src_nentries + attr->dest_nentries);

	/* source ring setup */
	nentries = attr->src_nentries;
	if (nentries) {
		struct CE_ring_state *src_ring;

		nentries = roundup_pwr2(nentries);
		if (CE_state->src_ring) {
			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
		} else {
			src_ring = CE_state->src_ring =
				ce_alloc_ring_state(CE_state,
						    CE_RING_SRC,
						    nentries);
			if (!src_ring) {
				/* cannot allocate src ring. If the
				 * CE_state is allocated locally, free
				 * CE_state and return error.
				 */
				HIF_ERROR("%s: src ring has no mem", __func__);
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					qdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			}
			/* we can allocate src ring. Mark that the src ring is
			 * allocated locally
			 */
			malloc_src_ring = true;

			/*
			 * Also allocate a shadow src ring in
			 * regular mem to use for faster access.
			 */
			src_ring->shadow_base_unaligned =
				qdf_mem_malloc(nentries *
					       sizeof(struct CE_src_desc) +
					       CE_DESC_RING_ALIGN);
			if (!src_ring->shadow_base_unaligned)
				goto error_no_dma_mem;

			src_ring->shadow_base = (struct CE_src_desc *)
				(((size_t) src_ring->shadow_base_unaligned +
				CE_DESC_RING_ALIGN - 1) &
				 ~(CE_DESC_RING_ALIGN - 1));

			status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
					       src_ring, attr);
			if (status < 0)
				goto error_target_access;

			ce_ring_test_initial_indexes(CE_id, src_ring,
						     "src_ring");
		}
	}

	/* destination ring setup */
	nentries = attr->dest_nentries;
	if (nentries) {
		struct CE_ring_state *dest_ring;

		nentries = roundup_pwr2(nentries);
		if (CE_state->dest_ring) {
			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
		} else {
			dest_ring = CE_state->dest_ring =
				ce_alloc_ring_state(CE_state,
						    CE_RING_DEST,
						    nentries);
			if (!dest_ring) {
				/* cannot allocate dst ring. If the CE_state
				 * or src ring is allocated locally, free
				 * CE_state and src ring and return error.
				 */
				HIF_ERROR("%s: dest ring has no mem",
					  __func__);
				goto error_no_dma_mem;
			}

			status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
					       dest_ring, attr);
			if (status < 0)
				goto error_target_access;

			ce_ring_test_initial_indexes(CE_id, dest_ring,
						     "dest_ring");

			/* For srng based target, init status ring here */
			if (ce_srng_based(CE_state->scn)) {
				CE_state->status_ring =
					ce_alloc_ring_state(CE_state,
							    CE_RING_STATUS,
							    nentries);
				if (!CE_state->status_ring) {
					/* Allocation failed. Cleanup. */
					qdf_mem_free(CE_state->dest_ring);
					if (malloc_src_ring) {
						qdf_mem_free
							(CE_state->src_ring);
						CE_state->src_ring = NULL;
						malloc_src_ring = false;
					}
					if (malloc_CE_state) {
						/* allocated CE_state locally */
						scn->ce_id_to_state[CE_id] =
							NULL;
						qdf_mem_free(CE_state);
						malloc_CE_state = false;
					}

					return NULL;
				}

				status = ce_ring_setup(scn, CE_RING_STATUS,
						       CE_id,
						       CE_state->status_ring,
						       attr);
				if (status < 0)
					goto error_target_access;
1575
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001576 }
Houston Hoffman31b25ec2016-09-19 13:12:30 -07001577
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001578 /* epping */
1579 /* poll timer */
Nandha Kishore Easwaran7cdaae22018-07-30 15:02:51 +05301580 if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301581 qdf_timer_init(scn->qdf_dev,
Nandha Kishore Easwaran7cdaae22018-07-30 15:02:51 +05301582 &CE_state->poll_timer,
1583 ce_poll_timeout,
1584 CE_state,
1585 QDF_TIMER_TYPE_WAKE_APPS);
1586 ce_enable_polling(CE_state);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301587 qdf_timer_mod(&CE_state->poll_timer,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001588 CE_POLL_TIMEOUT);
1589 }
1590 }
1591 }
1592
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301593 if (!ce_srng_based(scn)) {
1594 /* Enable CE error interrupts */
1595 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1596 goto error_target_access;
1597 CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
1598 if (Q_TARGET_ACCESS_END(scn) < 0)
1599 goto error_target_access;
1600 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001601
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08001602 qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
1603 ce_oom_recovery, CE_state);
1604
Houston Hoffmanc7d54292016-04-13 18:55:37 -07001605 /* update the htt_data attribute */
1606 ce_mark_datapath(CE_state);
Houston Hoffmanb01db182017-03-13 14:38:09 -07001607 scn->ce_id_to_state[CE_id] = CE_state;
Houston Hoffmanc7d54292016-04-13 18:55:37 -07001608
c_cgodavfda96ad2017-09-07 16:16:00 +05301609 alloc_mem_ce_debug_history(scn, CE_id);
1610
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001611 return (struct CE_handle *)CE_state;
1612
Houston Hoffman4411ad42016-03-14 21:12:04 -07001613error_target_access:
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001614error_no_dma_mem:
1615 ce_fini((struct CE_handle *)CE_state);
1616 return NULL;
1617}
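
/*
 * The shadow ring base above is aligned with the usual power-of-two mask
 * trick: addr_aligned = (addr + ALIGN - 1) & ~(ALIGN - 1). A minimal,
 * self-contained sketch (illustrative values, not driver code; assumes an
 * alignment of 8):
 *
 *	uintptr_t unaligned = 0x1003;
 *	uintptr_t aligned = (unaligned + 8 - 1) & ~(uintptr_t)(8 - 1);
 *	// aligned == 0x1008: at most ALIGN - 1 bytes are skipped, which
 *	// is why ALIGN - 1 extra bytes are allocated for the ring above.
 */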

/**
 * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs
 * @hif_ctx: HIF Context
 *
 * API to check if polling is enabled on all CEs that have a destination
 * ring; CEs without a destination ring are ignored. Returns true only when
 * polling is enabled on every such CE.
 *
 * Return: bool
 */
bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_attr *attr;
	int id;

	for (id = 0; id < scn->ce_count; id++) {
		attr = &hif_state->host_ce_config[id];
		if (attr && (attr->dest_nentries) &&
		    !(attr->flags & CE_ATTR_ENABLE_POLL))
			return false;
	}
	return true;
}
qdf_export_symbol(hif_is_polled_mode_enabled);
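
/*
 * Illustrative usage (a sketch, not driver code): a caller that must not
 * rely on CE interrupts could gate its logic on the query above. The two
 * helpers named here are hypothetical.
 *
 *	if (hif_is_polled_mode_enabled(hif_ctx))
 *		schedule_periodic_ce_service();	// hypothetical helper
 *	else
 *		wait_for_ce_interrupt();	// hypothetical helper
 */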

#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_enable_fastpath() - update that we have enabled fastpath mode
 * @hif_ctx: HIF context
 *
 * For use in data path
 *
 * Return: void
 */
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (ce_srng_based(scn)) {
		HIF_INFO("%s, srng rings do not support fastpath", __func__);
		return;
	}
	HIF_DBG("%s, Enabling fastpath mode", __func__);
	scn->fastpath_mode_on = true;
}

/**
 * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
 * @hif_ctx: HIF Context
 *
 * For use in data path to skip HTC
 *
 * Return: bool
 */
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->fastpath_mode_on;
}

/**
 * hif_get_ce_handle - API to get CE handle for fastpath mode
 * @hif_ctx: HIF Context
 * @id: CopyEngine Id
 *
 * API to return CE handle for fastpath mode
 *
 * Return: opaque CE handle for the given id
 */
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->ce_id_to_state[id];
}
qdf_export_symbol(hif_get_ce_handle);
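
/*
 * Illustrative usage (a sketch under assumed conditions, not driver code):
 * a datapath client that wants to bypass HTC would first enable fastpath,
 * then fetch the CE handles it intends to drive directly. Using CE 4 as
 * the HTT tx CE is an assumption for illustration only.
 *
 *	hif_enable_fastpath(hif_ctx);
 *	if (hif_is_fastpath_mode_enabled(hif_ctx)) {
 *		struct CE_handle *tx_ce = hif_get_ce_handle(hif_ctx, 4);
 *		// ... program tx_ce directly, skipping HTC ...
 *	}
 */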

/**
 * ce_h2t_tx_ce_cleanup() - placeholder function for H2T CE cleanup
 * @ce_hdl: Copy engine handle
 *
 * No processing is required inside this function; using an assert, it
 * only makes sure that the TX CE has been processed completely.
 *
 * This is called while dismantling CE structures. No other thread
 * should be using these structures while dismantling is occurring,
 * therefore no locking is needed.
 *
 * Return: none
 */
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct hif_softc *sc = ce_state->scn;
	uint32_t sw_index, write_index;

	if (hif_is_nss_wifi_enabled(sc))
		return;

	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
		HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
			__func__, __LINE__);
		sw_index = src_ring->sw_index;
		write_index = src_ring->write_index;

		/* At this point Tx CE should be clean */
		qdf_assert_always(sw_index == write_index);
	}
}

/**
 * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
 * @ce_hdl: Handle to CE
 *
 * These buffers are never allocated on the fly, but
 * are allocated only once during HIF start and freed
 * only once during HIF stop.
 * NOTE:
 * The assumption here is there is no in-flight DMA in progress
 * currently, so that buffers can be freed up safely.
 *
 * Return: NONE
 */
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *dst_ring = ce_state->dest_ring;
	qdf_nbuf_t nbuf;
	int i;

	if (ce_state->scn->fastpath_mode_on == false)
		return;

	if (!ce_state->htt_rx_data)
		return;

	/*
	 * When fastpath_mode is on, datapath CEs are completely full:
	 * unlike other CEs they do not leave one blank space to
	 * distinguish between an empty queue and a full queue. So free
	 * all the entries.
	 */
	for (i = 0; i < dst_ring->nentries; i++) {
		nbuf = dst_ring->per_transfer_context[i];

		/*
		 * The reasons for doing this check are:
		 * 1) Protect against calling cleanup before allocating buffers
		 * 2) In a corner case, fastpath_mode_on may be set, but we
		 *    could have a partially filled ring, because of a memory
		 *    allocation failure in the middle of allocating the ring.
		 *    This check accounts for that case; checking the
		 *    fastpath_mode_on flag or started flag would not have
		 *    covered it. This is not in the performance path,
		 *    so it is OK to do this.
		 */
		if (nbuf) {
			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
					      QDF_DMA_FROM_DEVICE);
			qdf_nbuf_free(nbuf);
		}
	}
}
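
/*
 * A worked illustration of the full-ring point above (a sketch, not
 * driver code): a ring that reserves one blank slot with nentries = 4
 * can hold at most 3 buffers, and write_index == sw_index always means
 * "empty". A fastpath datapath CE instead posts all 4 entries, so
 * write_index == sw_index is ambiguous there, and the loop above must
 * walk every slot rather than walking from sw_index to write_index.
 */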

/**
 * hif_update_fastpath_recv_bufs_cnt() - Increment the Rx buf count
 * @scn: HIF handle
 *
 * Increments recv_bufs_needed by one for each fastpath rx Copy Engine.
 * Datapath Rx CEs are a special case, where we reuse all the message
 * buffers. Hence we have to post all the entries in the pipe even at the
 * beginning, unlike other CE pipes where one less than dest_nentries are
 * filled at the beginning.
 *
 * Return: None
 */
static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
	int pipe_num;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (scn->fastpath_mode_on == false)
		return;

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info =
			&hif_state->pipe_info[pipe_num];
		struct CE_state *ce_state =
			scn->ce_id_to_state[pipe_info->pipe_num];

		if (ce_state->htt_rx_data)
			atomic_inc(&pipe_info->recv_bufs_needed);
	}
}
#else
static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
}

static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return false;
}
#endif /* WLAN_FEATURE_FASTPATH */

void ce_fini(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int CE_id = CE_state->id;
	struct hif_softc *scn = CE_state->scn;
	uint32_t desc_size;
	bool inited = CE_state->timer_inited;

	CE_state->state = CE_UNUSED;
	scn->ce_id_to_state[CE_id] = NULL;
	/* Set the flag to false first to stop processing in ce_poll_timeout */
	ce_disable_polling(CE_state);

	qdf_lro_deinit(CE_state->lro_data);

	if (CE_state->src_ring) {
		/* Cleanup the datapath Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		desc_size = ce_get_desc_size(scn, CE_RING_SRC);
		if (CE_state->src_ring->shadow_base_unaligned)
			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
		if (CE_state->src_ring->base_addr_owner_space_unaligned)
			ce_free_desc_ring(scn, CE_state->id,
					  CE_state->src_ring,
					  desc_size);
		qdf_mem_free(CE_state->src_ring);
	}
	if (CE_state->dest_ring) {
		/* Cleanup the datapath Rx ring */
		ce_t2h_msg_ce_cleanup(copyeng);

		desc_size = ce_get_desc_size(scn, CE_RING_DEST);
		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
			ce_free_desc_ring(scn, CE_state->id,
					  CE_state->dest_ring,
					  desc_size);
		qdf_mem_free(CE_state->dest_ring);

		/* epping */
		if (inited)
			qdf_timer_free(&CE_state->poll_timer);
	}
	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
		/* Cleanup the datapath Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		if (CE_state->status_ring->shadow_base_unaligned)
			qdf_mem_free(
				CE_state->status_ring->shadow_base_unaligned);

		desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
		if (CE_state->status_ring->base_addr_owner_space_unaligned)
			ce_free_desc_ring(scn, CE_state->id,
					  CE_state->status_ring,
					  desc_size);
		qdf_mem_free(CE_state->status_ring);
	}

	free_mem_ce_debug_history(scn, CE_id);
	reset_ce_debug_history(scn);
	ce_deinit_ce_desc_event_log(scn, CE_id);

	qdf_spinlock_destroy(&CE_state->ce_index_lock);
	qdf_mem_free(CE_state);
}

void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	qdf_mem_zero(&hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
	qdf_mem_zero(&hif_state->msg_callbacks_current,
		     sizeof(hif_state->msg_callbacks_current));
}

/* Send the first nbytes bytes of the buffer */
QDF_STATUS
hif_send_head(struct hif_opaque_softc *hif_ctx,
	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
	      qdf_nbuf_t nbuf, unsigned int data_attr)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	int bytes = nbytes, nfrags = 0;
	struct ce_sendlist sendlist;
	int status, i = 0;
	unsigned int mux_id = 0;

	if (nbytes > qdf_nbuf_len(nbuf)) {
		HIF_ERROR("%s: nbytes:%d nbuf_len:%d", __func__, nbytes,
			  (uint32_t)qdf_nbuf_len(nbuf));
		QDF_ASSERT(0);
	}

	transfer_id =
		(mux_id & MUX_ID_MASK) |
		(transfer_id & TRANSACTION_ID_MASK);
	data_attr &= DESC_DATA_FLAG_MASK;
	/*
	 * The common case involves sending multiple fragments within a
	 * single download (the tx descriptor and the tx frame header).
	 * So, optimize for the case of multiple fragments by not even
	 * checking whether it's necessary to use a sendlist.
	 * The overhead of using a sendlist for a single buffer download
	 * is not a big deal, since it happens rarely (for WMI messages).
	 */
	ce_sendlist_init(&sendlist);
	do {
		qdf_dma_addr_t frag_paddr;
		int frag_bytes;

		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
		/*
		 * Clear the packet offset for all but the first CE desc.
		 */
		if (i++ > 0)
			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;

		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
					     frag_bytes >
					     bytes ? bytes : frag_bytes,
					     qdf_nbuf_get_frag_is_wordstream
					     (nbuf,
					      nfrags) ? 0 :
					     CE_SEND_FLAG_SWAP_DISABLE,
					     data_attr);
		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("%s: error, frag_num %d larger than limit",
				  __func__, nfrags);
			return status;
		}
		bytes -= frag_bytes;
		nfrags++;
	} while (bytes > 0);

	/* Make sure we have resources to handle this request */
	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	if (pipe_info->num_sends_allowed < nfrags) {
		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
		return QDF_STATUS_E_RESOURCES;
	}
	pipe_info->num_sends_allowed -= nfrags;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);

	if (qdf_unlikely(!ce_hdl)) {
		HIF_ERROR("%s: error CE handle is null", __func__);
		return A_ERROR;
	}

	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
		QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
		sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
	QDF_ASSERT(status == QDF_STATUS_SUCCESS);

	return status;
}
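
/*
 * Illustrative caller (a sketch under assumptions, not driver code): an
 * upper layer would typically DMA-map an nbuf and hand it to
 * hif_send_head() on a host-to-target pipe. Pipe 1 and transfer_id below
 * are placeholders, not values mandated by this driver.
 *
 *	QDF_STATUS ret;
 *
 *	ret = qdf_nbuf_map_single(scn->qdf_dev, nbuf, QDF_DMA_TO_DEVICE);
 *	if (ret == QDF_STATUS_SUCCESS)
 *		ret = hif_send_head(hif_ctx, 1, transfer_id,
 *				    qdf_nbuf_len(nbuf), nbuf, 0);
 *	// on QDF_STATUS_E_RESOURCES the caller should retry after
 *	// completions drain; see hif_send_complete_check() below
 */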

void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
			     int force)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance. If there seem to be plenty of
		 * resources left, then just wait, since checking involves
		 * reading a CE register, which is a relatively expensive
		 * operation.
		 */
		resources = hif_get_free_queue_number(hif_ctx, pipe);
		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
				 1))
			return;
	}
#if ATH_11AC_TXCOMPACT
	ce_per_engine_servicereap(scn, pipe);
#else
	ce_per_engine_service(scn, pipe);
#endif
}

uint16_t
hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	uint16_t rv;

	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	rv = pipe_info->num_sends_allowed;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
	return rv;
}
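
/*
 * Illustrative flow control (a sketch, not driver code): a sender can
 * combine the two helpers above to reap completions only when the pipe is
 * running low, then re-check the budget. A real caller would pace this
 * loop rather than busy-wait.
 *
 *	while (hif_get_free_queue_number(hif_ctx, pipe) < needed_slots) {
 *		// force = 0: the CE register is only read when
 *		// resources are actually scarce
 *		hif_send_complete_check(hif_ctx, pipe, 0);
 *	}
 */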

/* Called by lower (CE) layer when a send to Target completes. */
static void
hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int sw_index, unsigned int hw_index,
		     unsigned int toeplitz_hash_result)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	unsigned int sw_idx = sw_index, hw_idx = hw_index;
	struct hif_msg_callbacks *msg_callbacks =
		&pipe_info->pipe_callbacks;

	do {
		/*
		 * The upper layer callback will be triggered
		 * when the last fragment is completed.
		 */
		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
			if (scn->target_status == TARGET_STATUS_RESET) {
				qdf_nbuf_unmap_single(scn->qdf_dev,
						      transfer_context,
						      QDF_DMA_TO_DEVICE);
				qdf_nbuf_free(transfer_context);
			} else
				msg_callbacks->txCompletionHandler(
					msg_callbacks->Context,
					transfer_context, transfer_id,
					toeplitz_hash_result);
		}

		qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
		pipe_info->num_sends_allowed++;
		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
	} while (ce_completed_send_next(copyeng,
			&ce_context, &transfer_context,
			&CE_data, &nbytes, &transfer_id,
			&sw_idx, &hw_idx,
			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
}

/**
 * hif_ce_do_recv(): send message from copy engine to upper layers
 * @msg_callbacks: structure containing callback and callback context
 * @netbuf: skb containing message
 * @nbytes: number of bytes in the message
 * @pipe_info: used for the pipe_number info
 *
 * Checks the packet length, configures the length in the netbuf,
 * and calls the upper layer callback.
 *
 * return: None
 */
static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
				  qdf_nbuf_t netbuf, int nbytes,
				  struct HIF_CE_pipe_info *pipe_info) {
	if (nbytes <= pipe_info->buf_sz) {
		qdf_nbuf_set_pktlen(netbuf, nbytes);
		msg_callbacks->
			rxCompletionHandler(msg_callbacks->Context,
					    netbuf, pipe_info->pipe_num);
	} else {
		HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d",
			  __func__, netbuf, nbytes);

		qdf_nbuf_free(netbuf);
	}
}

/* Called by lower (CE) layer when data is received from the Target. */
static void
hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int flags)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct CE_state *ce_state = (struct CE_state *) copyeng;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
#ifdef HIF_PCI
	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
#endif
	struct hif_msg_callbacks *msg_callbacks =
		&pipe_info->pipe_callbacks;

	do {
#ifdef HIF_PCI
		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
#endif
		qdf_nbuf_unmap_single(scn->qdf_dev,
				      (qdf_nbuf_t) transfer_context,
				      QDF_DMA_FROM_DEVICE);

		atomic_inc(&pipe_info->recv_bufs_needed);
		hif_post_recv_buffers_for_pipe(pipe_info);
		if (scn->target_status == TARGET_STATUS_RESET)
			qdf_nbuf_free(transfer_context);
		else
			hif_ce_do_recv(msg_callbacks, transfer_context,
				       nbytes, pipe_info);

		/* Set up force_break flag if num of receives reaches
		 * MAX_NUM_OF_RECEIVES
		 */
		ce_state->receive_count++;
		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
			ce_state->force_break = 1;
			break;
		}
	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
					&CE_data, &nbytes, &transfer_id,
					&flags) == QDF_STATUS_SUCCESS);
}

/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */

void
hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
	      struct hif_msg_callbacks *callbacks)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	spin_lock_init(&pcie_access_log_lock);
#endif
	/* Save callbacks for later installation */
	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
		     sizeof(hif_state->msg_callbacks_pending));
}

static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
{
	struct CE_handle *ce_diag = hif_state->ce_diag;
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	struct hif_msg_callbacks *hif_msg_callbacks =
		&hif_state->msg_callbacks_current;

	/* daemonize("hif_compl_thread"); */

	if (scn->ce_count == 0) {
		HIF_ERROR("%s: Invalid ce_count", __func__);
		return -EINVAL;
	}

	if (!hif_msg_callbacks ||
	    !hif_msg_callbacks->rxCompletionHandler ||
	    !hif_msg_callbacks->txCompletionHandler) {
		HIF_ERROR("%s: no completion handler registered", __func__);
		return -EFAULT;
	}

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr attr;
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl == ce_diag)
			continue;	/* Handle Diagnostic CE specially */
		attr = hif_state->host_ce_config[pipe_num];
		if (attr.src_nentries) {
			/* pipe used to send to target */
			HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK",
				__func__, pipe_num, pipe_info);
			ce_send_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_send_done, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
			pipe_info->num_sends_allowed = attr.src_nentries - 1;
		}
		if (attr.dest_nentries) {
			/* pipe used to receive from target */
			ce_recv_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_recv_data, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
		}

		if (attr.src_nentries)
			qdf_spinlock_create(&pipe_info->completion_freeq_lock);

		qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
			     sizeof(pipe_info->pipe_callbacks));
	}

	A_TARGET_ACCESS_UNLIKELY(scn);
	return 0;
}

/*
 * Install pending msg callbacks.
 *
 * TBDXXX: This hack is needed because upper layers install msg callbacks
 * for use with HTC before BMI is done; yet this HIF implementation
 * needs to continue to use BMI msg callbacks. Really, upper layers
 * should not register HTC callbacks until AFTER BMI phase.
 */
static void hif_msg_callbacks_install(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	qdf_mem_copy(&hif_state->msg_callbacks_current,
		     &hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
}

void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
			  uint8_t *DLPipe)
{
	int ul_is_polled, dl_is_polled;

	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
}

/**
 * hif_dump_pipe_debug_count() - Log error count
 * @scn: hif_softc pointer.
 *
 * Output the pipe error counts of each pipe to log file
 *
 * Return: N/A
 */
void hif_dump_pipe_debug_count(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num;

	if (!hif_state) {
		HIF_ERROR("%s hif_state is NULL", __func__);
		return;
	}
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];

		if (pipe_info->nbuf_alloc_err_count > 0 ||
		    pipe_info->nbuf_dma_err_count > 0 ||
		    pipe_info->nbuf_ce_enqueue_err_count)
			HIF_ERROR(
				"%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_alloc_err_count,
				pipe_info->nbuf_dma_err_count,
				pipe_info->nbuf_ce_enqueue_err_count);
	}
}

static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
					  void *nbuf, uint32_t *error_cnt,
					  enum hif_ce_event_type failure_type,
					  const char *failure_type_string)
{
	int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
	struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
	int ce_id = CE_state->id;
	uint32_t error_cnt_tmp;

	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
	error_cnt_tmp = ++(*error_cnt);
	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
	HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
		__func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
		failure_type_string);
	hif_record_ce_desc_event(scn, ce_id, failure_type,
				 NULL, nbuf, bufs_needed_tmp, 0);
	/* if we fail to allocate the last buffer for an rx pipe,
	 * there is no trigger to refill the ce and we will
	 * eventually crash
	 */
	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
		qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
}

QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	qdf_size_t buf_sz;
	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
	QDF_STATUS status;
	uint32_t bufs_posted = 0;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return QDF_STATUS_SUCCESS;
	}

	ce_hdl = pipe_info->ce_hdl;

	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
		qdf_dma_addr_t CE_data;	/* CE space buffer address */
		qdf_nbuf_t nbuf;

		atomic_dec(&pipe_info->recv_bufs_needed);
		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
		if (!nbuf) {
			hif_post_recv_buffers_failure(pipe_info, nbuf,
					&pipe_info->nbuf_alloc_err_count,
					HIF_RX_NBUF_ALLOC_FAILURE,
					"HIF_RX_NBUF_ALLOC_FAILURE");
			return QDF_STATUS_E_NOMEM;
		}

		/*
		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
		 * CE_data = dma_map_single(dev, data, buf_sz, );
		 * DMA_FROM_DEVICE);
		 */
		status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
					     QDF_DMA_FROM_DEVICE);

		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
			hif_post_recv_buffers_failure(pipe_info, nbuf,
					&pipe_info->nbuf_dma_err_count,
					HIF_RX_NBUF_MAP_FAILURE,
					"HIF_RX_NBUF_MAP_FAILURE");
			qdf_nbuf_free(nbuf);
			return status;
		}

		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);

		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
						   buf_sz, DMA_FROM_DEVICE);
		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
			hif_post_recv_buffers_failure(pipe_info, nbuf,
					&pipe_info->nbuf_ce_enqueue_err_count,
					HIF_RX_NBUF_ENQUEUE_FAILURE,
					"HIF_RX_NBUF_ENQUEUE_FAILURE");

			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
					      QDF_DMA_FROM_DEVICE);
			qdf_nbuf_free(nbuf);
			return status;
		}

		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
		bufs_posted++;
	}
	pipe_info->nbuf_alloc_err_count =
		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
	pipe_info->nbuf_dma_err_count =
		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
	pipe_info->nbuf_ce_enqueue_err_count =
		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
		pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;

	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

	return QDF_STATUS_SUCCESS;
}

/*
 * Try to post all desired receive buffers for all pipes.
 * Returns QDF_STATUS_SUCCESS for non-fastpath rx copy engines even on
 * failure, since oom_allocation_work will be scheduled to recover; returns
 * an error status if receive buffers for a fastpath rx copy engine could
 * not be completely replenished.
 */
QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num;
	struct CE_state *ce_state = NULL;
	QDF_STATUS qdf_status;

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		ce_state = scn->ce_id_to_state[pipe_num];
		pipe_info = &hif_state->pipe_info[pipe_num];

		if (hif_is_nss_wifi_enabled(scn) &&
		    ce_state && (ce_state->htt_rx_data))
			continue;

		qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
		if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state &&
		    ce_state->htt_rx_data &&
		    scn->fastpath_mode_on) {
			A_TARGET_ACCESS_UNLIKELY(scn);
			return qdf_status;
		}
	}

	A_TARGET_ACCESS_UNLIKELY(scn);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	hif_update_fastpath_recv_bufs_cnt(scn);

	hif_msg_callbacks_install(scn);

	if (hif_completion_thread_startup(hif_state))
		return QDF_STATUS_E_FAILURE;

	/* enable buffer cleanup */
	hif_state->started = true;

	/* Post buffers once to start things off. */
	qdf_status = hif_post_recv_buffers(scn);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
		/* cleanup is done in hif_ce_disable */
		HIF_ERROR("%s:failed to post buffers", __func__);
		return qdf_status;
	}

	return qdf_status;
}

static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct hif_softc *scn;
	struct CE_handle *ce_hdl;
	uint32_t buf_sz;
	struct HIF_CE_state *hif_state;
	qdf_nbuf_t netbuf;
	qdf_dma_addr_t CE_data;
	void *per_CE_context;

	buf_sz = pipe_info->buf_sz;
	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started)
		return;

	scn = HIF_GET_SOFTC(hif_state);
	ce_hdl = pipe_info->ce_hdl;

	if (!scn->qdf_dev)
		return;
	while (ce_revoke_recv_next
		       (ce_hdl, &per_CE_context, (void **)&netbuf,
			&CE_data) == QDF_STATUS_SUCCESS) {
		if (netbuf) {
			qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
					      QDF_DMA_FROM_DEVICE);
			qdf_nbuf_free(netbuf);
		}
	}
}

static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	struct HIF_CE_state *hif_state;
	struct hif_softc *scn;
	qdf_nbuf_t netbuf;
	void *per_CE_context;
	qdf_dma_addr_t CE_data;
	unsigned int nbytes;
	unsigned int id;
	uint32_t buf_sz;
	uint32_t toeplitz_hash_result;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started)
		return;

	scn = HIF_GET_SOFTC(hif_state);

	ce_hdl = pipe_info->ce_hdl;

	while (ce_cancel_send_next
		       (ce_hdl, &per_CE_context,
			(void **)&netbuf, &CE_data, &nbytes,
			&id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
			/*
			 * Packets enqueued by htt_h2t_ver_req_msg() and
			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
			 * freed in htt_htc_misc_pkt_pool_free() in
			 * wlantl_close(), so do not free them here again
			 * by checking whether it's the endpoint
			 * which they are queued in.
			 */
			if (id == scn->htc_htt_tx_endpoint)
				return;
			/* Indicate the completion to higher
			 * layer to free the buffer
			 */
			if (pipe_info->pipe_callbacks.txCompletionHandler)
				pipe_info->pipe_callbacks.
				txCompletionHandler(pipe_info->
					pipe_callbacks.Context,
					netbuf, id, toeplitz_hash_result);
		}
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
{
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	struct CE_state *ce_state;

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		ce_state = scn->ce_id_to_state[pipe_num];
		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
		    ((ce_state->htt_tx_data) ||
		     (ce_state->htt_rx_data))) {
			continue;
		}

		pipe_info = &hif_state->pipe_info[pipe_num];
		hif_recv_buffer_cleanup_on_pipe(pipe_info);
		hif_send_buffer_cleanup_on_pipe(pipe_info);
	}
}

void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_buffer_cleanup(hif_state);
}

static void hif_destroy_oom_work(struct hif_softc *scn)
{
	struct CE_state *ce_state;
	int ce_id;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		ce_state = scn->ce_id_to_state[ce_id];
		if (ce_state)
			qdf_destroy_work(scn->qdf_dev,
					 &ce_state->oom_allocation_work);
	}
}

void hif_ce_stop(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num;

	/*
	 * before cleaning up any memory, ensure irq &
	 * bottom half contexts will not be re-entered
	 */
	hif_disable_isr(&scn->osc);
	hif_destroy_oom_work(scn);
	scn->hif_init_done = false;

	/*
	 * At this point, asynchronous threads are stopped,
	 * The Target should not DMA nor interrupt, Host code may
	 * not initiate anything more. So we just need to clean
	 * up Host-side state.
	 */

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	hif_buffer_cleanup(hif_state);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;
		struct CE_attr attr;
		struct CE_handle *ce_diag = hif_state->ce_diag;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			if (pipe_info->ce_hdl != ce_diag) {
				attr = hif_state->host_ce_config[pipe_num];
				if (attr.src_nentries)
					qdf_spinlock_destroy(&pipe_info->
							completion_freeq_lock);
			}
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
		}
	}

	if (hif_state->sleep_timer_init) {
		qdf_timer_stop(&hif_state->sleep_timer);
		qdf_timer_free(&hif_state->sleep_timer);
		hif_state->sleep_timer_init = false;
	}

	hif_state->started = false;
}

static void hif_get_shadow_reg_cfg(struct hif_softc *scn,
				   struct shadow_reg_cfg
				   **target_shadow_reg_cfg_ret,
2676 uint32_t *shadow_cfg_sz_ret)
2677{
Nirav Shah3e6e04b2018-07-20 12:00:34 +05302678 if (target_shadow_reg_cfg_ret)
2679 *target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
2680 if (shadow_cfg_sz_ret)
2681 *shadow_cfg_sz_ret = shadow_cfg_sz;
Nirav Shah4c8b78a2018-06-12 11:49:35 +05302682}
Houston Hoffman748e1a62017-03-30 17:20:42 -07002683
Houston Hoffman854e67f2016-03-14 21:11:39 -07002684/**
2685 * hif_get_target_ce_config() - get copy engine configuration
2686 * @scn: HIF context
 * @target_ce_config_ret: basic copy engine configuration
2687 * @target_ce_config_sz_ret: size of the basic configuration in bytes
2688 * @target_service_to_ce_map_ret: service mapping for the copy engines
2689 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
2690 * @target_shadow_reg_cfg_ret: shadow register configuration
2691 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
2692 *
2693 * Provides access to these values outside of this file.
2694 * Currently these are stored in static pointers to const sections;
2695 * there are multiple configurations, selected at compile time.
2696 * Runtime selection would need to consider mode, target type and bus type.
2697 *
2698 * Return: return by parameter.
2699 */
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302700void hif_get_target_ce_config(struct hif_softc *scn,
2701 struct CE_pipe_config **target_ce_config_ret,
Houston Hoffman748e1a62017-03-30 17:20:42 -07002702 uint32_t *target_ce_config_sz_ret,
Houston Hoffman854e67f2016-03-14 21:11:39 -07002703 struct service_to_pipe **target_service_to_ce_map_ret,
Houston Hoffman748e1a62017-03-30 17:20:42 -07002704 uint32_t *target_service_to_ce_map_sz_ret,
Houston Hoffman854e67f2016-03-14 21:11:39 -07002705 struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
Houston Hoffman748e1a62017-03-30 17:20:42 -07002706 uint32_t *shadow_cfg_sz_ret)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002707{
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302708 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2709
2710 *target_ce_config_ret = hif_state->target_ce_config;
2711 *target_ce_config_sz_ret = hif_state->target_ce_config_sz;
Houston Hoffman748e1a62017-03-30 17:20:42 -07002712
2713 hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
2714 target_service_to_ce_map_sz_ret);
Nirav Shah4c8b78a2018-06-12 11:49:35 +05302715 hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret,
2716 shadow_cfg_sz_ret);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002717}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002718
Houston Hoffmanf60a3482017-01-31 10:45:07 -08002719#ifdef CONFIG_SHADOW_V2
Houston Hoffman403c2df2017-01-27 12:51:15 -08002720static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
Houston Hoffman5141f9d2017-01-05 10:49:17 -08002721{
2722 int i;
2723 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathish648ce112018-07-02 16:41:39 +05302724 "%s: num_config %d", __func__, cfg->num_shadow_reg_v2_cfg);
Houston Hoffman5141f9d2017-01-05 10:49:17 -08002725
2726 for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
2727 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
Aditya Sathish648ce112018-07-02 16:41:39 +05302728 "%s: i %d, val %x", __func__, i,
Houston Hoffman5141f9d2017-01-05 10:49:17 -08002729 cfg->shadow_reg_v2_cfg[i].addr);
2730 }
2731}
2732
Houston Hoffmanf60a3482017-01-31 10:45:07 -08002733#else
2734static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2735{
2736 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathish648ce112018-07-02 16:41:39 +05302737 "%s: CONFIG_SHADOW_V2 not defined", __func__);
Houston Hoffmanf60a3482017-01-31 10:45:07 -08002738}
2739#endif
2740
Nirav Shahbc8daa42018-07-09 16:27:42 +05302741#ifdef ADRASTEA_RRI_ON_DDR
2742/**
2743 * hif_get_src_ring_read_index(): Called to get the SRRI
2744 *
2745 * @scn: hif_softc pointer
2746 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2747 *
2748 * This function returns the SRRI to the caller. For CEs that
2749 * don't have interrupts enabled, we look at the DDR-based SRRI
2750 *
2751 * Return: SRRI
2752 */
2753inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
2754 uint32_t CE_ctrl_addr)
2755{
2756 struct CE_attr attr;
2757 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2758
2759 attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2760 if (attr.flags & CE_ATTR_DISABLE_INTR) {
2761 return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2762 } else {
2763 if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
2764 return A_TARGET_READ(scn,
2765 (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
2766 else
2767 return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
2768 CE_ctrl_addr);
2769 }
2770}
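
/*
 * Illustrative sketch (not part of the driver): a caller can combine
 * the SRRI with a ring's write index to estimate how many source
 * descriptors are still in flight. The masked subtraction assumes the
 * power-of-two ring sizing used elsewhere in this file; src_ring is a
 * hypothetical struct CE_ring_state pointer.
 *
 *	unsigned int srri = hif_get_src_ring_read_index(scn, ctrl_addr);
 *	unsigned int in_flight = (src_ring->write_index - srri) &
 *				 src_ring->nentries_mask;
 */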
2771
2772/**
2773 * hif_get_dst_ring_read_index(): Called to get the DRRI
2774 *
2775 * @scn: hif_softc pointer
2776 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2777 *
2778 * This function returns the DRRI to the caller. For CEs that
2779 * don't have interrupts enabled, we look at the DDR-based DRRI
2780 *
2781 * Return: DRRI
2782 */
2783inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
2784 uint32_t CE_ctrl_addr)
2785{
2786 struct CE_attr attr;
2787 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2788
2789 attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2790
2791 if (attr.flags & CE_ATTR_DISABLE_INTR) {
2792 return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2793 } else {
2794 if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
2795 return A_TARGET_READ(scn,
2796 (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
2797 else
2798 return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
2799 CE_ctrl_addr);
2800 }
2801}
2802
2803/**
2804 * hif_alloc_rri_on_ddr() - Allocate memory for rri on ddr
2805 * @scn: hif_softc pointer
2806 *
2807 * Return: qdf status
2808 */
2809static inline QDF_STATUS hif_alloc_rri_on_ddr(struct hif_softc *scn)
2810{
2811 qdf_dma_addr_t paddr_rri_on_ddr = 0;
2812
2813 scn->vaddr_rri_on_ddr =
2814 (uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
2815 scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
2816 &paddr_rri_on_ddr);
2817
2818 if (!scn->vaddr_rri_on_ddr) {
2819		hif_err("dma-able page alloc failed");
2820 return QDF_STATUS_E_NOMEM;
2821 }
2822
2823 scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
2824
2825 qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));
2826
2827 return QDF_STATUS_SUCCESS;
2828}
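
/*
 * Layout sketch: one uint32_t is reserved per copy engine above, so CE
 * "ce_id" owns scn->vaddr_rri_on_ddr[ce_id]. Judging by the
 * SRRI_FROM_DDR_ADDR()/DRRI_FROM_DDR_ADDR() accessors used later in
 * this file, the source and destination read indices are assumed to be
 * packed into the low and high halves of that word:
 *
 *	uint32_t rri = scn->vaddr_rri_on_ddr[ce_id];
 *	uint16_t srri = rri & 0xffff;          (assumed low half)
 *	uint16_t drri = (rri >> 16) & 0xffff;  (assumed high half)
 */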
2829#endif
2830
2831#if (!defined(QCN7605_SUPPORT)) && defined(ADRASTEA_RRI_ON_DDR)
2832/**
2833 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2834 *
2835 * @scn: hif_softc pointer
2836 *
2837 * This function allocates non-cached memory on DDR and sends
2838 * the physical address of this memory to the CE hardware. The
2839 * hardware updates the RRI at this location.
2840 *
2841 * Return: None
2842 */
2843static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
2844{
2845 unsigned int i;
2846 uint32_t high_paddr, low_paddr;
2847
2848 if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
2849 return;
2850
2851 low_paddr = BITS0_TO_31(scn->paddr_rri_on_ddr);
2852 high_paddr = BITS32_TO_35(scn->paddr_rri_on_ddr);
2853
2854 HIF_DBG("%s using srri and drri from DDR", __func__);
2855
2856 WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
2857 WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
2858
2859 for (i = 0; i < CE_COUNT; i++)
2860 CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
2861}
2862#else
2863/**
2864 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2865 *
2866 * @scn: hif_softc pointer
2867 *
2868 * This is a dummy implementation for platforms that don't
2869 * support this functionality.
2870 *
2871 * Return: None
2872 */
2873static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
2874{
2875}
2876#endif
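
/*
 * Worked example of the paddr split used above, assuming BITS0_TO_31()
 * and BITS32_TO_35() extract the low 32 bits and bits 32..35 of a
 * 36-bit physical address, as their names suggest:
 *
 *	qdf_dma_addr_t paddr = 0x987654320ULL;
 *	uint32_t low_paddr  = BITS0_TO_31(paddr);   -> 0x87654320
 *	uint32_t high_paddr = BITS32_TO_35(paddr);  -> 0x9
 */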
2877
2878/**
2879 * hif_update_rri_over_ddr_config() - update rri_over_ddr config for
2880 * QMI command
2881 * @scn: hif context
2882 * @cfg: wlan enable config
2883 *
2884 * In case of Genoa, rri_over_ddr memory configuration is passed
2885 * to the firmware through the QMI configure command.
2886 */
2887#if defined(QCN7605_SUPPORT) && defined(ADRASTEA_RRI_ON_DDR)
2888static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
2889 struct pld_wlan_enable_cfg *cfg)
2890{
2891 if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
2892 return;
2893
2894 cfg->rri_over_ddr_cfg_valid = true;
2895 cfg->rri_over_ddr_cfg.base_addr_low =
2896 BITS0_TO_31(scn->paddr_rri_on_ddr);
2897 cfg->rri_over_ddr_cfg.base_addr_high =
2898 BITS32_TO_35(scn->paddr_rri_on_ddr);
2899}
2900#else
2901static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
2902 struct pld_wlan_enable_cfg *cfg)
2903{
2904}
2905#endif
2906
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002907/**
2908 * hif_wlan_enable(): call the platform driver to enable wlan
Komal Seelambd7c51d2016-02-24 10:27:30 +05302909 * @scn: HIF Context
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002910 *
2911 * This function passes the con_mode and CE configuration to
2912 * platform driver to enable wlan.
2913 *
Houston Hoffman108da402016-03-14 21:11:24 -07002914 * Return: Linux error code
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002915 */
Houston Hoffman108da402016-03-14 21:11:24 -07002916int hif_wlan_enable(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002917{
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002918 struct pld_wlan_enable_cfg cfg;
2919 enum pld_driver_mode mode;
Komal Seelambd7c51d2016-02-24 10:27:30 +05302920 uint32_t con_mode = hif_get_conparam(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002921
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302922 hif_get_target_ce_config(scn,
2923 (struct CE_pipe_config **)&cfg.ce_tgt_cfg,
Houston Hoffman854e67f2016-03-14 21:11:39 -07002924 &cfg.num_ce_tgt_cfg,
2925 (struct service_to_pipe **)&cfg.ce_svc_cfg,
2926 &cfg.num_ce_svc_pipe_cfg,
2927 (struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
2928 &cfg.num_shadow_reg_cfg);
2929
2930 /* translate from structure size to array size */
2931 cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
2932 cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
2933 cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002934
Houston Hoffman5141f9d2017-01-05 10:49:17 -08002935 hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
2936 &cfg.num_shadow_reg_v2_cfg);
2937
2938 hif_print_hal_shadow_register_cfg(&cfg);
2939
Nirav Shahbc8daa42018-07-09 16:27:42 +05302940 hif_update_rri_over_ddr_config(scn, &cfg);
2941
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302942 if (QDF_GLOBAL_FTM_MODE == con_mode)
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002943 mode = PLD_FTM;
Balamurugan Mahalingam1666dd32017-09-14 15:19:42 +05302944 else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
2945 mode = PLD_COLDBOOT_CALIBRATION;
Houston Hoffman75ef5a52016-04-14 17:15:49 -07002946 else if (QDF_IS_EPPING_ENABLED(con_mode))
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002947 mode = PLD_EPPING;
Peng Xu7b962532015-10-02 17:17:03 -07002948 else
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002949 mode = PLD_MISSION;
Peng Xu7b962532015-10-02 17:17:03 -07002950
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07002951 if (BYPASS_QMI)
2952 return 0;
2953 else
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002954 return pld_wlan_enable(scn->qdf_dev->dev, &cfg,
2955 mode, QWLAN_VERSIONSTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002956}
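
/*
 * Note on the size-to-count translation above: hif_get_target_ce_config()
 * reports sizes in bytes, while the pld_wlan_enable() config apparently
 * expects element counts, hence the divisions. Illustrative numbers only:
 *
 *	sizeof(target_ce_config_wlan) == 9 * sizeof(struct CE_pipe_config)
 *	=> cfg.num_ce_tgt_cfg == 9 after the division
 */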
2957
Nirav Shah0d0cce82018-01-17 17:00:31 +05302958#ifdef WLAN_FEATURE_EPPING
2959
Houston Hoffman75ef5a52016-04-14 17:15:49 -07002960#define CE_EPPING_USES_IRQ true
2961
Nirav Shah0d0cce82018-01-17 17:00:31 +05302962void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state)
2963{
2964 if (CE_EPPING_USES_IRQ)
2965 hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
2966 else
2967 hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
2968 hif_state->target_ce_config = target_ce_config_wlan_epping;
2969 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
2970 target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
2971 shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
2972}
2973#endif
2974
Nirav Shah4c8b78a2018-06-12 11:49:35 +05302975#ifdef QCN7605_SUPPORT
2976static inline
2977void hif_set_ce_config_qcn7605(struct hif_softc *scn,
2978 struct HIF_CE_state *hif_state)
2979{
2980 hif_state->host_ce_config = host_ce_config_wlan_qcn7605;
2981 hif_state->target_ce_config = target_ce_config_wlan_qcn7605;
2982 hif_state->target_ce_config_sz =
2983 sizeof(target_ce_config_wlan_qcn7605);
Nirav Shah3e6e04b2018-07-20 12:00:34 +05302984 target_shadow_reg_cfg = target_shadow_reg_cfg_map_qcn7605;
2985 shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map_qcn7605);
Nirav Shah4c8b78a2018-06-12 11:49:35 +05302986 scn->ce_count = QCN7605_CE_COUNT;
2987}
2988#else
2989static inline
2990void hif_set_ce_config_qcn7605(struct hif_softc *scn,
2991 struct HIF_CE_state *hif_state)
2992{
2993 HIF_ERROR("QCN7605 not supported");
2994}
2995#endif
2996
Sathish Kumar86876492018-08-27 13:39:20 +05302997#ifdef CE_SVC_CMN_INIT
2998#ifdef QCA_WIFI_SUPPORT_SRNG
2999static inline void hif_ce_service_init(void)
3000{
3001 ce_service_srng_init();
3002}
3003#else
3004static inline void hif_ce_service_init(void)
3005{
3006 ce_service_legacy_init();
3007}
3008#endif
3009#else
3010static inline void hif_ce_service_init(void)
3011{
3012}
3013#endif
3014
3015
Houston Hoffman108da402016-03-14 21:11:24 -07003016/**
3017 * hif_ce_prepare_config() - load the correct static tables.
3018 * @scn: hif context
3019 *
3020 * Epping uses different static attribute tables than mission mode.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003021 */
Houston Hoffman108da402016-03-14 21:11:24 -07003022void hif_ce_prepare_config(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003023{
Komal Seelambd7c51d2016-02-24 10:27:30 +05303024 uint32_t mode = hif_get_conparam(scn);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003025 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3026 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05303027 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003028
Sathish Kumar86876492018-08-27 13:39:20 +05303029 hif_ce_service_init();
Houston Hoffman10fedfc2017-01-23 15:23:09 -08003030 hif_state->ce_services = ce_services_attach(scn);
3031
Houston Hoffman710af5a2016-11-22 21:59:03 -08003032 scn->ce_count = HOST_CE_COUNT;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003033 /* if epping is enabled we need to use the epping configuration. */
Houston Hoffman75ef5a52016-04-14 17:15:49 -07003034 if (QDF_IS_EPPING_ENABLED(mode)) {
Nirav Shah0d0cce82018-01-17 17:00:31 +05303035 hif_ce_prepare_epping_config(hif_state);
Nirav Shah3e6e04b2018-07-20 12:00:34 +05303036 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003037 }
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003038
3039 switch (tgt_info->target_type) {
3040 default:
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05303041 hif_state->host_ce_config = host_ce_config_wlan;
3042 hif_state->target_ce_config = target_ce_config_wlan;
3043 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003044 break;
Nirav Shah4c8b78a2018-06-12 11:49:35 +05303045 case TARGET_TYPE_QCN7605:
3046 hif_set_ce_config_qcn7605(scn, hif_state);
3047 break;
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003048 case TARGET_TYPE_AR900B:
3049 case TARGET_TYPE_QCA9984:
3050 case TARGET_TYPE_IPQ4019:
3051 case TARGET_TYPE_QCA9888:
Venkateswara Swamy Bandaru5432c1b2016-10-12 19:00:40 +05303052 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
3053 hif_state->host_ce_config =
3054 host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
3055 } else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
3056 hif_state->host_ce_config =
3057 host_lowdesc_ce_cfg_wlan_ar900b;
3058 } else {
3059 hif_state->host_ce_config = host_ce_config_wlan_ar900b;
3060 }
3061
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05303062 hif_state->target_ce_config = target_ce_config_wlan_ar900b;
3063 hif_state->target_ce_config_sz =
3064 sizeof(target_ce_config_wlan_ar900b);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003065
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003066 break;
3067
3068 case TARGET_TYPE_AR9888:
3069 case TARGET_TYPE_AR9888V2:
Venkateswara Swamy Bandaru5432c1b2016-10-12 19:00:40 +05303070 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
3071 hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888;
3072 } else {
3073 hif_state->host_ce_config = host_ce_config_wlan_ar9888;
3074 }
3075
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05303076 hif_state->target_ce_config = target_ce_config_wlan_ar9888;
3077 hif_state->target_ce_config_sz =
3078 sizeof(target_ce_config_wlan_ar9888);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003079
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003080 break;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07003081
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05303082 case TARGET_TYPE_QCA8074:
Venkateswara Swamy Bandarudbacd5e2018-08-07 13:01:50 +05303083 case TARGET_TYPE_QCA8074V2:
Basamma Yakkanahalli5f7cfd42018-11-02 15:52:37 +05303084 case TARGET_TYPE_QCA6018:
Karunakar Dasinenif61cb072016-09-29 11:50:45 -07003085 if (scn->bus_type == QDF_BUS_TYPE_PCI) {
3086 hif_state->host_ce_config =
3087 host_ce_config_wlan_qca8074_pci;
3088 hif_state->target_ce_config =
3089 target_ce_config_wlan_qca8074_pci;
3090 hif_state->target_ce_config_sz =
3091 sizeof(target_ce_config_wlan_qca8074_pci);
3092 } else {
3093 hif_state->host_ce_config = host_ce_config_wlan_qca8074;
3094 hif_state->target_ce_config =
3095 target_ce_config_wlan_qca8074;
3096 hif_state->target_ce_config_sz =
3097 sizeof(target_ce_config_wlan_qca8074);
3098 }
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05303099 break;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07003100 case TARGET_TYPE_QCA6290:
3101 hif_state->host_ce_config = host_ce_config_wlan_qca6290;
3102 hif_state->target_ce_config = target_ce_config_wlan_qca6290;
3103 hif_state->target_ce_config_sz =
3104 sizeof(target_ce_config_wlan_qca6290);
Houston Hoffman748e1a62017-03-30 17:20:42 -07003105
Houston Hoffman710af5a2016-11-22 21:59:03 -08003106 scn->ce_count = QCA_6290_CE_COUNT;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07003107 break;
Venkata Sharath Chandra Manchala79860aa2018-06-12 15:16:36 -07003108 case TARGET_TYPE_QCA6390:
3109 hif_state->host_ce_config = host_ce_config_wlan_qca6390;
3110 hif_state->target_ce_config = target_ce_config_wlan_qca6390;
3111 hif_state->target_ce_config_sz =
3112 sizeof(target_ce_config_wlan_qca6390);
3113
3114 scn->ce_count = QCA_6390_CE_COUNT;
3115 break;
hangtianc572f5f2019-04-10 11:19:59 +08003116 case TARGET_TYPE_ADRASTEA:
3117 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG))
3118 hif_state->host_ce_config =
3119 host_lowdesc_ce_config_wlan_adrastea_nopktlog;
3120 else
3121 hif_state->host_ce_config =
3122 host_ce_config_wlan_adrastea;
3123
3124 hif_state->target_ce_config = target_ce_config_wlan_adrastea;
3125 hif_state->target_ce_config_sz =
3126 sizeof(target_ce_config_wlan_adrastea);
3127 break;
3128
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07003129 }
Yun parkc80eea72017-10-06 15:33:36 -07003130 QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
Houston Hoffman108da402016-03-14 21:11:24 -07003131}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003132
Houston Hoffman108da402016-03-14 21:11:24 -07003133/**
3134 * hif_ce_open() - do ce specific allocations
3135 * @hif_sc: pointer to hif context
3136 *
3137 * return: 0 for success or QDF_STATUS_E_NOMEM
3138 */
3139QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
3140{
3141 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003142
Venkateswara Swamy Bandaru9fd9af02016-09-20 20:27:31 +05303143 qdf_spinlock_create(&hif_state->irq_reg_lock);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303144 qdf_spinlock_create(&hif_state->keep_awake_lock);
Houston Hoffman108da402016-03-14 21:11:24 -07003145 return QDF_STATUS_SUCCESS;
3146}
3147
3148/**
3149 * hif_ce_close() - do ce specific free
3150 * @hif_sc: pointer to hif context
3151 */
3152void hif_ce_close(struct hif_softc *hif_sc)
3153{
Venkateswara Swamy Bandaru9fd9af02016-09-20 20:27:31 +05303154 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3155
3156 qdf_spinlock_destroy(&hif_state->irq_reg_lock);
Poddar, Siddarth725e9f52017-07-19 15:18:28 +05303157 qdf_spinlock_destroy(&hif_state->keep_awake_lock);
Houston Hoffman108da402016-03-14 21:11:24 -07003158}
3159
3160/**
3161 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
3162 * @hif_sc: hif context
3163 *
3164 * uses state variables to support cleaning up when hif_config_ce fails.
3165 */
3166void hif_unconfig_ce(struct hif_softc *hif_sc)
3167{
3168 int pipe_num;
3169 struct HIF_CE_pipe_info *pipe_info;
3170 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
Manjunathappa Prakasha5a30862018-05-21 16:32:32 -07003171 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc);
Houston Hoffman108da402016-03-14 21:11:24 -07003172
3173 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
3174 pipe_info = &hif_state->pipe_info[pipe_num];
3175 if (pipe_info->ce_hdl) {
3176 ce_unregister_irq(hif_state, (1 << pipe_num));
jitiphile393cf42018-07-30 14:14:48 +05303177 }
3178 }
3179 deinit_tasklet_workers(hif_hdl);
3180 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
3181 pipe_info = &hif_state->pipe_info[pipe_num];
3182 if (pipe_info->ce_hdl) {
Houston Hoffman108da402016-03-14 21:11:24 -07003183 ce_fini(pipe_info->ce_hdl);
3184 pipe_info->ce_hdl = NULL;
3185 pipe_info->buf_sz = 0;
Houston Hoffman03f46572016-12-12 12:53:56 -08003186 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
Houston Hoffman108da402016-03-14 21:11:24 -07003187 }
3188 }
Houston Hoffman108da402016-03-14 21:11:24 -07003189 if (hif_sc->athdiag_procfs_inited) {
3190 athdiag_procfs_remove();
3191 hif_sc->athdiag_procfs_inited = false;
3192 }
3193}
3194
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003195#ifdef CONFIG_BYPASS_QMI
Nirav Shah8e930272018-07-10 16:28:21 +05303196#ifdef QCN7605_SUPPORT
3197/**
3198 * hif_post_static_buf_to_target() - post static buffer to WLAN FW
3199 * @scn: pointer to HIF structure
3200 *
3201 * WLAN FW needs 2MB memory from DDR when QMI is disabled.
3202 *
3203 * Return: void
3204 */
3205static void hif_post_static_buf_to_target(struct hif_softc *scn)
3206{
3207 void *target_va;
3208 phys_addr_t target_pa;
3209 struct ce_info *ce_info_ptr;
3210 uint32_t msi_data_start;
3211 uint32_t msi_data_count;
3212 uint32_t msi_irq_start;
3213 uint32_t i = 0;
3214 int ret;
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003215
Nirav Shah8e930272018-07-10 16:28:21 +05303216 target_va = qdf_mem_alloc_consistent(scn->qdf_dev,
3217 scn->qdf_dev->dev,
3218 FW_SHARED_MEM +
3219 sizeof(struct ce_info),
3220 &target_pa);
3221 if (!target_va)
3222 return;
3223
3224 ce_info_ptr = (struct ce_info *)target_va;
3225
3226 if (scn->vaddr_rri_on_ddr) {
3227 ce_info_ptr->rri_over_ddr_low_paddr =
3228 BITS0_TO_31(scn->paddr_rri_on_ddr);
3229 ce_info_ptr->rri_over_ddr_high_paddr =
3230 BITS32_TO_35(scn->paddr_rri_on_ddr);
3231 }
3232
3233 ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3234 &msi_data_count, &msi_data_start,
3235 &msi_irq_start);
3236 if (ret) {
3237 hif_err("Failed to get CE msi config");
3238 return;
3239 }
3240
3241 for (i = 0; i < CE_COUNT_MAX; i++) {
3242 ce_info_ptr->cfg[i].ce_id = i;
3243 ce_info_ptr->cfg[i].msi_vector =
3244 (i % msi_data_count) + msi_irq_start;
3245 }
3246
3247 hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
3248 hif_info("target va %pK target pa %pa", target_va, &target_pa);
3249}
3250#else
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003251/**
3252 * hif_post_static_buf_to_target() - post static buffer to WLAN FW
3253 * @scn: pointer to HIF structure
3254 *
3255 * WLAN FW needs 2MB memory from DDR when QMI is disabled.
3256 *
3257 * Return: void
3258 */
3259static void hif_post_static_buf_to_target(struct hif_softc *scn)
3260{
Hardik Kantilal Patelc5dc5f22016-04-21 14:11:33 -07003261 void *target_va;
3262 phys_addr_t target_pa;
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003263
Hardik Kantilal Patelc5dc5f22016-04-21 14:11:33 -07003264 target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
3265 FW_SHARED_MEM, &target_pa);
Jeff Johnson8d639a02019-03-18 09:51:11 -07003266 if (!target_va) {
Hardik Kantilal Patelc5dc5f22016-04-21 14:11:33 -07003267		HIF_TRACE("Memory allocation failed, could not post target buf");
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003268 return;
3269 }
Nirav Shahf1e3fb52018-06-12 14:39:34 +05303270 hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
Hardik Kantilal Patelc5dc5f22016-04-21 14:11:33 -07003271 HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003272}
Nirav Shah8e930272018-07-10 16:28:21 +05303273#endif
3274
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003275#else
3276static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
3277{
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003278}
3279#endif
3280
Houston Hoffman579c02f2017-08-02 01:57:38 -07003281static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
3282 bool wait_for_it)
3283{
3284 /* todo */
3285 return 0;
3286}
3287
Houston Hoffman108da402016-03-14 21:11:24 -07003288/**
3289 * hif_config_ce() - configure copy engines
3290 * @scn: hif context
3291 *
3292 * Prepares fw, copy engine hardware and host sw according
3293 * to the attributes selected by hif_ce_prepare_config.
3294 *
3295 * also calls athdiag_procfs_init
3296 *
3297 * return: 0 for success nonzero for failure.
3298 */
3299int hif_config_ce(struct hif_softc *scn)
3300{
3301 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3302 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3303 struct HIF_CE_pipe_info *pipe_info;
3304 int pipe_num;
Aditya Sathish61f7fa32018-03-27 17:16:33 +05303305 struct CE_state *ce_state = NULL;
c_cgodavfda96ad2017-09-07 16:16:00 +05303306
Houston Hoffman108da402016-03-14 21:11:24 -07003307#ifdef ADRASTEA_SHADOW_REGISTERS
3308 int i;
3309#endif
3310 QDF_STATUS rv = QDF_STATUS_SUCCESS;
3311
3312 scn->notice_send = true;
Poddar, Siddarth1ea82922017-06-28 14:39:33 +05303313 scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003314
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07003315 hif_post_static_buf_to_target(scn);
3316
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003317 hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
Houston Hoffman108da402016-03-14 21:11:24 -07003318
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003319 hif_config_rri_on_ddr(scn);
3320
Houston Hoffman579c02f2017-08-02 01:57:38 -07003321 if (ce_srng_based(scn))
3322 scn->bus_ops.hif_target_sleep_state_adjust =
3323 &hif_srng_sleep_state_adjust;
3324
c_cgodavfda96ad2017-09-07 16:16:00 +05303325	/* Initialise the CE debug history sysfs interface inputs, ce_id and
3326	 * index, and disable data storing.
3327 */
3328 reset_ce_debug_history(scn);
3329
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003330 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3331 struct CE_attr *attr;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07003332
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003333 pipe_info = &hif_state->pipe_info[pipe_num];
3334 pipe_info->pipe_num = pipe_num;
3335 pipe_info->HIF_CE_state = hif_state;
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05303336 attr = &hif_state->host_ce_config[pipe_num];
Karunakar Dasinenif61cb072016-09-29 11:50:45 -07003337
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003338 pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
Houston Hoffman85925072016-05-06 17:02:18 -07003339 ce_state = scn->ce_id_to_state[pipe_num];
Aditya Sathish61f7fa32018-03-27 17:16:33 +05303340 if (!ce_state) {
3341 A_TARGET_ACCESS_UNLIKELY(scn);
3342 goto err;
3343 }
Houston Hoffman03f46572016-12-12 12:53:56 -08003344 qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
Jeff Johnson8d639a02019-03-18 09:51:11 -07003345 QDF_ASSERT(pipe_info->ce_hdl);
3346 if (!pipe_info->ce_hdl) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303347 rv = QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003348 A_TARGET_ACCESS_UNLIKELY(scn);
3349 goto err;
3350 }
3351
Dhanashri Atre991ee4d2017-05-03 19:03:10 -07003352 ce_state->lro_data = qdf_lro_init();
3353
Kiran Venkatappae17e3b62017-02-10 16:31:49 +05303354 if (attr->flags & CE_ATTR_DIAG) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003355			/* Reserve the last CE for
Manikandan Mohanafd6e882017-04-07 17:46:41 -07003356 * Diagnostic Window support
3357 */
Houston Hoffmanc1d9a412016-03-30 21:07:57 -07003358 hif_state->ce_diag = pipe_info->ce_hdl;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003359 continue;
3360 }
3361
Houston Hoffman85925072016-05-06 17:02:18 -07003362 if (hif_is_nss_wifi_enabled(scn) && ce_state &&
3363 (ce_state->htt_rx_data))
3364 continue;
3365
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303366 pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003367 if (attr->dest_nentries > 0) {
3368 atomic_set(&pipe_info->recv_bufs_needed,
3369 init_buffer_count(attr->dest_nentries - 1));
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05303370			/* SRNG-based CE has one entry fewer */
3371 if (ce_srng_based(scn))
3372 atomic_dec(&pipe_info->recv_bufs_needed);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003373 } else {
3374 atomic_set(&pipe_info->recv_bufs_needed, 0);
3375 }
3376 ce_tasklet_init(hif_state, (1 << pipe_num));
3377 ce_register_irq(hif_state, (1 << pipe_num));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003378 }
3379
3380 if (athdiag_procfs_init(scn) != 0) {
3381 A_TARGET_ACCESS_UNLIKELY(scn);
3382 goto err;
3383 }
3384 scn->athdiag_procfs_inited = true;
3385
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -08003386 HIF_DBG("%s: ce_init done", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003387
Houston Hoffman108da402016-03-14 21:11:24 -07003388 init_tasklet_workers(hif_hdl);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003389
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -08003390 HIF_DBG("%s: X, ret = %d", __func__, rv);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003391
3392#ifdef ADRASTEA_SHADOW_REGISTERS
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -08003393 HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003394 for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -08003395 HIF_DBG("%s Shadow Register%d is mapped to address %x",
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003396 __func__, i,
3397 (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
3398 }
3399#endif
3400
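	/* translate the QDF status into the 0-for-success convention
	 * promised by this function's header comment
	 */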
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303401 return rv != QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003402
3403err:
3404 /* Failure, so clean up */
Houston Hoffman108da402016-03-14 21:11:24 -07003405 hif_unconfig_ce(scn);
Houston Hoffmanc50572b2016-06-08 19:49:46 -07003406 HIF_TRACE("%s: X, ret = %d", __func__, rv);
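	/* distinct status codes, so this always returns nonzero (failure) */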
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303407 return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003408}
3409
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003410#ifdef IPA_OFFLOAD
Leo Changd85f78d2015-11-13 10:55:34 -08003411/**
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05303412 * hif_ce_ipa_get_ce_resource() - get uc resource on hif
Leo Changd85f78d2015-11-13 10:55:34 -08003413 * @scn: bus context
3414 * @ce_sr_base_paddr: copyengine source ring base physical address
3415 * @ce_sr_ring_size: copyengine source ring size
3416 * @ce_reg_paddr: copyengine register physical address
3417 *
3418 * IPA micro controller data path offload feature enabled,
3419 * HIF should release copy engine related resource information to IPA UC
3420 * IPA UC will access hardware resource with released information
3421 *
3422 * Return: None
3423 */
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05303424void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
Sravan Kumar Kairam58e0adf2018-02-27 18:37:40 +05303425 qdf_shared_mem_t **ce_sr,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003426 uint32_t *ce_sr_ring_size,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303427 qdf_dma_addr_t *ce_reg_paddr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003428{
Komal Seelam02cf2f82016-02-22 20:44:25 +05303429 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003430 struct HIF_CE_pipe_info *pipe_info =
3431 &(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
3432 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3433
Sravan Kumar Kairam58e0adf2018-02-27 18:37:40 +05303434 ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003435 ce_reg_paddr);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003436}
3437#endif /* IPA_OFFLOAD */
3438
3439
3440#ifdef ADRASTEA_SHADOW_REGISTERS
3441
3442/*
Manikandan Mohanafd6e882017-04-07 17:46:41 -07003443 * Current shadow register config
3444 *
3445 * -----------------------------------------------------------
3446 * Shadow Register | CE | src/dst write index
3447 * -----------------------------------------------------------
3448 * 0 | 0 | src
3449 * 1 No Config - Doesn't point to anything
3450 * 2 No Config - Doesn't point to anything
3451 * 3 | 3 | src
3452 * 4 | 4 | src
3453 * 5 | 5 | src
3454 * 6 No Config - Doesn't point to anything
3455 * 7 | 7 | src
3456 * 8 No Config - Doesn't point to anything
3457 * 9 No Config - Doesn't point to anything
3458 * 10 No Config - Doesn't point to anything
3459 * 11 No Config - Doesn't point to anything
3460 * -----------------------------------------------------------
3461 * 12 No Config - Doesn't point to anything
3462 * 13 | 1 | dst
3463 * 14 | 2 | dst
3464 * 15 No Config - Doesn't point to anything
3465 * 16 No Config - Doesn't point to anything
3466 * 17 No Config - Doesn't point to anything
3467 * 18 No Config - Doesn't point to anything
3468 * 19 | 7 | dst
3469 * 20 | 8 | dst
3470 * 21 No Config - Doesn't point to anything
3471 * 22 No Config - Doesn't point to anything
3472 * 23 No Config - Doesn't point to anything
3473 * -----------------------------------------------------------
3474 *
3475 *
3476 * ToDo - Move shadow register config to following in the future
3477 * This helps free up a block of shadow registers towards the end.
3478 * Can be used for other purposes
3479 *
3480 * -----------------------------------------------------------
3481 * Shadow Register | CE | src/dst write index
3482 * -----------------------------------------------------------
3483 * 0 | 0 | src
3484 * 1 | 3 | src
3485 * 2 | 4 | src
3486 * 3 | 5 | src
3487 * 4 | 7 | src
3488 * -----------------------------------------------------------
3489 * 5 | 1 | dst
3490 * 6 | 2 | dst
3491 * 7 | 7 | dst
3492 * 8 | 8 | dst
3493 * -----------------------------------------------------------
3494 * 9 No Config - Doesn't point to anything
3495 * 12 No Config - Doesn't point to anything
3496 * 13 No Config - Doesn't point to anything
3497 * 14 No Config - Doesn't point to anything
3498 * 15 No Config - Doesn't point to anything
3499 * 16 No Config - Doesn't point to anything
3500 * 17 No Config - Doesn't point to anything
3501 * 18 No Config - Doesn't point to anything
3502 * 19 No Config - Doesn't point to anything
3503 * 20 No Config - Doesn't point to anything
3504 * 21 No Config - Doesn't point to anything
3505 * 22 No Config - Doesn't point to anything
3506 * 23 No Config - Doesn't point to anything
3507 * -----------------------------------------------------------
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003508 */
Nirav Shah4c8b78a2018-06-12 11:49:35 +05303509#ifndef QCN7605_SUPPORT
Komal Seelam644263d2016-02-22 20:45:49 +05303510u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003511{
3512 u32 addr = 0;
Houston Hoffmane6330442016-02-26 12:19:11 -08003513 u32 ce = COPY_ENGINE_ID(ctrl_addr);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003514
Houston Hoffmane6330442016-02-26 12:19:11 -08003515 switch (ce) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003516 case 0:
3517 addr = SHADOW_VALUE0;
3518 break;
3519 case 3:
3520 addr = SHADOW_VALUE3;
3521 break;
3522 case 4:
3523 addr = SHADOW_VALUE4;
3524 break;
3525 case 5:
3526 addr = SHADOW_VALUE5;
3527 break;
3528 case 7:
3529 addr = SHADOW_VALUE7;
3530 break;
3531 default:
Houston Hoffmane6330442016-02-26 12:19:11 -08003532 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303533 QDF_ASSERT(0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003534 }
3535 return addr;
3536
3537}
3538
Komal Seelam644263d2016-02-22 20:45:49 +05303539u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003540{
3541 u32 addr = 0;
Houston Hoffmane6330442016-02-26 12:19:11 -08003542 u32 ce = COPY_ENGINE_ID(ctrl_addr);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003543
Houston Hoffmane6330442016-02-26 12:19:11 -08003544 switch (ce) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003545 case 1:
3546 addr = SHADOW_VALUE13;
3547 break;
3548 case 2:
3549 addr = SHADOW_VALUE14;
3550 break;
Vishwajith Upendra70efc752016-04-18 11:23:49 -07003551 case 5:
3552 addr = SHADOW_VALUE17;
3553 break;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003554 case 7:
3555 addr = SHADOW_VALUE19;
3556 break;
3557 case 8:
3558 addr = SHADOW_VALUE20;
3559 break;
Houston Hoffmane6330442016-02-26 12:19:11 -08003560 case 9:
3561 addr = SHADOW_VALUE21;
3562 break;
3563 case 10:
3564 addr = SHADOW_VALUE22;
3565 break;
Nirav Shah75cc5c82016-05-25 10:52:38 +05303566 case 11:
3567 addr = SHADOW_VALUE23;
3568 break;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003569 default:
Houston Hoffmane6330442016-02-26 12:19:11 -08003570 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303571 QDF_ASSERT(0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003572 }
3573
3574 return addr;
3575
3576}
Nirav Shah4c8b78a2018-06-12 11:49:35 +05303577#else
3578u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3579{
3580 u32 addr = 0;
3581 u32 ce = COPY_ENGINE_ID(ctrl_addr);
3582
3583 switch (ce) {
3584 case 0:
3585 addr = SHADOW_VALUE0;
3586 break;
3587 case 4:
3588 addr = SHADOW_VALUE4;
3589 break;
3590 case 5:
3591 addr = SHADOW_VALUE5;
3592 break;
3593 default:
3594 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3595 QDF_ASSERT(0);
3596 }
3597 return addr;
3598}
3599
3600u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3601{
3602 u32 addr = 0;
3603 u32 ce = COPY_ENGINE_ID(ctrl_addr);
3604
3605 switch (ce) {
3606 case 1:
3607 addr = SHADOW_VALUE13;
3608 break;
3609 case 2:
3610 addr = SHADOW_VALUE14;
3611 break;
3612 case 3:
3613 addr = SHADOW_VALUE15;
3614 break;
3615 case 5:
3616 addr = SHADOW_VALUE17;
3617 break;
3618 case 7:
3619 addr = SHADOW_VALUE19;
3620 break;
3621 case 8:
3622 addr = SHADOW_VALUE20;
3623 break;
3624 case 9:
3625 addr = SHADOW_VALUE21;
3626 break;
3627 case 10:
3628 addr = SHADOW_VALUE22;
3629 break;
3630 case 11:
3631 addr = SHADOW_VALUE23;
3632 break;
3633 default:
3634 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3635 QDF_ASSERT(0);
3636 }
3637
3638 return addr;
3639}
3640#endif
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003641#endif
3642
Dhanashri Atre65b674f2015-10-30 15:12:03 -07003643#if defined(FEATURE_LRO)
Manjunathappa Prakash2146da32016-10-13 14:47:47 -07003644void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
3645{
3646 struct CE_state *ce_state;
3647 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
3648
Manjunathappa Prakash2146da32016-10-13 14:47:47 -07003649 ce_state = scn->ce_id_to_state[ctx_id];
3650
3651 return ce_state->lro_data;
3652}
Dhanashri Atre65b674f2015-10-30 15:12:03 -07003653#endif
Sanjay Devnanic319c822015-11-06 16:44:28 -08003654
3655/**
3656 * hif_map_service_to_pipe() - returns the ce ids pertaining to
3657 * this service
Komal Seelam644263d2016-02-22 20:45:49 +05303658 * @hif_hdl: hif_opaque_softc pointer.
Sanjay Devnanic319c822015-11-06 16:44:28 -08003659 * @svc_id: Service ID for which the mapping is needed.
3660 * @ul_pipe: address of the container in which ul pipe is returned.
3661 * @dl_pipe: address of the container in which dl pipe is returned.
3662 * @ul_is_polled: address of the container in which a bool
3663 * indicating if the UL CE for this service
3664 * is polled is returned.
3665 * @dl_is_polled: address of the container in which a bool
3666 * indicating if the DL CE for this service
3667 * is polled is returned.
3668 *
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003669 * Return: Indicates whether the service has been found in the table.
3670 * Upon return, ul_is_polled is updated only if ul_pipe is updated.
3671 * There will be warning logs if either leg has not been updated
3672 * because it missed the entry in the table (but this is not an error).
Sanjay Devnanic319c822015-11-06 16:44:28 -08003673 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05303674int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
Sanjay Devnanic319c822015-11-06 16:44:28 -08003675 uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
3676 int *dl_is_polled)
3677{
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003678 int status = QDF_STATUS_E_INVAL;
Sanjay Devnanic319c822015-11-06 16:44:28 -08003679 unsigned int i;
3680 struct service_to_pipe element;
Sanjay Devnanic319c822015-11-06 16:44:28 -08003681 struct service_to_pipe *tgt_svc_map_to_use;
Houston Hoffman748e1a62017-03-30 17:20:42 -07003682 uint32_t sz_tgt_svc_map_to_use;
Komal Seelambd7c51d2016-02-24 10:27:30 +05303683 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
Houston Hoffman748e1a62017-03-30 17:20:42 -07003684 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003685 bool dl_updated = false;
3686 bool ul_updated = false;
Sanjay Devnanic319c822015-11-06 16:44:28 -08003687
Houston Hoffman748e1a62017-03-30 17:20:42 -07003688 hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
3689 &sz_tgt_svc_map_to_use);
Sanjay Devnanic319c822015-11-06 16:44:28 -08003690
3691 *dl_is_polled = 0; /* polling for received messages not supported */
3692
3693 for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
3694
3695 memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
3696 if (element.service_id == svc_id) {
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003697 if (element.pipedir == PIPEDIR_OUT) {
Sanjay Devnanic319c822015-11-06 16:44:28 -08003698 *ul_pipe = element.pipenum;
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003699 *ul_is_polled =
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05303700 (hif_state->host_ce_config[*ul_pipe].flags &
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003701 CE_ATTR_DISABLE_INTR) != 0;
3702 ul_updated = true;
3703 } else if (element.pipedir == PIPEDIR_IN) {
Sanjay Devnanic319c822015-11-06 16:44:28 -08003704 *dl_pipe = element.pipenum;
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003705 dl_updated = true;
3706 }
3707 status = QDF_STATUS_SUCCESS;
Sanjay Devnanic319c822015-11-06 16:44:28 -08003708 }
3709 }
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003710	if (!ul_updated)
Dustin Brown1ec15102018-08-01 00:43:43 -07003711 HIF_DBG("ul pipe is NOT updated for service %d", svc_id);
Manjunathappa Prakash32afe372016-04-29 11:12:41 -07003712	if (!dl_updated)
Dustin Brown1ec15102018-08-01 00:43:43 -07003713 HIF_DBG("dl pipe is NOT updated for service %d", svc_id);
Sanjay Devnanic319c822015-11-06 16:44:28 -08003714
3715 return status;
3716}
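
/*
 * Usage sketch (hypothetical caller): look up the pipes for a service
 * and bail out on failure. HTT_DATA_MSG_SVC stands in for any service
 * id present in the service-to-pipe tables.
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	if (hif_map_service_to_pipe(hif_hdl, HTT_DATA_MSG_SVC,
 *				    &ul_pipe, &dl_pipe,
 *				    &ul_polled, &dl_polled) !=
 *	    QDF_STATUS_SUCCESS)
 *		return -EINVAL;
 */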
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003717
3718#ifdef SHADOW_REG_DEBUG
Komal Seelam644263d2016-02-22 20:45:49 +05303719inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003720 uint32_t CE_ctrl_addr)
3721{
3722 uint32_t read_from_hw, srri_from_ddr = 0;
3723
3724 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
3725
3726 srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3727
3728 if (read_from_hw != srri_from_ddr) {
Houston Hoffmanc50572b2016-06-08 19:49:46 -07003729 HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
3730 __func__, srri_from_ddr, read_from_hw,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003731 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303732 QDF_ASSERT(0);
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003733 }
3734 return srri_from_ddr;
3735}
3736
3737
Komal Seelam644263d2016-02-22 20:45:49 +05303738inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003739 uint32_t CE_ctrl_addr)
3740{
3741 uint32_t read_from_hw, drri_from_ddr = 0;
3742
3743 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
3744
3745 drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3746
3747 if (read_from_hw != drri_from_ddr) {
Houston Hoffmanc50572b2016-06-08 19:49:46 -07003748 HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003749 drri_from_ddr, read_from_hw,
3750 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303751 QDF_ASSERT(0);
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08003752 }
3753 return drri_from_ddr;
3754}
3755
3756#endif
3757
Govind Singh2443fb32016-01-13 17:44:48 +05303758/**
3759 * hif_dump_ce_registers() - dump ce registers
Komal Seelam5584a7c2016-02-24 19:22:48 +05303760 * @scn: hif_softc pointer.
Govind Singh2443fb32016-01-13 17:44:48 +05303761 *
3762 * Output the copy engine registers
3763 *
3764 * Return: 0 for success or error code
3765 */
Komal Seelam644263d2016-02-22 20:45:49 +05303766int hif_dump_ce_registers(struct hif_softc *scn)
Govind Singh2443fb32016-01-13 17:44:48 +05303767{
Komal Seelam5584a7c2016-02-24 19:22:48 +05303768 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
Govind Singh2443fb32016-01-13 17:44:48 +05303769 uint32_t ce_reg_address = CE0_BASE_ADDRESS;
Houston Hoffman6296c3e2016-07-12 18:43:32 -07003770 uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
Govind Singh2443fb32016-01-13 17:44:48 +05303771 uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
3772 uint16_t i;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303773 QDF_STATUS status;
Govind Singh2443fb32016-01-13 17:44:48 +05303774
Houston Hoffmand6f946c2016-04-06 15:16:00 -07003775 for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
Jeff Johnson8d639a02019-03-18 09:51:11 -07003776 if (!scn->ce_id_to_state[i]) {
Houston Hoffmand6f946c2016-04-06 15:16:00 -07003777 HIF_DBG("CE%d not used.", i);
3778 continue;
3779 }
3780
Komal Seelam644263d2016-02-22 20:45:49 +05303781 status = hif_diag_read_mem(hif_hdl, ce_reg_address,
Houston Hoffman6296c3e2016-07-12 18:43:32 -07003782 (uint8_t *) &ce_reg_values[0],
Govind Singh2443fb32016-01-13 17:44:48 +05303783 ce_reg_word_size * sizeof(uint32_t));
3784
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303785 if (status != QDF_STATUS_SUCCESS) {
Manikandan Mohanafd6e882017-04-07 17:46:41 -07003786 HIF_ERROR("Dumping CE register failed!");
3787 return -EACCES;
Govind Singh2443fb32016-01-13 17:44:48 +05303788 }
Venkateswara Swamy Bandaru772377c2016-10-03 14:17:28 +05303789 HIF_ERROR("CE%d=>\n", i);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05303790 qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
Houston Hoffman6296c3e2016-07-12 18:43:32 -07003791 (uint8_t *) &ce_reg_values[0],
Govind Singh2443fb32016-01-13 17:44:48 +05303792 ce_reg_word_size * sizeof(uint32_t));
Aditya Sathish648ce112018-07-02 16:41:39 +05303793 qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address
Venkateswara Swamy Bandaru772377c2016-10-03 14:17:28 +05303794 + SR_WR_INDEX_ADDRESS),
3795 ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
Aditya Sathish648ce112018-07-02 16:41:39 +05303796 qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address
Venkateswara Swamy Bandaru772377c2016-10-03 14:17:28 +05303797 + CURRENT_SRRI_ADDRESS),
3798 ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
Aditya Sathish648ce112018-07-02 16:41:39 +05303799 qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address
Venkateswara Swamy Bandaru772377c2016-10-03 14:17:28 +05303800 + DST_WR_INDEX_ADDRESS),
3801 ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
Aditya Sathish648ce112018-07-02 16:41:39 +05303802 qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address
Venkateswara Swamy Bandaru772377c2016-10-03 14:17:28 +05303803 + CURRENT_DRRI_ADDRESS),
3804 ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
Aditya Sathish648ce112018-07-02 16:41:39 +05303805 qdf_print("---");
Govind Singh2443fb32016-01-13 17:44:48 +05303806 }
Govind Singh2443fb32016-01-13 17:44:48 +05303807 return 0;
3808}
Pratik Gandhidc82a772018-01-30 18:57:05 +05303809qdf_export_symbol(hif_dump_ce_registers);
Houston Hoffman85925072016-05-06 17:02:18 -07003810#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
3811struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
3812 struct hif_pipe_addl_info *hif_info, uint32_t pipe)
3813{
3814 struct hif_softc *scn = HIF_GET_SOFTC(osc);
3815 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3816 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
3817 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
3818 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3819 struct CE_state *ce_state = (struct CE_state *)ce_hdl;
3820 struct CE_ring_state *src_ring = ce_state->src_ring;
3821 struct CE_ring_state *dest_ring = ce_state->dest_ring;
3822
3823 if (src_ring) {
3824 hif_info->ul_pipe.nentries = src_ring->nentries;
3825 hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
3826 hif_info->ul_pipe.sw_index = src_ring->sw_index;
3827 hif_info->ul_pipe.write_index = src_ring->write_index;
3828 hif_info->ul_pipe.hw_index = src_ring->hw_index;
3829 hif_info->ul_pipe.base_addr_CE_space =
3830 src_ring->base_addr_CE_space;
3831 hif_info->ul_pipe.base_addr_owner_space =
3832 src_ring->base_addr_owner_space;
3833 }
3834
3835
3836 if (dest_ring) {
3837 hif_info->dl_pipe.nentries = dest_ring->nentries;
3838 hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
3839 hif_info->dl_pipe.sw_index = dest_ring->sw_index;
3840 hif_info->dl_pipe.write_index = dest_ring->write_index;
3841 hif_info->dl_pipe.hw_index = dest_ring->hw_index;
3842 hif_info->dl_pipe.base_addr_CE_space =
3843 dest_ring->base_addr_CE_space;
3844 hif_info->dl_pipe.base_addr_owner_space =
3845 dest_ring->base_addr_owner_space;
3846 }
3847
3848 hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
3849 hif_info->ctrl_addr = ce_state->ctrl_addr;
3850
3851 return hif_info;
3852}
Pratik Gandhidc82a772018-01-30 18:57:05 +05303853qdf_export_symbol(hif_get_addl_pipe_info);
Houston Hoffman85925072016-05-06 17:02:18 -07003854
3855uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
3856{
3857 struct hif_softc *scn = HIF_GET_SOFTC(osc);
3858
3859 scn->nss_wifi_ol_mode = mode;
3860 return 0;
3861}
Pratik Gandhidc82a772018-01-30 18:57:05 +05303862qdf_export_symbol(hif_set_nss_wifiol_mode);
Houston Hoffman85925072016-05-06 17:02:18 -07003863#endif
3864
Venkateswara Swamy Bandaru5432c1b2016-10-12 19:00:40 +05303865void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
3866{
3867 struct hif_softc *scn = HIF_GET_SOFTC(osc);
3868 scn->hif_attribute = hif_attrib;
3869}
3870
Yun Park3fb36442017-08-17 17:37:53 -07003871
3872/* disable interrupts (only applicable for legacy copy engine currently */
Houston Hoffman85925072016-05-06 17:02:18 -07003873void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
3874{
3875 struct hif_softc *scn = HIF_GET_SOFTC(osc);
3876 struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
3877 uint32_t ctrl_addr = CE_state->ctrl_addr;
3878
3879 Q_TARGET_ACCESS_BEGIN(scn);
3880 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
3881 Q_TARGET_ACCESS_END(scn);
3882}
Pratik Gandhidc82a772018-01-30 18:57:05 +05303883qdf_export_symbol(hif_disable_interrupt);

/**
 * hif_fw_event_handler() - hif fw event handler
 * @hif_state: pointer to hif ce state structure
 *
 * Forward firmware events to the registered HTC callback.
 *
 * Return: none
 */
static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
{
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	if (!msg_callbacks->fwEventHandler)
		return;

	msg_callbacks->fwEventHandler(msg_callbacks->Context,
				      QDF_STATUS_E_FAILURE);
}

#ifndef QCA_WIFI_3_0
/**
 * hif_fw_interrupt_handler() - FW interrupt handler
 * @irq: irq number
 * @arg: the user pointer
 *
 * Called from the PCI interrupt handler when a firmware-generated
 * interrupt is raised to the Host.
 *
 * Only registered for legacy CE devices.
 *
 * Return: status of handled irq
 */
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	struct hif_softc *scn = arg;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	uint32_t fw_indicator_address, fw_indicator;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return ATH_ISR_NOSCHED;

	fw_indicator_address = hif_state->fw_indicator_address;
	/* For sudden unplug this will return ~0 */
	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);

	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
		/* ACK: clear Target-side pending event */
		A_TARGET_WRITE(scn, fw_indicator_address,
			       fw_indicator & ~FW_IND_EVENT_PENDING);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;

		if (hif_state->started) {
			hif_fw_event_handler(hif_state);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 * fw_indicator is used as a bitmap, defined as:
			 * FW_IND_EVENT_PENDING 0x1
			 * FW_IND_INITIALIZED   0x2
			 * FW_IND_NEEDRECOVER   0x4
			 */
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
					("%s: Early firmware event indicated 0x%x\n",
					 __func__, fw_indicator));
		}
	} else {
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;
	}

	return ATH_ISR_SCHED;
}
#else
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	return ATH_ISR_SCHED;
}
#endif /* #ifndef QCA_WIFI_3_0 */
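/*
 * Illustrative registration sketch, not part of this file: the actual
 * hookup is done by the bus layer during interrupt setup. "fw_irq" is
 * a hypothetical IRQ number for the firmware interrupt line.
 *
 *	if (request_irq(fw_irq, hif_fw_interrupt_handler, IRQF_SHARED,
 *			"wlan_firmware", scn))
 *		HIF_ERROR("%s: request_irq failed", __func__);
 */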

/**
 * hif_wlan_disable() - call the platform driver to disable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode to the platform driver to disable
 * wlan.
 *
 * Return: void
 */
void hif_wlan_disable(struct hif_softc *scn)
{
	enum pld_driver_mode mode;
	uint32_t con_mode = hif_get_conparam(scn);

	if (scn->target_status == TARGET_STATUS_RESET)
		return;

	if (QDF_GLOBAL_FTM_MODE == con_mode)
		mode = PLD_FTM;
	else if (QDF_IS_EPPING_ENABLED(con_mode))
		mode = PLD_EPPING;
	else
		mode = PLD_MISSION;

	pld_wlan_disable(scn->qdf_dev->dev, mode);
}
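/**
 * hif_get_wake_ce_id() - gets the copy engine id used for waking up
 * @scn: The hif context to use
 * @ce_id: a pointer where the copy engine id should be populated
 *
 * Return: errno if there is a problem, 0 otherwise
 */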
int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
{
	QDF_STATUS status;
	uint8_t ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;

	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
					 HTC_CTRL_RSVD_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
		return qdf_status_to_os_return(status);
	}

	*ce_id = dl_pipe;

	return 0;
}
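/*
 * Illustrative usage sketch: a bus layer arming the wake interrupt for
 * suspend might resolve the wake CE first. "hif_ce_id_to_irq()" is a
 * hypothetical helper mapping a copy engine id to its OS IRQ number;
 * enable_irq_wake() is the standard kernel API.
 *
 *	uint8_t wake_ce_id;
 *
 *	if (hif_get_wake_ce_id(scn, &wake_ce_id) == 0)
 *		enable_irq_wake(hif_ce_id_to_irq(scn, wake_ce_id));
 */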