/*
 * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include <target_type.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#ifndef CONFIG_WIN
#include "qwlan_version.h"
#endif
#include "qdf_module.h"

#define CE_POLL_TIMEOUT 10      /* ms */

#define AGC_DUMP         1
#define CHANINFO_DUMP    2
#define BB_WATCHDOG_DUMP 3
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
#define PCIE_ACCESS_DUMP 4
#endif
#include "mp_dev.h"

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290)) && \
	!defined(QCA_WIFI_SUPPORT_SRNG)
#define QCA_WIFI_SUPPORT_SRNG
#endif

/* Forward references */
QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix EV118783: poll to check whether a BMI response has arrived,
 * rather than waiting for an interrupt that may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC  1000

#ifdef CONFIG_BYPASS_QMI
#define BYPASS_QMI 1
#else
#define BYPASS_QMI 0
#endif

#ifdef CONFIG_WIN
#if ENABLE_10_4_FW_HDR
#define WDI_IPA_SERVICE_GROUP 5
#define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
#define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
#define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
#endif /* ENABLE_10_4_FW_HDR */
#endif

QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn);
static void hif_config_rri_on_ddr(struct hif_softc *scn);

/**
 * hif_target_access_log_dump() - dump the target register access log
 *
 * Dump the log of target register accesses collected while
 * CONFIG_ATH_PCIE_ACCESS_DEBUG is enabled.
 *
 * Return: n/a
 */
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
static void hif_target_access_log_dump(void)
{
	hif_target_dump_access_log();
}
#endif

void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
		      uint8_t cmd_id, bool start)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	switch (cmd_id) {
	case AGC_DUMP:
		if (start)
			priv_start_agc(scn);
		else
			priv_dump_agc(scn);
		break;
	case CHANINFO_DUMP:
		if (start)
			priv_start_cap_chaninfo(scn);
		else
			priv_dump_chaninfo(scn);
		break;
	case BB_WATCHDOG_DUMP:
		priv_dump_bbwatchdog(scn);
		break;
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	case PCIE_ACCESS_DUMP:
		hif_target_access_log_dump();
		break;
#endif
	default:
		HIF_ERROR("%s: Invalid htc dump command", __func__);
		break;
	}
}

static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;

	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}

static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	QDF_ASSERT(0); /* n too large */
	return 0;
}

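/*
 * Illustrative alternative (a sketch, not used by this driver): the same
 * rounding can be done without a loop by smearing the highest set bit,
 * the classic bit-twiddling idiom for 32-bit unsigned values. Shown only
 * to clarify what roundup_pwr2() computes; this function name is
 * hypothetical.
 */
static inline unsigned int roundup_pwr2_bitsmear(unsigned int n)
{
	n--;            /* so exact powers of 2 map to themselves */
	n |= n >> 1;
	n |= n >> 2;
	n |= n >> 4;
	n |= n >> 8;
	n |= n >> 16;
	return n + 1;
}
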
#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40

static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
#ifdef QCA_WIFI_3_0_ADRASTEA
	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
#endif
};

#ifdef QCN7605_SUPPORT
static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

#ifdef WLAN_FEATURE_EPPING
static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
 * mapping of HTC services to HIF pipes.
 */
/*
 * This authoritative table defines Copy Engine configuration and the mapping
 * of services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
   ============================================================================
   Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
              |                      |      | ctio | Size     | Frequency
              |                      |      | n    |          |
   ============================================================================
   tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
   descriptor |                      |      |      | O(100B)  | and regular
   download   |                      |      |      |          |
   ----------------------------------------------------------------------------
   rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
   indication |                      |      |      | O(10B)   | regular
   upload     |                      |      |      |          |
   ----------------------------------------------------------------------------
   MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
   upload     |                      |      |      | O(1000B) | (frequent
   e.g. noise |                      |      |      |          | during IP1.0
   packets    |                      |      |      |          | testing)
   ----------------------------------------------------------------------------
   MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
   download   |                      |      |      | O(1000B) | (frequent
   e.g.       |                      |      |      |          | during IP1.0
   misdirecte |                      |      |      |          | testing)
   d EAPOL    |                      |      |      |          |
   packets    |                      |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
              | DATA_VO (uplink)     |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
              | DATA_VO (downlink)   |      |      |          |
   ----------------------------------------------------------------------------
   WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
              |                      |      |      | O(100B)  |
   ----------------------------------------------------------------------------
   WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
   messages   | (downlink)           |      |      | O(100B)  |
              |                      |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
              | HTC_RAW_STREAMS      |      |      |          |
              | (uplink)             |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
              | HTC_RAW_STREAMS      |      |      |          |
              | (downlink)           |      |      |          |
   ----------------------------------------------------------------------------
   diag       | none (raw CE)        | CE 7 | t<>h | 4        | Diag Window
              |                      |      |      |          | infrequent
   ============================================================================
 */

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		5,
	},
#if defined(QCA_WIFI_3_0_ADRASTEA)
	{
		HTT_DATA2_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		9,
	},
	{
		HTT_DATA3_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		10,
	},
	{
		PACKET_LOG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		11,
	},
#endif
	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

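/*
 * Illustrative lookup sketch (a minimal example, not driver code): how a
 * service_to_pipe table such as the one above resolves the CE/pipe number
 * for a given service and direction. The real host-side lookup lives in
 * hif_map_service_to_pipe(); this helper name is hypothetical.
 */
static inline int ce_map_lookup_example(struct service_to_pipe *map,
					size_t map_sz, int service_id,
					int pipedir)
{
	size_t i, len = map_sz / sizeof(struct service_to_pipe);

	for (i = 0; i < len; i++) {
		if (map[i].service_id == service_id &&
		    map[i].pipedir == pipedir)
			return map[i].pipenum;
	}
	return -1; /* service not mapped in this direction */
}
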
/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN  = TARGET to HOST */
#if (defined(QCA_WIFI_QCA8074))
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
};
#endif

#if (defined(QCA_WIFI_QCA8074V2))
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9},
	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2},
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6018))
static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
};
#endif

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN  = TARGET to HOST */
#ifdef QCN7605_SUPPORT
static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
#ifdef IPA_OFFLOAD
	{ WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
#else
	{ HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
#endif
	{ PACKET_LOG_SVC, PIPEDIR_IN, 7, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif

#if (defined(QCA_WIFI_QCA6290))
#ifdef CONFIG_WIN
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6390))
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
};
#endif

static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
#ifdef WLAN_FEATURE_FASTPATH
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		5,
	},
#else /* WLAN_FEATURE_FASTPATH */
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
#endif /* WLAN_FEATURE_FASTPATH */

	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

#ifdef WLAN_FEATURE_EPPING
static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
	{0, 0, 0,},                             /* Must be last */
};

void hif_select_epping_service_to_pipe_map(struct service_to_pipe
					   **tgt_svc_map_to_use,
					   uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
	*sz_tgt_svc_map_to_use =
			sizeof(target_service_to_ce_map_wlan_epping);
}
#endif

#ifdef QCN7605_SUPPORT
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
}
#else
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	HIF_ERROR("%s: QCN7605 not supported", __func__);
}
#endif

static void hif_select_service_to_pipe_map(struct hif_softc *scn,
			struct service_to_pipe **tgt_svc_map_to_use,
			uint32_t *sz_tgt_svc_map_to_use)
{
	uint32_t mode = hif_get_conparam(scn);
	struct hif_target_info *tgt_info = &scn->target_info;

	if (QDF_IS_EPPING_ENABLED(mode)) {
		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
						      sz_tgt_svc_map_to_use);
	} else {
		switch (tgt_info->target_type) {
		default:
			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_wlan);
			break;
		case TARGET_TYPE_QCN7605:
			hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
						  sz_tgt_svc_map_to_use);
			break;
		case TARGET_TYPE_AR900B:
		case TARGET_TYPE_QCA9984:
		case TARGET_TYPE_IPQ4019:
		case TARGET_TYPE_QCA9888:
		case TARGET_TYPE_AR9888:
		case TARGET_TYPE_AR9888V2:
			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_ar900b);
			break;
		case TARGET_TYPE_QCA6290:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6290);
			break;
		case TARGET_TYPE_QCA6390:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6390);
			break;
		case TARGET_TYPE_QCA8074:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074);
			break;
		case TARGET_TYPE_QCA8074V2:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca8074_v2;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074_v2);
			break;
		case TARGET_TYPE_QCA6018:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca6018;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6018);
			break;
		}
	}
}

/**
 * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
 * @ce_state: pointer to the state context of the CE
 *
 * Description:
 * Sets the htt_rx_data or htt_tx_data attribute of the state structure
 * if the CE serves one of the HTT DATA services.
 *
 * Return: true if the CE serves an HTT DATA service, false otherwise
 */
static bool ce_mark_datapath(struct CE_state *ce_state)
{
	struct service_to_pipe *svc_map;
	uint32_t map_sz, map_len;
	int i;
	bool rc = false;

	if (ce_state != NULL) {
		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
					       &map_sz);

		map_len = map_sz / sizeof(struct service_to_pipe);
		for (i = 0; i < map_len; i++) {
			if ((svc_map[i].pipenum == ce_state->id) &&
			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
				/* HTT CEs are unidirectional */
				if (svc_map[i].pipedir == PIPEDIR_IN)
					ce_state->htt_rx_data = true;
				else
					ce_state->htt_tx_data = true;
				rc = true;
			}
		}
	}
	return rc;
}

/**
 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
 * @ce_id: ce in question
 * @ring: ring state being examined
 * @type: "src_ring" or "dest_ring" string for identifying the ring
 *
 * Warns on non-zero index values.
 * Causes a kernel panic if the ring is not empty during initialization.
 */
static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
					 char *type)
{
	if (ring->write_index != 0 || ring->sw_index != 0)
		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
			  ce_id, type, ring->sw_index, ring->write_index);
	if (ring->write_index != ring->sw_index)
		QDF_BUG(0);
}

#ifdef IPA_OFFLOAD
/**
 * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @base_addr: pointer to copyengine ring base address
 * @ce_ring: copyengine instance
 * @nentries: number of entries to be allocated
 * @desc_size: ce desc size
 *
 * Return: QDF_STATUS_SUCCESS - for success
 */
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
	    !ce_srng_based(scn)) {
		if (!scn->ipa_ce_ring) {
			scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(
				scn->qdf_dev,
				nentries * desc_size + CE_DESC_RING_ALIGN);
			if (!scn->ipa_ce_ring) {
				HIF_ERROR(
				"%s: Failed to allocate memory for IPA ce ring",
				__func__);
				return QDF_STATUS_E_NOMEM;
			}
		}
		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
						&scn->ipa_ce_ring->mem_info);
		ce_ring->base_addr_owner_space_unaligned =
						scn->ipa_ce_ring->vaddr;
	} else {
		ce_ring->base_addr_owner_space_unaligned =
			qdf_mem_alloc_consistent(scn->qdf_dev,
						 scn->qdf_dev->dev,
						 (nentries * desc_size +
						 CE_DESC_RING_ALIGN),
						 base_addr);
		if (!ce_ring->base_addr_owner_space_unaligned) {
			HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
				  __func__, CE_id);
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * ce_free_desc_ring() - Frees copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @ce_ring: copyengine instance
 * @desc_size: ce desc size
 *
 * Return: None
 */
static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
	    !ce_srng_based(scn)) {
		if (scn->ipa_ce_ring) {
			qdf_mem_shared_mem_free(scn->qdf_dev,
						scn->ipa_ce_ring);
			scn->ipa_ce_ring = NULL;
		}
		ce_ring->base_addr_owner_space_unaligned = NULL;
	} else {
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
			ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
			ce_ring->base_addr_owner_space_unaligned,
			ce_ring->base_addr_CE_space, 0);
		ce_ring->base_addr_owner_space_unaligned = NULL;
	}
}
#else
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	ce_ring->base_addr_owner_space_unaligned =
		qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					 (nentries * desc_size +
					 CE_DESC_RING_ALIGN), base_addr);
	if (!ce_ring->base_addr_owner_space_unaligned) {
		HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
			  __func__, CE_id);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
		ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
		ce_ring->base_addr_owner_space_unaligned,
		ce_ring->base_addr_CE_space, 0);
	ce_ring->base_addr_owner_space_unaligned = NULL;
}
#endif /* IPA_OFFLOAD */

/*
 * TODO: Need to explore the possibility of having this as part of a
 * target context instead of a global array.
 */
static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void);

void ce_service_register_module(enum ce_target_type target_type,
				struct ce_ops* (*ce_attach)(void))
{
	if (target_type < CE_MAX_TARGET_TYPE)
		ce_attach_register[target_type] = ce_attach;
}

qdf_export_symbol(ce_service_register_module);

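/*
 * Usage sketch (illustrative, compiled out): a CE service backend is
 * expected to hand its ce_ops constructor to ce_service_register_module()
 * at init time, roughly as below. The init function and constructor names
 * here are assumptions for illustration, not necessarily the ones this
 * tree uses.
 */
#if 0
void ce_service_legacy_init(void)
{
	ce_service_register_module(CE_SVC_LEGACY, ce_services_legacy);
}
#endif
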
/**
 * ce_srng_based() - Does this target use srng
 * @scn: pointer to the hif context
 *
 * Description:
 * Checks the target type against the list of SRNG-based targets.
 *
 * Return: true if the target is SRNG based, false otherwise
 */
bool ce_srng_based(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);

	switch (tgt_info->target_type) {
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6290:
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6018:
		return true;
	default:
		return false;
	}
	return false;
}
qdf_export_symbol(ce_srng_based);

#ifdef QCA_WIFI_SUPPORT_SRNG
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	struct ce_ops *ops = NULL;

	if (ce_srng_based(scn)) {
		if (ce_attach_register[CE_SVC_SRNG])
			ops = ce_attach_register[CE_SVC_SRNG]();
	} else if (ce_attach_register[CE_SVC_LEGACY]) {
		ops = ce_attach_register[CE_SVC_LEGACY]();
	}

	return ops;
}

#else /* QCA_LITHIUM */
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	if (ce_attach_register[CE_SVC_LEGACY])
		return ce_attach_register[CE_SVC_LEGACY]();

	return NULL;
}
#endif /* QCA_LITHIUM */

static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
		struct pld_shadow_reg_v2_cfg **shadow_config,
		int *num_shadow_registers_configured)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
			scn, shadow_config, num_shadow_registers_configured);
}

static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
					uint8_t ring_type)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_get_desc_size(ring_type);
}

static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
		uint8_t ring_type, uint32_t nentries)
{
	uint32_t ce_nbytes;
	char *ptr;
	qdf_dma_addr_t base_addr;
	struct CE_ring_state *ce_ring;
	uint32_t desc_size;
	struct hif_softc *scn = CE_state->scn;

	ce_nbytes = sizeof(struct CE_ring_state)
		+ (nentries * sizeof(void *));
	ptr = qdf_mem_malloc(ce_nbytes);
	if (!ptr)
		return NULL;

	ce_ring = (struct CE_ring_state *)ptr;
	ptr += sizeof(struct CE_ring_state);
	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	ce_ring->low_water_mark_nentries = 0;
	ce_ring->high_water_mark_nentries = nentries;
	ce_ring->per_transfer_context = (void **)ptr;

	desc_size = ce_get_desc_size(scn, ring_type);

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
			       ce_ring, nentries,
			       desc_size) !=
	    QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: ring has no DMA mem",
			  __func__);
		qdf_mem_free(ce_ring);
		return NULL;
	}
	ce_ring->base_addr_CE_space_unaligned = base_addr;

	/* Correctly initialize memory to 0 to
	 * prevent garbage data from crashing the
	 * system when the firmware is downloaded
	 */
	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
		     nentries * desc_size +
		     CE_DESC_RING_ALIGN);

	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {

		ce_ring->base_addr_CE_space =
			(ce_ring->base_addr_CE_space_unaligned +
			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);

		ce_ring->base_addr_owner_space = (void *)
			(((size_t) ce_ring->base_addr_owner_space_unaligned +
			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
	} else {
		ce_ring->base_addr_CE_space =
				ce_ring->base_addr_CE_space_unaligned;
		ce_ring->base_addr_owner_space =
				ce_ring->base_addr_owner_space_unaligned;
	}

	return ce_ring;
}

static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
			 uint32_t ce_id, struct CE_ring_state *ring,
			 struct CE_attr *attr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
						     ring, attr);
}

int hif_ce_bus_early_suspend(struct hif_softc *scn)
{
	uint8_t ul_pipe, dl_pipe;
	int ce_id, status, ul_is_polled, dl_is_polled;
	struct CE_state *ce_state;

	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		HIF_ERROR("%s: pipe_mapping failure", __func__);
		return status;
	}

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (ce_id == ul_pipe)
			continue;
		if (ce_id == dl_pipe)
			continue;

		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_RUNNING)
			ce_state->state = CE_PAUSED;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
	}

	return status;
}

int hif_ce_bus_late_resume(struct hif_softc *scn)
{
	int ce_id;
	struct CE_state *ce_state;
	int write_index = 0;
	bool index_updated;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_PENDING) {
			write_index = ce_state->src_ring->write_index;
			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
						  write_index);
			ce_state->state = CE_RUNNING;
			index_updated = true;
		} else {
			index_updated = false;
		}

		if (ce_state->state == CE_PAUSED)
			ce_state->state = CE_RUNNING;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);

		if (index_updated)
			hif_record_ce_desc_event(scn, ce_id,
						 RESUME_WRITE_INDEX_UPDATE,
						 NULL, NULL, write_index, 0);
	}

	return 0;
}

/**
 * ce_oom_recovery() - try to recover rx ce from oom condition
 * @context: CE_state of the CE with oom rx ring
 *
 * The executing work will continue to be rescheduled until
 * at least 1 descriptor is successfully posted to the rx ring.
 *
 * Return: none
 */
static void ce_oom_recovery(void *context)
{
	struct CE_state *ce_state = context;
	struct hif_softc *scn = ce_state->scn;
	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&ce_softc->pipe_info[ce_state->id];

	hif_post_recv_buffers_for_pipe(pipe_info);
}

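/*
 * Wiring sketch (illustrative, compiled out): recovery work like the
 * handler above is typically bound to a CE at init time and kicked from
 * the rx refill path when buffer allocation fails. The field and helper
 * names used below (oom_allocation_work, qdf_create_work, qdf_sched_work)
 * follow QDF convention but are assumptions in this sketch.
 */
#if 0
	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
			ce_oom_recovery, CE_state);
	/* later, on buffer allocation failure while refilling the ring: */
	qdf_sched_work(scn->qdf_dev, &ce_state->oom_allocation_work);
#endif
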
#ifdef HIF_CE_DEBUG_DATA_BUF
/**
 * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed by
 * the CE descriptors.
 * Allocate HIF_CE_HISTORY_MAX records of CE_DEBUG_MAX_DATA_BUF_SIZE each.
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev =
	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return QDF_STATUS_E_NOMEM;

	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		event->data =
			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
		if (event->data == NULL)
			return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * free_mem_ce_debug_hist_data() - Free mem of the data pointed by
 * the CE descriptors.
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: None
 */
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev =
	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return;

	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		if (event->data != NULL)
			qdf_mem_free(event->data);
		event->data = NULL;
		event = NULL;
	}
}
#endif /* HIF_CE_DEBUG_DATA_BUF */

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) /* MCL */
struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];

/**
 * alloc_mem_ce_debug_history() - Allocate CE descriptor history
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

	ce_hist->hist_ev[ce_id] = hif_ce_desc_history[ce_id];
	ce_hist->enable[ce_id] = 1;

	return QDF_STATUS_SUCCESS;
}

/**
 * free_mem_ce_debug_history() - Free CE descriptor history
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: None
 */
static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

	ce_hist->enable[ce_id] = 0;
	ce_hist->hist_ev[ce_id] = NULL;
}

#elif defined(HIF_CE_DEBUG_DATA_BUF) /* WIN */

static QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
{
	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));

	if (scn->hif_ce_desc_hist.hist_ev[CE_id] == NULL) {
		scn->hif_ce_desc_hist.enable[CE_id] = 0;
		return QDF_STATUS_E_NOMEM;
	} else {
		scn->hif_ce_desc_hist.enable[CE_id] = 1;
		return QDF_STATUS_SUCCESS;
	}
}

static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	struct hif_ce_desc_event *hist_ev = ce_hist->hist_ev[CE_id];

	if (!hist_ev)
		return;

	if (ce_hist->data_enable[CE_id] == 1) {
		ce_hist->data_enable[CE_id] = 0;
		free_mem_ce_debug_hist_data(scn, CE_id);
	}

	ce_hist->enable[CE_id] = 0;
	qdf_mem_free(ce_hist->hist_ev[CE_id]);
	ce_hist->hist_ev[CE_id] = NULL;
}

#else /* Disabled */

static inline QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
#endif

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
/**
 * reset_ce_debug_history() - reset the index and ce id used for dumping the
 * CE records on the console using sysfs.
 * @scn: hif scn handle
 *
 * Return: None
 */
static inline void reset_ce_debug_history(struct hif_softc *scn)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	/* Initialise the CE debug history sysfs interface inputs ce_id and
	 * index. Disable data storing
	 */
	ce_hist->hist_index = 0;
	ce_hist->hist_id = 0;
}
#else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
static inline void reset_ce_debug_history(struct hif_softc *scn) { }
#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */

void ce_enable_polling(void *cestate)
{
	struct CE_state *CE_state = (struct CE_state *)cestate;

	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
		CE_state->timer_inited = true;
}

void ce_disable_polling(void *cestate)
{
	struct CE_state *CE_state = (struct CE_state *)cestate;

	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
		CE_state->timer_inited = false;
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 *
 * This should be called during the initialization sequence before
 * interrupts are enabled, so we don't have to worry about thread safety.
 */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr)
{
	struct CE_state *CE_state;
	uint32_t ctrl_addr;
	unsigned int nentries;
	bool malloc_CE_state = false;
	bool malloc_src_ring = false;
	int status;

	QDF_ASSERT(CE_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(CE_id);
	CE_state = scn->ce_id_to_state[CE_id];

	if (!CE_state) {
		CE_state =
		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
		if (!CE_state) {
			HIF_ERROR("%s: CE_state has no mem", __func__);
			return NULL;
		}
		malloc_CE_state = true;
		qdf_spinlock_create(&CE_state->ce_index_lock);

		CE_state->id = CE_id;
		CE_state->ctrl_addr = ctrl_addr;
		CE_state->state = CE_RUNNING;
		CE_state->attr_flags = attr->flags;
	}
	CE_state->scn = scn;
	CE_state->service = ce_engine_service_reg;

	qdf_atomic_init(&CE_state->rx_pending);
	if (attr == NULL) {
		/* Already initialized; caller wants the handle */
		return (struct CE_handle *)CE_state;
	}

	if (CE_state->src_sz_max)
		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
	else
		CE_state->src_sz_max = attr->src_sz_max;

	ce_init_ce_desc_event_log(scn, CE_id,
				  attr->src_nentries + attr->dest_nentries);

	/* source ring setup */
	nentries = attr->src_nentries;
	if (nentries) {
		struct CE_ring_state *src_ring;

		nentries = roundup_pwr2(nentries);
		if (CE_state->src_ring) {
			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
		} else {
			src_ring = CE_state->src_ring =
				ce_alloc_ring_state(CE_state,
						CE_RING_SRC,
						nentries);
			if (!src_ring) {
				/* cannot allocate src ring. If the
				 * CE_state is allocated locally free
				 * CE_State and return error.
				 */
				HIF_ERROR("%s: src ring has no mem", __func__);
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					qdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			}
			/* we can allocate src ring. Mark that the src ring is
			 * allocated locally
			 */
			malloc_src_ring = true;

			/*
			 * Also allocate a shadow src ring in
			 * regular mem to use for faster access.
			 */
			src_ring->shadow_base_unaligned =
				qdf_mem_malloc(nentries *
					       sizeof(struct CE_src_desc) +
					       CE_DESC_RING_ALIGN);
			if (src_ring->shadow_base_unaligned == NULL) {
				HIF_ERROR("%s: src ring no shadow_base mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->shadow_base = (struct CE_src_desc *)
				(((size_t) src_ring->shadow_base_unaligned +
				CE_DESC_RING_ALIGN - 1) &
				 ~(CE_DESC_RING_ALIGN - 1));

			status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
					       src_ring, attr);
			if (status < 0)
				goto error_target_access;

			ce_ring_test_initial_indexes(CE_id, src_ring,
						     "src_ring");
		}
	}

	/* destination ring setup */
	nentries = attr->dest_nentries;
	if (nentries) {
		struct CE_ring_state *dest_ring;

		nentries = roundup_pwr2(nentries);
		if (CE_state->dest_ring) {
			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
		} else {
			dest_ring = CE_state->dest_ring =
				ce_alloc_ring_state(CE_state,
						CE_RING_DEST,
						nentries);
			if (!dest_ring) {
				/* cannot allocate dst ring. If the CE_state
				 * or src ring is allocated locally free
				 * CE_State and src ring and return error.
				 */
				HIF_ERROR("%s: dest ring has no mem",
					  __func__);
				goto error_no_dma_mem;
			}

			status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
					       dest_ring, attr);
			if (status < 0)
				goto error_target_access;

			ce_ring_test_initial_indexes(CE_id, dest_ring,
						     "dest_ring");

			/* For srng based target, init status ring here */
			if (ce_srng_based(CE_state->scn)) {
				CE_state->status_ring =
					ce_alloc_ring_state(CE_state,
							CE_RING_STATUS,
							nentries);
				if (CE_state->status_ring == NULL) {
					/* Allocation failed. Cleanup */
					qdf_mem_free(CE_state->dest_ring);
					if (malloc_src_ring) {
						qdf_mem_free
							(CE_state->src_ring);
						CE_state->src_ring = NULL;
						malloc_src_ring = false;
					}
					if (malloc_CE_state) {
						/* allocated CE_state locally */
						scn->ce_id_to_state[CE_id] =
							NULL;
						qdf_mem_free(CE_state);
						malloc_CE_state = false;
					}

					return NULL;
				}

				status = ce_ring_setup(scn, CE_RING_STATUS,
						CE_id, CE_state->status_ring,
						attr);
				if (status < 0)
					goto error_target_access;

			}

			/* epping */
			/* poll timer */
			if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) {
				qdf_timer_init(scn->qdf_dev,
					       &CE_state->poll_timer,
					       ce_poll_timeout,
					       CE_state,
					       QDF_TIMER_TYPE_WAKE_APPS);
				ce_enable_polling(CE_state);
				qdf_timer_mod(&CE_state->poll_timer,
					      CE_POLL_TIMEOUT);
			}
		}
	}
1594
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301595 if (!ce_srng_based(scn)) {
1596 /* Enable CE error interrupts */
1597 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1598 goto error_target_access;
1599 CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
1600 if (Q_TARGET_ACCESS_END(scn) < 0)
1601 goto error_target_access;
1602 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001603
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08001604 qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
1605 ce_oom_recovery, CE_state);
1606
Houston Hoffmanc7d54292016-04-13 18:55:37 -07001607 /* update the htt_data attribute */
1608 ce_mark_datapath(CE_state);
Houston Hoffmanb01db182017-03-13 14:38:09 -07001609 scn->ce_id_to_state[CE_id] = CE_state;
Houston Hoffmanc7d54292016-04-13 18:55:37 -07001610
c_cgodavfda96ad2017-09-07 16:16:00 +05301611 alloc_mem_ce_debug_history(scn, CE_id);
1612
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001613 return (struct CE_handle *)CE_state;
1614
Houston Hoffman4411ad42016-03-14 21:12:04 -07001615error_target_access:
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001616error_no_dma_mem:
1617 ce_fini((struct CE_handle *)CE_state);
1618 return NULL;
1619}
1620
Aditya Sathish80bbaef2018-10-25 10:02:05 +05301621/**
1622 * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs
1623 * @hif_ctx: HIF Context
1624 *
1625 * API to check if polling is enabled on all CEs. Returns true when polling
1626 * is enabled on all CEs.
1627 *
1628 * Return: bool
1629 */
1630bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
1631{
1632 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1633 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1634 struct CE_attr *attr;
1635 int id;
1636
1637 for (id = 0; id < scn->ce_count; id++) {
1638 attr = &hif_state->host_ce_config[id];
1639 if (attr && (attr->dest_nentries) &&
1640 !(attr->flags & CE_ATTR_ENABLE_POLL))
1641 return false;
1642 }
1643 return true;
1644}
1645qdf_export_symbol(hif_is_polled_mode_enabled);
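
/*
 * Editorial usage sketch (not driver code): a hypothetical caller holding
 * the opaque HIF handle can use the query above to decide whether CE
 * completions will arrive via the poll timers armed in ce_init() rather
 * than via interrupts:
 *
 *	static void example_choose_service_model(struct hif_opaque_softc *ctx)
 *	{
 *		if (hif_is_polled_mode_enabled(ctx))
 *			return;	// ce_poll_timeout() will reap completions
 *		// otherwise rely on the registered interrupt handlers
 *	}
 */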

#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_enable_fastpath() - Update that we have enabled fastpath mode
 * @hif_ctx: HIF context
 *
 * For use in data path
 *
 * Return: void
 */
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (ce_srng_based(scn)) {
		HIF_INFO("%s, srng rings do not support fastpath", __func__);
		return;
	}
	HIF_DBG("%s, Enabling fastpath mode", __func__);
	scn->fastpath_mode_on = true;
}

/**
 * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
 * @hif_ctx: HIF Context
 *
 * For use in data path to skip HTC
 *
 * Return: bool
 */
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->fastpath_mode_on;
}

/**
 * hif_get_ce_handle - API to get CE handle for FastPath mode
 * @hif_ctx: HIF Context
 * @id: CopyEngine Id
 *
 * API to return CE handle for fastpath mode
 *
 * Return: CE handle (an opaque pointer into scn->ce_id_to_state)
 */
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->ce_id_to_state[id];
}
qdf_export_symbol(hif_get_ce_handle);
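
/*
 * Editorial call-sequence sketch (hypothetical caller; the CE id used below
 * is an assumption, not defined in this file): a datapath client enables
 * fastpath once at attach time, then fetches raw CE handles so it can
 * bypass HTC:
 *
 *	hif_enable_fastpath(hif_ctx);
 *	if (hif_is_fastpath_mode_enabled(hif_ctx)) {
 *		struct CE_handle *ce_tx_hdl;
 *
 *		ce_tx_hdl = hif_get_ce_handle(hif_ctx, 4); // e.g. HTT tx CE
 *		// program descriptors directly against ce_tx_hdl
 *	}
 */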

/**
 * ce_h2t_tx_ce_cleanup() - Place holder function for H2T CE cleanup.
 * No processing is required inside this function.
 * @ce_hdl: Copy engine handle
 * Using an assert, this function makes sure that
 * the TX CE has been processed completely.
 *
 * This is called while dismantling CE structures. No other thread
 * should be using these structures while dismantling is occurring,
 * therefore no locking is needed.
 *
 * Return: none
 */
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct hif_softc *sc = ce_state->scn;
	uint32_t sw_index, write_index;

	if (hif_is_nss_wifi_enabled(sc))
		return;

	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
		HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
			__func__, __LINE__);
		sw_index = src_ring->sw_index;
		write_index = src_ring->sw_index;

		/* At this point Tx CE should be clean */
		qdf_assert_always(sw_index == write_index);
	}
}

/**
 * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
 * @ce_hdl: Handle to CE
 *
 * These buffers are never allocated on the fly, but
 * are allocated only once during HIF start and freed
 * only once during HIF stop.
 * NOTE:
 * The assumption here is there is no in-flight DMA in progress
 * currently, so that buffers can be freed up safely.
 *
 * Return: NONE
 */
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *dst_ring = ce_state->dest_ring;
	qdf_nbuf_t nbuf;
	int i;

	if (ce_state->scn->fastpath_mode_on == false)
		return;

	if (!ce_state->htt_rx_data)
		return;

	/*
	 * This applies when fastpath_mode is on for datapath CEs. Unlike
	 * other CEs, this CE is completely full: it does not leave one blank
	 * space to distinguish between an empty queue and a full queue.
	 * So free all the entries.
	 */
	for (i = 0; i < dst_ring->nentries; i++) {
		nbuf = dst_ring->per_transfer_context[i];

		/*
		 * The reasons for doing this check are:
		 * 1) Protect against calling cleanup before allocating buffers
		 * 2) In a corner case, FASTPATH_mode_on may be set, but we
		 *    could have a partially filled ring, because of a memory
		 *    allocation failure in the middle of allocating the ring.
		 *    This check accounts for that case; checking the
		 *    fastpath_mode_on flag or started flag would not have
		 *    covered that case. This is not in the performance path,
		 *    so it is OK to do this.
		 */
		if (nbuf) {
			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
					      QDF_DMA_FROM_DEVICE);
			qdf_nbuf_free(nbuf);
		}
	}
}
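
/*
 * Editorial note: a normal CE ring keeps one slot empty so that
 * sw_index == write_index unambiguously means "empty" (e.g. 512 entries
 * hold at most 511 buffers). The fastpath HTT rx ring cleaned above is the
 * exception: all dst_ring->nentries slots may hold an nbuf, which is why
 * the loop walks every entry instead of walking sw_index..write_index.
 */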

/**
 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
 * @scn: HIF handle
 *
 * Datapath Rx CEs are a special case, where we reuse all the message
 * buffers. Hence we have to post all the entries in the pipe, even at the
 * beginning, unlike for other CE pipes where one less than dest_nentries
 * is filled at the start.
 *
 * Return: None
 */
static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
	int pipe_num;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (scn->fastpath_mode_on == false)
		return;

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info =
			&hif_state->pipe_info[pipe_num];
		struct CE_state *ce_state =
			scn->ce_id_to_state[pipe_info->pipe_num];

		if (ce_state->htt_rx_data)
			atomic_inc(&pipe_info->recv_bufs_needed);
	}
}
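
/*
 * Worked example (editorial): with dest_nentries = 512, an ordinary pipe
 * starts with recv_bufs_needed = 511 (one less than dest_nentries, per the
 * comment above), while an HTT rx pipe gets the extra increment here so
 * that hif_post_recv_buffers() fills all 512 entries.
 */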
#else
static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
}

static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return false;
}
#endif /* WLAN_FEATURE_FASTPATH */

void ce_fini(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int CE_id = CE_state->id;
	struct hif_softc *scn = CE_state->scn;
	uint32_t desc_size;

	bool inited = CE_state->timer_inited;
	CE_state->state = CE_UNUSED;
	scn->ce_id_to_state[CE_id] = NULL;
	/* Set the flag to false first to stop processing in ce_poll_timeout */
	ce_disable_polling(CE_state);

	qdf_lro_deinit(CE_state->lro_data);

	if (CE_state->src_ring) {
		/* Cleanup the datapath Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		desc_size = ce_get_desc_size(scn, CE_RING_SRC);
		if (CE_state->src_ring->shadow_base_unaligned)
			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
		if (CE_state->src_ring->base_addr_owner_space_unaligned)
			ce_free_desc_ring(scn, CE_state->id,
					  CE_state->src_ring,
					  desc_size);
		qdf_mem_free(CE_state->src_ring);
	}
	if (CE_state->dest_ring) {
		/* Cleanup the datapath Rx ring */
		ce_t2h_msg_ce_cleanup(copyeng);

		desc_size = ce_get_desc_size(scn, CE_RING_DEST);
		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
			ce_free_desc_ring(scn, CE_state->id,
					  CE_state->dest_ring,
					  desc_size);
		qdf_mem_free(CE_state->dest_ring);

		/* epping */
		if (inited) {
			qdf_timer_free(&CE_state->poll_timer);
		}
	}
	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
		/* Cleanup the datapath Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		if (CE_state->status_ring->shadow_base_unaligned)
			qdf_mem_free(
				CE_state->status_ring->shadow_base_unaligned);

		desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
		if (CE_state->status_ring->base_addr_owner_space_unaligned)
			ce_free_desc_ring(scn, CE_state->id,
					  CE_state->status_ring,
					  desc_size);
		qdf_mem_free(CE_state->status_ring);
	}

	free_mem_ce_debug_history(scn, CE_id);
	reset_ce_debug_history(scn);
	ce_deinit_ce_desc_event_log(scn, CE_id);

	qdf_spinlock_destroy(&CE_state->ce_index_lock);
	qdf_mem_free(CE_state);
}

void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	qdf_mem_zero(&hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
	qdf_mem_zero(&hif_state->msg_callbacks_current,
		     sizeof(hif_state->msg_callbacks_current));
}

/* Send the first nbytes bytes of the buffer */
QDF_STATUS
hif_send_head(struct hif_opaque_softc *hif_ctx,
	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
	      qdf_nbuf_t nbuf, unsigned int data_attr)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	int bytes = nbytes, nfrags = 0;
	struct ce_sendlist sendlist;
	int status, i = 0;
	unsigned int mux_id = 0;

	if (nbytes > qdf_nbuf_len(nbuf)) {
		HIF_ERROR("%s: nbytes:%d nbuf_len:%d", __func__, nbytes,
			  (uint32_t)qdf_nbuf_len(nbuf));
		QDF_ASSERT(0);
	}

	transfer_id =
		(mux_id & MUX_ID_MASK) |
		(transfer_id & TRANSACTION_ID_MASK);
	data_attr &= DESC_DATA_FLAG_MASK;
	/*
	 * The common case involves sending multiple fragments within a
	 * single download (the tx descriptor and the tx frame header).
	 * So, optimize for the case of multiple fragments by not even
	 * checking whether it's necessary to use a sendlist.
	 * The overhead of using a sendlist for a single buffer download
	 * is not a big deal, since it happens rarely (for WMI messages).
	 */
	ce_sendlist_init(&sendlist);
	do {
		qdf_dma_addr_t frag_paddr;
		int frag_bytes;

		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
		/*
		 * Clear the packet offset for all but the first CE desc.
		 */
		if (i++ > 0)
			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;

		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
					     frag_bytes >
					     bytes ? bytes : frag_bytes,
					     qdf_nbuf_get_frag_is_wordstream
					     (nbuf,
					      nfrags) ? 0 :
					     CE_SEND_FLAG_SWAP_DISABLE,
					     data_attr);
		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("%s: error, frag_num %d larger than limit",
				  __func__, nfrags);
			return status;
		}
		bytes -= frag_bytes;
		nfrags++;
	} while (bytes > 0);

	/* Make sure we have resources to handle this request */
	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	if (pipe_info->num_sends_allowed < nfrags) {
		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
		return QDF_STATUS_E_RESOURCES;
	}
	pipe_info->num_sends_allowed -= nfrags;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);

	if (qdf_unlikely(ce_hdl == NULL)) {
		HIF_ERROR("%s: error CE handle is null", __func__);
		return A_ERROR;
	}

	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
		QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
		sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
	QDF_ASSERT(status == QDF_STATUS_SUCCESS);

	return status;
}
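
/*
 * Editorial usage sketch (hypothetical caller, not driver code): an
 * HTC-style sender hands a mapped nbuf to hif_send_head() and retries
 * later if the pipe is out of send slots:
 *
 *	QDF_STATUS example_send(struct hif_opaque_softc *hif_ctx,
 *				uint8_t pipe, qdf_nbuf_t nbuf)
 *	{
 *		QDF_STATUS status;
 *
 *		status = hif_send_head(hif_ctx, pipe, 0,
 *				       qdf_nbuf_len(nbuf), nbuf, 0);
 *		if (status == QDF_STATUS_E_RESOURCES)
 *			;	// requeue; credits return via send completions
 *		return status;
 *	}
 */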

void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
			     int force)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance. If there seem to be plenty of
		 * resources left, then just wait, since checking involves
		 * reading a CE register, which is a relatively expensive
		 * operation.
		 */
		resources = hif_get_free_queue_number(hif_ctx, pipe);
		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
				 1))
			return;
	}
#if ATH_11AC_TXCOMPACT
	ce_per_engine_servicereap(scn, pipe);
#else
	ce_per_engine_service(scn, pipe);
#endif
}
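
/*
 * Worked example (editorial): with src_nentries = 32 the threshold above is
 * 16; a non-forced call that still has 20 sends allowed returns without
 * touching CE registers, while 16 or fewer falls through and reaps
 * completions via ce_per_engine_servicereap()/ce_per_engine_service().
 */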

uint16_t
hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	uint16_t rv;

	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	rv = pipe_info->num_sends_allowed;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
	return rv;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void
hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int sw_index, unsigned int hw_index,
		     unsigned int toeplitz_hash_result)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	unsigned int sw_idx = sw_index, hw_idx = hw_index;
	struct hif_msg_callbacks *msg_callbacks =
		&pipe_info->pipe_callbacks;

	do {
		/*
		 * The upper layer callback will be triggered
		 * when the last fragment is completed.
		 */
		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
			if (scn->target_status == TARGET_STATUS_RESET) {
				qdf_nbuf_unmap_single(scn->qdf_dev,
						      transfer_context,
						      QDF_DMA_TO_DEVICE);
				qdf_nbuf_free(transfer_context);
			} else
				msg_callbacks->txCompletionHandler(
					msg_callbacks->Context,
					transfer_context, transfer_id,
					toeplitz_hash_result);
		}

		qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
		pipe_info->num_sends_allowed++;
		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
	} while (ce_completed_send_next(copyeng,
			&ce_context, &transfer_context,
			&CE_data, &nbytes, &transfer_id,
			&sw_idx, &hw_idx,
			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
}

/**
 * hif_ce_do_recv(): send message from copy engine to upper layers
 * @msg_callbacks: structure containing callback and callback context
 * @netbuf: skb containing message
 * @nbytes: number of bytes in the message
 * @pipe_info: used for the pipe_number info
 *
 * Checks the packet length, configures the length in the netbuf,
 * and calls the upper layer callback.
 *
 * Return: None
 */
static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
				  qdf_nbuf_t netbuf, int nbytes,
				  struct HIF_CE_pipe_info *pipe_info) {
	if (nbytes <= pipe_info->buf_sz) {
		qdf_nbuf_set_pktlen(netbuf, nbytes);
		msg_callbacks->
			rxCompletionHandler(msg_callbacks->Context,
					    netbuf, pipe_info->pipe_num);
	} else {
		HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d",
			  __func__, netbuf, nbytes);

		qdf_nbuf_free(netbuf);
	}
}
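
/*
 * Editorial sketch of the receive callback contract (hypothetical handler,
 * for illustration only): rxCompletionHandler takes ownership of the nbuf
 * whose length was set above:
 *
 *	static void example_rx_done(void *context, qdf_nbuf_t netbuf,
 *				    uint8_t pipe_id)
 *	{
 *		// a real handler dispatches to HTC/HTT; this one just frees
 *		qdf_nbuf_free(netbuf);
 *	}
 */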

/* Called by lower (CE) layer when data is received from the Target. */
static void
hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int flags)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct CE_state *ce_state = (struct CE_state *) copyeng;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
#ifdef HIF_PCI
	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
#endif
	struct hif_msg_callbacks *msg_callbacks =
		&pipe_info->pipe_callbacks;

	do {
#ifdef HIF_PCI
		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
#endif
		qdf_nbuf_unmap_single(scn->qdf_dev,
				      (qdf_nbuf_t) transfer_context,
				      QDF_DMA_FROM_DEVICE);

		atomic_inc(&pipe_info->recv_bufs_needed);
		hif_post_recv_buffers_for_pipe(pipe_info);
		if (scn->target_status == TARGET_STATUS_RESET)
			qdf_nbuf_free(transfer_context);
		else
			hif_ce_do_recv(msg_callbacks, transfer_context,
				       nbytes, pipe_info);

		/* Set up force_break flag if num of receives reaches
		 * MAX_NUM_OF_RECEIVES
		 */
		ce_state->receive_count++;
		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
			ce_state->force_break = 1;
			break;
		}
	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
					&CE_data, &nbytes, &transfer_id,
					&flags) == QDF_STATUS_SUCCESS);
}

/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */

void
hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
	      struct hif_msg_callbacks *callbacks)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	spin_lock_init(&pcie_access_log_lock);
#endif
	/* Save callbacks for later installation */
	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
		     sizeof(hif_state->msg_callbacks_pending));
}

static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
{
	struct CE_handle *ce_diag = hif_state->ce_diag;
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	struct hif_msg_callbacks *hif_msg_callbacks =
		&hif_state->msg_callbacks_current;

	/* daemonize("hif_compl_thread"); */

	if (scn->ce_count == 0) {
		HIF_ERROR("%s: Invalid ce_count", __func__);
		return -EINVAL;
	}

	if (!hif_msg_callbacks ||
	    !hif_msg_callbacks->rxCompletionHandler ||
	    !hif_msg_callbacks->txCompletionHandler) {
		HIF_ERROR("%s: no completion handler registered", __func__);
		return -EFAULT;
	}

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr attr;
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl == ce_diag)
			continue;	/* Handle Diagnostic CE specially */
		attr = hif_state->host_ce_config[pipe_num];
		if (attr.src_nentries) {
			/* pipe used to send to target */
			HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK",
				__func__, pipe_num, pipe_info);
			ce_send_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_send_done, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
			pipe_info->num_sends_allowed = attr.src_nentries - 1;
		}
		if (attr.dest_nentries) {
			/* pipe used to receive from target */
			ce_recv_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_recv_data, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
		}

		if (attr.src_nentries)
			qdf_spinlock_create(&pipe_info->completion_freeq_lock);

		qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
			     sizeof(pipe_info->pipe_callbacks));
	}

	A_TARGET_ACCESS_UNLIKELY(scn);
	return 0;
}

/*
 * Install pending msg callbacks.
 *
 * TBDXXX: This hack is needed because upper layers install msg callbacks
 * for use with HTC before BMI is done; yet this HIF implementation
 * needs to continue to use BMI msg callbacks. Really, upper layers
 * should not register HTC callbacks until AFTER BMI phase.
 */
static void hif_msg_callbacks_install(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	qdf_mem_copy(&hif_state->msg_callbacks_current,
		     &hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
}

void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
			  uint8_t *DLPipe)
{
	int ul_is_polled, dl_is_polled;

	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
}

/**
 * hif_dump_pipe_debug_count() - Log error count
 * @scn: hif_softc pointer.
 *
 * Output the pipe error counts of each pipe to log file
 *
 * Return: N/A
 */
void hif_dump_pipe_debug_count(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num;

	if (hif_state == NULL) {
		HIF_ERROR("%s hif_state is NULL", __func__);
		return;
	}
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];

		if (pipe_info->nbuf_alloc_err_count > 0 ||
		    pipe_info->nbuf_dma_err_count > 0 ||
		    pipe_info->nbuf_ce_enqueue_err_count)
			HIF_ERROR(
				"%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_alloc_err_count,
				pipe_info->nbuf_dma_err_count,
				pipe_info->nbuf_ce_enqueue_err_count);
	}
}

static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
					  void *nbuf, uint32_t *error_cnt,
					  enum hif_ce_event_type failure_type,
					  const char *failure_type_string)
{
	int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
	struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
	int ce_id = CE_state->id;
	uint32_t error_cnt_tmp;

	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
	error_cnt_tmp = ++(*error_cnt);
	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
	HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
		__func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
		failure_type_string);
	hif_record_ce_desc_event(scn, ce_id, failure_type,
				 NULL, nbuf, bufs_needed_tmp, 0);
	/* if we fail to allocate the last buffer for an rx pipe,
	 * there is no trigger to refill the ce and we will
	 * eventually crash
	 */
	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
		qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
}

QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	qdf_size_t buf_sz;
	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
	QDF_STATUS status;
	uint32_t bufs_posted = 0;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return QDF_STATUS_SUCCESS;
	}

	ce_hdl = pipe_info->ce_hdl;

	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
		qdf_dma_addr_t CE_data;	/* CE space buffer address */
		qdf_nbuf_t nbuf;

		atomic_dec(&pipe_info->recv_bufs_needed);
		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
		if (!nbuf) {
			hif_post_recv_buffers_failure(pipe_info, nbuf,
					&pipe_info->nbuf_alloc_err_count,
					HIF_RX_NBUF_ALLOC_FAILURE,
					"HIF_RX_NBUF_ALLOC_FAILURE");
			return QDF_STATUS_E_NOMEM;
		}

		/*
		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
		 * CE_data = dma_map_single(dev, data, buf_sz,
		 *			    DMA_FROM_DEVICE);
		 */
		status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
					     QDF_DMA_FROM_DEVICE);

		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
			hif_post_recv_buffers_failure(pipe_info, nbuf,
					&pipe_info->nbuf_dma_err_count,
					HIF_RX_NBUF_MAP_FAILURE,
					"HIF_RX_NBUF_MAP_FAILURE");
			qdf_nbuf_free(nbuf);
			return status;
		}

		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);

		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
						   buf_sz, DMA_FROM_DEVICE);
		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
			hif_post_recv_buffers_failure(pipe_info, nbuf,
					&pipe_info->nbuf_ce_enqueue_err_count,
					HIF_RX_NBUF_ENQUEUE_FAILURE,
					"HIF_RX_NBUF_ENQUEUE_FAILURE");

			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
					      QDF_DMA_FROM_DEVICE);
			qdf_nbuf_free(nbuf);
			return status;
		}

		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
		bufs_posted++;
	}
	pipe_info->nbuf_alloc_err_count =
		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
	pipe_info->nbuf_dma_err_count =
		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
	pipe_info->nbuf_ce_enqueue_err_count =
		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
		pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;

	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

	return QDF_STATUS_SUCCESS;
}
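
/*
 * Editorial note on the replenish loop above: recv_bufs_needed_lock is held
 * only around the counter updates, not across allocation or DMA mapping, so
 * hif_pci_ce_recv_data() can keep incrementing recv_bufs_needed while a
 * refill is in flight. On failure the counter is restored by
 * hif_post_recv_buffers_failure(), which also schedules oom_allocation_work
 * when the ring would otherwise drain with no refill trigger left.
 */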
2410
2411/*
2412 * Try to post all desired receive buffers for all pipes.
Govind Singhcaa850e2017-04-20 16:41:36 +05302413 * Returns 0 for non fastpath rx copy engine as
2414 * oom_allocation_work will be scheduled to recover any
2415 * failures, non-zero if unable to completely replenish
2416 * receive buffers for fastpath rx Copy engine.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002417 */
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302418QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002419{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302420 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302421 int pipe_num;
Aditya Sathish61f7fa32018-03-27 17:16:33 +05302422 struct CE_state *ce_state = NULL;
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302423 QDF_STATUS qdf_status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002424
2425 A_TARGET_ACCESS_LIKELY(scn);
2426 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2427 struct HIF_CE_pipe_info *pipe_info;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002428
Houston Hoffman85925072016-05-06 17:02:18 -07002429 ce_state = scn->ce_id_to_state[pipe_num];
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002430 pipe_info = &hif_state->pipe_info[pipe_num];
Houston Hoffman85925072016-05-06 17:02:18 -07002431
2432 if (hif_is_nss_wifi_enabled(scn) &&
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002433 ce_state && (ce_state->htt_rx_data))
Houston Hoffman85925072016-05-06 17:02:18 -07002434 continue;
Houston Hoffman85925072016-05-06 17:02:18 -07002435
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302436 qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
Aditya Sathish61f7fa32018-03-27 17:16:33 +05302437 if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state &&
Govind Singhcaa850e2017-04-20 16:41:36 +05302438 ce_state->htt_rx_data &&
2439 scn->fastpath_mode_on) {
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302440 A_TARGET_ACCESS_UNLIKELY(scn);
2441 return qdf_status;
Govind Singhcaa850e2017-04-20 16:41:36 +05302442 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002443 }
2444
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002445 A_TARGET_ACCESS_UNLIKELY(scn);
2446
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302447 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002448}
2449
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302450QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002451{
Komal Seelam644263d2016-02-22 20:45:49 +05302452 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05302453 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302454 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002455
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07002456 hif_update_fastpath_recv_bufs_cnt(scn);
2457
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07002458 hif_msg_callbacks_install(scn);
2459
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002460 if (hif_completion_thread_startup(hif_state))
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302461 return QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002462
Houston Hoffman271951f2016-11-12 15:24:27 -08002463 /* enable buffer cleanup */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002464 hif_state->started = true;
2465
Houston Hoffman271951f2016-11-12 15:24:27 -08002466 /* Post buffers once to start things off. */
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302467 qdf_status = hif_post_recv_buffers(scn);
2468 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
Houston Hoffman271951f2016-11-12 15:24:27 -08002469 /* cleanup is done in hif_ce_disable */
2470 HIF_ERROR("%s:failed to post buffers", __func__);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302471 return qdf_status;
Houston Hoffman271951f2016-11-12 15:24:27 -08002472 }
2473
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302474 return qdf_status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002475}
2476
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002477static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002478{
Komal Seelam644263d2016-02-22 20:45:49 +05302479 struct hif_softc *scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002480 struct CE_handle *ce_hdl;
2481 uint32_t buf_sz;
2482 struct HIF_CE_state *hif_state;
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302483 qdf_nbuf_t netbuf;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302484 qdf_dma_addr_t CE_data;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002485 void *per_CE_context;
2486
2487 buf_sz = pipe_info->buf_sz;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002488 /* Unused Copy Engine */
2489 if (buf_sz == 0)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002490 return;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002491
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002492
2493 hif_state = pipe_info->HIF_CE_state;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002494 if (!hif_state->started)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002495 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002496
Komal Seelam02cf2f82016-02-22 20:44:25 +05302497 scn = HIF_GET_SOFTC(hif_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002498 ce_hdl = pipe_info->ce_hdl;
2499
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002500 if (scn->qdf_dev == NULL)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002501 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002502 while (ce_revoke_recv_next
2503 (ce_hdl, &per_CE_context, (void **)&netbuf,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302504 &CE_data) == QDF_STATUS_SUCCESS) {
Govind Singhcaa850e2017-04-20 16:41:36 +05302505 if (netbuf) {
2506 qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
2507 QDF_DMA_FROM_DEVICE);
2508 qdf_nbuf_free(netbuf);
2509 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002510 }
2511}
2512
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002513static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002514{
2515 struct CE_handle *ce_hdl;
2516 struct HIF_CE_state *hif_state;
Komal Seelam644263d2016-02-22 20:45:49 +05302517 struct hif_softc *scn;
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302518 qdf_nbuf_t netbuf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002519 void *per_CE_context;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302520 qdf_dma_addr_t CE_data;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002521 unsigned int nbytes;
2522 unsigned int id;
2523 uint32_t buf_sz;
2524 uint32_t toeplitz_hash_result;
2525
2526 buf_sz = pipe_info->buf_sz;
2527 if (buf_sz == 0) {
2528 /* Unused Copy Engine */
2529 return;
2530 }
2531
2532 hif_state = pipe_info->HIF_CE_state;
2533 if (!hif_state->started) {
2534 return;
2535 }
2536
Komal Seelam02cf2f82016-02-22 20:44:25 +05302537 scn = HIF_GET_SOFTC(hif_state);
2538
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002539 ce_hdl = pipe_info->ce_hdl;
2540
2541 while (ce_cancel_send_next
2542 (ce_hdl, &per_CE_context,
2543 (void **)&netbuf, &CE_data, &nbytes,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302544 &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002545 if (netbuf != CE_SENDLIST_ITEM_CTXT) {
2546 /*
2547 * Packets enqueued by htt_h2t_ver_req_msg() and
2548 * htt_h2t_rx_ring_cfg_msg_ll() have already been
2549 * freed in htt_htc_misc_pkt_pool_free() in
2550 * wlantl_close(), so do not free them here again
Houston Hoffman29573d92015-10-20 17:49:44 -07002551 * by checking whether it's the endpoint
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002552 * which they are queued in.
2553 */
Nirav Shahd7f91592016-04-21 14:18:43 +05302554 if (id == scn->htc_htt_tx_endpoint)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002555 return;
Nirav Shahd7f91592016-04-21 14:18:43 +05302556 /* Indicate the completion to higher
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002557 * layer to free the buffer
2558 */
2559 if (pipe_info->pipe_callbacks.txCompletionHandler)
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05302560 pipe_info->pipe_callbacks.
2561 txCompletionHandler(pipe_info->
2562 pipe_callbacks.Context,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002563 netbuf, id, toeplitz_hash_result);
2564 }
2565 }
2566}
2567
2568/*
2569 * Cleanup residual buffers for device shutdown:
2570 * buffers that were enqueued for receive
2571 * buffers that were to be sent
2572 * Note: Buffers that had completed but which were
2573 * not yet processed are on a completion queue. They
2574 * are handled when the completion thread shuts down.
2575 */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002576static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002577{
2578 int pipe_num;
Komal Seelam644263d2016-02-22 20:45:49 +05302579 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffman85925072016-05-06 17:02:18 -07002580 struct CE_state *ce_state;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002581
Komal Seelam02cf2f82016-02-22 20:44:25 +05302582 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002583 struct HIF_CE_pipe_info *pipe_info;
2584
Houston Hoffman85925072016-05-06 17:02:18 -07002585 ce_state = scn->ce_id_to_state[pipe_num];
2586 if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2587 ((ce_state->htt_tx_data) ||
2588 (ce_state->htt_rx_data))) {
2589 continue;
2590 }
2591
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002592 pipe_info = &hif_state->pipe_info[pipe_num];
2593 hif_recv_buffer_cleanup_on_pipe(pipe_info);
2594 hif_send_buffer_cleanup_on_pipe(pipe_info);
2595 }
2596}
2597
Komal Seelam5584a7c2016-02-24 19:22:48 +05302598void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002599{
Komal Seelam644263d2016-02-22 20:45:49 +05302600 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05302601 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Komal Seelam644263d2016-02-22 20:45:49 +05302602
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002603 hif_buffer_cleanup(hif_state);
2604}
2605
static void hif_destroy_oom_work(struct hif_softc *scn)
{
	struct CE_state *ce_state;
	int ce_id;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		ce_state = scn->ce_id_to_state[ce_id];
		if (ce_state)
			qdf_destroy_work(scn->qdf_dev,
					 &ce_state->oom_allocation_work);
	}
}

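/**
 * hif_ce_stop() - stop the copy engines and release host-side state
 * @scn: hif context
 *
 * Disables interrupts and bottom-half contexts, cleans up residual
 * buffers, destroys the per-pipe copy engines and their locks, and
 * stops the sleep timer. After this call the target must neither DMA
 * nor interrupt.
 */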
void hif_ce_stop(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num;

	/*
	 * Before cleaning up any memory, ensure IRQ and
	 * bottom-half contexts will not be re-entered.
	 */
	hif_disable_isr(&scn->osc);
	hif_destroy_oom_work(scn);
	scn->hif_init_done = false;

	/*
	 * At this point, asynchronous threads are stopped.
	 * The Target should not DMA nor interrupt, and Host code may
	 * not initiate anything more. So we just need to clean
	 * up Host-side state.
	 */

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	hif_buffer_cleanup(hif_state);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;
		struct CE_attr attr;
		struct CE_handle *ce_diag = hif_state->ce_diag;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			if (pipe_info->ce_hdl != ce_diag) {
				attr = hif_state->host_ce_config[pipe_num];
				if (attr.src_nentries)
					qdf_spinlock_destroy(&pipe_info->
							completion_freeq_lock);
			}
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
		}
	}

	if (hif_state->sleep_timer_init) {
		qdf_timer_stop(&hif_state->sleep_timer);
		qdf_timer_free(&hif_state->sleep_timer);
		hif_state->sleep_timer_init = false;
	}

	hif_state->started = false;
}

#ifdef QCN7605_SUPPORT
static inline
void hif_get_shadow_reg_cfg_qcn7605(struct shadow_reg_cfg
				    **target_shadow_reg_cfg_ret,
				    uint32_t *shadow_cfg_sz_ret)
{
	if (target_shadow_reg_cfg_ret)
		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg_map_qcn7605;
	if (shadow_cfg_sz_ret)
		*shadow_cfg_sz_ret = sizeof(target_shadow_reg_cfg_map_qcn7605);
}
#else
static inline
void hif_get_shadow_reg_cfg_qcn7605(struct shadow_reg_cfg
				    **target_shadow_reg_cfg_ret,
				    uint32_t *shadow_cfg_sz_ret)
{
	HIF_ERROR("QCN7605 not supported");
}
#endif

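/**
 * hif_get_shadow_reg_cfg() - return the shadow register config for a target
 * @scn: hif context
 * @target_shadow_reg_cfg_ret: output pointer for the shadow register table
 * @shadow_cfg_sz_ret: output pointer for the size of that table in bytes
 *
 * Selects the QCN7605-specific table when applicable, and the default
 * shadow register table otherwise.
 */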
static void hif_get_shadow_reg_cfg(struct hif_softc *scn,
				   struct shadow_reg_cfg
				   **target_shadow_reg_cfg_ret,
				   uint32_t *shadow_cfg_sz_ret)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);

	switch (tgt_info->target_type) {
	case TARGET_TYPE_QCN7605:
		hif_get_shadow_reg_cfg_qcn7605(target_shadow_reg_cfg_ret,
					       shadow_cfg_sz_ret);
		break;
	default:
		if (target_shadow_reg_cfg_ret)
			*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
		if (shadow_cfg_sz_ret)
			*shadow_cfg_sz_ret = shadow_cfg_sz;
	}
}

/**
 * hif_get_target_ce_config() - get copy engine configuration
 * @scn: hif context
 * @target_ce_config_ret: basic copy engine configuration
 * @target_ce_config_sz_ret: size of the basic configuration in bytes
 * @target_service_to_ce_map_ret: service mapping for the copy engines
 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
 * @target_shadow_reg_cfg_ret: shadow register configuration
 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
 *
 * Provides an accessor to these values outside of this file.
 * They are currently stored in static pointers to const sections;
 * there are multiple configurations that are selected from at compile time.
 * Runtime selection would need to consider mode, target type and bus type.
 *
 * Return: return by parameter.
 */
void hif_get_target_ce_config(struct hif_softc *scn,
		struct CE_pipe_config **target_ce_config_ret,
		uint32_t *target_ce_config_sz_ret,
		struct service_to_pipe **target_service_to_ce_map_ret,
		uint32_t *target_service_to_ce_map_sz_ret,
		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
		uint32_t *shadow_cfg_sz_ret)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	*target_ce_config_ret = hif_state->target_ce_config;
	*target_ce_config_sz_ret = hif_state->target_ce_config_sz;

	hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
				       target_service_to_ce_map_sz_ret);
	hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret,
			       shadow_cfg_sz_ret);
}

#ifdef CONFIG_SHADOW_V2
static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
{
	int i;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "%s: num_config %d", __func__, cfg->num_shadow_reg_v2_cfg);

	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  "%s: i %d, val %x", __func__, i,
			  cfg->shadow_reg_v2_cfg[i].addr);
	}
}

#else
static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
{
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "%s: CONFIG_SHADOW_V2 not defined", __func__);
}
#endif

/**
 * hif_wlan_enable(): call the platform driver to enable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode and CE configuration to the
 * platform driver to enable wlan.
 *
 * Return: 0 on success, Linux error code otherwise
 */
int hif_wlan_enable(struct hif_softc *scn)
{
	struct pld_wlan_enable_cfg cfg;
	enum pld_driver_mode mode;
	uint32_t con_mode = hif_get_conparam(scn);

	hif_get_target_ce_config(scn,
			(struct CE_pipe_config **)&cfg.ce_tgt_cfg,
			&cfg.num_ce_tgt_cfg,
			(struct service_to_pipe **)&cfg.ce_svc_cfg,
			&cfg.num_ce_svc_pipe_cfg,
			(struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
			&cfg.num_shadow_reg_cfg);

	/* translate from structure size to array size */
	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);

	hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
					    &cfg.num_shadow_reg_v2_cfg);

	hif_print_hal_shadow_register_cfg(&cfg);

	if (QDF_GLOBAL_FTM_MODE == con_mode)
		mode = PLD_FTM;
	else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
		mode = PLD_COLDBOOT_CALIBRATION;
	else if (QDF_IS_EPPING_ENABLED(con_mode))
		mode = PLD_EPPING;
	else
		mode = PLD_MISSION;

	if (BYPASS_QMI)
		return 0;
	else
		return pld_wlan_enable(scn->qdf_dev->dev, &cfg,
				       mode, QWLAN_VERSIONSTR);
}

#ifdef WLAN_FEATURE_EPPING

#define CE_EPPING_USES_IRQ true

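/**
 * hif_ce_prepare_epping_config() - load the epping CE attribute tables
 * @hif_state: hif_ce state context
 *
 * Selects the interrupt- or poll-driven epping host CE configuration
 * and the matching target CE and shadow register tables.
 */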
void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state)
{
	if (CE_EPPING_USES_IRQ)
		hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
	else
		hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
	hif_state->target_ce_config = target_ce_config_wlan_epping;
	hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
	target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
}
#endif

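/**
 * hif_set_ce_config_qcn7605() - set the QCN7605 CE attribute tables
 * @scn: hif context
 * @hif_state: hif_ce state context
 *
 * Loads the QCN7605 host and target CE configuration and adjusts the
 * copy engine count for that target.
 */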
#ifdef QCN7605_SUPPORT
static inline
void hif_set_ce_config_qcn7605(struct hif_softc *scn,
			       struct HIF_CE_state *hif_state)
{
	hif_state->host_ce_config = host_ce_config_wlan_qcn7605;
	hif_state->target_ce_config = target_ce_config_wlan_qcn7605;
	hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_qcn7605);
	scn->ce_count = QCN7605_CE_COUNT;
}
#else
static inline
void hif_set_ce_config_qcn7605(struct hif_softc *scn,
			       struct HIF_CE_state *hif_state)
{
	HIF_ERROR("QCN7605 not supported");
}
#endif

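/**
 * hif_ce_service_init() - register the CE service implementation
 *
 * Registers the SRNG-based CE service when QCA_WIFI_SUPPORT_SRNG is
 * enabled and the legacy CE service otherwise; a no-op when the common
 * service init (CE_SVC_CMN_INIT) is not compiled in.
 */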
#ifdef CE_SVC_CMN_INIT
#ifdef QCA_WIFI_SUPPORT_SRNG
static inline void hif_ce_service_init(void)
{
	ce_service_srng_init();
}
#else
static inline void hif_ce_service_init(void)
{
	ce_service_legacy_init();
}
#endif
#else
static inline void hif_ce_service_init(void)
{
}
#endif

/**
 * hif_ce_prepare_config() - load the correct static tables.
 * @scn: hif context
 *
 * Epping uses different static attribute tables than mission mode.
 */
void hif_ce_prepare_config(struct hif_softc *scn)
{
	uint32_t mode = hif_get_conparam(scn);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_ce_service_init();
	hif_state->ce_services = ce_services_attach(scn);

	scn->ce_count = HOST_CE_COUNT;
	/* if epping is enabled we need to use the epping configuration. */
	if (QDF_IS_EPPING_ENABLED(mode))
		hif_ce_prepare_epping_config(hif_state);

	switch (tgt_info->target_type) {
	default:
		hif_state->host_ce_config = host_ce_config_wlan;
		hif_state->target_ce_config = target_ce_config_wlan;
		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
		break;
	case TARGET_TYPE_QCN7605:
		hif_set_ce_config_qcn7605(scn, hif_state);
		break;
	case TARGET_TYPE_AR900B:
	case TARGET_TYPE_QCA9984:
	case TARGET_TYPE_IPQ4019:
	case TARGET_TYPE_QCA9888:
		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
			hif_state->host_ce_config =
				host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
		} else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
			hif_state->host_ce_config =
				host_lowdesc_ce_cfg_wlan_ar900b;
		} else {
			hif_state->host_ce_config = host_ce_config_wlan_ar900b;
		}

		hif_state->target_ce_config = target_ce_config_wlan_ar900b;
		hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_ar900b);

		break;

	case TARGET_TYPE_AR9888:
	case TARGET_TYPE_AR9888V2:
		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG))
			hif_state->host_ce_config =
				host_lowdesc_ce_cfg_wlan_ar9888;
		else
			hif_state->host_ce_config = host_ce_config_wlan_ar9888;

		hif_state->target_ce_config = target_ce_config_wlan_ar9888;
		hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_ar9888);

		break;

	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
		if (scn->bus_type == QDF_BUS_TYPE_PCI) {
			hif_state->host_ce_config =
				host_ce_config_wlan_qca8074_pci;
			hif_state->target_ce_config =
				target_ce_config_wlan_qca8074_pci;
			hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_qca8074_pci);
		} else {
			hif_state->host_ce_config = host_ce_config_wlan_qca8074;
			hif_state->target_ce_config =
				target_ce_config_wlan_qca8074;
			hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_qca8074);
		}
		break;
	case TARGET_TYPE_QCA6290:
		hif_state->host_ce_config = host_ce_config_wlan_qca6290;
		hif_state->target_ce_config = target_ce_config_wlan_qca6290;
		hif_state->target_ce_config_sz =
					sizeof(target_ce_config_wlan_qca6290);

		scn->ce_count = QCA_6290_CE_COUNT;
		break;
	case TARGET_TYPE_QCA6390:
		hif_state->host_ce_config = host_ce_config_wlan_qca6390;
		hif_state->target_ce_config = target_ce_config_wlan_qca6390;
		hif_state->target_ce_config_sz =
					sizeof(target_ce_config_wlan_qca6390);

		scn->ce_count = QCA_6390_CE_COUNT;
		break;
	}
	QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
}

/**
 * hif_ce_open() - do ce specific allocations
 * @hif_sc: pointer to hif context
 *
 * Return: 0 for success or QDF_STATUS_E_NOMEM
 */
QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	qdf_spinlock_create(&hif_state->irq_reg_lock);
	qdf_spinlock_create(&hif_state->keep_awake_lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * hif_ce_close() - do ce specific free
 * @hif_sc: pointer to hif context
 */
void hif_ce_close(struct hif_softc *hif_sc)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
	qdf_spinlock_destroy(&hif_state->keep_awake_lock);
}

/**
 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
 * @hif_sc: hif context
 *
 * Uses state variables to support cleaning up when hif_config_ce fails.
 */
void hif_unconfig_ce(struct hif_softc *hif_sc)
{
	int pipe_num;
	struct HIF_CE_pipe_info *pipe_info;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc);

	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl)
			ce_unregister_irq(hif_state, (1 << pipe_num));
	}
	deinit_tasklet_workers(hif_hdl);
	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
		}
	}
	if (hif_sc->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		hif_sc->athdiag_procfs_inited = false;
	}
}

#ifdef CONFIG_BYPASS_QMI
#ifdef QCN7605_SUPPORT
/**
 * hif_post_static_buf_to_target() - post static buffer to WLAN FW
 * @scn: pointer to HIF structure
 *
 * WLAN FW needs 2MB memory from DDR when QMI is disabled.
 *
 * Return: void
 */
static void hif_post_static_buf_to_target(struct hif_softc *scn)
{
	void *target_va;
	phys_addr_t target_pa;
	struct ce_info *ce_info_ptr;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	uint32_t i = 0;
	int ret;

	target_va = qdf_mem_alloc_consistent(scn->qdf_dev,
					     scn->qdf_dev->dev,
					     FW_SHARED_MEM +
					     sizeof(struct ce_info),
					     &target_pa);
	if (!target_va)
		return;

	ce_info_ptr = (struct ce_info *)target_va;

	if (scn->vaddr_rri_on_ddr) {
		ce_info_ptr->rri_over_ddr_low_paddr =
			BITS0_TO_31(scn->paddr_rri_on_ddr);
		ce_info_ptr->rri_over_ddr_high_paddr =
			BITS32_TO_35(scn->paddr_rri_on_ddr);
	}

	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);
	if (ret) {
		hif_err("Failed to get CE msi config");
		return;
	}

	for (i = 0; i < CE_COUNT_MAX; i++) {
		ce_info_ptr->cfg[i].ce_id = i;
		ce_info_ptr->cfg[i].msi_vector =
			(i % msi_data_count) + msi_irq_start;
	}

	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
	hif_info("target va %pK target pa %pa", target_va, &target_pa);
}
#else
/**
 * hif_post_static_buf_to_target() - post static buffer to WLAN FW
 * @scn: pointer to HIF structure
 *
 * WLAN FW needs 2MB memory from DDR when QMI is disabled.
 *
 * Return: void
 */
static void hif_post_static_buf_to_target(struct hif_softc *scn)
{
	void *target_va;
	phys_addr_t target_pa;

	target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					     FW_SHARED_MEM, &target_pa);
	if (!target_va) {
		HIF_TRACE("Memory allocation failed, could not post target buf");
		return;
	}
	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
	HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
}
#endif

#else
static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
{
}
#endif

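/**
 * hif_srng_sleep_state_adjust() - sleep-state hook for SRNG-based targets
 * @scn: hif context
 * @sleep_ok: whether the target may sleep
 * @wait_for_it: whether to wait for the state change to take effect
 *
 * Currently a todo stub; installed as the bus-ops sleep-state handler
 * for SRNG-based targets so the interface stays uniform.
 *
 * Return: 0
 */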
static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
				bool wait_for_it)
{
	/* todo */
	return 0;
}

/**
 * hif_config_ce() - configure copy engines
 * @scn: hif context
 *
 * Prepares fw, copy engine hardware and host sw according
 * to the attributes selected by hif_ce_prepare_config.
 *
 * Also calls athdiag_procfs_init().
 *
 * Return: 0 for success, nonzero for failure.
 */
int hif_config_ce(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct HIF_CE_pipe_info *pipe_info;
	int pipe_num;
	struct CE_state *ce_state = NULL;

#ifdef ADRASTEA_SHADOW_REGISTERS
	int i;
#endif
	QDF_STATUS rv = QDF_STATUS_SUCCESS;

	scn->notice_send = true;
	scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;

	hif_post_static_buf_to_target(scn);

	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;

	hif_config_rri_on_ddr(scn);

	if (ce_srng_based(scn))
		scn->bus_ops.hif_target_sleep_state_adjust =
			&hif_srng_sleep_state_adjust;

	/* Initialise the CE debug history sysfs interface inputs ce_id and
	 * index. Disable data storing.
	 */
	reset_ce_debug_history(scn);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr *attr;

		pipe_info = &hif_state->pipe_info[pipe_num];
		pipe_info->pipe_num = pipe_num;
		pipe_info->HIF_CE_state = hif_state;
		attr = &hif_state->host_ce_config[pipe_num];

		pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
		ce_state = scn->ce_id_to_state[pipe_num];
		if (!ce_state) {
			A_TARGET_ACCESS_UNLIKELY(scn);
			goto err;
		}
		qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
		QDF_ASSERT(pipe_info->ce_hdl != NULL);
		if (pipe_info->ce_hdl == NULL) {
			rv = QDF_STATUS_E_FAILURE;
			A_TARGET_ACCESS_UNLIKELY(scn);
			goto err;
		}

		ce_state->lro_data = qdf_lro_init();

		if (attr->flags & CE_ATTR_DIAG) {
			/* Reserve the ultimate CE for
			 * Diagnostic Window support
			 */
			hif_state->ce_diag = pipe_info->ce_hdl;
			continue;
		}

		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
		    (ce_state->htt_rx_data))
			continue;

		pipe_info->buf_sz = (qdf_size_t)(attr->src_sz_max);
		if (attr->dest_nentries > 0) {
			atomic_set(&pipe_info->recv_bufs_needed,
				   init_buffer_count(attr->dest_nentries - 1));
			/* SRNG based CE has one entry less */
			if (ce_srng_based(scn))
				atomic_dec(&pipe_info->recv_bufs_needed);
		} else {
			atomic_set(&pipe_info->recv_bufs_needed, 0);
		}
		ce_tasklet_init(hif_state, (1 << pipe_num));
		ce_register_irq(hif_state, (1 << pipe_num));
	}

	if (athdiag_procfs_init(scn) != 0) {
		A_TARGET_ACCESS_UNLIKELY(scn);
		goto err;
	}
	scn->athdiag_procfs_inited = true;

	HIF_DBG("%s: ce_init done", __func__);

	init_tasklet_workers(hif_hdl);

	HIF_DBG("%s: X, ret = %d", __func__, rv);

#ifdef ADRASTEA_SHADOW_REGISTERS
	HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__);
	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
		HIF_DBG("%s Shadow Register%d is mapped to address %x",
			__func__, i,
			(A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
	}
#endif

	return rv != QDF_STATUS_SUCCESS;

err:
	/* Failure, so clean up */
	hif_unconfig_ce(scn);
	HIF_TRACE("%s: X, ret = %d", __func__, rv);
	/* nonzero return indicates failure */
	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
}

#ifdef IPA_OFFLOAD
/**
 * hif_ce_ipa_get_ce_resource() - get uc resource on hif
 * @scn: bus context
 * @ce_sr: copyengine source ring shared memory handle
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * When the IPA micro controller data path offload feature is enabled,
 * HIF must release copy engine resource information to the IPA UC,
 * which accesses the hardware using the released information.
 *
 * Return: None
 */
void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
			     qdf_shared_mem_t **ce_sr,
			     uint32_t *ce_sr_ring_size,
			     qdf_dma_addr_t *ce_reg_paddr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;

	ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
			    ce_reg_paddr);
}
#endif /* IPA_OFFLOAD */
3287
3288
3289#ifdef ADRASTEA_SHADOW_REGISTERS
3290
3291/*
Manikandan Mohanafd6e882017-04-07 17:46:41 -07003292 * Current shadow register config
3293 *
3294 * -----------------------------------------------------------
3295 * Shadow Register | CE | src/dst write index
3296 * -----------------------------------------------------------
3297 * 0 | 0 | src
3298 * 1 No Config - Doesn't point to anything
3299 * 2 No Config - Doesn't point to anything
3300 * 3 | 3 | src
3301 * 4 | 4 | src
3302 * 5 | 5 | src
3303 * 6 No Config - Doesn't point to anything
3304 * 7 | 7 | src
3305 * 8 No Config - Doesn't point to anything
3306 * 9 No Config - Doesn't point to anything
3307 * 10 No Config - Doesn't point to anything
3308 * 11 No Config - Doesn't point to anything
3309 * -----------------------------------------------------------
3310 * 12 No Config - Doesn't point to anything
3311 * 13 | 1 | dst
3312 * 14 | 2 | dst
3313 * 15 No Config - Doesn't point to anything
3314 * 16 No Config - Doesn't point to anything
3315 * 17 No Config - Doesn't point to anything
3316 * 18 No Config - Doesn't point to anything
3317 * 19 | 7 | dst
3318 * 20 | 8 | dst
3319 * 21 No Config - Doesn't point to anything
3320 * 22 No Config - Doesn't point to anything
3321 * 23 No Config - Doesn't point to anything
3322 * -----------------------------------------------------------
3323 *
3324 *
3325 * ToDo - Move shadow register config to following in the future
3326 * This helps free up a block of shadow registers towards the end.
3327 * Can be used for other purposes
3328 *
3329 * -----------------------------------------------------------
3330 * Shadow Register | CE | src/dst write index
3331 * -----------------------------------------------------------
3332 * 0 | 0 | src
3333 * 1 | 3 | src
3334 * 2 | 4 | src
3335 * 3 | 5 | src
3336 * 4 | 7 | src
3337 * -----------------------------------------------------------
3338 * 5 | 1 | dst
3339 * 6 | 2 | dst
3340 * 7 | 7 | dst
3341 * 8 | 8 | dst
3342 * -----------------------------------------------------------
3343 * 9 No Config - Doesn't point to anything
3344 * 12 No Config - Doesn't point to anything
3345 * 13 No Config - Doesn't point to anything
3346 * 14 No Config - Doesn't point to anything
3347 * 15 No Config - Doesn't point to anything
3348 * 16 No Config - Doesn't point to anything
3349 * 17 No Config - Doesn't point to anything
3350 * 18 No Config - Doesn't point to anything
3351 * 19 No Config - Doesn't point to anything
3352 * 20 No Config - Doesn't point to anything
3353 * 21 No Config - Doesn't point to anything
3354 * 22 No Config - Doesn't point to anything
3355 * 23 No Config - Doesn't point to anything
3356 * -----------------------------------------------------------
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08003357*/
#ifndef QCN7605_SUPPORT
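/**
 * shadow_sr_wr_ind_addr() - shadow register for a CE source write index
 * @scn: hif context
 * @ctrl_addr: base address of the copy engine
 *
 * Maps a copy engine to the shadow register that mirrors its source
 * ring write index, per the table above.
 *
 * Return: shadow register address
 */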
u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 0:
		addr = SHADOW_VALUE0;
		break;
	case 3:
		addr = SHADOW_VALUE3;
		break;
	case 4:
		addr = SHADOW_VALUE4;
		break;
	case 5:
		addr = SHADOW_VALUE5;
		break;
	case 7:
		addr = SHADOW_VALUE7;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}
	return addr;
}

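/**
 * shadow_dst_wr_ind_addr() - shadow register for a CE destination write index
 * @scn: hif context
 * @ctrl_addr: base address of the copy engine
 *
 * Maps a copy engine to the shadow register that mirrors its destination
 * ring write index, per the table above.
 *
 * Return: shadow register address
 */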
u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 1:
		addr = SHADOW_VALUE13;
		break;
	case 2:
		addr = SHADOW_VALUE14;
		break;
	case 5:
		addr = SHADOW_VALUE17;
		break;
	case 7:
		addr = SHADOW_VALUE19;
		break;
	case 8:
		addr = SHADOW_VALUE20;
		break;
	case 9:
		addr = SHADOW_VALUE21;
		break;
	case 10:
		addr = SHADOW_VALUE22;
		break;
	case 11:
		addr = SHADOW_VALUE23;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}

	return addr;
}
#else
u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 0:
		addr = SHADOW_VALUE0;
		break;
	case 4:
		addr = SHADOW_VALUE4;
		break;
	case 5:
		addr = SHADOW_VALUE5;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}
	return addr;
}

u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 1:
		addr = SHADOW_VALUE13;
		break;
	case 2:
		addr = SHADOW_VALUE14;
		break;
	case 3:
		addr = SHADOW_VALUE15;
		break;
	case 5:
		addr = SHADOW_VALUE17;
		break;
	case 7:
		addr = SHADOW_VALUE19;
		break;
	case 8:
		addr = SHADOW_VALUE20;
		break;
	case 9:
		addr = SHADOW_VALUE21;
		break;
	case 10:
		addr = SHADOW_VALUE22;
		break;
	case 11:
		addr = SHADOW_VALUE23;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}

	return addr;
}
#endif
#endif

#if defined(FEATURE_LRO)
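/**
 * hif_ce_get_lro_ctx() - get the LRO context for a copy engine
 * @hif_hdl: hif opaque context
 * @ctx_id: copy engine id
 *
 * Return: the per-CE LRO data pointer set up at CE init time
 */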
void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
{
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	ce_state = scn->ce_id_to_state[ctx_id];

	return ce_state->lro_data;
}
#endif

/**
 * hif_map_service_to_pipe() - returns the ce ids pertaining to
 * this service
 * @hif_hdl: hif opaque handle
 * @svc_id: Service ID for which the mapping is needed.
 * @ul_pipe: address of the container in which ul pipe is returned.
 * @dl_pipe: address of the container in which dl pipe is returned.
 * @ul_is_polled: address of the container in which a bool
 *			indicating if the UL CE for this service
 *			is polled is returned.
 * @dl_is_polled: address of the container in which a bool
 *			indicating if the DL CE for this service
 *			is polled is returned.
 *
 * Return: Indicates whether the service has been found in the table.
 *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
 *         There will be warning logs if either leg has not been updated
 *         because it missed the entry in the table (but this is not an err).
 */
int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
			int *dl_is_polled)
{
	int status = QDF_STATUS_E_INVAL;
	unsigned int i;
	struct service_to_pipe element;
	struct service_to_pipe *tgt_svc_map_to_use;
	uint32_t sz_tgt_svc_map_to_use;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	bool dl_updated = false;
	bool ul_updated = false;

	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
				       &sz_tgt_svc_map_to_use);

	*dl_is_polled = 0;  /* polling for received messages not supported */

	for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
		if (element.service_id == svc_id) {
			if (element.pipedir == PIPEDIR_OUT) {
				*ul_pipe = element.pipenum;
				*ul_is_polled =
					(hif_state->host_ce_config[*ul_pipe].flags &
					 CE_ATTR_DISABLE_INTR) != 0;
				ul_updated = true;
			} else if (element.pipedir == PIPEDIR_IN) {
				*dl_pipe = element.pipenum;
				dl_updated = true;
			}
			status = QDF_STATUS_SUCCESS;
		}
	}
	if (ul_updated == false)
		HIF_DBG("ul pipe is NOT updated for service %d", svc_id);
	if (dl_updated == false)
		HIF_DBG("dl pipe is NOT updated for service %d", svc_id);

	return status;
}
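
/*
 * Example usage (illustrative sketch only; HTT_DATA_MSG_SVC stands in
 * for whichever HTC service id the caller cares about, and the body of
 * the if-branch is elided):
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	if (hif_map_service_to_pipe(hif_hdl, HTT_DATA_MSG_SVC,
 *				    &ul_pipe, &dl_pipe,
 *				    &ul_polled, &dl_polled) ==
 *	    QDF_STATUS_SUCCESS) {
 *		// configure the HTC endpoint with ul_pipe/dl_pipe
 *	}
 */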

#ifdef SHADOW_REG_DEBUG
inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, srri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);

	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != srri_from_ddr) {
		HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
			  __func__, srri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return srri_from_ddr;
}

inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, drri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);

	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != drri_from_ddr) {
		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
			  drri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return drri_from_ddr;
}
#endif

#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_get_src_ring_read_index(): Called to get the SRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the SRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based SRRI.
 *
 * Return: SRRI
 */
inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
	if (attr.flags & CE_ATTR_DISABLE_INTR) {
		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	} else {
		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
			return A_TARGET_READ(scn,
					(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
		else
			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
					CE_ctrl_addr);
	}
}

/**
 * hif_get_dst_ring_read_index(): Called to get the DRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the DRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based DRRI.
 *
 * Return: DRRI
 */
inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];

	if (attr.flags & CE_ATTR_DISABLE_INTR) {
		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	} else {
		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
			return A_TARGET_READ(scn,
					(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
		else
			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
					CE_ctrl_addr);
	}
}

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This function allocates non-cached memory on ddr and sends
 * the physical address of this memory to the CE hardware. The
 * hardware updates the RRI on this particular location.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
	unsigned int i;
	uint32_t high_paddr, low_paddr;
	qdf_dma_addr_t paddr_rri_on_ddr = 0;

	scn->vaddr_rri_on_ddr =
		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
		scn->qdf_dev->dev, (CE_COUNT*sizeof(uint32_t)),
		&paddr_rri_on_ddr);

	if (!scn->vaddr_rri_on_ddr) {
		HIF_DBG("dmaable page alloc fail");
		return;
	}

	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
	low_paddr  = BITS0_TO_31(paddr_rri_on_ddr);
	high_paddr = BITS32_TO_35(paddr_rri_on_ddr);

	HIF_DBG("%s using srri and drri from DDR", __func__);

	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);

	for (i = 0; i < CE_COUNT; i++)
		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));

	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT*sizeof(uint32_t));
}
#else

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This is a dummy implementation for platforms that don't
 * support this functionality.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
}
#endif

/**
 * hif_dump_ce_registers() - dump ce registers
 * @scn: hif_opaque_softc pointer.
 *
 * Output the copy engine registers
 *
 * Return: 0 for success or error code
 */
int hif_dump_ce_registers(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
	uint16_t i;
	QDF_STATUS status;

	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
		if (!scn->ce_id_to_state[i]) {
			HIF_DBG("CE%d not used.", i);
			continue;
		}

		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
					   (uint8_t *) &ce_reg_values[0],
					   ce_reg_word_size * sizeof(uint32_t));

		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("Dumping CE register failed!");
			return -EACCES;
		}
		HIF_ERROR("CE%d=>\n", i);
		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
				   (uint8_t *) &ce_reg_values[0],
				   ce_reg_word_size * sizeof(uint32_t));
		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address
			  + SR_WR_INDEX_ADDRESS),
			  ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address
			  + CURRENT_SRRI_ADDRESS),
			  ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address
			  + DST_WR_INDEX_ADDRESS),
			  ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address
			  + CURRENT_DRRI_ADDRESS),
			  ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
		qdf_print("---");
	}
	return 0;
}
qdf_export_symbol(hif_dump_ce_registers);
#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
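/**
 * hif_get_addl_pipe_info() - collect additional info about a pipe
 * @osc: hif opaque context
 * @hif_info: output structure filled with ring state for the pipe
 * @pipe: pipe (copy engine) id
 *
 * Snapshots the source and destination ring state (entries, indices,
 * base addresses) plus the PCI memory base and CE control address.
 *
 * Return: hif_info, filled in
 */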
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct CE_ring_state *dest_ring = ce_state->dest_ring;

	if (src_ring) {
		hif_info->ul_pipe.nentries = src_ring->nentries;
		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
		hif_info->ul_pipe.sw_index = src_ring->sw_index;
		hif_info->ul_pipe.write_index = src_ring->write_index;
		hif_info->ul_pipe.hw_index = src_ring->hw_index;
		hif_info->ul_pipe.base_addr_CE_space =
			src_ring->base_addr_CE_space;
		hif_info->ul_pipe.base_addr_owner_space =
			src_ring->base_addr_owner_space;
	}

	if (dest_ring) {
		hif_info->dl_pipe.nentries = dest_ring->nentries;
		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
		hif_info->dl_pipe.write_index = dest_ring->write_index;
		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
		hif_info->dl_pipe.base_addr_CE_space =
			dest_ring->base_addr_CE_space;
		hif_info->dl_pipe.base_addr_owner_space =
			dest_ring->base_addr_owner_space;
	}

	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
	hif_info->ctrl_addr = ce_state->ctrl_addr;

	return hif_info;
}
qdf_export_symbol(hif_get_addl_pipe_info);

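/**
 * hif_set_nss_wifiol_mode() - set the NSS wifi offload mode
 * @osc: hif opaque context
 * @mode: NSS wifi offload mode to store in the hif context
 *
 * Return: 0
 */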
uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->nss_wifi_ol_mode = mode;
	return 0;
}
qdf_export_symbol(hif_set_nss_wifiol_mode);
#endif

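/**
 * hif_set_attribute() - store the hif attribute flags
 * @osc: hif opaque context
 * @hif_attrib: attribute flags (e.g. the HIF_LOWDESC_CE_* selectors
 *		consulted by hif_ce_prepare_config())
 */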
void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->hif_attribute = hif_attrib;
}


/* disable interrupts (only applicable to legacy copy engines currently) */
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
	uint32_t ctrl_addr = CE_state->ctrl_addr;

	Q_TARGET_ACCESS_BEGIN(scn);
	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
	Q_TARGET_ACCESS_END(scn);
}
qdf_export_symbol(hif_disable_interrupt);

/**
 * hif_fw_event_handler() - hif fw event handler
 * @hif_state: pointer to hif ce state structure
 *
 * Raise the HTC layer's registered firmware-event callback so that it can
 * process the pending firmware event.
 *
 * Return: none
 */
static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
{
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	if (!msg_callbacks->fwEventHandler)
		return;

	msg_callbacks->fwEventHandler(msg_callbacks->Context,
				      QDF_STATUS_E_FAILURE);
}
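
/*
 * Illustrative sketch (not part of the driver): the fwEventHandler invoked
 * above is supplied by the HTC layer through struct hif_msg_callbacks. The
 * callback body and the helper name below are hypothetical, and the use of
 * msg_callbacks_pending is an assumption about how callbacks are staged
 * before hif_start().
 */
#if 0	/* example only */
static void example_fw_event_cb(void *context, QDF_STATUS status)
{
	/* e.g. mark the target as failed and schedule recovery */
}

static void example_register_fw_event_cb(struct HIF_CE_state *hif_state)
{
	hif_state->msg_callbacks_pending.fwEventHandler = example_fw_event_cb;
	hif_state->msg_callbacks_pending.Context = hif_state;
}
#endif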

#ifndef QCA_WIFI_3_0
/**
 * hif_fw_interrupt_handler() - FW interrupt handler
 * @irq: irq number
 * @arg: the user pointer
 *
 * Called from the PCI interrupt handler when the Target raises a
 * firmware-generated interrupt to the Host.
 *
 * Only registered for legacy CE devices.
 *
 * Return: status of handled irq
 */
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	struct hif_softc *scn = arg;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	uint32_t fw_indicator_address, fw_indicator;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return ATH_ISR_NOSCHED;

	fw_indicator_address = hif_state->fw_indicator_address;
	/* For sudden unplug this will return ~0 */
	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);

	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
		/* ACK: clear Target-side pending event */
		A_TARGET_WRITE(scn, fw_indicator_address,
			       fw_indicator & ~FW_IND_EVENT_PENDING);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;

		if (hif_state->started) {
			hif_fw_event_handler(hif_state);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 * fw_indicator is used as a bitmap, defined as:
			 * FW_IND_EVENT_PENDING 0x1
			 * FW_IND_INITIALIZED   0x2
			 * FW_IND_NEEDRECOVER   0x4
			 */
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
				("%s: Early firmware event indicated 0x%x\n",
				 __func__, fw_indicator));
		}
	} else {
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;
	}

	return ATH_ISR_SCHED;
}
#else
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	return ATH_ISR_SCHED;
}
#endif /* #ifndef QCA_WIFI_3_0 */
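
/*
 * Illustrative sketch (not part of the driver): decoding the fw_indicator
 * bitmap documented in the handler above. The FW_IND_* flags are the real
 * target flags; the helper name example_decode_fw_indicator is
 * hypothetical.
 */
#if 0	/* example only */
static void example_decode_fw_indicator(uint32_t fw_indicator)
{
	if (fw_indicator & FW_IND_EVENT_PENDING)	/* 0x1 */
		HIF_INFO("fw event pending");
	if (fw_indicator & FW_IND_INITIALIZED)		/* 0x2 */
		HIF_INFO("fw initialized");
	if (fw_indicator & FW_IND_NEEDRECOVER)		/* 0x4 */
		HIF_INFO("fw needs recovery");
}
#endif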

/**
 * hif_wlan_disable(): call the platform driver to disable wlan
 * @scn: HIF Context
 *
 * This function passes the current con_mode to the platform driver so that
 * wlan is disabled appropriately for that mode.
 *
 * Return: void
 */
void hif_wlan_disable(struct hif_softc *scn)
{
	enum pld_driver_mode mode;
	uint32_t con_mode = hif_get_conparam(scn);

	/* the target is already in reset; nothing left to disable */
	if (scn->target_status == TARGET_STATUS_RESET)
		return;

	if (QDF_GLOBAL_FTM_MODE == con_mode)
		mode = PLD_FTM;
	else if (QDF_IS_EPPING_ENABLED(con_mode))
		mode = PLD_EPPING;
	else
		mode = PLD_MISSION;

	pld_wlan_disable(scn->qdf_dev->dev, mode);
}
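
/*
 * Illustrative sketch (not part of the driver): the con_mode to
 * pld_driver_mode mapping above could be factored into a helper if other
 * call sites needed it. example_conparam_to_pld_mode is a hypothetical
 * name; the mapping mirrors hif_wlan_disable().
 */
#if 0	/* example only */
static enum pld_driver_mode example_conparam_to_pld_mode(uint32_t con_mode)
{
	if (QDF_GLOBAL_FTM_MODE == con_mode)
		return PLD_FTM;
	if (QDF_IS_EPPING_ENABLED(con_mode))
		return PLD_EPPING;
	return PLD_MISSION;
}
#endif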

/**
 * hif_get_wake_ce_id() - find the copy engine that can wake the host
 * @scn: HIF context
 * @ce_id: output parameter for the wake CE id
 *
 * The DL pipe that HTC_CTRL_RSVD_SVC maps to doubles as the wake CE.
 *
 * Return: 0 on success, negative errno on failure
 */
int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
{
	QDF_STATUS status;
	uint8_t ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;

	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
					 HTC_CTRL_RSVD_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
		return qdf_status_to_os_return(status);
	}

	*ce_id = dl_pipe;

	return 0;
}
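
/*
 * Illustrative sketch (not part of the driver): a suspend path could use
 * hif_get_wake_ce_id() to learn which CE interrupt must stay armed as a
 * wakeup source. example_setup_wake_irq is a hypothetical name; only
 * hif_get_wake_ce_id() is the real API.
 */
#if 0	/* example only */
static int example_setup_wake_irq(struct hif_softc *scn)
{
	uint8_t wake_ce_id;
	int ret = hif_get_wake_ce_id(scn, &wake_ce_id);

	if (ret)
		return ret;

	/* arm only wake_ce_id's interrupt as a wakeup source here */
	HIF_INFO("wake CE id is %u", wake_ce_id);
	return 0;
}
#endif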