/*
 * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>		/* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include <target_type.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#ifndef CONFIG_WIN
#include "qwlan_version.h"
#endif
#include "qdf_module.h"

#define CE_POLL_TIMEOUT 10	/* ms */

#define AGC_DUMP 1
#define CHANINFO_DUMP 2
#define BB_WATCHDOG_DUMP 3
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
#define PCIE_ACCESS_DUMP 4
#endif
#include "mp_dev.h"

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290)) && \
	!defined(QCA_WIFI_SUPPORT_SRNG)
#define QCA_WIFI_SUPPORT_SRNG
#endif

/* Forward references */
QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix EV118783: poll to check whether a BMI response has arrived,
 * rather than waiting for an interrupt that may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC  1000

#ifdef CONFIG_BYPASS_QMI
#define BYPASS_QMI 1
#else
#define BYPASS_QMI 0
#endif

#ifdef CONFIG_WIN
#if ENABLE_10_4_FW_HDR
#define WDI_IPA_SERVICE_GROUP 5
#define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
#define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
#define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
#endif /* ENABLE_10_4_FW_HDR */
#endif

QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn);
static void hif_config_rri_on_ddr(struct hif_softc *scn);

/**
 * hif_target_access_log_dump() - dump the target access log
 *
 * Return: n/a
 */
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
static void hif_target_access_log_dump(void)
{
	hif_target_dump_access_log();
}
#endif

void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
		      uint8_t cmd_id, bool start)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	switch (cmd_id) {
	case AGC_DUMP:
		if (start)
			priv_start_agc(scn);
		else
			priv_dump_agc(scn);
		break;
	case CHANINFO_DUMP:
		if (start)
			priv_start_cap_chaninfo(scn);
		else
			priv_dump_chaninfo(scn);
		break;
	case BB_WATCHDOG_DUMP:
		priv_dump_bbwatchdog(scn);
		break;
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	case PCIE_ACCESS_DUMP:
		hif_target_access_log_dump();
		break;
#endif
	default:
		HIF_ERROR("%s: Invalid htc dump command", __func__);
		break;
	}
}

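/**
 * ce_poll_timeout() - timer callback for a copy engine in polled mode
 * @arg: opaque pointer to the struct CE_state being polled
 *
 * Services the CE once and re-arms the poll timer, as long as the CE
 * is still marked as having an initialized timer.
 *
 * Return: none
 */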
static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;

	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}

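/**
 * roundup_pwr2() - round a ring size up to the next power of 2
 * @n: requested number of entries
 *
 * CE rings rely on nentries_mask = nentries - 1 for index arithmetic,
 * which only works for power-of-2 sizes; e.g. roundup_pwr2(100) == 128
 * and roundup_pwr2(512) == 512.
 *
 * Return: the smallest power of 2 >= @n, or 0 if @n is too large
 */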
static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	QDF_ASSERT(0); /* n too large */
	return 0;
}
163
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700164#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
165#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40
166
167static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
168 { 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
169 { 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
170 { 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
171 { 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
172 { 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
173 { 1, ADRASTEA_DST_WR_INDEX_OFFSET},
174 { 2, ADRASTEA_DST_WR_INDEX_OFFSET},
175 { 7, ADRASTEA_DST_WR_INDEX_OFFSET},
176 { 8, ADRASTEA_DST_WR_INDEX_OFFSET},
Houston Hoffmane6330442016-02-26 12:19:11 -0800177#ifdef QCA_WIFI_3_0_ADRASTEA
178 { 9, ADRASTEA_DST_WR_INDEX_OFFSET},
179 { 10, ADRASTEA_DST_WR_INDEX_OFFSET},
Nirav Shah75cc5c82016-05-25 10:52:38 +0530180 { 11, ADRASTEA_DST_WR_INDEX_OFFSET},
Houston Hoffmane6330442016-02-26 12:19:11 -0800181#endif
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700182};
183
Nirav Shah0d0cce82018-01-17 17:00:31 +0530184#ifdef WLAN_FEATURE_EPPING
Vishwajith Upendra70efc752016-04-18 11:23:49 -0700185static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
186 { 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
187 { 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
188 { 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
189 { 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
190 { 1, ADRASTEA_DST_WR_INDEX_OFFSET},
191 { 2, ADRASTEA_DST_WR_INDEX_OFFSET},
192 { 5, ADRASTEA_DST_WR_INDEX_OFFSET},
193 { 7, ADRASTEA_DST_WR_INDEX_OFFSET},
194 { 8, ADRASTEA_DST_WR_INDEX_OFFSET},
195};
Nirav Shah0d0cce82018-01-17 17:00:31 +0530196#endif
Houston Hoffmanc7d54292016-04-13 18:55:37 -0700197
/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
 * mapping of HTC services to HIF pipes.
 */
/*
 * This table defines Copy Engine configuration and the mapping
 * of services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 * target_service_to_ce_map - Target-side mapping
 * hif_map_service_to_pipe  - Host-side mapping
 * target_ce_config         - Target-side configuration
 * host_ce_config           - Host-side configuration
 ============================================================================
 Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
            |                      |      | ctio | Size     | Frequency
            |                      |      | n    |          |
 ============================================================================
 tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
 descriptor |                      |      |      | O(100B)  | and regular
 download   |                      |      |      |          |
 ----------------------------------------------------------------------------
 rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
 indication |                      |      |      | O(10B)   | regular
 upload     |                      |      |      |          |
 ----------------------------------------------------------------------------
 MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
 upload     |                      |      |      | O(1000B) | (frequent
 e.g. noise |                      |      |      |          | during IP1.0
 packets    |                      |      |      |          | testing)
 ----------------------------------------------------------------------------
 MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
 download   |                      |      |      | O(1000B) | (frequent
 e.g.       |                      |      |      |          | during IP1.0
 misdirecte |                      |      |      |          | testing)
 d EAPOL    |                      |      |      |          |
 packets    |                      |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
            | DATA_VO (uplink)     |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
            | DATA_VO (downlink)   |      |      |          |
 ----------------------------------------------------------------------------
 WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
            |                      |      |      | O(100B)  |
 ----------------------------------------------------------------------------
 WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
 messages   | (downlink)           |      |      | O(100B)  |
            |                      |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
            | HTC_RAW_STREAMS      |      |      |          |
            | (uplink)             |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
            | HTC_RAW_STREAMS      |      |      |          |
            | (downlink)           |      |      |          |
 ----------------------------------------------------------------------------
 diag       | none (raw CE)        | CE 7 | t<>h | 4        | Diag Window
            |                      |      |      |          | infrequent
 ============================================================================
 */

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		0,		/* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,	/* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		5,
	},
#if defined(QCA_WIFI_3_0_ADRASTEA)
	{
		HTT_DATA2_MSG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		9,
	},
	{
		HTT_DATA3_MSG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		10,
	},
	{
		PACKET_LOG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		11,
	},
#endif
	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

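/*
 * Reading the map above: { WMI_CONTROL_SVC, PIPEDIR_OUT, 3 } routes
 * host->target WMI control messages through CE 3, while the matching
 * { WMI_CONTROL_SVC, PIPEDIR_IN, 2 } entry returns target->host WMI
 * events on CE 2.
 */
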
/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN  = TARGET to HOST */
#if (defined(QCA_WIFI_QCA8074))
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6290))
#ifdef CONFIG_WIN
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
};
#endif

static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		0,		/* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		1,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,	/* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,	/* in = DL = target -> host */
		1,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		4,
	},
#ifdef WLAN_FEATURE_FASTPATH
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		5,
	},
#else /* WLAN_FEATURE_FASTPATH */
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		1,
	},
#endif /* WLAN_FEATURE_FASTPATH */

	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

#ifdef WLAN_FEATURE_EPPING
static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},	/* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},	/* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},	/* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},	/* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},	/* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},	/* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},	/* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},	/* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},	/* in = DL = target -> host */
	{0, 0, 0,},				/* Must be last */
};

void hif_select_epping_service_to_pipe_map(struct service_to_pipe
					   **tgt_svc_map_to_use,
					   uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
	*sz_tgt_svc_map_to_use =
		sizeof(target_service_to_ce_map_wlan_epping);
}
#endif

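/**
 * hif_select_service_to_pipe_map() - select the service-to-CE map for a target
 * @scn: HIF context
 * @tgt_svc_map_to_use: address at which to return the selected map
 * @sz_tgt_svc_map_to_use: address at which to return the size of the map
 *
 * Picks the epping map when epping is enabled; otherwise picks a map based
 * on the target type, falling back to the default WLAN map.
 *
 * Return: none
 */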
static void hif_select_service_to_pipe_map(struct hif_softc *scn,
					   struct service_to_pipe
					   **tgt_svc_map_to_use,
					   uint32_t *sz_tgt_svc_map_to_use)
{
	uint32_t mode = hif_get_conparam(scn);
	struct hif_target_info *tgt_info = &scn->target_info;

	if (QDF_IS_EPPING_ENABLED(mode)) {
		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
						      sz_tgt_svc_map_to_use);
	} else {
		switch (tgt_info->target_type) {
		default:
			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_wlan);
			break;
		case TARGET_TYPE_AR900B:
		case TARGET_TYPE_QCA9984:
		case TARGET_TYPE_IPQ4019:
		case TARGET_TYPE_QCA9888:
		case TARGET_TYPE_AR9888:
		case TARGET_TYPE_AR9888V2:
			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_ar900b);
			break;
		case TARGET_TYPE_QCA6290:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6290);
			break;
		case TARGET_TYPE_QCA8074:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074);
			break;
		}
	}
}

/**
 * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
 * @ce_state: pointer to the state context of the CE
 *
 * Description:
 * Sets the htt_rx_data or htt_tx_data attribute of the state structure
 * if the CE serves one of the HTT DATA services.
 *
 * Return: true if this CE serves an HTT DATA service, false otherwise
 */
static bool ce_mark_datapath(struct CE_state *ce_state)
{
	struct service_to_pipe *svc_map;
	uint32_t map_sz, map_len;
	int i;
	bool rc = false;

	if (ce_state != NULL) {
		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
					       &map_sz);

		map_len = map_sz / sizeof(struct service_to_pipe);
		for (i = 0; i < map_len; i++) {
			if ((svc_map[i].pipenum == ce_state->id) &&
			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
				/* HTT CEs are unidirectional */
				if (svc_map[i].pipedir == PIPEDIR_IN)
					ce_state->htt_rx_data = true;
				else
					ce_state->htt_tx_data = true;
				rc = true;
			}
		}
	}
	return rc;
}

/**
 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
 * @ce_id: ce in question
 * @ring: ring state being examined
 * @type: "src_ring" or "dest_ring" string for identifying the ring
 *
 * Warns on non-zero index values.
 * Causes a kernel panic if the ring is not empty during initialization.
 */
static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
					 char *type)
{
	if (ring->write_index != 0 || ring->sw_index != 0)
		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
			  ce_id, type, ring->sw_index, ring->write_index);
	if (ring->write_index != ring->sw_index)
		QDF_BUG(0);
}

#ifdef IPA_OFFLOAD
/**
 * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @base_addr: pointer to copyengine ring base address
 * @ce_ring: copyengine instance
 * @nentries: number of entries to be allocated
 * @desc_size: ce desc size
 *
 * Return: QDF_STATUS_SUCCESS - for success
 */
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	if (CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) {
		scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(scn->qdf_dev,
			nentries * desc_size + CE_DESC_RING_ALIGN);
		if (!scn->ipa_ce_ring) {
			HIF_ERROR("%s: Failed to allocate memory for IPA ce ring",
				  __func__);
			return QDF_STATUS_E_NOMEM;
		}
		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
						  &scn->ipa_ce_ring->mem_info);
		ce_ring->base_addr_owner_space_unaligned =
			scn->ipa_ce_ring->vaddr;
	} else {
		ce_ring->base_addr_owner_space_unaligned =
			qdf_mem_alloc_consistent(scn->qdf_dev,
						 scn->qdf_dev->dev,
						 (nentries * desc_size +
						  CE_DESC_RING_ALIGN),
						 base_addr);
		if (!ce_ring->base_addr_owner_space_unaligned) {
			HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
				  __func__, CE_id);
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * ce_free_desc_ring() - Frees copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @ce_ring: copyengine instance
 * @desc_size: ce desc size
 *
 * Return: None
 */
static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	if (CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) {
		qdf_mem_shared_mem_free(scn->qdf_dev,
					scn->ipa_ce_ring);
		ce_ring->base_addr_owner_space_unaligned = NULL;
	} else {
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
			ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
			ce_ring->base_addr_owner_space_unaligned,
			ce_ring->base_addr_CE_space, 0);
		ce_ring->base_addr_owner_space_unaligned = NULL;
	}
}
#else
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	ce_ring->base_addr_owner_space_unaligned =
		qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					 (nentries * desc_size +
					  CE_DESC_RING_ALIGN), base_addr);
	if (!ce_ring->base_addr_owner_space_unaligned) {
		HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
			  __func__, CE_id);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
		ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
		ce_ring->base_addr_owner_space_unaligned,
		ce_ring->base_addr_CE_space, 0);
	ce_ring->base_addr_owner_space_unaligned = NULL;
}
#endif /* IPA_OFFLOAD */

/**
 * ce_srng_based() - Does this target use srng
 * @scn: pointer to the HIF context
 *
 * Description:
 * returns true if the target is SRNG based
 *
 * Return: true for SRNG based targets, false otherwise
 */
bool ce_srng_based(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);

	switch (tgt_info->target_type) {
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA6290:
		return true;
	default:
		return false;
	}
	return false;
}
qdf_export_symbol(ce_srng_based);

#ifdef QCA_WIFI_SUPPORT_SRNG
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	if (ce_srng_based(scn))
		return ce_services_srng();

	return ce_services_legacy();
}
#else /* QCA_WIFI_SUPPORT_SRNG */
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	return ce_services_legacy();
}
#endif /* QCA_WIFI_SUPPORT_SRNG */

static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
		struct pld_shadow_reg_v2_cfg **shadow_config,
		int *num_shadow_registers_configured)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
			scn, shadow_config, num_shadow_registers_configured);
}

static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
					uint8_t ring_type)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_get_desc_size(ring_type);
}

static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
		uint8_t ring_type, uint32_t nentries)
{
	uint32_t ce_nbytes;
	char *ptr;
	qdf_dma_addr_t base_addr;
	struct CE_ring_state *ce_ring;
	uint32_t desc_size;
	struct hif_softc *scn = CE_state->scn;

	ce_nbytes = sizeof(struct CE_ring_state)
		+ (nentries * sizeof(void *));
	ptr = qdf_mem_malloc(ce_nbytes);
	if (!ptr)
		return NULL;

	ce_ring = (struct CE_ring_state *)ptr;
	ptr += sizeof(struct CE_ring_state);
	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	ce_ring->low_water_mark_nentries = 0;
	ce_ring->high_water_mark_nentries = nentries;
	ce_ring->per_transfer_context = (void **)ptr;

	desc_size = ce_get_desc_size(scn, ring_type);

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
			       ce_ring, nentries,
			       desc_size) !=
	    QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: ring has no DMA mem",
			  __func__);
		qdf_mem_free(ptr);
		return NULL;
	}
	ce_ring->base_addr_CE_space_unaligned = base_addr;

	/* Correctly initialize memory to 0 to
	 * prevent garbage data crashing system
	 * when download firmware
	 */
	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
		     nentries * desc_size +
		     CE_DESC_RING_ALIGN);

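	/*
	 * If the allocation came back unaligned, round both the CE-space
	 * (bus) address and the owner-space address up to the next
	 * CE_DESC_RING_ALIGN boundary; e.g. with an alignment of 8, a base
	 * of 0x1004 becomes 0x1008. The extra CE_DESC_RING_ALIGN bytes were
	 * included in the allocation above, so the rounded base stays
	 * within the buffer.
	 */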
	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {
		ce_ring->base_addr_CE_space =
			(ce_ring->base_addr_CE_space_unaligned +
			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);

		ce_ring->base_addr_owner_space = (void *)
			(((size_t) ce_ring->base_addr_owner_space_unaligned +
			  CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
	} else {
		ce_ring->base_addr_CE_space =
			ce_ring->base_addr_CE_space_unaligned;
		ce_ring->base_addr_owner_space =
			ce_ring->base_addr_owner_space_unaligned;
	}

	return ce_ring;
}

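/**
 * ce_ring_setup() - hook a ring up to its copy engine
 * @scn: HIF context
 * @ring_type: CE_RING_SRC, CE_RING_DEST or CE_RING_STATUS
 * @ce_id: copy engine id
 * @ring: ring state to configure
 * @attr: CE attributes for this engine
 *
 * Dispatches to the legacy or SRNG implementation via ce_services.
 *
 * Return: 0 on success, negative value on failure
 */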
static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
			 uint32_t ce_id, struct CE_ring_state *ring,
			 struct CE_attr *attr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
						     ring, attr);
}

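/**
 * hif_ce_bus_early_suspend() - pause copy engines ahead of bus suspend
 * @scn: HIF context
 *
 * Moves every running CE, other than the WMI control pipes, into the
 * CE_PAUSED state.
 *
 * Return: 0 on success, error code from hif_map_service_to_pipe otherwise
 */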
int hif_ce_bus_early_suspend(struct hif_softc *scn)
{
	uint8_t ul_pipe, dl_pipe;
	int ce_id, status, ul_is_polled, dl_is_polled;
	struct CE_state *ce_state;

	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		HIF_ERROR("%s: pipe_mapping failure", __func__);
		return status;
	}

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (ce_id == ul_pipe)
			continue;
		if (ce_id == dl_pipe)
			continue;

		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_RUNNING)
			ce_state->state = CE_PAUSED;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
	}

	return status;
}

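/**
 * hif_ce_bus_late_resume() - resume copy engines paused for bus suspend
 * @scn: HIF context
 *
 * Returns paused CEs to CE_RUNNING and, for CEs left in the CE_PENDING
 * state, flushes the pending source ring write index to hardware.
 *
 * Return: 0
 */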
int hif_ce_bus_late_resume(struct hif_softc *scn)
{
	int ce_id;
	struct CE_state *ce_state;
	int write_index;
	bool index_updated;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_PENDING) {
			write_index = ce_state->src_ring->write_index;
			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
						  write_index);
			ce_state->state = CE_RUNNING;
			index_updated = true;
		} else {
			index_updated = false;
		}

		if (ce_state->state == CE_PAUSED)
			ce_state->state = CE_RUNNING;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);

		if (index_updated)
			hif_record_ce_desc_event(scn, ce_id,
						 RESUME_WRITE_INDEX_UPDATE,
						 NULL, NULL, write_index, 0);
	}

	return 0;
}

/**
 * ce_oom_recovery() - try to recover rx ce from oom condition
 * @context: CE_state of the CE with oom rx ring
 *
 * The executing work will continue to be rescheduled until
 * at least 1 descriptor is successfully posted to the rx ring.
 *
 * Return: none
 */
static void ce_oom_recovery(void *context)
{
	struct CE_state *ce_state = context;
	struct hif_softc *scn = ce_state->scn;
	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&ce_softc->pipe_info[ce_state->id];

	hif_post_recv_buffers_for_pipe(pipe_info);
}

#if HIF_CE_DEBUG_DATA_BUF
/**
 * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed by
 * the CE descriptors.
 * Allocate HIF_CE_HISTORY_MAX records by CE_DEBUG_MAX_DATA_BUF_SIZE
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev =
	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return QDF_STATUS_E_NOMEM;

	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		event->data =
			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
		if (event->data == NULL)
			return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * free_mem_ce_debug_hist_data() - Free mem of the data pointed by
 * the CE descriptors.
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: none
 */
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev =
	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return;

	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		if (event->data != NULL)
			qdf_mem_free(event->data);
		event->data = NULL;
		event = NULL;
	}
}
#endif /* HIF_CE_DEBUG_DATA_BUF */

/*
 * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
 * for defined here
 */
#if HIF_CE_DEBUG_DATA_BUF
/**
 * alloc_mem_ce_debug_history() - Allocate mem for the CE descriptors storing
 * @scn: hif scn handle
 * @CE_id: Copy Engine Id
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS alloc_mem_ce_debug_history(struct hif_softc *scn,
						    unsigned int CE_id)
{
	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
		qdf_mem_malloc(HIF_CE_HISTORY_MAX *
			       sizeof(struct hif_ce_desc_event));

	if (scn->hif_ce_desc_hist.hist_ev[CE_id] == NULL) {
		scn->hif_ce_desc_hist.enable[CE_id] = 0;
		return QDF_STATUS_E_NOMEM;
	} else {
		scn->hif_ce_desc_hist.enable[CE_id] = 1;
		return QDF_STATUS_SUCCESS;
	}
}

/**
 * free_mem_ce_debug_history() - Free mem allocated for the CE descriptors
 * storing.
 * @scn: hif scn handle
 * @CE_id: Copy Engine Id
 *
 * Return: none
 */
static inline void free_mem_ce_debug_history(struct hif_softc *scn,
					     unsigned int CE_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	struct hif_ce_desc_event *hist_ev =
		(struct hif_ce_desc_event *)ce_hist->hist_ev[CE_id];

	if (!hist_ev)
		return;

#if HIF_CE_DEBUG_DATA_BUF
	if (ce_hist->data_enable[CE_id] == 1) {
		ce_hist->data_enable[CE_id] = 0;
		free_mem_ce_debug_hist_data(scn, CE_id);
	}
#endif
	ce_hist->enable[CE_id] = 0;
	qdf_mem_free(ce_hist->hist_ev[CE_id]);
	ce_hist->hist_ev[CE_id] = NULL;
}

/**
 * reset_ce_debug_history() - reset the index and ce id used for dumping the
 * CE records on the console using sysfs.
 * @scn: hif scn handle
 *
 * Return: none
 */
static inline void reset_ce_debug_history(struct hif_softc *scn)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	/* Initialise the CE debug history sysfs interface inputs ce_id and
	 * index. Disable data storing
	 */
	ce_hist->hist_index = 0;
	ce_hist->hist_id = 0;
}
#else /* (HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
static inline QDF_STATUS alloc_mem_ce_debug_history(struct hif_softc *scn,
						    unsigned int CE_id)
{
	return QDF_STATUS_SUCCESS;
}

static inline void free_mem_ce_debug_history(struct hif_softc *scn,
					     unsigned int CE_id)
{
}

static inline void reset_ce_debug_history(struct hif_softc *scn)
{
}
#endif /* (HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 *
 * This should be called during the initialization sequence before
 * interrupts are enabled, so we don't have to worry about thread safety.
 */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr)
{
	struct CE_state *CE_state;
	uint32_t ctrl_addr;
	unsigned int nentries;
	bool malloc_CE_state = false;
	bool malloc_src_ring = false;
	int status;

	QDF_ASSERT(CE_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(CE_id);
	CE_state = scn->ce_id_to_state[CE_id];

	if (!CE_state) {
		CE_state =
		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
		if (!CE_state) {
			HIF_ERROR("%s: CE_state has no mem", __func__);
			return NULL;
		}
		malloc_CE_state = true;
		qdf_spinlock_create(&CE_state->ce_index_lock);

		CE_state->id = CE_id;
		CE_state->ctrl_addr = ctrl_addr;
		CE_state->state = CE_RUNNING;
		CE_state->attr_flags = attr->flags;
	}
	CE_state->scn = scn;

	qdf_atomic_init(&CE_state->rx_pending);
	if (attr == NULL) {
		/* Already initialized; caller wants the handle */
		return (struct CE_handle *)CE_state;
	}

	if (CE_state->src_sz_max)
		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
	else
		CE_state->src_sz_max = attr->src_sz_max;

	ce_init_ce_desc_event_log(scn, CE_id,
				  attr->src_nentries + attr->dest_nentries);

	/* source ring setup */
	nentries = attr->src_nentries;
	if (nentries) {
		struct CE_ring_state *src_ring;

		nentries = roundup_pwr2(nentries);
		if (CE_state->src_ring) {
			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
		} else {
			src_ring = CE_state->src_ring =
				ce_alloc_ring_state(CE_state,
						CE_RING_SRC,
						nentries);
			if (!src_ring) {
				/* cannot allocate src ring. If the
				 * CE_state is allocated locally free
				 * CE_State and return error.
				 */
				HIF_ERROR("%s: src ring has no mem", __func__);
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					qdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			}
			/* we can allocate src ring. Mark that the src ring is
			 * allocated locally
			 */
			malloc_src_ring = true;

			/*
			 * Also allocate a shadow src ring in
			 * regular mem to use for faster access.
			 */
			src_ring->shadow_base_unaligned =
				qdf_mem_malloc(nentries *
					       sizeof(struct CE_src_desc) +
					       CE_DESC_RING_ALIGN);
			if (src_ring->shadow_base_unaligned == NULL) {
				HIF_ERROR("%s: src ring no shadow_base mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->shadow_base = (struct CE_src_desc *)
				(((size_t) src_ring->shadow_base_unaligned +
				CE_DESC_RING_ALIGN - 1) &
				 ~(CE_DESC_RING_ALIGN - 1));

			status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
					       src_ring, attr);
			if (status < 0)
				goto error_target_access;

			ce_ring_test_initial_indexes(CE_id, src_ring,
						     "src_ring");
		}
	}

	/* destination ring setup */
	nentries = attr->dest_nentries;
	if (nentries) {
		struct CE_ring_state *dest_ring;

		nentries = roundup_pwr2(nentries);
		if (CE_state->dest_ring) {
			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
		} else {
			dest_ring = CE_state->dest_ring =
				ce_alloc_ring_state(CE_state,
						CE_RING_DEST,
						nentries);
			if (!dest_ring) {
				/* cannot allocate dst ring. If the CE_state
				 * or src ring is allocated locally free
				 * CE_State and src ring and return error.
				 */
				HIF_ERROR("%s: dest ring has no mem",
					  __func__);
				goto error_no_dma_mem;
			}

			status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
					       dest_ring, attr);
			if (status < 0)
				goto error_target_access;

			ce_ring_test_initial_indexes(CE_id, dest_ring,
						     "dest_ring");

			/* For srng based target, init status ring here */
			if (ce_srng_based(CE_state->scn)) {
				CE_state->status_ring =
					ce_alloc_ring_state(CE_state,
							CE_RING_STATUS,
							nentries);
				if (CE_state->status_ring == NULL) {
					/*Allocation failed. Cleanup*/
					qdf_mem_free(CE_state->dest_ring);
					if (malloc_src_ring) {
						qdf_mem_free
							(CE_state->src_ring);
						CE_state->src_ring = NULL;
						malloc_src_ring = false;
					}
					if (malloc_CE_state) {
						/* allocated CE_state locally */
						scn->ce_id_to_state[CE_id] =
							NULL;
						qdf_mem_free(CE_state);
						malloc_CE_state = false;
					}

					return NULL;
				}

				status = ce_ring_setup(scn, CE_RING_STATUS,
						CE_id, CE_state->status_ring,
						attr);
				if (status < 0)
					goto error_target_access;

			}

			/* epping */
			/* poll timer */
			if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL) ||
			    scn->polled_mode_on) {
				qdf_timer_init(scn->qdf_dev,
					       &CE_state->poll_timer,
					       ce_poll_timeout,
					       CE_state,
					       QDF_TIMER_TYPE_SW);
				CE_state->timer_inited = true;
				qdf_timer_mod(&CE_state->poll_timer,
					      CE_POLL_TIMEOUT);
			}
		}
	}

	if (!ce_srng_based(scn)) {
		/* Enable CE error interrupts */
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			goto error_target_access;
		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			goto error_target_access;
	}

	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
			ce_oom_recovery, CE_state);

	/* update the htt_data attribute */
	ce_mark_datapath(CE_state);
	scn->ce_id_to_state[CE_id] = CE_state;

	alloc_mem_ce_debug_history(scn, CE_id);

	return (struct CE_handle *)CE_state;

error_target_access:
error_no_dma_mem:
	ce_fini((struct CE_handle *)CE_state);
	return NULL;
}
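
/*
 * Typical ce_init() usage, as a minimal sketch (in this driver the
 * per-pipe attributes actually come from the host_ce_config table wired
 * up during HIF configuration):
 *
 *	struct CE_attr *attr = &hif_state->host_ce_config[CE_id];
 *	struct CE_handle *ce_hdl = ce_init(scn, CE_id, attr);
 *
 *	if (!ce_hdl)
 *		return QDF_STATUS_E_NOMEM;
 */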

#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_enable_fastpath() - update that we have enabled fastpath mode
 * @hif_ctx: HIF context
 *
 * For use in data path
 *
 * Return: void
 */
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (ce_srng_based(scn)) {
		HIF_INFO("%s, srng rings do not support fastpath", __func__);
		return;
	}
	HIF_DBG("%s, Enabling fastpath mode", __func__);
	scn->fastpath_mode_on = true;
}

void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	HIF_DBG("%s, Enabling polled mode", __func__);
	scn->polled_mode_on = true;
}

/**
 * hif_is_fastpath_mode_enabled() - API to query if fastpath mode is enabled
 * @hif_ctx: HIF Context
 *
 * For use in data path to skip HTC
 *
 * Return: bool
 */
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->fastpath_mode_on;
}

bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->polled_mode_on;
}

/**
 * hif_get_ce_handle() - API to get CE handle for FastPath mode
 * @hif_ctx: HIF Context
 * @id: CopyEngine Id
 *
 * API to return CE handle for fastpath mode
 *
 * Return: CE handle
 */
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->ce_id_to_state[id];
}

/**
 * ce_h2t_tx_ce_cleanup() - place holder function for H2T CE cleanup.
 * No processing is required inside this function.
 * @ce_hdl: Copy engine handle
 * Using an assert, this function makes sure that,
 * the TX CE has been processed completely.
 *
 * This is called while dismantling CE structures. No other thread
 * should be using these structures while dismantling is occurring
 * therefore no locking is needed.
 *
 * Return: none
 */
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct hif_softc *sc = ce_state->scn;
	uint32_t sw_index, write_index;

	if (hif_is_nss_wifi_enabled(sc))
		return;

	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
		HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
			__func__, __LINE__);
		sw_index = src_ring->sw_index;
		write_index = src_ring->sw_index;

		/* At this point Tx CE should be clean */
		qdf_assert_always(sw_index == write_index);
	}
}

1488/**
1489 * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
1490 * @ce_hdl: Handle to CE
1491 *
1492 * These buffers are never allocated on the fly, but
1493 * are allocated only once during HIF start and freed
1494 * only once during HIF stop.
1495 * NOTE:
1496 * The assumption here is there is no in-flight DMA in progress
1497 * currently, so that buffers can be freed up safely.
1498 *
1499 * Return: NONE
1500 */
1501void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
1502{
1503 struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1504 struct CE_ring_state *dst_ring = ce_state->dest_ring;
1505 qdf_nbuf_t nbuf;
1506 int i;
1507
Houston Hoffman7fe51b12016-11-14 18:01:05 -08001508 if (ce_state->scn->fastpath_mode_on == false)
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001509 return;
Houston Hoffman7fe51b12016-11-14 18:01:05 -08001510
1511 if (!ce_state->htt_rx_data)
1512 return;
1513
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001514 /*
1515 * when fastpath_mode is on and for datapath CEs. Unlike other CE's,
1516 * this CE is completely full: does not leave one blank space, to
1517 * distinguish between empty queue & full queue. So free all the
1518 * entries.
1519 */
1520 for (i = 0; i < dst_ring->nentries; i++) {
1521 nbuf = dst_ring->per_transfer_context[i];
1522
1523 /*
1524 * The reasons for doing this check are:
1525 * 1) Protect against calling cleanup before allocating buffers
1526 * 2) In a corner case, FASTPATH_mode_on may be set, but we
1527 * could have a partially filled ring, because of a memory
1528 * allocation failure in the middle of allocating ring.
1529 * This check accounts for that case, checking
1530 * fastpath_mode_on flag or started flag would not have
1531 * covered that case. This is not in performance path,
1532 * so OK to do this.
1533 */
Houston Hoffman1c728302017-03-10 16:58:49 -08001534 if (nbuf) {
1535 qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
1536 QDF_DMA_FROM_DEVICE);
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001537 qdf_nbuf_free(nbuf);
Houston Hoffman1c728302017-03-10 16:58:49 -08001538 }
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001539 }
1540}
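
/*
 * Occupancy conventions, illustrated (hypothetical helper, not part of
 * the driver): a generic CE dest ring keeps one slot empty so that
 * sw_index == write_index can unambiguously mean "empty", hence at most
 * (nentries - 1) buffers are ever posted; the fastpath HTT rx ring posts
 * all nentries buffers, which is why the loop above visits every
 * per_transfer_context slot:
 *
 *	static unsigned int ce_max_posted_bufs(struct CE_ring_state *ring,
 *					       bool fastpath_htt_rx)
 *	{
 *		return fastpath_htt_rx ? ring->nentries
 *				       : ring->nentries - 1;
 *	}
 */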
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001541
1542/**
1543 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
1544 * @scn: HIF handle
1545 *
1546 * Datapath Rx CEs are a special case, where we reuse all the message
1547 * buffers. Hence we have to post all the entries in the pipe, even at the
1548 * beginning, unlike other CE pipes where one less than dest_nentries is
1549 * filled at the beginning (see the worked example after this function).
1550 *
1551 * Return: None
1552 */
1553static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1554{
1555 int pipe_num;
1556 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1557
1558 if (scn->fastpath_mode_on == false)
1559 return;
1560
1561 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1562 struct HIF_CE_pipe_info *pipe_info =
1563 &hif_state->pipe_info[pipe_num];
1564 struct CE_state *ce_state =
1565 scn->ce_id_to_state[pipe_info->pipe_num];
1566
1567 if (ce_state->htt_rx_data)
1568 atomic_inc(&pipe_info->recv_bufs_needed);
1569 }
1570}
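
/*
 * Worked example (illustrative numbers): for a pipe configured with
 * dest_nentries == 512, the generic init path asks for one buffer less
 * than the ring size, and this function tops fastpath HTT rx pipes up
 * to the full ring:
 *
 *	recv_bufs_needed = 512 - 1;		// generic pipe: 511
 *	if (ce_state->htt_rx_data)
 *		recv_bufs_needed += 1;		// fastpath rx pipe: 512
 */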
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001571#else
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001572static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001573{
1574}
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001575
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001576static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001577{
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001578 return false;
1579}
1580
1581static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
1582{
1583 return false;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001584}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001585#endif /* WLAN_FEATURE_FASTPATH */
1586
1587void ce_fini(struct CE_handle *copyeng)
1588{
1589 struct CE_state *CE_state = (struct CE_state *)copyeng;
1590 unsigned int CE_id = CE_state->id;
Komal Seelam644263d2016-02-22 20:45:49 +05301591 struct hif_softc *scn = CE_state->scn;
Kiran Venkatappaae1a3702017-12-29 21:08:10 +05301592 uint32_t desc_size;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001593
Balamurugan Mahalingamf6d30352018-01-31 16:17:24 +05301594 bool inited = CE_state->timer_inited;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001595 CE_state->state = CE_UNUSED;
1596 scn->ce_id_to_state[CE_id] = NULL;
Balamurugan Mahalingamf6d30352018-01-31 16:17:24 +05301597 /* Set the flag to false first to stop processing in ce_poll_timeout */
1598 CE_state->timer_inited = false;
Dhanashri Atre991ee4d2017-05-03 19:03:10 -07001599 qdf_lro_deinit(CE_state->lro_data);
1600
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001601 if (CE_state->src_ring) {
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001602 /* Cleanup the datapath Tx ring */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001603 ce_h2t_tx_ce_cleanup(copyeng);
1604
Kiran Venkatappaae1a3702017-12-29 21:08:10 +05301605 desc_size = ce_get_desc_size(scn, CE_RING_SRC);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001606 if (CE_state->src_ring->shadow_base_unaligned)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301607 qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001608 if (CE_state->src_ring->base_addr_owner_space_unaligned)
Sravan Kumar Kairam58e0adf2018-02-27 18:37:40 +05301609 ce_free_desc_ring(scn, CE_state->id,
1610 CE_state->src_ring,
1611 desc_size);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301612 qdf_mem_free(CE_state->src_ring);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001613 }
1614 if (CE_state->dest_ring) {
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001615 /* Cleanup the datapath Rx ring */
1616 ce_t2h_msg_ce_cleanup(copyeng);
1617
Kiran Venkatappaae1a3702017-12-29 21:08:10 +05301618 desc_size = ce_get_desc_size(scn, CE_RING_DEST);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001619 if (CE_state->dest_ring->base_addr_owner_space_unaligned)
Sravan Kumar Kairam58e0adf2018-02-27 18:37:40 +05301620 ce_free_desc_ring(scn, CE_state->id,
1621 CE_state->dest_ring,
1622 desc_size);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301623 qdf_mem_free(CE_state->dest_ring);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001624
1625 /* epping */
Balamurugan Mahalingamf6d30352018-01-31 16:17:24 +05301626 if (inited) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301627 qdf_timer_free(&CE_state->poll_timer);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001628 }
1629 }
Houston Hoffman31b25ec2016-09-19 13:12:30 -07001630 if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301631 /* Cleanup the datapath Tx ring */
1632 ce_h2t_tx_ce_cleanup(copyeng);
1633
1634 if (CE_state->status_ring->shadow_base_unaligned)
1635 qdf_mem_free(
1636 CE_state->status_ring->shadow_base_unaligned);
1637
Kiran Venkatappaae1a3702017-12-29 21:08:10 +05301638 desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301639 if (CE_state->status_ring->base_addr_owner_space_unaligned)
Sravan Kumar Kairam58e0adf2018-02-27 18:37:40 +05301640 ce_free_desc_ring(scn, CE_state->id,
1641 CE_state->status_ring,
1642 desc_size);
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301643 qdf_mem_free(CE_state->status_ring);
1644 }
Houston Hoffman03f46572016-12-12 12:53:56 -08001645
c_cgodavfda96ad2017-09-07 16:16:00 +05301646 free_mem_ce_debug_history(scn, CE_id);
1647 reset_ce_debug_history(scn);
1648 ce_deinit_ce_desc_event_log(scn, CE_id);
1649
Houston Hoffman03f46572016-12-12 12:53:56 -08001650 qdf_spinlock_destroy(&CE_state->ce_index_lock);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301651 qdf_mem_free(CE_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001652}
1653
Komal Seelam5584a7c2016-02-24 19:22:48 +05301654void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001655{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301656 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001657
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301658 qdf_mem_zero(&hif_state->msg_callbacks_pending,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001659 sizeof(hif_state->msg_callbacks_pending));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301660 qdf_mem_zero(&hif_state->msg_callbacks_current,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001661 sizeof(hif_state->msg_callbacks_current));
1662}
1663
1664/* Send the first nbytes bytes of the buffer */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301665QDF_STATUS
Komal Seelam5584a7c2016-02-24 19:22:48 +05301666hif_send_head(struct hif_opaque_softc *hif_ctx,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001667 uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301668 qdf_nbuf_t nbuf, unsigned int data_attr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001669{
Komal Seelam644263d2016-02-22 20:45:49 +05301670 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05301671 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001672 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1673 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
1674 int bytes = nbytes, nfrags = 0;
1675 struct ce_sendlist sendlist;
1676 int status, i = 0;
1677 unsigned int mux_id = 0;
1678
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301679 QDF_ASSERT(nbytes <= qdf_nbuf_len(nbuf));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001680
1681 transfer_id =
1682 (mux_id & MUX_ID_MASK) |
1683 (transfer_id & TRANSACTION_ID_MASK);
1684 data_attr &= DESC_DATA_FLAG_MASK;
1685 /*
1686 * The common case involves sending multiple fragments within a
1687 * single download (the tx descriptor and the tx frame header).
1688 * So, optimize for the case of multiple fragments by not even
1689 * checking whether it's necessary to use a sendlist.
1690 * The overhead of using a sendlist for a single buffer download
1691 * is not a big deal, since it happens rarely (for WMI messages).
1692 */
1693 ce_sendlist_init(&sendlist);
1694 do {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301695 qdf_dma_addr_t frag_paddr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001696 int frag_bytes;
1697
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301698 frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
1699 frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001700 /*
1701 * Clear the packet offset for all but the first CE desc.
1702 */
1703 if (i++ > 0)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301704 data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001705
1706 status = ce_sendlist_buf_add(&sendlist, frag_paddr,
1707 frag_bytes >
1708 bytes ? bytes : frag_bytes,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301709 qdf_nbuf_get_frag_is_wordstream
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001710 (nbuf,
1711 nfrags) ? 0 :
1712 CE_SEND_FLAG_SWAP_DISABLE,
1713 data_attr);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301714 if (status != QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001715 HIF_ERROR("%s: error, frag_num %d larger than limit",
1716 __func__, nfrags);
1717 return status;
1718 }
1719 bytes -= frag_bytes;
1720 nfrags++;
1721 } while (bytes > 0);
1722
1723 /* Make sure we have resources to handle this request */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301724 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001725 if (pipe_info->num_sends_allowed < nfrags) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301726 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001727 ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301728 return QDF_STATUS_E_RESOURCES;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001729 }
1730 pipe_info->num_sends_allowed -= nfrags;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301731 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001732
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301733 if (qdf_unlikely(ce_hdl == NULL)) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001734 HIF_ERROR("%s: error CE handle is null", __func__);
1735		return QDF_STATUS_E_FAILURE;
1736 }
1737
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301738 QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301739 DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
Nandha Kishore Easwarane43583f2017-05-15 21:01:13 +05301740 QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
1741 sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001742 status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301743 QDF_ASSERT(status == QDF_STATUS_SUCCESS);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001744
1745 return status;
1746}
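
/*
 * Usage sketch (hypothetical caller, not part of this file): an upper
 * layer hands a DMA-mapped nbuf to hif_send_head() on the upload pipe
 * obtained from hif_map_service_to_pipe(); transfer_id 0 and data_attr 0
 * are plain defaults chosen for illustration:
 *
 *	static QDF_STATUS example_send(struct hif_opaque_softc *hif_ctx,
 *				       uint8_t ul_pipe, qdf_nbuf_t nbuf)
 *	{
 *		// The nbuf must already be mapped so that its fragment
 *		// physical addresses are valid.
 *		return hif_send_head(hif_ctx, ul_pipe, 0,
 *				     qdf_nbuf_len(nbuf), nbuf, 0);
 *	}
 */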
1747
Komal Seelam5584a7c2016-02-24 19:22:48 +05301748void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
1749 int force)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001750{
Komal Seelam644263d2016-02-22 20:45:49 +05301751 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05301752 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Komal Seelam644263d2016-02-22 20:45:49 +05301753
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001754 if (!force) {
1755 int resources;
1756 /*
1757 * Decide whether to actually poll for completions, or just
1758 * wait for a later chance. If there seem to be plenty of
1759 * resources left, then just wait, since checking involves
1760 * reading a CE register, which is a relatively expensive
1761 * operation.
1762 */
Komal Seelam644263d2016-02-22 20:45:49 +05301763 resources = hif_get_free_queue_number(hif_ctx, pipe);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001764 /*
1765 * If at least 50% of the total resources are still available,
1766 * don't bother checking again yet.
1767 */
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001768 if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
1769 1))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001770 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001771 }
Houston Hoffman56e0d702016-05-05 17:48:06 -07001772#if ATH_11AC_TXCOMPACT
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001773 ce_per_engine_servicereap(scn, pipe);
1774#else
1775 ce_per_engine_service(scn, pipe);
1776#endif
1777}
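
/*
 * Worked example of the heuristic above (illustrative numbers): for a
 * pipe with src_nentries == 32, the completion reap is skipped while
 * num_sends_allowed > 16, i.e. more than half of the ring's send slots
 * are still available; the relatively expensive CE register read only
 * happens once the ring is at least half occupied, or when the caller
 * passes a non-zero force.
 */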
1778
Komal Seelam5584a7c2016-02-24 19:22:48 +05301779uint16_t
1780hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001781{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301782 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001783 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1784 uint16_t rv;
1785
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301786 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001787 rv = pipe_info->num_sends_allowed;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301788 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001789 return rv;
1790}
1791
1792/* Called by lower (CE) layer when a send to Target completes. */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001793static void
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001794hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301795 void *transfer_context, qdf_dma_addr_t CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001796 unsigned int nbytes, unsigned int transfer_id,
1797 unsigned int sw_index, unsigned int hw_index,
1798 unsigned int toeplitz_hash_result)
1799{
1800 struct HIF_CE_pipe_info *pipe_info =
1801 (struct HIF_CE_pipe_info *)ce_context;
1802 struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
Komal Seelam644263d2016-02-22 20:45:49 +05301803 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001804 unsigned int sw_idx = sw_index, hw_idx = hw_index;
Houston Hoffman85118512015-09-28 14:17:11 -07001805 struct hif_msg_callbacks *msg_callbacks =
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05301806 &pipe_info->pipe_callbacks;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001807
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001808 do {
1809 /*
Houston Hoffman85118512015-09-28 14:17:11 -07001810 * The upper layer callback will be triggered
1811		 * when the last fragment is completed.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001812 */
Houston Hoffman85118512015-09-28 14:17:11 -07001813 if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
Houston Hoffman1c728302017-03-10 16:58:49 -08001814		if (scn->target_status == TARGET_STATUS_RESET) {
1816 qdf_nbuf_unmap_single(scn->qdf_dev,
1817 transfer_context,
1818 QDF_DMA_TO_DEVICE);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301819 qdf_nbuf_free(transfer_context);
Houston Hoffman1c728302017-03-10 16:58:49 -08001820 } else
Houston Hoffman49794a32015-12-21 12:14:56 -08001821 msg_callbacks->txCompletionHandler(
Houston Hoffman85118512015-09-28 14:17:11 -07001822 msg_callbacks->Context,
1823 transfer_context, transfer_id,
1824 toeplitz_hash_result);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001825 }
1826
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301827 qdf_spin_lock(&pipe_info->completion_freeq_lock);
Houston Hoffman85118512015-09-28 14:17:11 -07001828 pipe_info->num_sends_allowed++;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301829 qdf_spin_unlock(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001830 } while (ce_completed_send_next(copyeng,
1831 &ce_context, &transfer_context,
1832 &CE_data, &nbytes, &transfer_id,
1833 &sw_idx, &hw_idx,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301834 &toeplitz_hash_result) == QDF_STATUS_SUCCESS);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001835}
1836
Houston Hoffman910c6262015-09-28 12:56:25 -07001837/**
1838 * hif_ce_do_recv(): send message from copy engine to upper layers
1839 * @msg_callbacks: structure containing callback and callback context
1840 * @netbuf: skb containing the message
1841 * @nbytes: number of bytes in the message
1842 * @pipe_info: used for the pipe_number info
1843 *
Jeff Johnsondc9c5592018-05-06 15:40:42 -07001844 * Checks the packet length, sets the length in the netbuf,
Houston Hoffman910c6262015-09-28 12:56:25 -07001845 * and calls the upper layer callback.
1846 *
1847 * return: None
1848 */
1849static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301850 qdf_nbuf_t netbuf, int nbytes,
Houston Hoffman910c6262015-09-28 12:56:25 -07001851 struct HIF_CE_pipe_info *pipe_info) {
1852 if (nbytes <= pipe_info->buf_sz) {
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301853 qdf_nbuf_set_pktlen(netbuf, nbytes);
Houston Hoffman910c6262015-09-28 12:56:25 -07001854 msg_callbacks->
1855 rxCompletionHandler(msg_callbacks->Context,
1856 netbuf, pipe_info->pipe_num);
1857 } else {
Jeff Johnsonb9450212017-09-18 10:12:38 -07001858 HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d",
Houston Hoffman910c6262015-09-28 12:56:25 -07001859 __func__, netbuf, nbytes);
Houston Hoffman1c728302017-03-10 16:58:49 -08001860
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301861 qdf_nbuf_free(netbuf);
Houston Hoffman910c6262015-09-28 12:56:25 -07001862 }
1863}
1864
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001865/* Called by lower (CE) layer when data is received from the Target. */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001866static void
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001867hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301868 void *transfer_context, qdf_dma_addr_t CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001869 unsigned int nbytes, unsigned int transfer_id,
1870 unsigned int flags)
1871{
1872 struct HIF_CE_pipe_info *pipe_info =
1873 (struct HIF_CE_pipe_info *)ce_context;
1874 struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
Houston Hoffman18c7fc52015-09-02 11:44:42 -07001875 struct CE_state *ce_state = (struct CE_state *) copyeng;
Komal Seelam644263d2016-02-22 20:45:49 +05301876 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffmane02e12d2016-03-14 21:11:36 -07001877#ifdef HIF_PCI
1878 struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
1879#endif
Houston Hoffman910c6262015-09-28 12:56:25 -07001880 struct hif_msg_callbacks *msg_callbacks =
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05301881 &pipe_info->pipe_callbacks;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001882
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001883 do {
Houston Hoffmane02e12d2016-03-14 21:11:36 -07001884#ifdef HIF_PCI
1885 hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
1886#endif
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301887 qdf_nbuf_unmap_single(scn->qdf_dev,
1888 (qdf_nbuf_t) transfer_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301889 QDF_DMA_FROM_DEVICE);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001890
Houston Hoffman910c6262015-09-28 12:56:25 -07001891 atomic_inc(&pipe_info->recv_bufs_needed);
1892 hif_post_recv_buffers_for_pipe(pipe_info);
Komal Seelam6ee55902016-04-11 17:11:07 +05301893 if (scn->target_status == TARGET_STATUS_RESET)
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301894 qdf_nbuf_free(transfer_context);
Houston Hoffman49794a32015-12-21 12:14:56 -08001895 else
1896 hif_ce_do_recv(msg_callbacks, transfer_context,
Houston Hoffman9c0f80a2015-09-28 18:36:36 -07001897 nbytes, pipe_info);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001898
1899		/* Set up force_break flag if the number of receives reaches
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001900 * MAX_NUM_OF_RECEIVES
1901 */
Houston Hoffman5bf441a2015-09-02 11:52:10 -07001902 ce_state->receive_count++;
Houston Hoffman05652722016-04-29 16:58:59 -07001903 if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
Houston Hoffman18c7fc52015-09-02 11:44:42 -07001904 ce_state->force_break = 1;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001905 break;
1906 }
1907 } while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
1908 &CE_data, &nbytes, &transfer_id,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301909 &flags) == QDF_STATUS_SUCCESS);
Houston Hoffmanf4607852015-12-17 17:14:40 -08001910
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001911}
1912
1913/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
1914
1915void
Komal Seelam5584a7c2016-02-24 19:22:48 +05301916hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001917 struct hif_msg_callbacks *callbacks)
1918{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301919 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001920
1921#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
1922 spin_lock_init(&pcie_access_log_lock);
1923#endif
1924 /* Save callbacks for later installation */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301925 qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001926 sizeof(hif_state->msg_callbacks_pending));
1927
1928}
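
/*
 * Registration sketch (hypothetical handler names, for illustration):
 * an upper layer fills in struct hif_msg_callbacks and passes it here
 * before bringing the pipes up; the pending callbacks are installed on
 * the pipes during hif_start():
 *
 *	struct hif_msg_callbacks cbs = {
 *		.Context = my_ctx,			// opaque cookie
 *		.txCompletionHandler = my_tx_done,	// send completions
 *		.rxCompletionHandler = my_rx_ind,	// received messages
 *	};
 *	hif_post_init(hif_ctx, NULL, &cbs);
 *	hif_start(hif_ctx);
 */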
1929
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001930static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001931{
1932 struct CE_handle *ce_diag = hif_state->ce_diag;
1933 int pipe_num;
Komal Seelam644263d2016-02-22 20:45:49 +05301934 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07001935 struct hif_msg_callbacks *hif_msg_callbacks =
1936 &hif_state->msg_callbacks_current;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001937
1938 /* daemonize("hif_compl_thread"); */
1939
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001940 if (scn->ce_count == 0) {
Houston Hoffmanc50572b2016-06-08 19:49:46 -07001941 HIF_ERROR("%s: Invalid ce_count", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001942 return -EINVAL;
1943 }
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07001944
1945 if (!hif_msg_callbacks ||
1946 !hif_msg_callbacks->rxCompletionHandler ||
1947 !hif_msg_callbacks->txCompletionHandler) {
1948 HIF_ERROR("%s: no completion handler registered", __func__);
1949 return -EFAULT;
1950 }
1951
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001952 A_TARGET_ACCESS_LIKELY(scn);
1953 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1954 struct CE_attr attr;
1955 struct HIF_CE_pipe_info *pipe_info;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001956
1957 pipe_info = &hif_state->pipe_info[pipe_num];
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001958 if (pipe_info->ce_hdl == ce_diag)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001959 continue; /* Handle Diagnostic CE specially */
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05301960 attr = hif_state->host_ce_config[pipe_num];
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001961 if (attr.src_nentries) {
1962 /* pipe used to send to target */
Jeff Johnsonb9450212017-09-18 10:12:38 -07001963 HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK",
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001964 __func__, pipe_num, pipe_info);
1965 ce_send_cb_register(pipe_info->ce_hdl,
1966 hif_pci_ce_send_done, pipe_info,
1967 attr.flags & CE_ATTR_DISABLE_INTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001968 pipe_info->num_sends_allowed = attr.src_nentries - 1;
1969 }
1970 if (attr.dest_nentries) {
1971 /* pipe used to receive from target */
1972 ce_recv_cb_register(pipe_info->ce_hdl,
1973 hif_pci_ce_recv_data, pipe_info,
1974 attr.flags & CE_ATTR_DISABLE_INTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001975 }
Houston Hoffman6666df72015-11-30 16:48:35 -08001976
1977 if (attr.src_nentries)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301978 qdf_spinlock_create(&pipe_info->completion_freeq_lock);
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05301979
1980 qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
1981 sizeof(pipe_info->pipe_callbacks));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001982 }
Houston Hoffman6666df72015-11-30 16:48:35 -08001983
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001984 A_TARGET_ACCESS_UNLIKELY(scn);
1985 return 0;
1986}
1987
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001988/*
1989 * Install pending msg callbacks.
1990 *
1991 * TBDXXX: This hack is needed because upper layers install msg callbacks
1992 * for use with HTC before BMI is done; yet this HIF implementation
1993 * needs to continue to use BMI msg callbacks. Really, upper layers
1994 * should not register HTC callbacks until AFTER BMI phase.
1995 */
Komal Seelam644263d2016-02-22 20:45:49 +05301996static void hif_msg_callbacks_install(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001997{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301998 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001999
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302000 qdf_mem_copy(&hif_state->msg_callbacks_current,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002001 &hif_state->msg_callbacks_pending,
2002 sizeof(hif_state->msg_callbacks_pending));
2003}
2004
Komal Seelam5584a7c2016-02-24 19:22:48 +05302005void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
2006 uint8_t *DLPipe)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002007{
2008 int ul_is_polled, dl_is_polled;
2009
Komal Seelam644263d2016-02-22 20:45:49 +05302010 (void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002011 ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
2012}
2013
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002014/**
2015 * hif_dump_pipe_debug_count() - Log error count
Komal Seelam644263d2016-02-22 20:45:49 +05302016 * @scn: hif_softc pointer.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002017 *
2018 * Output the pipe error counts of each pipe to log file
2019 *
2020 * Return: N/A
2021 */
Komal Seelam644263d2016-02-22 20:45:49 +05302022void hif_dump_pipe_debug_count(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002023{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302024 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002025 int pipe_num;
2026
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002027 if (hif_state == NULL) {
2028 HIF_ERROR("%s hif_state is NULL", __func__);
2029 return;
2030 }
2031 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2032 struct HIF_CE_pipe_info *pipe_info;
2033
2034 pipe_info = &hif_state->pipe_info[pipe_num];
2035
2036 if (pipe_info->nbuf_alloc_err_count > 0 ||
2037 pipe_info->nbuf_dma_err_count > 0 ||
2038 pipe_info->nbuf_ce_enqueue_err_count)
2039 HIF_ERROR(
2040 "%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
2041 __func__, pipe_info->pipe_num,
2042 atomic_read(&pipe_info->recv_bufs_needed),
2043 pipe_info->nbuf_alloc_err_count,
2044 pipe_info->nbuf_dma_err_count,
2045 pipe_info->nbuf_ce_enqueue_err_count);
2046 }
2047}
2048
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002049static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
2050 void *nbuf, uint32_t *error_cnt,
2051 enum hif_ce_event_type failure_type,
2052 const char *failure_type_string)
2053{
2054 int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
2055 struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
2056 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2057 int ce_id = CE_state->id;
2058 uint32_t error_cnt_tmp;
2059
2060 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2061 error_cnt_tmp = ++(*error_cnt);
2062 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Himanshu Agarwal38cea4a2017-03-30 19:02:52 +05302063 HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002064 __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
2065 failure_type_string);
2066 hif_record_ce_desc_event(scn, ce_id, failure_type,
c_cgodavfda96ad2017-09-07 16:16:00 +05302067 NULL, nbuf, bufs_needed_tmp, 0);
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002068 /* if we fail to allocate the last buffer for an rx pipe,
2069 * there is no trigger to refill the ce and we will
2070 * eventually crash
2071 */
Himanshu Agarwalbedeed92017-03-21 14:05:10 +05302072 if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08002073 qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
Himanshu Agarwalbedeed92017-03-21 14:05:10 +05302074
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002075}
2076
2077
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08002078
2079
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302080QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002081{
2082 struct CE_handle *ce_hdl;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302083 qdf_size_t buf_sz;
Komal Seelam644263d2016-02-22 20:45:49 +05302084 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302085 QDF_STATUS status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002086 uint32_t bufs_posted = 0;
2087
2088 buf_sz = pipe_info->buf_sz;
2089 if (buf_sz == 0) {
2090 /* Unused Copy Engine */
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302091 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002092 }
2093
2094 ce_hdl = pipe_info->ce_hdl;
2095
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302096 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002097 while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302098 qdf_dma_addr_t CE_data; /* CE space buffer address */
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302099 qdf_nbuf_t nbuf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002100
2101 atomic_dec(&pipe_info->recv_bufs_needed);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302102 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002103
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302104 nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002105 if (!nbuf) {
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002106 hif_post_recv_buffers_failure(pipe_info, nbuf,
2107 &pipe_info->nbuf_alloc_err_count,
2108 HIF_RX_NBUF_ALLOC_FAILURE,
2109 "HIF_RX_NBUF_ALLOC_FAILURE");
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302110 return QDF_STATUS_E_NOMEM;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002111 }
2112
2113 /*
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302114 * qdf_nbuf_peek_header(nbuf, &data, &unused);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002115 * CE_data = dma_map_single(dev, data, buf_sz, );
2116 * DMA_FROM_DEVICE);
2117 */
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302118 status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302119 QDF_DMA_FROM_DEVICE);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002120
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302121 if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002122 hif_post_recv_buffers_failure(pipe_info, nbuf,
2123 &pipe_info->nbuf_dma_err_count,
2124 HIF_RX_NBUF_MAP_FAILURE,
2125 "HIF_RX_NBUF_MAP_FAILURE");
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302126 qdf_nbuf_free(nbuf);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302127 return status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002128 }
2129
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302130 CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002131
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302132 qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002133 buf_sz, DMA_FROM_DEVICE);
2134 status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302135 if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002136 hif_post_recv_buffers_failure(pipe_info, nbuf,
2137 &pipe_info->nbuf_ce_enqueue_err_count,
2138 HIF_RX_NBUF_ENQUEUE_FAILURE,
2139 "HIF_RX_NBUF_ENQUEUE_FAILURE");
2140
Govind Singh4fcafd42016-08-08 12:37:31 +05302141 qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
2142 QDF_DMA_FROM_DEVICE);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302143 qdf_nbuf_free(nbuf);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302144 return status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002145 }
2146
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302147 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002148 bufs_posted++;
2149 }
2150 pipe_info->nbuf_alloc_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07002151 (pipe_info->nbuf_alloc_err_count > bufs_posted) ?
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002152 pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
2153 pipe_info->nbuf_dma_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07002154 (pipe_info->nbuf_dma_err_count > bufs_posted) ?
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002155 pipe_info->nbuf_dma_err_count - bufs_posted : 0;
2156 pipe_info->nbuf_ce_enqueue_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07002157 (pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002158 pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002159
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302160 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002161
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302162 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002163}
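
/*
 * Refill flow sketch (illustrative): this mirrors what the rx completion
 * path in hif_pci_ce_recv_data() above does - account for the consumed
 * slot, then replenish the ring:
 *
 *	atomic_inc(&pipe_info->recv_bufs_needed);
 *	qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
 *	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
 *		// Non-fastpath pipes recover later via the
 *		// oom_allocation_work scheduled from
 *		// hif_post_recv_buffers_failure().
 *	}
 */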
2164
2165/*
2166 * Try to post all desired receive buffers for all pipes.
Govind Singhcaa850e2017-04-20 16:41:36 +05302167 * Returns QDF_STATUS_SUCCESS for a non-fastpath rx copy engine, since
2168 * oom_allocation_work will be scheduled to recover any failures;
2169 * returns an error status if unable to completely replenish receive
2170 * buffers for a fastpath rx copy engine.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002171 */
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302172QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002173{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302174 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302175 int pipe_num;
Aditya Sathish61f7fa32018-03-27 17:16:33 +05302176 struct CE_state *ce_state = NULL;
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302177 QDF_STATUS qdf_status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002178
2179 A_TARGET_ACCESS_LIKELY(scn);
2180 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2181 struct HIF_CE_pipe_info *pipe_info;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002182
Houston Hoffman85925072016-05-06 17:02:18 -07002183 ce_state = scn->ce_id_to_state[pipe_num];
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002184 pipe_info = &hif_state->pipe_info[pipe_num];
Houston Hoffman85925072016-05-06 17:02:18 -07002185
2186 if (hif_is_nss_wifi_enabled(scn) &&
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002187 ce_state && (ce_state->htt_rx_data))
Houston Hoffman85925072016-05-06 17:02:18 -07002188 continue;
Houston Hoffman85925072016-05-06 17:02:18 -07002189
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302190 qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
Aditya Sathish61f7fa32018-03-27 17:16:33 +05302191 if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state &&
Govind Singhcaa850e2017-04-20 16:41:36 +05302192 ce_state->htt_rx_data &&
2193 scn->fastpath_mode_on) {
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302194 A_TARGET_ACCESS_UNLIKELY(scn);
2195 return qdf_status;
Govind Singhcaa850e2017-04-20 16:41:36 +05302196 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002197 }
2198
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002199 A_TARGET_ACCESS_UNLIKELY(scn);
2200
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302201 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002202}
2203
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302204QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002205{
Komal Seelam644263d2016-02-22 20:45:49 +05302206 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05302207 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302208 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002209
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07002210 hif_update_fastpath_recv_bufs_cnt(scn);
2211
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07002212 hif_msg_callbacks_install(scn);
2213
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002214 if (hif_completion_thread_startup(hif_state))
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302215 return QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002216
Houston Hoffman271951f2016-11-12 15:24:27 -08002217 /* enable buffer cleanup */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002218 hif_state->started = true;
2219
Houston Hoffman271951f2016-11-12 15:24:27 -08002220 /* Post buffers once to start things off. */
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302221 qdf_status = hif_post_recv_buffers(scn);
2222 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
Houston Hoffman271951f2016-11-12 15:24:27 -08002223 /* cleanup is done in hif_ce_disable */
2224 HIF_ERROR("%s:failed to post buffers", __func__);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302225 return qdf_status;
Houston Hoffman271951f2016-11-12 15:24:27 -08002226 }
2227
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302228 return qdf_status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002229}
2230
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002231static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002232{
Komal Seelam644263d2016-02-22 20:45:49 +05302233 struct hif_softc *scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002234 struct CE_handle *ce_hdl;
2235 uint32_t buf_sz;
2236 struct HIF_CE_state *hif_state;
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302237 qdf_nbuf_t netbuf;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302238 qdf_dma_addr_t CE_data;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002239 void *per_CE_context;
2240
2241 buf_sz = pipe_info->buf_sz;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002242 /* Unused Copy Engine */
2243 if (buf_sz == 0)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002244 return;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002245
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002246
2247 hif_state = pipe_info->HIF_CE_state;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002248 if (!hif_state->started)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002249 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002250
Komal Seelam02cf2f82016-02-22 20:44:25 +05302251 scn = HIF_GET_SOFTC(hif_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002252 ce_hdl = pipe_info->ce_hdl;
2253
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002254 if (scn->qdf_dev == NULL)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002255 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002256 while (ce_revoke_recv_next
2257 (ce_hdl, &per_CE_context, (void **)&netbuf,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302258 &CE_data) == QDF_STATUS_SUCCESS) {
Govind Singhcaa850e2017-04-20 16:41:36 +05302259 if (netbuf) {
2260 qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
2261 QDF_DMA_FROM_DEVICE);
2262 qdf_nbuf_free(netbuf);
2263 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002264 }
2265}
2266
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002267static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002268{
2269 struct CE_handle *ce_hdl;
2270 struct HIF_CE_state *hif_state;
Komal Seelam644263d2016-02-22 20:45:49 +05302271 struct hif_softc *scn;
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302272 qdf_nbuf_t netbuf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002273 void *per_CE_context;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302274 qdf_dma_addr_t CE_data;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002275 unsigned int nbytes;
2276 unsigned int id;
2277 uint32_t buf_sz;
2278 uint32_t toeplitz_hash_result;
2279
2280 buf_sz = pipe_info->buf_sz;
2281 if (buf_sz == 0) {
2282 /* Unused Copy Engine */
2283 return;
2284 }
2285
2286 hif_state = pipe_info->HIF_CE_state;
2287 if (!hif_state->started) {
2288 return;
2289 }
2290
Komal Seelam02cf2f82016-02-22 20:44:25 +05302291 scn = HIF_GET_SOFTC(hif_state);
2292
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002293 ce_hdl = pipe_info->ce_hdl;
2294
2295 while (ce_cancel_send_next
2296 (ce_hdl, &per_CE_context,
2297 (void **)&netbuf, &CE_data, &nbytes,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302298 &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002299 if (netbuf != CE_SENDLIST_ITEM_CTXT) {
2300 /*
2301 * Packets enqueued by htt_h2t_ver_req_msg() and
2302 * htt_h2t_rx_ring_cfg_msg_ll() have already been
2303 * freed in htt_htc_misc_pkt_pool_free() in
2304 * wlantl_close(), so do not free them here again
Houston Hoffman29573d92015-10-20 17:49:44 -07002305 * by checking whether it's the endpoint
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002306 * which they are queued in.
2307 */
Nirav Shahd7f91592016-04-21 14:18:43 +05302308 if (id == scn->htc_htt_tx_endpoint)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002309 return;
Nirav Shahd7f91592016-04-21 14:18:43 +05302310 /* Indicate the completion to higher
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002311 * layer to free the buffer
2312 */
2313 if (pipe_info->pipe_callbacks.txCompletionHandler)
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05302314 pipe_info->pipe_callbacks.
2315 txCompletionHandler(pipe_info->
2316 pipe_callbacks.Context,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002317 netbuf, id, toeplitz_hash_result);
2318 }
2319 }
2320}
2321
2322/*
2323 * Cleanup residual buffers for device shutdown:
2324 * buffers that were enqueued for receive
2325 * buffers that were to be sent
2326 * Note: Buffers that had completed but which were
2327 * not yet processed are on a completion queue. They
2328 * are handled when the completion thread shuts down.
2329 */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002330static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002331{
2332 int pipe_num;
Komal Seelam644263d2016-02-22 20:45:49 +05302333 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffman85925072016-05-06 17:02:18 -07002334 struct CE_state *ce_state;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002335
Komal Seelam02cf2f82016-02-22 20:44:25 +05302336 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002337 struct HIF_CE_pipe_info *pipe_info;
2338
Houston Hoffman85925072016-05-06 17:02:18 -07002339 ce_state = scn->ce_id_to_state[pipe_num];
2340 if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2341 ((ce_state->htt_tx_data) ||
2342 (ce_state->htt_rx_data))) {
2343 continue;
2344 }
2345
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002346 pipe_info = &hif_state->pipe_info[pipe_num];
2347 hif_recv_buffer_cleanup_on_pipe(pipe_info);
2348 hif_send_buffer_cleanup_on_pipe(pipe_info);
2349 }
2350}
2351
Komal Seelam5584a7c2016-02-24 19:22:48 +05302352void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002353{
Komal Seelam644263d2016-02-22 20:45:49 +05302354 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05302355 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Komal Seelam644263d2016-02-22 20:45:49 +05302356
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002357 hif_buffer_cleanup(hif_state);
2358}
2359
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08002360static void hif_destroy_oom_work(struct hif_softc *scn)
2361{
2362 struct CE_state *ce_state;
2363 int ce_id;
2364
2365 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2366 ce_state = scn->ce_id_to_state[ce_id];
2367 if (ce_state)
2368 qdf_destroy_work(scn->qdf_dev,
2369 &ce_state->oom_allocation_work);
2370 }
2371}
2372
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05302373void hif_ce_stop(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002374{
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05302375 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002376 int pipe_num;
2377
Houston Hoffmana69581e2016-11-14 18:03:19 -08002378 /*
2379 * before cleaning up any memory, ensure irq &
2380 * bottom half contexts will not be re-entered
2381 */
Houston Hoffman7622cd32017-04-06 14:17:49 -07002382 hif_disable_isr(&scn->osc);
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08002383 hif_destroy_oom_work(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002384 scn->hif_init_done = false;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002385
2386 /*
2387 * At this point, asynchronous threads are stopped,
2388 * The Target should not DMA nor interrupt, Host code may
2389 * not initiate anything more. So we just need to clean
2390 * up Host-side state.
2391 */
2392
2393 if (scn->athdiag_procfs_inited) {
2394 athdiag_procfs_remove();
2395 scn->athdiag_procfs_inited = false;
2396 }
2397
2398 hif_buffer_cleanup(hif_state);
2399
2400 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2401 struct HIF_CE_pipe_info *pipe_info;
Poddar, Siddarth725e9f52017-07-19 15:18:28 +05302402 struct CE_attr attr;
2403 struct CE_handle *ce_diag = hif_state->ce_diag;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002404
2405 pipe_info = &hif_state->pipe_info[pipe_num];
2406 if (pipe_info->ce_hdl) {
Poddar, Siddarth725e9f52017-07-19 15:18:28 +05302407 if (pipe_info->ce_hdl != ce_diag) {
2408 attr = hif_state->host_ce_config[pipe_num];
2409 if (attr.src_nentries)
2410 qdf_spinlock_destroy(&pipe_info->
2411 completion_freeq_lock);
2412 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002413 ce_fini(pipe_info->ce_hdl);
2414 pipe_info->ce_hdl = NULL;
2415 pipe_info->buf_sz = 0;
Poddar, Siddarth725e9f52017-07-19 15:18:28 +05302416 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002417 }
2418 }
2419
2420 if (hif_state->sleep_timer_init) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302421 qdf_timer_stop(&hif_state->sleep_timer);
2422 qdf_timer_free(&hif_state->sleep_timer);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002423 hif_state->sleep_timer_init = false;
2424 }
2425
2426 hif_state->started = false;
2427}
2428
Houston Hoffman748e1a62017-03-30 17:20:42 -07002429
Houston Hoffman854e67f2016-03-14 21:11:39 -07002430/**
2431 * hif_get_target_ce_config() - get copy engine configuration
2432 * @target_ce_config_ret: basic copy engine configuration
2433 * @target_ce_config_sz_ret: size of the basic configuration in bytes
2434 * @target_service_to_ce_map_ret: service mapping for the copy engines
2435 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
2436 * @target_shadow_reg_cfg_ret: shadow register configuration
2437 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
2438 *
2439 * Provides an accessor to these values outside of this file.
2440 * Currently these are stored in static pointers to const sections.
2441 * There are multiple configurations to select from at compile time.
2442 * Runtime selection would need to consider mode, target type and bus type.
2443 *
2444 * Return: return by parameter.
2445 */
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302446void hif_get_target_ce_config(struct hif_softc *scn,
2447 struct CE_pipe_config **target_ce_config_ret,
Houston Hoffman748e1a62017-03-30 17:20:42 -07002448 uint32_t *target_ce_config_sz_ret,
Houston Hoffman854e67f2016-03-14 21:11:39 -07002449 struct service_to_pipe **target_service_to_ce_map_ret,
Houston Hoffman748e1a62017-03-30 17:20:42 -07002450 uint32_t *target_service_to_ce_map_sz_ret,
Houston Hoffman854e67f2016-03-14 21:11:39 -07002451 struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
Houston Hoffman748e1a62017-03-30 17:20:42 -07002452 uint32_t *shadow_cfg_sz_ret)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002453{
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302454 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2455
2456 *target_ce_config_ret = hif_state->target_ce_config;
2457 *target_ce_config_sz_ret = hif_state->target_ce_config_sz;
Houston Hoffman748e1a62017-03-30 17:20:42 -07002458
2459 hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
2460 target_service_to_ce_map_sz_ret);
Houston Hoffman854e67f2016-03-14 21:11:39 -07002461
2462 if (target_shadow_reg_cfg_ret)
2463 *target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
2464
2465 if (shadow_cfg_sz_ret)
2466 *shadow_cfg_sz_ret = shadow_cfg_sz;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002467}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002468
Houston Hoffmanf60a3482017-01-31 10:45:07 -08002469#ifdef CONFIG_SHADOW_V2
Houston Hoffman403c2df2017-01-27 12:51:15 -08002470static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
Houston Hoffman5141f9d2017-01-05 10:49:17 -08002471{
2472 int i;
2473 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2474 "%s: num_config %d\n", __func__, cfg->num_shadow_reg_v2_cfg);
2475
2476 for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
2477 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2478 "%s: i %d, val %x\n", __func__, i,
2479 cfg->shadow_reg_v2_cfg[i].addr);
2480 }
2481}
2482
Houston Hoffmanf60a3482017-01-31 10:45:07 -08002483#else
2484static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2485{
2486 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2487 "%s: CONFIG_SHADOW_V2 not defined\n", __func__);
2488}
2489#endif
2490
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002491/**
2492 * hif_wlan_enable(): call the platform driver to enable wlan
Komal Seelambd7c51d2016-02-24 10:27:30 +05302493 * @scn: HIF Context
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002494 *
2495 * This function passes the con_mode and CE configuration to
2496 * platform driver to enable wlan.
2497 *
Houston Hoffman108da402016-03-14 21:11:24 -07002498 * Return: linux error code
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002499 */
Houston Hoffman108da402016-03-14 21:11:24 -07002500int hif_wlan_enable(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002501{
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002502 struct pld_wlan_enable_cfg cfg;
2503 enum pld_driver_mode mode;
Komal Seelambd7c51d2016-02-24 10:27:30 +05302504 uint32_t con_mode = hif_get_conparam(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002505
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302506 hif_get_target_ce_config(scn,
2507 (struct CE_pipe_config **)&cfg.ce_tgt_cfg,
Houston Hoffman854e67f2016-03-14 21:11:39 -07002508 &cfg.num_ce_tgt_cfg,
2509 (struct service_to_pipe **)&cfg.ce_svc_cfg,
2510 &cfg.num_ce_svc_pipe_cfg,
2511 (struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
2512 &cfg.num_shadow_reg_cfg);
2513
2514 /* translate from structure size to array size */
2515 cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
2516 cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
2517 cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002518
Houston Hoffman5141f9d2017-01-05 10:49:17 -08002519 hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
2520 &cfg.num_shadow_reg_v2_cfg);
2521
2522 hif_print_hal_shadow_register_cfg(&cfg);
2523
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302524 if (QDF_GLOBAL_FTM_MODE == con_mode)
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002525 mode = PLD_FTM;
Balamurugan Mahalingam1666dd32017-09-14 15:19:42 +05302526 else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
2527 mode = PLD_COLDBOOT_CALIBRATION;
Houston Hoffman75ef5a52016-04-14 17:15:49 -07002528 else if (QDF_IS_EPPING_ENABLED(con_mode))
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002529 mode = PLD_EPPING;
Peng Xu7b962532015-10-02 17:17:03 -07002530 else
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002531 mode = PLD_MISSION;
Peng Xu7b962532015-10-02 17:17:03 -07002532
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07002533 if (BYPASS_QMI)
2534 return 0;
2535 else
Yuanyuan Liufd594c22016-04-25 13:59:19 -07002536 return pld_wlan_enable(scn->qdf_dev->dev, &cfg,
2537 mode, QWLAN_VERSIONSTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002538}
2539
Nirav Shah0d0cce82018-01-17 17:00:31 +05302540#ifdef WLAN_FEATURE_EPPING
2541
Houston Hoffman75ef5a52016-04-14 17:15:49 -07002542#define CE_EPPING_USES_IRQ true
2543
Nirav Shah0d0cce82018-01-17 17:00:31 +05302544void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state)
2545{
2546 if (CE_EPPING_USES_IRQ)
2547 hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
2548 else
2549 hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
2550 hif_state->target_ce_config = target_ce_config_wlan_epping;
2551 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
2552 target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
2553 shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
2554}
2555#endif
2556
Houston Hoffman108da402016-03-14 21:11:24 -07002557/**
2558 * hif_ce_prepare_config() - load the correct static tables.
2559 * @scn: hif context
2560 *
2561 * Epping uses different static attribute tables than mission mode.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002562 */
Houston Hoffman108da402016-03-14 21:11:24 -07002563void hif_ce_prepare_config(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002564{
Komal Seelambd7c51d2016-02-24 10:27:30 +05302565 uint32_t mode = hif_get_conparam(scn);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002566 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2567 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302568 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002569
Houston Hoffman10fedfc2017-01-23 15:23:09 -08002570 hif_state->ce_services = ce_services_attach(scn);
2571
Houston Hoffman710af5a2016-11-22 21:59:03 -08002572 scn->ce_count = HOST_CE_COUNT;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002573	/* If epping is enabled we need to use the epping configuration. */
Houston Hoffman75ef5a52016-04-14 17:15:49 -07002574	if (QDF_IS_EPPING_ENABLED(mode))
Nirav Shah0d0cce82018-01-17 17:00:31 +05302575		hif_ce_prepare_epping_config(hif_state);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002577
2578 switch (tgt_info->target_type) {
2579 default:
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302580 hif_state->host_ce_config = host_ce_config_wlan;
2581 hif_state->target_ce_config = target_ce_config_wlan;
2582 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002583 break;
2584 case TARGET_TYPE_AR900B:
2585 case TARGET_TYPE_QCA9984:
2586 case TARGET_TYPE_IPQ4019:
2587 case TARGET_TYPE_QCA9888:
Venkateswara Swamy Bandaru5432c1b2016-10-12 19:00:40 +05302588 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
2589 hif_state->host_ce_config =
2590 host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
2591 } else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
2592 hif_state->host_ce_config =
2593 host_lowdesc_ce_cfg_wlan_ar900b;
2594 } else {
2595 hif_state->host_ce_config = host_ce_config_wlan_ar900b;
2596 }
2597
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302598 hif_state->target_ce_config = target_ce_config_wlan_ar900b;
2599 hif_state->target_ce_config_sz =
2600 sizeof(target_ce_config_wlan_ar900b);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002601
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002602 break;
2603
2604 case TARGET_TYPE_AR9888:
2605 case TARGET_TYPE_AR9888V2:
Venkateswara Swamy Bandaru5432c1b2016-10-12 19:00:40 +05302606 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
2607 hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888;
2608 } else {
2609 hif_state->host_ce_config = host_ce_config_wlan_ar9888;
2610 }
2611
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302612 hif_state->target_ce_config = target_ce_config_wlan_ar9888;
2613 hif_state->target_ce_config_sz =
2614 sizeof(target_ce_config_wlan_ar9888);
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002615
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002616 break;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07002617
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05302618 case TARGET_TYPE_QCA8074:
Karunakar Dasinenif61cb072016-09-29 11:50:45 -07002619 if (scn->bus_type == QDF_BUS_TYPE_PCI) {
2620 hif_state->host_ce_config =
2621 host_ce_config_wlan_qca8074_pci;
2622 hif_state->target_ce_config =
2623 target_ce_config_wlan_qca8074_pci;
2624 hif_state->target_ce_config_sz =
2625 sizeof(target_ce_config_wlan_qca8074_pci);
2626 } else {
2627 hif_state->host_ce_config = host_ce_config_wlan_qca8074;
2628 hif_state->target_ce_config =
2629 target_ce_config_wlan_qca8074;
2630 hif_state->target_ce_config_sz =
2631 sizeof(target_ce_config_wlan_qca8074);
2632 }
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05302633 break;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07002634 case TARGET_TYPE_QCA6290:
2635 hif_state->host_ce_config = host_ce_config_wlan_qca6290;
2636 hif_state->target_ce_config = target_ce_config_wlan_qca6290;
2637 hif_state->target_ce_config_sz =
2638 sizeof(target_ce_config_wlan_qca6290);
Houston Hoffman748e1a62017-03-30 17:20:42 -07002639
Houston Hoffman710af5a2016-11-22 21:59:03 -08002640 scn->ce_count = QCA_6290_CE_COUNT;
Houston Hoffman31b25ec2016-09-19 13:12:30 -07002641 break;
Houston Hoffmanfb698ef2016-05-05 19:50:44 -07002642 }
Yun parkc80eea72017-10-06 15:33:36 -07002643 QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
Houston Hoffman108da402016-03-14 21:11:24 -07002644}
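
/*
 * Illustrative sketch (not part of the driver): the switch above pairs
 * each target type with three pieces of static data that must stay
 * consistent. A hypothetical new target (TARGET_TYPE_FOO and the *_foo
 * tables are made-up names) would be wired in the same way:
 *
 *	case TARGET_TYPE_FOO:
 *		hif_state->host_ce_config = host_ce_config_wlan_foo;
 *		hif_state->target_ce_config = target_ce_config_wlan_foo;
 *		hif_state->target_ce_config_sz =
 *				sizeof(target_ce_config_wlan_foo);
 *		break;
 *
 * host_ce_config drives the per-pipe ce_init() in hif_config_ce(), while
 * target_ce_config and its size are handed to the platform driver when
 * wlan is enabled.
 */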

/**
 * hif_ce_open() - do ce specific allocations
 * @hif_sc: pointer to hif context
 *
 * Return: QDF_STATUS_SUCCESS for success or QDF_STATUS_E_NOMEM
 */
QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	qdf_spinlock_create(&hif_state->irq_reg_lock);
	qdf_spinlock_create(&hif_state->keep_awake_lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * hif_ce_close() - do ce specific free
 * @hif_sc: pointer to hif context
 */
void hif_ce_close(struct hif_softc *hif_sc)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
	qdf_spinlock_destroy(&hif_state->keep_awake_lock);
}

/**
 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
 * @hif_sc: hif context
 *
 * Uses state variables to support cleaning up when hif_config_ce fails.
 */
void hif_unconfig_ce(struct hif_softc *hif_sc)
{
	int pipe_num;
	struct HIF_CE_pipe_info *pipe_info;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc);

	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ce_unregister_irq(hif_state, (1 << pipe_num));
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
		}
	}
	deinit_tasklet_workers(hif_hdl);
	if (hif_sc->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		hif_sc->athdiag_procfs_inited = false;
	}
}

#ifdef CONFIG_BYPASS_QMI
#define FW_SHARED_MEM (2 * 1024 * 1024)

/**
 * hif_post_static_buf_to_target() - post static buffer to WLAN FW
 * @scn: pointer to HIF structure
 *
 * WLAN FW needs 2 MB of DDR memory when QMI is disabled.
 *
 * Return: void
 */
static void hif_post_static_buf_to_target(struct hif_softc *scn)
{
	void *target_va;
	phys_addr_t target_pa;

	target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					     FW_SHARED_MEM, &target_pa);
	if (!target_va) {
		HIF_TRACE("Memory allocation failed; could not post target buf");
		return;
	}
	hif_write32_mb(scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
	HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
}
#else
static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
{
}
#endif

static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
				       bool wait_for_it)
{
	/* todo */
	return 0;
}

/**
 * hif_config_ce() - configure copy engines
 * @scn: hif context
 *
 * Prepares fw, copy engine hardware and host sw according
 * to the attributes selected by hif_ce_prepare_config.
 *
 * Also calls athdiag_procfs_init.
 *
 * Return: 0 for success, nonzero for failure.
 */
int hif_config_ce(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct HIF_CE_pipe_info *pipe_info;
	int pipe_num;
	struct CE_state *ce_state = NULL;

#ifdef ADRASTEA_SHADOW_REGISTERS
	int i;
#endif
	QDF_STATUS rv = QDF_STATUS_SUCCESS;

	scn->notice_send = true;
	scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;

	hif_post_static_buf_to_target(scn);

	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;

	hif_config_rri_on_ddr(scn);

	if (ce_srng_based(scn))
		scn->bus_ops.hif_target_sleep_state_adjust =
			&hif_srng_sleep_state_adjust;

	/* Initialise the CE debug history sysfs interface inputs, ce_id and
	 * index, and disable data storing.
	 */
	reset_ce_debug_history(scn);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr *attr;

		pipe_info = &hif_state->pipe_info[pipe_num];
		pipe_info->pipe_num = pipe_num;
		pipe_info->HIF_CE_state = hif_state;
		attr = &hif_state->host_ce_config[pipe_num];

		pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
		ce_state = scn->ce_id_to_state[pipe_num];
		if (!ce_state) {
			A_TARGET_ACCESS_UNLIKELY(scn);
			goto err;
		}
		qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
		QDF_ASSERT(pipe_info->ce_hdl != NULL);
		if (pipe_info->ce_hdl == NULL) {
			rv = QDF_STATUS_E_FAILURE;
			A_TARGET_ACCESS_UNLIKELY(scn);
			goto err;
		}

		ce_state->lro_data = qdf_lro_init();

		if (attr->flags & CE_ATTR_DIAG) {
			/* Reserve the last CE for
			 * Diagnostic Window support
			 */
			hif_state->ce_diag = pipe_info->ce_hdl;
			continue;
		}

		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
		    (ce_state->htt_rx_data))
			continue;

		pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
		if (attr->dest_nentries > 0) {
			atomic_set(&pipe_info->recv_bufs_needed,
				   init_buffer_count(attr->dest_nentries - 1));
			/* SRNG based CE has one entry less */
			if (ce_srng_based(scn))
				atomic_dec(&pipe_info->recv_bufs_needed);
		} else {
			atomic_set(&pipe_info->recv_bufs_needed, 0);
		}
		ce_tasklet_init(hif_state, (1 << pipe_num));
		ce_register_irq(hif_state, (1 << pipe_num));
	}

	if (athdiag_procfs_init(scn) != 0) {
		A_TARGET_ACCESS_UNLIKELY(scn);
		goto err;
	}
	scn->athdiag_procfs_inited = true;

	HIF_DBG("%s: ce_init done", __func__);

	init_tasklet_workers(hif_hdl);

	HIF_DBG("%s: X, ret = %d", __func__, rv);

#ifdef ADRASTEA_SHADOW_REGISTERS
	HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__);
	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
		HIF_DBG("%s Shadow Register%d is mapped to address %x",
			__func__, i,
			(A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
	}
#endif

	/* 0 on success (rv == QDF_STATUS_SUCCESS), nonzero otherwise */
	return rv != QDF_STATUS_SUCCESS;

err:
	/* Failure, so clean up */
	hif_unconfig_ce(scn);
	HIF_TRACE("%s: X, ret = %d", __func__, rv);
	/* QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE evaluates to nonzero */
	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
}
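
/*
 * Illustrative sketch (not part of the driver): the expected lifecycle of
 * the CE layer as seen from a bus layer, assuming the caller holds a
 * valid struct hif_softc. Error handling is elided.
 *
 *	hif_ce_open(scn);           // allocate locks / sw state
 *	hif_ce_prepare_config(scn); // pick static tables by target type
 *	hif_config_ce(scn);         // ce_init() each pipe, irqs, tasklets
 *	...
 *	hif_unconfig_ce(scn);       // ce_fini() each pipe, free irqs
 *	hif_ce_close(scn);          // destroy locks
 *
 * hif_config_ce() cleans up after itself via hif_unconfig_ce() on its
 * error path, so a failed configure only needs hif_ce_close().
 */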

#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
 * @hif_ctx: hif opaque context
 * @handler: Callback function
 * @context: handle for callback function
 *
 * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
 */
int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
				fastpath_msg_handler handler,
				void *context)
{
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	int i;

	if (!scn) {
		HIF_ERROR("%s: scn is NULL", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	if (!scn->fastpath_mode_on) {
		HIF_WARN("%s: Fastpath mode disabled", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	for (i = 0; i < scn->ce_count; i++) {
		ce_state = scn->ce_id_to_state[i];
		if (ce_state->htt_rx_data) {
			ce_state->fastpath_handler = handler;
			ce_state->context = context;
		}
	}

	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(hif_ce_fastpath_cb_register);
#endif
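
/*
 * Illustrative sketch (not part of the driver): a fastpath client would
 * register its HTT rx handler once fastpath mode is enabled. The handler
 * and context names are made-up; the real prototype comes from the
 * fastpath_msg_handler typedef.
 *
 *	if (hif_ce_fastpath_cb_register(hif_hdl, my_htt_rx_handler,
 *					my_pdev) != QDF_STATUS_SUCCESS)
 *		... fall back to the regular rx path ...
 *
 * The same handler is installed on every CE marked htt_rx_data, so it
 * must be safe to invoke from any of those CE contexts.
 */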

#ifdef IPA_OFFLOAD
/**
 * hif_ce_ipa_get_ce_resource() - get uc resource on hif
 * @scn: bus context
 * @ce_sr: copyengine source ring resource info
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * When the IPA micro controller data path offload feature is enabled,
 * HIF should release copy engine related resource information to IPA UC.
 * IPA UC will access the hardware resource with the released information.
 *
 * Return: None
 */
void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
				qdf_shared_mem_t **ce_sr,
				uint32_t *ce_sr_ring_size,
				qdf_dma_addr_t *ce_reg_paddr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;

	ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
			    ce_reg_paddr);
}
#endif /* IPA_OFFLOAD */


#ifdef ADRASTEA_SHADOW_REGISTERS

/*
 * Current shadow register config
 *
 * -----------------------------------------------------------
 * Shadow Register      |     CE   |    src/dst write index
 * -----------------------------------------------------------
 *     0            |     0    |           src
 *     1     No Config - Doesn't point to anything
 *     2     No Config - Doesn't point to anything
 *     3            |     3    |           src
 *     4            |     4    |           src
 *     5            |     5    |           src
 *     6     No Config - Doesn't point to anything
 *     7            |     7    |           src
 *     8     No Config - Doesn't point to anything
 *     9     No Config - Doesn't point to anything
 *     10    No Config - Doesn't point to anything
 *     11    No Config - Doesn't point to anything
 * -----------------------------------------------------------
 *     12    No Config - Doesn't point to anything
 *     13           |     1    |           dst
 *     14           |     2    |           dst
 *     15    No Config - Doesn't point to anything
 *     16    No Config - Doesn't point to anything
 *     17           |     5    |           dst
 *     18    No Config - Doesn't point to anything
 *     19           |     7    |           dst
 *     20           |     8    |           dst
 *     21           |     9    |           dst
 *     22           |     10   |           dst
 *     23           |     11   |           dst
 * -----------------------------------------------------------
 *
 *
 * ToDo - Move shadow register config to following in the future
 * This helps free up a block of shadow registers towards the end.
 * Can be used for other purposes
 *
 * -----------------------------------------------------------
 * Shadow Register      |     CE   |    src/dst write index
 * -----------------------------------------------------------
 *     0            |     0    |           src
 *     1            |     3    |           src
 *     2            |     4    |           src
 *     3            |     5    |           src
 *     4            |     7    |           src
 * -----------------------------------------------------------
 *     5            |     1    |           dst
 *     6            |     2    |           dst
 *     7            |     7    |           dst
 *     8            |     8    |           dst
 * -----------------------------------------------------------
 *     9     No Config - Doesn't point to anything
 *     12    No Config - Doesn't point to anything
 *     13    No Config - Doesn't point to anything
 *     14    No Config - Doesn't point to anything
 *     15    No Config - Doesn't point to anything
 *     16    No Config - Doesn't point to anything
 *     17    No Config - Doesn't point to anything
 *     18    No Config - Doesn't point to anything
 *     19    No Config - Doesn't point to anything
 *     20    No Config - Doesn't point to anything
 *     21    No Config - Doesn't point to anything
 *     22    No Config - Doesn't point to anything
 *     23    No Config - Doesn't point to anything
 * -----------------------------------------------------------
 */

u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 0:
		addr = SHADOW_VALUE0;
		break;
	case 3:
		addr = SHADOW_VALUE3;
		break;
	case 4:
		addr = SHADOW_VALUE4;
		break;
	case 5:
		addr = SHADOW_VALUE5;
		break;
	case 7:
		addr = SHADOW_VALUE7;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}
	return addr;
}

u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 1:
		addr = SHADOW_VALUE13;
		break;
	case 2:
		addr = SHADOW_VALUE14;
		break;
	case 5:
		addr = SHADOW_VALUE17;
		break;
	case 7:
		addr = SHADOW_VALUE19;
		break;
	case 8:
		addr = SHADOW_VALUE20;
		break;
	case 9:
		addr = SHADOW_VALUE21;
		break;
	case 10:
		addr = SHADOW_VALUE22;
		break;
	case 11:
		addr = SHADOW_VALUE23;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}

	return addr;
}
#endif
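
/*
 * Illustrative sketch (not part of the driver): resolving the shadow
 * register that mirrors a CE's write index. COPY_ENGINE_ID() recovers
 * the CE number from its control address, so a caller only needs the
 * ctrl_addr it already uses for register access:
 *
 *	u32 sr_shadow  = shadow_sr_wr_ind_addr(scn, ctrl_addr);  // src ring
 *	u32 dst_shadow = shadow_dst_wr_ind_addr(scn, ctrl_addr); // dst ring
 *
 * Only the CEs wired up in the table above are valid inputs; anything
 * else trips QDF_ASSERT(0) and returns 0.
 */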

#if defined(FEATURE_LRO)
void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
{
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	ce_state = scn->ce_id_to_state[ctx_id];

	return ce_state->lro_data;
}
#endif

/**
 * hif_map_service_to_pipe() - returns the ce ids pertaining to
 * this service
 * @hif_hdl: hif_opaque_softc pointer.
 * @svc_id: Service ID for which the mapping is needed.
 * @ul_pipe: address of the container in which ul pipe is returned.
 * @dl_pipe: address of the container in which dl pipe is returned.
 * @ul_is_polled: address of the container in which a bool
 *			indicating if the UL CE for this service
 *			is polled is returned.
 * @dl_is_polled: address of the container in which a bool
 *			indicating if the DL CE for this service
 *			is polled is returned.
 *
 * Return: Indicates whether the service has been found in the table.
 *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
 *         There will be info logs if either leg has not been updated
 *         because it missed the entry in the table (but this is not an
 *         error).
 */
int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
			int *dl_is_polled)
{
	int status = QDF_STATUS_E_INVAL;
	unsigned int i;
	struct service_to_pipe element;
	struct service_to_pipe *tgt_svc_map_to_use;
	uint32_t sz_tgt_svc_map_to_use;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	bool dl_updated = false;
	bool ul_updated = false;

	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
				       &sz_tgt_svc_map_to_use);

	*dl_is_polled = 0;  /* polling for received messages not supported */

	for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
		if (element.service_id == svc_id) {
			if (element.pipedir == PIPEDIR_OUT) {
				*ul_pipe = element.pipenum;
				*ul_is_polled =
					(hif_state->host_ce_config[*ul_pipe].flags &
					 CE_ATTR_DISABLE_INTR) != 0;
				ul_updated = true;
			} else if (element.pipedir == PIPEDIR_IN) {
				*dl_pipe = element.pipenum;
				dl_updated = true;
			}
			status = QDF_STATUS_SUCCESS;
		}
	}
	if (!ul_updated)
		HIF_INFO("%s: ul pipe is NOT updated for service %d",
			 __func__, svc_id);
	if (!dl_updated)
		HIF_INFO("%s: dl pipe is NOT updated for service %d",
			 __func__, svc_id);

	return status;
}
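
/*
 * Illustrative sketch (not part of the driver): a typical lookup. The
 * service id shown is one that appears in the service-to-pipe tables;
 * the surrounding variables are made-up.
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	if (hif_map_service_to_pipe(hif_hdl, WMI_CONTROL_SVC,
 *				    &ul_pipe, &dl_pipe,
 *				    &ul_polled, &dl_polled) ==
 *	    QDF_STATUS_SUCCESS)
 *		// ul_pipe/dl_pipe are CE ids usable with the pipe APIs
 *
 * hif_get_wake_ce_id() at the end of this file is an in-tree caller
 * following exactly this pattern for HTC_CTRL_RSVD_SVC.
 */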

#ifdef SHADOW_REG_DEBUG
inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
					       uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, srri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);

	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != srri_from_ddr) {
		HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
			  __func__, srri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return srri_from_ddr;
}

inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
						uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, drri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);

	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != drri_from_ddr) {
		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
			  drri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return drri_from_ddr;
}

#endif

#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_get_src_ring_read_index(): Called to get the SRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the SRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based SRRI.
 *
 * Return: SRRI
 */
inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
						uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
	if (attr.flags & CE_ATTR_DISABLE_INTR) {
		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	} else {
		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
			return A_TARGET_READ(scn,
					(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
		else
			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
					CE_ctrl_addr);
	}
}

/**
 * hif_get_dst_ring_read_index(): Called to get the DRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the DRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based DRRI.
 *
 * Return: DRRI
 */
inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
						uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];

	if (attr.flags & CE_ATTR_DISABLE_INTR) {
		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	} else {
		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
			return A_TARGET_READ(scn,
					(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
		else
			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
					CE_ctrl_addr);
	}
}

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This function allocates non cached memory on ddr and sends
 * the physical address of this memory to the CE hardware. The
 * hardware updates the RRI on this particular location.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
	unsigned int i;
	qdf_dma_addr_t paddr_rri_on_ddr;
	uint32_t high_paddr, low_paddr;

	scn->vaddr_rri_on_ddr =
		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
		scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
		&paddr_rri_on_ddr);

	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
	low_paddr = BITS0_TO_31(paddr_rri_on_ddr);
	high_paddr = BITS32_TO_35(paddr_rri_on_ddr);

	HIF_DBG("%s using srri and drri from DDR", __func__);

	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);

	for (i = 0; i < CE_COUNT; i++)
		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));

	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));
}
#else

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This is a dummy implementation for platforms that don't
 * support this functionality.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
}
#endif
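
/*
 * Illustrative sketch (not part of the driver): the RRI-on-DDR layout.
 * vaddr_rri_on_ddr is a CE_COUNT-sized array of uint32_t that the CE
 * hardware keeps current once CE_IDX_UPD_EN_SET() has been applied, so
 * a read index can be fetched without touching target registers:
 *
 *	uint32_t rri_word =
 *		scn->vaddr_rri_on_ddr[COPY_ENGINE_ID(ctrl_addr)];
 *
 * The exact packing of SRRI/DRRI within each word is target specific,
 * which is why callers go through SRRI_FROM_DDR_ADDR() and
 * DRRI_FROM_DDR_ADDR() rather than indexing the array directly.
 */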

/**
 * hif_dump_ce_registers() - dump ce registers
 * @scn: hif_softc pointer.
 *
 * Output the copy engine registers
 *
 * Return: 0 for success or error code
 */
int hif_dump_ce_registers(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
	uint16_t i;
	QDF_STATUS status;

	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
		if (scn->ce_id_to_state[i] == NULL) {
			HIF_DBG("CE%d not used.", i);
			continue;
		}

		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
					   (uint8_t *) &ce_reg_values[0],
					   ce_reg_word_size * sizeof(uint32_t));

		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("Dumping CE register failed!");
			return -EACCES;
		}
		HIF_ERROR("CE%d=>\n", i);
		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
				   (uint8_t *) &ce_reg_values[0],
				   ce_reg_word_size * sizeof(uint32_t));
		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d\n", (ce_reg_address
			  + SR_WR_INDEX_ADDRESS),
			  ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d\n", (ce_reg_address
			  + CURRENT_SRRI_ADDRESS),
			  ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d\n", (ce_reg_address
			  + DST_WR_INDEX_ADDRESS),
			  ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d\n", (ce_reg_address
			  + CURRENT_DRRI_ADDRESS),
			  ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
		qdf_print("---\n");
	}
	return 0;
}
qdf_export_symbol(hif_dump_ce_registers);
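
/*
 * Illustrative sketch (not part of the driver): hif_dump_ce_registers()
 * is a debug aid, typically invoked from an error path once the target
 * is suspected to be wedged. Because it reads target memory through the
 * diag window, the target must still be reachable:
 *
 *	if (hif_dump_ce_registers(scn))
 *		HIF_ERROR("CE register dump failed; target unreachable?");
 */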

#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct CE_ring_state *dest_ring = ce_state->dest_ring;

	if (src_ring) {
		hif_info->ul_pipe.nentries = src_ring->nentries;
		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
		hif_info->ul_pipe.sw_index = src_ring->sw_index;
		hif_info->ul_pipe.write_index = src_ring->write_index;
		hif_info->ul_pipe.hw_index = src_ring->hw_index;
		hif_info->ul_pipe.base_addr_CE_space =
			src_ring->base_addr_CE_space;
		hif_info->ul_pipe.base_addr_owner_space =
			src_ring->base_addr_owner_space;
	}

	if (dest_ring) {
		hif_info->dl_pipe.nentries = dest_ring->nentries;
		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
		hif_info->dl_pipe.write_index = dest_ring->write_index;
		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
		hif_info->dl_pipe.base_addr_CE_space =
			dest_ring->base_addr_CE_space;
		hif_info->dl_pipe.base_addr_owner_space =
			dest_ring->base_addr_owner_space;
	}

	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
	hif_info->ctrl_addr = ce_state->ctrl_addr;

	return hif_info;
}
qdf_export_symbol(hif_get_addl_pipe_info);

uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->nss_wifi_ol_mode = mode;
	return 0;
}
qdf_export_symbol(hif_set_nss_wifiol_mode);
#endif

void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->hif_attribute = hif_attrib;
}

/* Disable interrupts (only applicable to the legacy copy engine currently) */
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
	uint32_t ctrl_addr = CE_state->ctrl_addr;

	Q_TARGET_ACCESS_BEGIN(scn);
	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
	Q_TARGET_ACCESS_END(scn);
}
qdf_export_symbol(hif_disable_interrupt);

/**
 * hif_fw_event_handler() - hif fw event handler
 * @hif_state: pointer to hif ce state structure
 *
 * Process fw events by raising the registered HTC callback.
 *
 * Return: none
 */
static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
{
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	if (!msg_callbacks->fwEventHandler)
		return;

	msg_callbacks->fwEventHandler(msg_callbacks->Context,
				      QDF_STATUS_E_FAILURE);
}

#ifndef QCA_WIFI_3_0
/**
 * hif_fw_interrupt_handler() - FW interrupt handler
 * @irq: irq number
 * @arg: the user pointer
 *
 * Called from the PCI interrupt handler when a
 * firmware-generated interrupt arrives at the Host.
 *
 * Only registered for legacy CE devices.
 *
 * Return: status of handled irq
 */
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	struct hif_softc *scn = arg;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	uint32_t fw_indicator_address, fw_indicator;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return ATH_ISR_NOSCHED;

	fw_indicator_address = hif_state->fw_indicator_address;
	/* For sudden unplug this will return ~0 */
	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);

	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
		/* ACK: clear Target-side pending event */
		A_TARGET_WRITE(scn, fw_indicator_address,
			       fw_indicator & ~FW_IND_EVENT_PENDING);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;

		if (hif_state->started) {
			hif_fw_event_handler(hif_state);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 * fw_indicator is used as a bitmap, defined as below:
			 * FW_IND_EVENT_PENDING 0x1
			 * FW_IND_INITIALIZED 0x2
			 * FW_IND_NEEDRECOVER 0x4
			 */
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
					("%s: Early firmware event indicated 0x%x\n",
					 __func__, fw_indicator));
		}
	} else {
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;
	}

	return ATH_ISR_SCHED;
}
#else
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	return ATH_ISR_SCHED;
}
#endif /* #ifdef QCA_WIFI_3_0 */
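
/*
 * Illustrative sketch (not part of the driver): decoding the fw_indicator
 * bitmap read above. The flag values are the ones listed in the comment
 * inside hif_fw_interrupt_handler(); the helper itself is made-up.
 *
 *	static void decode_fw_indicator(uint32_t fw_indicator)
 *	{
 *		if (fw_indicator & FW_IND_EVENT_PENDING)   // 0x1
 *			... target has posted an event ...
 *		if (fw_indicator & FW_IND_INITIALIZED)     // 0x2
 *			... firmware finished initialization ...
 *		if (fw_indicator & FW_IND_NEEDRECOVER)     // 0x4
 *			... firmware is asking for recovery ...
 *	}
 *
 * A read of ~0 means the device vanished (e.g. sudden unplug), so the
 * raw value must be sanity-checked before testing individual bits.
 */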

/**
 * hif_wlan_disable(): call the platform driver to disable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode to the platform driver to disable
 * wlan.
 *
 * Return: void
 */
void hif_wlan_disable(struct hif_softc *scn)
{
	enum pld_driver_mode mode;
	uint32_t con_mode = hif_get_conparam(scn);

	if (scn->target_status == TARGET_STATUS_RESET)
		return;

	if (QDF_GLOBAL_FTM_MODE == con_mode)
		mode = PLD_FTM;
	else if (QDF_IS_EPPING_ENABLED(con_mode))
		mode = PLD_EPPING;
	else
		mode = PLD_MISSION;

	pld_wlan_disable(scn->qdf_dev->dev, mode);
}

int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
{
	QDF_STATUS status;
	uint8_t ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;

	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
					 HTC_CTRL_RSVD_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
		return qdf_status_to_os_return(status);
	}

	*ce_id = dl_pipe;

	return 0;
}
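
/*
 * Illustrative sketch (not part of the driver): a bus layer can use
 * hif_get_wake_ce_id() to find which CE interrupt must stay armed as a
 * wake source across suspend. The consumer shown here is plausible, not
 * a quote of real code:
 *
 *	uint8_t wake_ce_id;
 *
 *	if (!hif_get_wake_ce_id(scn, &wake_ce_id))
 *		... mark the IRQ of CE wake_ce_id as wakeup-capable ...
 */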