/*
 * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include <target_type.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#ifndef CONFIG_WIN
#include "qwlan_version.h"
#endif
#include "qdf_module.h"

#define CE_POLL_TIMEOUT 10      /* ms */

#define AGC_DUMP         1
#define CHANINFO_DUMP    2
#define BB_WATCHDOG_DUMP 3
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
#define PCIE_ACCESS_DUMP 4
#endif
#include "mp_dev.h"

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290)) && \
	!defined(QCA_WIFI_SUPPORT_SRNG)
#define QCA_WIFI_SUPPORT_SRNG
#endif

/* Forward references */
QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix EV118783: poll to check whether a BMI response has arrived, rather
 * than waiting for an interrupt that may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC  1000

#ifdef CONFIG_BYPASS_QMI
#define BYPASS_QMI 1
#else
#define BYPASS_QMI 0
#endif

#ifdef CONFIG_WIN
#if ENABLE_10_4_FW_HDR
#define WDI_IPA_SERVICE_GROUP 5
#define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
#define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
#define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
#endif /* ENABLE_10_4_FW_HDR */
#endif

QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn);
static void hif_config_rri_on_ddr(struct hif_softc *scn);

/**
 * hif_target_access_log_dump() - dump the target register access log
 *
 * Return: n/a
 */
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
static void hif_target_access_log_dump(void)
{
	hif_target_dump_access_log();
}
#endif

void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
		      uint8_t cmd_id, bool start)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	switch (cmd_id) {
	case AGC_DUMP:
		if (start)
			priv_start_agc(scn);
		else
			priv_dump_agc(scn);
		break;
	case CHANINFO_DUMP:
		if (start)
			priv_start_cap_chaninfo(scn);
		else
			priv_dump_chaninfo(scn);
		break;
	case BB_WATCHDOG_DUMP:
		priv_dump_bbwatchdog(scn);
		break;
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	case PCIE_ACCESS_DUMP:
		hif_target_access_log_dump();
		break;
#endif
	default:
		HIF_ERROR("%s: Invalid htc dump command", __func__);
		break;
	}
}

static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;

	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}

static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	QDF_ASSERT(0); /* n too large */
	return 0;
}
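
/*
 * Worked examples for roundup_pwr2() (illustrative notes only, not used by
 * the driver):
 *   roundup_pwr2(512)  -> 512   (512 is already a power of 2)
 *   roundup_pwr2(100)  -> 128   (test_pwr2 doubles from 4 until it exceeds n)
 *   roundup_pwr2(1500) -> 2048
 * Non-power-of-2 inputs below 4 (e.g. 3) round up to 4, the starting value.
 */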

#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40

static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
#ifdef QCA_WIFI_3_0_ADRASTEA
	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
#endif
};

#ifdef QCN7605_SUPPORT
static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

#ifdef WLAN_FEATURE_EPPING
static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
 * mapping of HTC services to HIF pipes.
 */
/*
 * This authoritative table defines Copy Engine configuration and the mapping
 * of services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
 ============================================================================
 Purpose    | Service / Endpoint   | CE   | Direction | Xfer Size | Xfer
            |                      |      |           |           | Frequency
 ============================================================================
 tx         | HTT_DATA (downlink)  | CE 0 | h->t      | medium -  | very frequent
 descriptor |                      |      |           | O(100B)   | and regular
 download   |                      |      |           |           |
 ----------------------------------------------------------------------------
 rx         | HTT_DATA (uplink)    | CE 1 | t->h      | small -   | frequent and
 indication |                      |      |           | O(10B)    | regular
 upload     |                      |      |           |           |
 ----------------------------------------------------------------------------
 MSDU       | DATA_BK (uplink)     | CE 2 | t->h      | large -   | rare
 upload     |                      |      |           | O(1000B)  | (frequent
 e.g. noise |                      |      |           |           | during IP1.0
 packets    |                      |      |           |           | testing)
 ----------------------------------------------------------------------------
 MSDU       | DATA_BK (downlink)   | CE 3 | h->t      | large -   | very rare
 download   |                      |      |           | O(1000B)  | (frequent
 e.g.       |                      |      |           |           | during IP1.0
 misdirected|                      |      |           |           | testing)
 EAPOL      |                      |      |           |           |
 packets    |                      |      |           |           |
 ----------------------------------------------------------------------------
 n/a        | DATA_BE, DATA_VI     | CE 2 | t->h      |           | never(?)
            | DATA_VO (uplink)     |      |           |           |
 ----------------------------------------------------------------------------
 n/a        | DATA_BE, DATA_VI     | CE 3 | h->t      |           | never(?)
            | DATA_VO (downlink)   |      |           |           |
 ----------------------------------------------------------------------------
 WMI events | WMI_CONTROL (uplink) | CE 4 | t->h      | medium -  | infrequent
            |                      |      |           | O(100B)   |
 ----------------------------------------------------------------------------
 WMI        | WMI_CONTROL          | CE 5 | h->t      | medium -  | infrequent
 messages   | (downlink)           |      |           | O(100B)   |
 ----------------------------------------------------------------------------
 n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h      |           | never(?)
            | HTC_RAW_STREAMS      |      |           |           |
            | (uplink)             |      |           |           |
 ----------------------------------------------------------------------------
 n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t      |           | never(?)
            | HTC_RAW_STREAMS      |      |           |           |
            | (downlink)           |      |           |           |
 ----------------------------------------------------------------------------
 diag       | none (raw CE)        | CE 7 | t<>h      | 4         | Diag Window
            |                      |      |           |           | infrequent
 ============================================================================
 */

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		5,
	},
#if defined(QCA_WIFI_3_0_ADRASTEA)
	{
		HTT_DATA2_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		9,
	},
	{
		HTT_DATA3_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		10,
	},
	{
		PACKET_LOG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		11,
	},
#endif
	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN = TARGET to HOST */
#if (defined(QCA_WIFI_QCA8074))
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
};
#endif

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN = TARGET to HOST */
#ifdef QCN7605_SUPPORT
static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
#ifdef IPA_OFFLOAD
	{ WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
#else
	{ HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
#endif
	{ PACKET_LOG_SVC, PIPEDIR_IN, 7, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif

#if (defined(QCA_WIFI_QCA6290))
#ifdef CONFIG_WIN
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6390))
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
};
#endif

static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
#ifdef WLAN_FEATURE_FASTPATH
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		5,
	},
#else /* WLAN_FEATURE_FASTPATH */
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
#endif /* WLAN_FEATURE_FASTPATH */

	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

#ifdef WLAN_FEATURE_EPPING
static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
	{0, 0, 0,},                             /* Must be last */
};

void hif_select_epping_service_to_pipe_map(struct service_to_pipe
					   **tgt_svc_map_to_use,
					   uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
	*sz_tgt_svc_map_to_use =
		sizeof(target_service_to_ce_map_wlan_epping);
}
#endif

#ifdef QCN7605_SUPPORT
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
}
#else
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	HIF_ERROR("%s: QCN7605 not supported", __func__);
}
#endif

static void hif_select_service_to_pipe_map(struct hif_softc *scn,
			struct service_to_pipe **tgt_svc_map_to_use,
			uint32_t *sz_tgt_svc_map_to_use)
{
	uint32_t mode = hif_get_conparam(scn);
	struct hif_target_info *tgt_info = &scn->target_info;

	if (QDF_IS_EPPING_ENABLED(mode)) {
		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
						      sz_tgt_svc_map_to_use);
	} else {
		switch (tgt_info->target_type) {
		default:
			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_wlan);
			break;
		case TARGET_TYPE_QCN7605:
			hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
						  sz_tgt_svc_map_to_use);
			break;
		case TARGET_TYPE_AR900B:
		case TARGET_TYPE_QCA9984:
		case TARGET_TYPE_IPQ4019:
		case TARGET_TYPE_QCA9888:
		case TARGET_TYPE_AR9888:
		case TARGET_TYPE_AR9888V2:
			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_ar900b);
			break;
		case TARGET_TYPE_QCA6290:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6290);
			break;
		case TARGET_TYPE_QCA6390:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6390);
			break;
		case TARGET_TYPE_QCA8074:
		case TARGET_TYPE_QCA8074V2:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074);
			break;
		}
	}
}

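/*
 * Illustrative (hypothetical) use of hif_select_service_to_pipe_map(): a
 * caller resolves the per-target map once and then walks it to find the
 * pipe carrying a given service. This sketch is not part of the driver:
 *
 *	struct service_to_pipe *map;
 *	uint32_t map_sz, i;
 *
 *	hif_select_service_to_pipe_map(scn, &map, &map_sz);
 *	for (i = 0; i < map_sz / sizeof(*map); i++)
 *		if (map[i].service_id == WMI_CONTROL_SVC &&
 *		    map[i].pipedir == PIPEDIR_OUT)
 *			return map[i].pipenum;
 */
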
/**
 * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
 * @ce_state : pointer to the state context of the CE
 *
 * Description:
 *   Sets the htt_rx_data or htt_tx_data attribute of the state structure
 *   if the CE serves one of the HTT DATA services.
 *
 * Return: true if the CE serves an HTT DATA service, false otherwise
 */
static bool ce_mark_datapath(struct CE_state *ce_state)
{
	struct service_to_pipe *svc_map;
	uint32_t map_sz, map_len;
	int i;
	bool rc = false;

	if (ce_state != NULL) {
		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
					       &map_sz);

		map_len = map_sz / sizeof(struct service_to_pipe);
		for (i = 0; i < map_len; i++) {
			if ((svc_map[i].pipenum == ce_state->id) &&
			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
				/* HTT CEs are unidirectional */
				if (svc_map[i].pipedir == PIPEDIR_IN)
					ce_state->htt_rx_data = true;
				else
					ce_state->htt_tx_data = true;
				rc = true;
			}
		}
	}
	return rc;
}

/**
 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
 * @ce_id: ce in question
 * @ring: ring state being examined
 * @type: "src_ring" or "dest_ring" string for identifying the ring
 *
 * Warns on non-zero index values.
 * Causes a kernel panic if the ring is not empty during initialization.
 */
static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
					 char *type)
{
	if (ring->write_index != 0 || ring->sw_index != 0)
		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
			  ce_id, type, ring->sw_index, ring->write_index);
	if (ring->write_index != ring->sw_index)
		QDF_BUG(0);
}

#ifdef IPA_OFFLOAD
/**
 * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: CE in question
 * @base_addr: pointer to copyengine ring base address
 * @ce_ring: copyengine instance
 * @nentries: number of entries to be allocated
 * @desc_size: ce desc size
 *
 * Return: QDF_STATUS_SUCCESS - for success
 */
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	if (CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) {
		scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(scn->qdf_dev,
			nentries * desc_size + CE_DESC_RING_ALIGN);
		if (!scn->ipa_ce_ring) {
			HIF_ERROR("%s: Failed to allocate memory for IPA ce ring",
				  __func__);
			return QDF_STATUS_E_NOMEM;
		}
		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
						  &scn->ipa_ce_ring->mem_info);
		ce_ring->base_addr_owner_space_unaligned =
						scn->ipa_ce_ring->vaddr;
	} else {
		ce_ring->base_addr_owner_space_unaligned =
			qdf_mem_alloc_consistent(scn->qdf_dev,
						 scn->qdf_dev->dev,
						 (nentries * desc_size +
						 CE_DESC_RING_ALIGN),
						 base_addr);
		if (!ce_ring->base_addr_owner_space_unaligned) {
			HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
				  __func__, CE_id);
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * ce_free_desc_ring() - Frees copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: CE in question
 * @ce_ring: copyengine instance
 * @desc_size: ce desc size
 *
 * Return: None
 */
static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	if (CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) {
		qdf_mem_shared_mem_free(scn->qdf_dev,
					scn->ipa_ce_ring);
		ce_ring->base_addr_owner_space_unaligned = NULL;
	} else {
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
			ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
			ce_ring->base_addr_owner_space_unaligned,
			ce_ring->base_addr_CE_space, 0);
		ce_ring->base_addr_owner_space_unaligned = NULL;
	}
}
#else
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	ce_ring->base_addr_owner_space_unaligned =
		qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					 (nentries * desc_size +
					 CE_DESC_RING_ALIGN), base_addr);
	if (!ce_ring->base_addr_owner_space_unaligned) {
		HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
			  __func__, CE_id);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
		ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
		ce_ring->base_addr_owner_space_unaligned,
		ce_ring->base_addr_CE_space, 0);
	ce_ring->base_addr_owner_space_unaligned = NULL;
}
#endif /* IPA_OFFLOAD */

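/*
 * Sizing note (illustrative): both allocation paths above reserve
 * nentries * desc_size + CE_DESC_RING_ALIGN bytes so the base address can
 * later be rounded up to a CE_DESC_RING_ALIGN boundary without overrunning
 * the buffer. Assuming, for example, nentries = 512, desc_size = 16 and
 * CE_DESC_RING_ALIGN = 8, the allocation is 512 * 16 + 8 = 8200 bytes, of
 * which at most 7 bytes are lost to alignment.
 */
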
/**
 * ce_srng_based() - Does this target use srng
 * @scn: pointer to the hif context
 *
 * Return: true if the target is SRNG based, false otherwise
 */
bool ce_srng_based(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);

	switch (tgt_info->target_type) {
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6290:
	case TARGET_TYPE_QCA6390:
		return true;
	default:
		return false;
	}
}
qdf_export_symbol(ce_srng_based);

#ifdef QCA_WIFI_SUPPORT_SRNG
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	if (ce_srng_based(scn))
		return ce_services_srng();

	return ce_services_legacy();
}
#else /* QCA_WIFI_SUPPORT_SRNG */
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	return ce_services_legacy();
}
#endif /* QCA_WIFI_SUPPORT_SRNG */

static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
		struct pld_shadow_reg_v2_cfg **shadow_config,
		int *num_shadow_registers_configured)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
			scn, shadow_config, num_shadow_registers_configured);
}

static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
					uint8_t ring_type)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_get_desc_size(ring_type);
}

static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
						 uint8_t ring_type,
						 uint32_t nentries)
{
	uint32_t ce_nbytes;
	char *ptr;
	qdf_dma_addr_t base_addr;
	struct CE_ring_state *ce_ring;
	uint32_t desc_size;
	struct hif_softc *scn = CE_state->scn;

	ce_nbytes = sizeof(struct CE_ring_state)
		+ (nentries * sizeof(void *));
	ptr = qdf_mem_malloc(ce_nbytes);
	if (!ptr)
		return NULL;

	ce_ring = (struct CE_ring_state *)ptr;
	ptr += sizeof(struct CE_ring_state);
	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	ce_ring->low_water_mark_nentries = 0;
	ce_ring->high_water_mark_nentries = nentries;
	ce_ring->per_transfer_context = (void **)ptr;

	desc_size = ce_get_desc_size(scn, ring_type);

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
			       ce_ring, nentries,
			       desc_size) !=
	    QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: ring has no DMA mem",
			  __func__);
		qdf_mem_free(ce_ring);
		return NULL;
	}
	ce_ring->base_addr_CE_space_unaligned = base_addr;

	/* Correctly initialize memory to 0 to
	 * prevent garbage data from crashing the system
	 * when downloading firmware
	 */
	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
		     nentries * desc_size +
		     CE_DESC_RING_ALIGN);

	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {

		ce_ring->base_addr_CE_space =
			(ce_ring->base_addr_CE_space_unaligned +
			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);

		ce_ring->base_addr_owner_space = (void *)
			(((size_t) ce_ring->base_addr_owner_space_unaligned +
			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
	} else {
		ce_ring->base_addr_CE_space =
				ce_ring->base_addr_CE_space_unaligned;
		ce_ring->base_addr_owner_space =
				ce_ring->base_addr_owner_space_unaligned;
	}

	return ce_ring;
}
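
/*
 * The align-up in ce_alloc_ring_state() is the standard mask trick
 * (addr + align - 1) & ~(align - 1), valid when align is a power of 2.
 * Worked example (illustrative, assuming CE_DESC_RING_ALIGN == 8):
 *   addr = 0x1005 -> (0x1005 + 7) & ~7 = 0x100c & ~7 = 0x1008
 *   addr = 0x1008 -> already aligned, unchanged
 */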

static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
			 uint32_t ce_id, struct CE_ring_state *ring,
			 struct CE_attr *attr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
						     ring, attr);
}

int hif_ce_bus_early_suspend(struct hif_softc *scn)
{
	uint8_t ul_pipe, dl_pipe;
	int ce_id, status, ul_is_polled, dl_is_polled;
	struct CE_state *ce_state;

	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		HIF_ERROR("%s: pipe_mapping failure", __func__);
		return status;
	}

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (ce_id == ul_pipe)
			continue;
		if (ce_id == dl_pipe)
			continue;

		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_RUNNING)
			ce_state->state = CE_PAUSED;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
	}

	return status;
}

int hif_ce_bus_late_resume(struct hif_softc *scn)
{
	int ce_id;
	struct CE_state *ce_state;
	int write_index;
	bool index_updated;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_PENDING) {
			write_index = ce_state->src_ring->write_index;
			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
						  write_index);
			ce_state->state = CE_RUNNING;
			index_updated = true;
		} else {
			index_updated = false;
		}

		if (ce_state->state == CE_PAUSED)
			ce_state->state = CE_RUNNING;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);

		if (index_updated)
			hif_record_ce_desc_event(scn, ce_id,
						 RESUME_WRITE_INDEX_UPDATE,
						 NULL, NULL, write_index, 0);
	}

	return 0;
}

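/*
 * Suspend/resume pairing (descriptive note): hif_ce_bus_early_suspend()
 * pauses every running CE except the WMI control pipes, while
 * hif_ce_bus_late_resume() re-runs paused CEs and, for CEs left in
 * CE_PENDING, replays the cached src ring write_index so that descriptors
 * queued across suspend are picked up by the target.
 */
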
/**
 * ce_oom_recovery() - try to recover rx ce from oom condition
 * @context: CE_state of the CE with oom rx ring
 *
 * The executing work will continue to be rescheduled until
 * at least 1 descriptor is successfully posted to the rx ring.
 *
 * Return: none
 */
static void ce_oom_recovery(void *context)
{
	struct CE_state *ce_state = context;
	struct hif_softc *scn = ce_state->scn;
	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&ce_softc->pipe_info[ce_state->id];

	hif_post_recv_buffers_for_pipe(pipe_info);
}

#if HIF_CE_DEBUG_DATA_BUF
/**
 * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed by
 * the CE descriptors.
 * Allocates HIF_CE_HISTORY_MAX records of CE_DEBUG_MAX_DATA_BUF_SIZE each.
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev =
	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return QDF_STATUS_E_NOMEM;

	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		event->data =
			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
		if (event->data == NULL)
			return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * free_mem_ce_debug_hist_data() - Free mem of the data pointed by
 * the CE descriptors.
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: None
 */
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev =
	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return;

	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		if (event->data != NULL)
			qdf_mem_free(event->data);
		event->data = NULL;
		event = NULL;
	}
}
#endif /* HIF_CE_DEBUG_DATA_BUF */

/*
 * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
 * for being defined here.
 */
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF
/**
 * alloc_mem_ce_debug_history() - Allocate mem for storing CE descriptors
 * @scn: hif scn handle
 * @CE_id: Copy Engine Id
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS alloc_mem_ce_debug_history(struct hif_softc *scn,
						    unsigned int CE_id)
{
	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));

	if (scn->hif_ce_desc_hist.hist_ev[CE_id] == NULL) {
		scn->hif_ce_desc_hist.enable[CE_id] = 0;
		return QDF_STATUS_E_NOMEM;
	} else {
		scn->hif_ce_desc_hist.enable[CE_id] = 1;
		return QDF_STATUS_SUCCESS;
	}
}

/**
 * free_mem_ce_debug_history() - Free mem allocated for storing the CE
 * descriptors.
 * @scn: hif scn handle
 * @CE_id: Copy Engine Id
 *
 * Return: None
 */
static inline void free_mem_ce_debug_history(struct hif_softc *scn,
					     unsigned int CE_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	struct hif_ce_desc_event *hist_ev =
			(struct hif_ce_desc_event *)ce_hist->hist_ev[CE_id];

	if (!hist_ev)
		return;

#if HIF_CE_DEBUG_DATA_BUF
	if (ce_hist->data_enable[CE_id] == 1) {
		ce_hist->data_enable[CE_id] = 0;
		free_mem_ce_debug_hist_data(scn, CE_id);
	}
#endif
	ce_hist->enable[CE_id] = 0;
	qdf_mem_free(ce_hist->hist_ev[CE_id]);
	ce_hist->hist_ev[CE_id] = NULL;
}

/**
 * reset_ce_debug_history() - reset the index and ce id used for dumping the
 * CE records on the console using sysfs.
 * @scn: hif scn handle
 *
 * Return: None
 */
static inline void reset_ce_debug_history(struct hif_softc *scn)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	/* Initialise the CE debug history sysfs interface inputs ce_id and
	 * index. Disable data storing
	 */
	ce_hist->hist_index = 0;
	ce_hist->hist_id = 0;
}
#else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
static inline QDF_STATUS alloc_mem_ce_debug_history(struct hif_softc *scn,
						    unsigned int CE_id)
{
	return QDF_STATUS_SUCCESS;
}

static inline void free_mem_ce_debug_history(struct hif_softc *scn,
					     unsigned int CE_id)
{
}

static inline void reset_ce_debug_history(struct hif_softc *scn)
{
}
#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */

void ce_enable_polling(void *cestate)
{
	struct CE_state *CE_state = (struct CE_state *)cestate;

	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
		CE_state->timer_inited = true;
}

void ce_disable_polling(void *cestate)
{
	struct CE_state *CE_state = (struct CE_state *)cestate;

	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
		CE_state->timer_inited = false;
}
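
/*
 * Polling lifecycle (descriptive note): ce_init() arms poll_timer for CEs
 * created with CE_ATTR_ENABLE_POLL; ce_poll_timeout() then services the
 * engine and re-arms itself every CE_POLL_TIMEOUT ms for as long as
 * timer_inited remains true, i.e. until ce_disable_polling() clears it.
 */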

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 *
 * This should be called during the initialization sequence before
 * interrupts are enabled, so we don't have to worry about thread safety.
 */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr)
{
	struct CE_state *CE_state;
	uint32_t ctrl_addr;
	unsigned int nentries;
	bool malloc_CE_state = false;
	bool malloc_src_ring = false;
	int status;

	QDF_ASSERT(CE_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(CE_id);
	CE_state = scn->ce_id_to_state[CE_id];

	if (!CE_state) {
		CE_state =
		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
		if (!CE_state) {
			HIF_ERROR("%s: CE_state has no mem", __func__);
			return NULL;
		}
		malloc_CE_state = true;
		qdf_spinlock_create(&CE_state->ce_index_lock);

		CE_state->id = CE_id;
		CE_state->ctrl_addr = ctrl_addr;
		CE_state->state = CE_RUNNING;
		CE_state->attr_flags = attr->flags;
	}
	CE_state->scn = scn;

	qdf_atomic_init(&CE_state->rx_pending);
	if (attr == NULL) {
		/* Already initialized; caller wants the handle */
		return (struct CE_handle *)CE_state;
	}

	if (CE_state->src_sz_max)
		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
	else
		CE_state->src_sz_max = attr->src_sz_max;

	ce_init_ce_desc_event_log(scn, CE_id,
				  attr->src_nentries + attr->dest_nentries);

	/* source ring setup */
	nentries = attr->src_nentries;
	if (nentries) {
		struct CE_ring_state *src_ring;

		nentries = roundup_pwr2(nentries);
		if (CE_state->src_ring) {
			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
		} else {
			src_ring = CE_state->src_ring =
				ce_alloc_ring_state(CE_state,
						    CE_RING_SRC,
						    nentries);
			if (!src_ring) {
				/* cannot allocate src ring. If the
				 * CE_state is allocated locally free
				 * CE_state and return error.
				 */
				HIF_ERROR("%s: src ring has no mem", __func__);
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					qdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			}
			/* we can allocate src ring. Mark that the src ring is
			 * allocated locally
			 */
			malloc_src_ring = true;

			/*
			 * Also allocate a shadow src ring in
			 * regular mem to use for faster access.
			 */
			src_ring->shadow_base_unaligned =
				qdf_mem_malloc(nentries *
					       sizeof(struct CE_src_desc) +
					       CE_DESC_RING_ALIGN);
			if (src_ring->shadow_base_unaligned == NULL) {
				HIF_ERROR("%s: src ring no shadow_base mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->shadow_base = (struct CE_src_desc *)
				(((size_t) src_ring->shadow_base_unaligned +
				CE_DESC_RING_ALIGN - 1) &
				 ~(CE_DESC_RING_ALIGN - 1));

			status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
					       src_ring, attr);
			if (status < 0)
				goto error_target_access;

			ce_ring_test_initial_indexes(CE_id, src_ring,
						     "src_ring");
		}
	}

	/* destination ring setup */
	nentries = attr->dest_nentries;
	if (nentries) {
		struct CE_ring_state *dest_ring;

		nentries = roundup_pwr2(nentries);
		if (CE_state->dest_ring) {
			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
		} else {
			dest_ring = CE_state->dest_ring =
				ce_alloc_ring_state(CE_state,
						    CE_RING_DEST,
						    nentries);
			if (!dest_ring) {
				/* cannot allocate dst ring. If the CE_state
				 * or src ring is allocated locally free
				 * CE_state and src ring and return error.
				 */
				HIF_ERROR("%s: dest ring has no mem",
					  __func__);
				goto error_no_dma_mem;
			}

			status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
					       dest_ring, attr);
			if (status < 0)
				goto error_target_access;

			ce_ring_test_initial_indexes(CE_id, dest_ring,
						     "dest_ring");

			/* For srng based target, init status ring here */
			if (ce_srng_based(CE_state->scn)) {
				CE_state->status_ring =
					ce_alloc_ring_state(CE_state,
							    CE_RING_STATUS,
							    nentries);
				if (CE_state->status_ring == NULL) {
					/* Allocation failed. Cleanup. */
					qdf_mem_free(CE_state->dest_ring);
					if (malloc_src_ring) {
						qdf_mem_free
							(CE_state->src_ring);
						CE_state->src_ring = NULL;
						malloc_src_ring = false;
					}
					if (malloc_CE_state) {
						/* allocated CE_state locally */
						scn->ce_id_to_state[CE_id] =
							NULL;
						qdf_mem_free(CE_state);
						malloc_CE_state = false;
					}

					return NULL;
				}

				status = ce_ring_setup(scn, CE_RING_STATUS,
						       CE_id,
						       CE_state->status_ring,
						       attr);
				if (status < 0)
					goto error_target_access;

			}

			/* epping */
			/* poll timer */
			if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) {
				qdf_timer_init(scn->qdf_dev,
					       &CE_state->poll_timer,
					       ce_poll_timeout,
					       CE_state,
					       QDF_TIMER_TYPE_WAKE_APPS);
				ce_enable_polling(CE_state);
				qdf_timer_mod(&CE_state->poll_timer,
					      CE_POLL_TIMEOUT);
			}
		}
	}

	if (!ce_srng_based(scn)) {
		/* Enable CE error interrupts */
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			goto error_target_access;
		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			goto error_target_access;
	}

	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
			ce_oom_recovery, CE_state);

	/* update the htt_data attribute */
	ce_mark_datapath(CE_state);
	scn->ce_id_to_state[CE_id] = CE_state;

	alloc_mem_ce_debug_history(scn, CE_id);

	return (struct CE_handle *)CE_state;

error_target_access:
error_no_dma_mem:
	ce_fini((struct CE_handle *)CE_state);
	return NULL;
}

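/*
 * Typical ce_init() call (hypothetical sketch, assuming attributes taken
 * from the host CE configuration; not part of this file's logic):
 *
 *	struct CE_attr *attr = &hif_state->host_ce_config[CE_id];
 *	struct CE_handle *ce_hdl = ce_init(scn, CE_id, attr);
 *
 *	if (!ce_hdl)
 *		goto fail;	// ce_init() has already logged the reason
 */
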
#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_enable_fastpath() - update that we have enabled fastpath mode
 * @hif_ctx: HIF context
 *
 * For use in data path
 *
 * Return: void
 */
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (ce_srng_based(scn)) {
		HIF_INFO("%s, srng rings do not support fastpath", __func__);
		return;
	}
	HIF_DBG("%s, Enabling fastpath mode", __func__);
	scn->fastpath_mode_on = true;
}
1520
1521/**
1522 * hif_is_fastpath_mode_enabled - API to query if fasthpath mode is enabled
1523 * @hif_ctx: HIF Context
1524 *
1525 * For use in data path to skip HTC
1526 *
1527 * Return: bool
1528 */
1529bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
1530{
1531 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1532
1533 return scn->fastpath_mode_on;
1534}
1535
Nandha Kishore Easwaran7cdaae22018-07-30 15:02:51 +05301536/**
1537 * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs
1538 * @hif_ctx: HIF Context
1539 *
1540 * API to check if polling is enabled on all CEs. Returns true when polling
1541 * is enabled on all CEs.
1542 *
1543 * Return: bool
1544 */
Balamurugan Mahalingam3ab36332018-01-29 19:15:02 +05301545bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
1546{
1547 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Nandha Kishore Easwaran7cdaae22018-07-30 15:02:51 +05301548 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1549 struct CE_attr *attr;
1550 int id;
Balamurugan Mahalingam3ab36332018-01-29 19:15:02 +05301551
Nandha Kishore Easwaran7cdaae22018-07-30 15:02:51 +05301552 for (id = 0; id < scn->ce_count; id++) {
1553 attr = &hif_state->host_ce_config[id];
1554 if (attr && (attr->dest_nentries) &&
1555 !(attr->flags & CE_ATTR_ENABLE_POLL))
1556 return false;
1557 }
1558 return true;
Balamurugan Mahalingam3ab36332018-01-29 19:15:02 +05301559}
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05301560qdf_export_symbol(hif_is_polled_mode_enabled);
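
/*
 * Illustrative sketch, compiled out with #if 0: hif_is_polled_mode_enabled()
 * reports true only when every CE with a destination ring also carries
 * CE_ATTR_ENABLE_POLL, e.g. a hypothetical host CE attribute like this one.
 */
#if 0
static struct CE_attr example_polled_rx_attr = {
	.flags = CE_ATTR_ENABLE_POLL,
	.src_nentries = 0,
	.src_sz_max = 2048,
	.dest_nentries = 512,
};
#endif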
1561
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001562/**
1563 * hif_get_ce_handle - API to get CE handle for FastPath mode
1564 * @hif_ctx: HIF Context
1565 * @id: CopyEngine Id
1566 *
1567 * API to return CE handle for fastpath mode
1568 *
1569 * Return: CE handle for the given copy engine id
1570 */
1571void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
1572{
1573 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1574
1575 return scn->ce_id_to_state[id];
1576}
1577
1578/**
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001579 * ce_h2t_tx_ce_cleanup() - placeholder function for H2T CE cleanup.
1580 * No processing is required inside this function.
1581 * @ce_hdl: copy engine handle
1582 * Using an assert, this function makes sure that
1583 * the TX CE has been processed completely.
Houston Hoffman9a831ef2015-09-03 14:42:40 -07001584 *
1585 * This is called while dismantling CE structures. No other thread
Jeff Johnson1002ca52018-05-12 11:29:24 -07001586 * should be using these structures while dismantling is occurring,
Houston Hoffman9a831ef2015-09-03 14:42:40 -07001587 * therefore no locking is needed.
1588 *
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001589 * Return: none
1590 */
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001591void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001592{
1593 struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1594 struct CE_ring_state *src_ring = ce_state->src_ring;
Komal Seelam644263d2016-02-22 20:45:49 +05301595 struct hif_softc *sc = ce_state->scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001596 uint32_t sw_index, write_index;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001597
Houston Hoffman85925072016-05-06 17:02:18 -07001598 if (hif_is_nss_wifi_enabled(sc))
1599 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001600
Houston Hoffmanc7d54292016-04-13 18:55:37 -07001601 if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
Srinivas Girigowda6e0cfd92017-03-09 15:49:59 -08001602 HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
Houston Hoffman85925072016-05-06 17:02:18 -07001603 __func__, __LINE__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001604 sw_index = src_ring->sw_index;
1605		write_index = src_ring->write_index;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001606
1607 /* At this point Tx CE should be clean */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301608 qdf_assert_always(sw_index == write_index);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001609 }
1610}
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001611
1612/**
1613 * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
1614 * @ce_hdl: Handle to CE
1615 *
1616 * These buffers are never allocated on the fly, but
1617 * are allocated only once during HIF start and freed
1618 * only once during HIF stop.
1619 * NOTE:
1620 * The assumption here is there is no in-flight DMA in progress
1621 * currently, so that buffers can be freed up safely.
1622 *
1623 * Return: NONE
1624 */
1625void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
1626{
1627 struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1628 struct CE_ring_state *dst_ring = ce_state->dest_ring;
1629 qdf_nbuf_t nbuf;
1630 int i;
1631
Houston Hoffman7fe51b12016-11-14 18:01:05 -08001632 if (ce_state->scn->fastpath_mode_on == false)
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001633 return;
Houston Hoffman7fe51b12016-11-14 18:01:05 -08001634
1635 if (!ce_state->htt_rx_data)
1636 return;
1637
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001638 /*
1639 * When fastpath_mode is on, datapath CEs are kept completely full:
1640 * unlike other CEs, no blank space is left to distinguish between an
1641 * empty queue and a full queue. So free all the entries.
1643 */
1644 for (i = 0; i < dst_ring->nentries; i++) {
1645 nbuf = dst_ring->per_transfer_context[i];
1646
1647 /*
1648 * The reasons for doing this check are:
1649 * 1) Protect against calling cleanup before allocating buffers
1650 * 2) In a corner case, FASTPATH_mode_on may be set, but we
1651 * could have a partially filled ring, because of a memory
1652 * allocation failure in the middle of allocating ring.
1653 * This check accounts for that case, checking
1654 * fastpath_mode_on flag or started flag would not have
1655 * covered that case. This is not in performance path,
1656 * so OK to do this.
1657 */
Houston Hoffman1c728302017-03-10 16:58:49 -08001658 if (nbuf) {
1659 qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
1660 QDF_DMA_FROM_DEVICE);
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001661 qdf_nbuf_free(nbuf);
Houston Hoffman1c728302017-03-10 16:58:49 -08001662 }
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001663 }
1664}
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001665
1666/**
1667 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
1668 * @scn: HIF handle
1669 *
1670 * Datapath Rx CEs are a special case, where all the message buffers are
1671 * reused. Hence all the entries in the pipe have to be posted, even at the
1672 * beginning, unlike other CE pipes where one less than dest_nentries is
1673 * filled in the beginning.
1674 *
1675 * Return: None
1676 */
1677static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1678{
1679 int pipe_num;
1680 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1681
1682 if (scn->fastpath_mode_on == false)
1683 return;
1684
1685 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1686 struct HIF_CE_pipe_info *pipe_info =
1687 &hif_state->pipe_info[pipe_num];
1688 struct CE_state *ce_state =
1689 scn->ce_id_to_state[pipe_info->pipe_num];
1690
1691 if (ce_state->htt_rx_data)
1692 atomic_inc(&pipe_info->recv_bufs_needed);
1693 }
1694}
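
/*
 * Illustrative arithmetic, compiled out with #if 0: for a fastpath Rx CE
 * with dest_nentries = N all N buffers are posted, while other pipes post
 * N - 1; this hypothetical helper mirrors that rule.
 */
#if 0
static int example_initial_rx_bufs(struct CE_attr *attr, bool fastpath_rx)
{
	return fastpath_rx ? attr->dest_nentries :
			     attr->dest_nentries - 1;
}
#endif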
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001695#else
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001696static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001697{
1698}
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001699
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001700static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001701{
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001702 return false;
1703}
1704
1705static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
1706{
1707 return false;
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001708}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001709#endif /* WLAN_FEATURE_FASTPATH */
1710
1711void ce_fini(struct CE_handle *copyeng)
1712{
1713 struct CE_state *CE_state = (struct CE_state *)copyeng;
1714 unsigned int CE_id = CE_state->id;
Komal Seelam644263d2016-02-22 20:45:49 +05301715 struct hif_softc *scn = CE_state->scn;
Kiran Venkatappaae1a3702017-12-29 21:08:10 +05301716 uint32_t desc_size;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001717
Balamurugan Mahalingamf6d30352018-01-31 16:17:24 +05301718 bool inited = CE_state->timer_inited;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001719 CE_state->state = CE_UNUSED;
1720 scn->ce_id_to_state[CE_id] = NULL;
Balamurugan Mahalingamf6d30352018-01-31 16:17:24 +05301721 /* Set the flag to false first to stop processing in ce_poll_timeout */
Nandha Kishore Easwaran7cdaae22018-07-30 15:02:51 +05301722 ce_disable_polling(CE_state);
1723
Dhanashri Atre991ee4d2017-05-03 19:03:10 -07001724 qdf_lro_deinit(CE_state->lro_data);
1725
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001726 if (CE_state->src_ring) {
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001727 /* Cleanup the datapath Tx ring */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001728 ce_h2t_tx_ce_cleanup(copyeng);
1729
Kiran Venkatappaae1a3702017-12-29 21:08:10 +05301730 desc_size = ce_get_desc_size(scn, CE_RING_SRC);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001731 if (CE_state->src_ring->shadow_base_unaligned)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301732 qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001733 if (CE_state->src_ring->base_addr_owner_space_unaligned)
Sravan Kumar Kairam58e0adf2018-02-27 18:37:40 +05301734 ce_free_desc_ring(scn, CE_state->id,
1735 CE_state->src_ring,
1736 desc_size);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301737 qdf_mem_free(CE_state->src_ring);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001738 }
1739 if (CE_state->dest_ring) {
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001740 /* Cleanup the datapath Rx ring */
1741 ce_t2h_msg_ce_cleanup(copyeng);
1742
Kiran Venkatappaae1a3702017-12-29 21:08:10 +05301743 desc_size = ce_get_desc_size(scn, CE_RING_DEST);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001744 if (CE_state->dest_ring->base_addr_owner_space_unaligned)
Sravan Kumar Kairam58e0adf2018-02-27 18:37:40 +05301745 ce_free_desc_ring(scn, CE_state->id,
1746 CE_state->dest_ring,
1747 desc_size);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301748 qdf_mem_free(CE_state->dest_ring);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001749
1750 /* epping */
Balamurugan Mahalingamf6d30352018-01-31 16:17:24 +05301751 if (inited) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301752 qdf_timer_free(&CE_state->poll_timer);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001753 }
1754 }
Houston Hoffman31b25ec2016-09-19 13:12:30 -07001755 if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301756 /* Cleanup the datapath Tx ring */
1757 ce_h2t_tx_ce_cleanup(copyeng);
1758
1759 if (CE_state->status_ring->shadow_base_unaligned)
1760 qdf_mem_free(
1761 CE_state->status_ring->shadow_base_unaligned);
1762
Kiran Venkatappaae1a3702017-12-29 21:08:10 +05301763 desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301764 if (CE_state->status_ring->base_addr_owner_space_unaligned)
Sravan Kumar Kairam58e0adf2018-02-27 18:37:40 +05301765 ce_free_desc_ring(scn, CE_state->id,
1766 CE_state->status_ring,
1767 desc_size);
Kiran Venkatappaf41ef2e2016-09-05 10:59:58 +05301768 qdf_mem_free(CE_state->status_ring);
1769 }
Houston Hoffman03f46572016-12-12 12:53:56 -08001770
c_cgodavfda96ad2017-09-07 16:16:00 +05301771 free_mem_ce_debug_history(scn, CE_id);
1772 reset_ce_debug_history(scn);
1773 ce_deinit_ce_desc_event_log(scn, CE_id);
1774
Houston Hoffman03f46572016-12-12 12:53:56 -08001775 qdf_spinlock_destroy(&CE_state->ce_index_lock);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301776 qdf_mem_free(CE_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001777}
1778
Komal Seelam5584a7c2016-02-24 19:22:48 +05301779void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001780{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301781 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001782
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301783 qdf_mem_zero(&hif_state->msg_callbacks_pending,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001784 sizeof(hif_state->msg_callbacks_pending));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301785 qdf_mem_zero(&hif_state->msg_callbacks_current,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001786 sizeof(hif_state->msg_callbacks_current));
1787}
1788
1789/* Send the first nbytes bytes of the buffer */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301790QDF_STATUS
Komal Seelam5584a7c2016-02-24 19:22:48 +05301791hif_send_head(struct hif_opaque_softc *hif_ctx,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001792 uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301793 qdf_nbuf_t nbuf, unsigned int data_attr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001794{
Komal Seelam644263d2016-02-22 20:45:49 +05301795 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05301796 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001797 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1798 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
1799 int bytes = nbytes, nfrags = 0;
1800 struct ce_sendlist sendlist;
1801 int status, i = 0;
1802 unsigned int mux_id = 0;
1803
Santosh Anbudbfae9b2018-07-12 15:40:49 +05301804 if (nbytes > qdf_nbuf_len(nbuf)) {
1805 HIF_ERROR("%s: nbytes:%d nbuf_len:%d", __func__, nbytes,
1806 (uint32_t)qdf_nbuf_len(nbuf));
1807 QDF_ASSERT(0);
1808 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001809
1810 transfer_id =
1811 (mux_id & MUX_ID_MASK) |
1812 (transfer_id & TRANSACTION_ID_MASK);
1813 data_attr &= DESC_DATA_FLAG_MASK;
1814 /*
1815 * The common case involves sending multiple fragments within a
1816 * single download (the tx descriptor and the tx frame header).
1817 * So, optimize for the case of multiple fragments by not even
1818 * checking whether it's necessary to use a sendlist.
1819 * The overhead of using a sendlist for a single buffer download
1820 * is not a big deal, since it happens rarely (for WMI messages).
1821 */
1822 ce_sendlist_init(&sendlist);
1823 do {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301824 qdf_dma_addr_t frag_paddr;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001825 int frag_bytes;
1826
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301827 frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
1828 frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001829 /*
1830 * Clear the packet offset for all but the first CE desc.
1831 */
1832 if (i++ > 0)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301833 data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001834
1835 status = ce_sendlist_buf_add(&sendlist, frag_paddr,
1836 frag_bytes >
1837 bytes ? bytes : frag_bytes,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301838 qdf_nbuf_get_frag_is_wordstream
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001839 (nbuf,
1840 nfrags) ? 0 :
1841 CE_SEND_FLAG_SWAP_DISABLE,
1842 data_attr);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301843 if (status != QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001844 HIF_ERROR("%s: error, frag_num %d larger than limit",
1845 __func__, nfrags);
1846 return status;
1847 }
1848 bytes -= frag_bytes;
1849 nfrags++;
1850 } while (bytes > 0);
1851
1852 /* Make sure we have resources to handle this request */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301853 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001854 if (pipe_info->num_sends_allowed < nfrags) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301855 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001856 ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301857 return QDF_STATUS_E_RESOURCES;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001858 }
1859 pipe_info->num_sends_allowed -= nfrags;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301860 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001861
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301862 if (qdf_unlikely(ce_hdl == NULL)) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001863 HIF_ERROR("%s: error CE handle is null", __func__);
1864		return QDF_STATUS_E_FAILURE;
1865 }
1866
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301867 QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301868 DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
Nandha Kishore Easwarane43583f2017-05-15 21:01:13 +05301869 QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
1870 sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001871 status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301872 QDF_ASSERT(status == QDF_STATUS_SUCCESS);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001873
1874 return status;
1875}
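
/*
 * Illustrative usage sketch, compiled out with #if 0: pushing a whole nbuf
 * down a pipe with hif_send_head(). The pipe id and transfer id used here
 * are hypothetical.
 */
#if 0
static QDF_STATUS example_send(struct hif_opaque_softc *hif_ctx,
			       qdf_nbuf_t nbuf)
{
	uint8_t pipe = 4;		/* hypothetical H2T data pipe */
	unsigned int transfer_id = 0;

	return hif_send_head(hif_ctx, pipe, transfer_id,
			     qdf_nbuf_len(nbuf), nbuf, 0);
}
#endif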
1876
Komal Seelam5584a7c2016-02-24 19:22:48 +05301877void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
1878 int force)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001879{
Komal Seelam644263d2016-02-22 20:45:49 +05301880 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05301881 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Komal Seelam644263d2016-02-22 20:45:49 +05301882
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001883 if (!force) {
1884 int resources;
1885 /*
1886 * Decide whether to actually poll for completions, or just
1887 * wait for a later chance. If there seem to be plenty of
1888 * resources left, then just wait, since checking involves
1889 * reading a CE register, which is a relatively expensive
1890 * operation.
1891 */
Komal Seelam644263d2016-02-22 20:45:49 +05301892 resources = hif_get_free_queue_number(hif_ctx, pipe);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001893 /*
1894 * If at least 50% of the total resources are still available,
1895 * don't bother checking again yet.
1896 */
Manikandan Mohanafd6e882017-04-07 17:46:41 -07001897 if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
1898 1))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001899 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001900 }
Houston Hoffman56e0d702016-05-05 17:48:06 -07001901#if ATH_11AC_TXCOMPACT
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001902 ce_per_engine_servicereap(scn, pipe);
1903#else
1904 ce_per_engine_service(scn, pipe);
1905#endif
1906}
1907
Komal Seelam5584a7c2016-02-24 19:22:48 +05301908uint16_t
1909hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001910{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301911 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001912 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1913 uint16_t rv;
1914
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301915 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001916 rv = pipe_info->num_sends_allowed;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301917 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001918 return rv;
1919}
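
/*
 * Illustrative sketch, compiled out with #if 0: a hypothetical back-pressure
 * check a caller could run before queueing nfrags fragments.
 */
#if 0
static bool example_can_send(struct hif_opaque_softc *hif_ctx,
			     uint8_t pipe, uint16_t nfrags)
{
	return hif_get_free_queue_number(hif_ctx, pipe) >= nfrags;
}
#endif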
1920
1921/* Called by lower (CE) layer when a send to Target completes. */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001922static void
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001923hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301924 void *transfer_context, qdf_dma_addr_t CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001925 unsigned int nbytes, unsigned int transfer_id,
1926 unsigned int sw_index, unsigned int hw_index,
1927 unsigned int toeplitz_hash_result)
1928{
1929 struct HIF_CE_pipe_info *pipe_info =
1930 (struct HIF_CE_pipe_info *)ce_context;
1931 struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
Komal Seelam644263d2016-02-22 20:45:49 +05301932 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001933 unsigned int sw_idx = sw_index, hw_idx = hw_index;
Houston Hoffman85118512015-09-28 14:17:11 -07001934 struct hif_msg_callbacks *msg_callbacks =
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05301935 &pipe_info->pipe_callbacks;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001936
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001937 do {
1938 /*
Houston Hoffman85118512015-09-28 14:17:11 -07001939 * The upper layer callback will be triggered
1940 * when the last fragment is completed.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001941 */
Houston Hoffman85118512015-09-28 14:17:11 -07001942 if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
Houston Hoffman1c728302017-03-10 16:58:49 -08001943 if (scn->target_status == TARGET_STATUS_RESET) {
1944
1945 qdf_nbuf_unmap_single(scn->qdf_dev,
1946 transfer_context,
1947 QDF_DMA_TO_DEVICE);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301948 qdf_nbuf_free(transfer_context);
Houston Hoffman1c728302017-03-10 16:58:49 -08001949 } else
Houston Hoffman49794a32015-12-21 12:14:56 -08001950 msg_callbacks->txCompletionHandler(
Houston Hoffman85118512015-09-28 14:17:11 -07001951 msg_callbacks->Context,
1952 transfer_context, transfer_id,
1953 toeplitz_hash_result);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001954 }
1955
Pavankumar Nandeshwar5bdd94b2018-09-05 18:16:21 +05301956 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
Houston Hoffman85118512015-09-28 14:17:11 -07001957 pipe_info->num_sends_allowed++;
Pavankumar Nandeshwar5bdd94b2018-09-05 18:16:21 +05301958 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001959 } while (ce_completed_send_next(copyeng,
1960 &ce_context, &transfer_context,
1961 &CE_data, &nbytes, &transfer_id,
1962 &sw_idx, &hw_idx,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301963 &toeplitz_hash_result) == QDF_STATUS_SUCCESS);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001964}
1965
Houston Hoffman910c6262015-09-28 12:56:25 -07001966/**
1967 * hif_ce_do_recv(): send message from copy engine to upper layers
1968 * @msg_callbacks: structure containing callback and callback context
1969 * @netbuff: skb containing message
1970 * @nbytes: number of bytes in the message
1971 * @pipe_info: used for the pipe_number info
1972 *
Jeff Johnsondc9c5592018-05-06 15:40:42 -07001973 * Checks the packet length, configures the length in the netbuff,
Houston Hoffman910c6262015-09-28 12:56:25 -07001974 * and calls the upper layer callback.
1975 *
1976 * return: None
1977 */
1978static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301979 qdf_nbuf_t netbuf, int nbytes,
Houston Hoffman910c6262015-09-28 12:56:25 -07001980 struct HIF_CE_pipe_info *pipe_info) {
1981 if (nbytes <= pipe_info->buf_sz) {
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301982 qdf_nbuf_set_pktlen(netbuf, nbytes);
Houston Hoffman910c6262015-09-28 12:56:25 -07001983 msg_callbacks->
1984 rxCompletionHandler(msg_callbacks->Context,
1985 netbuf, pipe_info->pipe_num);
1986 } else {
Jeff Johnsonb9450212017-09-18 10:12:38 -07001987 HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d",
Houston Hoffman910c6262015-09-28 12:56:25 -07001988 __func__, netbuf, nbytes);
Houston Hoffman1c728302017-03-10 16:58:49 -08001989
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301990 qdf_nbuf_free(netbuf);
Houston Hoffman910c6262015-09-28 12:56:25 -07001991 }
1992}
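
/*
 * Illustrative sketch, compiled out with #if 0: the length check above in
 * hypothetical form - a frame longer than the pipe's buffer size is dropped
 * rather than delivered to the upper layer.
 */
#if 0
static bool example_rx_len_ok(struct HIF_CE_pipe_info *pipe_info, int nbytes)
{
	return nbytes <= pipe_info->buf_sz;
}
#endif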
1993
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001994/* Called by lower (CE) layer when data is received from the Target. */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07001995static void
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001996hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301997 void *transfer_context, qdf_dma_addr_t CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001998 unsigned int nbytes, unsigned int transfer_id,
1999 unsigned int flags)
2000{
2001 struct HIF_CE_pipe_info *pipe_info =
2002 (struct HIF_CE_pipe_info *)ce_context;
2003 struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
Houston Hoffman18c7fc52015-09-02 11:44:42 -07002004 struct CE_state *ce_state = (struct CE_state *) copyeng;
Komal Seelam644263d2016-02-22 20:45:49 +05302005 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffmane02e12d2016-03-14 21:11:36 -07002006#ifdef HIF_PCI
2007 struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
2008#endif
Houston Hoffman910c6262015-09-28 12:56:25 -07002009 struct hif_msg_callbacks *msg_callbacks =
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05302010 &pipe_info->pipe_callbacks;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002011
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002012 do {
Houston Hoffmane02e12d2016-03-14 21:11:36 -07002013#ifdef HIF_PCI
2014 hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
2015#endif
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302016 qdf_nbuf_unmap_single(scn->qdf_dev,
2017 (qdf_nbuf_t) transfer_context,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302018 QDF_DMA_FROM_DEVICE);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002019
Houston Hoffman910c6262015-09-28 12:56:25 -07002020 atomic_inc(&pipe_info->recv_bufs_needed);
2021 hif_post_recv_buffers_for_pipe(pipe_info);
Komal Seelam6ee55902016-04-11 17:11:07 +05302022 if (scn->target_status == TARGET_STATUS_RESET)
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302023 qdf_nbuf_free(transfer_context);
Houston Hoffman49794a32015-12-21 12:14:56 -08002024 else
2025 hif_ce_do_recv(msg_callbacks, transfer_context,
Houston Hoffman9c0f80a2015-09-28 18:36:36 -07002026 nbytes, pipe_info);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002027
2028		/* Set up force_break flag if the number of receives reaches
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002029 * MAX_NUM_OF_RECEIVES
2030 */
Houston Hoffman5bf441a2015-09-02 11:52:10 -07002031 ce_state->receive_count++;
Houston Hoffman05652722016-04-29 16:58:59 -07002032 if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
Houston Hoffman18c7fc52015-09-02 11:44:42 -07002033 ce_state->force_break = 1;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002034 break;
2035 }
2036 } while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
2037 &CE_data, &nbytes, &transfer_id,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302038 &flags) == QDF_STATUS_SUCCESS);
Houston Hoffmanf4607852015-12-17 17:14:40 -08002039
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002040}
2041
2042/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
2043
2044void
Komal Seelam5584a7c2016-02-24 19:22:48 +05302045hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002046 struct hif_msg_callbacks *callbacks)
2047{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302048 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002049
2050#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
2051 spin_lock_init(&pcie_access_log_lock);
2052#endif
2053 /* Save callbacks for later installation */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302054 qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002055 sizeof(hif_state->msg_callbacks_pending));
2056
2057}
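
/*
 * Illustrative usage sketch, compiled out with #if 0: a hypothetical HTC-side
 * caller registering its completion handlers before hif_start(); the handler
 * implementations are assumed to be supplied by the caller.
 */
#if 0
static void example_register_callbacks(struct hif_opaque_softc *hif_ctx,
				       struct hif_msg_callbacks *cbs)
{
	hif_post_init(hif_ctx, NULL, cbs);
}
#endif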
2058
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002059static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002060{
2061 struct CE_handle *ce_diag = hif_state->ce_diag;
2062 int pipe_num;
Komal Seelam644263d2016-02-22 20:45:49 +05302063 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07002064 struct hif_msg_callbacks *hif_msg_callbacks =
2065 &hif_state->msg_callbacks_current;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002066
2067 /* daemonize("hif_compl_thread"); */
2068
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002069 if (scn->ce_count == 0) {
Houston Hoffmanc50572b2016-06-08 19:49:46 -07002070 HIF_ERROR("%s: Invalid ce_count", __func__);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002071 return -EINVAL;
2072 }
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07002073
2074 if (!hif_msg_callbacks ||
2075 !hif_msg_callbacks->rxCompletionHandler ||
2076 !hif_msg_callbacks->txCompletionHandler) {
2077 HIF_ERROR("%s: no completion handler registered", __func__);
2078 return -EFAULT;
2079 }
2080
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002081 A_TARGET_ACCESS_LIKELY(scn);
2082 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2083 struct CE_attr attr;
2084 struct HIF_CE_pipe_info *pipe_info;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002085
2086 pipe_info = &hif_state->pipe_info[pipe_num];
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002087 if (pipe_info->ce_hdl == ce_diag)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002088 continue; /* Handle Diagnostic CE specially */
Venkateswara Swamy Bandaru13164aa2016-09-20 20:24:54 +05302089 attr = hif_state->host_ce_config[pipe_num];
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002090 if (attr.src_nentries) {
2091 /* pipe used to send to target */
Jeff Johnsonb9450212017-09-18 10:12:38 -07002092 HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK",
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002093 __func__, pipe_num, pipe_info);
2094 ce_send_cb_register(pipe_info->ce_hdl,
2095 hif_pci_ce_send_done, pipe_info,
2096 attr.flags & CE_ATTR_DISABLE_INTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002097 pipe_info->num_sends_allowed = attr.src_nentries - 1;
2098 }
2099 if (attr.dest_nentries) {
2100 /* pipe used to receive from target */
2101 ce_recv_cb_register(pipe_info->ce_hdl,
2102 hif_pci_ce_recv_data, pipe_info,
2103 attr.flags & CE_ATTR_DISABLE_INTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002104 }
Houston Hoffman6666df72015-11-30 16:48:35 -08002105
2106 if (attr.src_nentries)
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302107 qdf_spinlock_create(&pipe_info->completion_freeq_lock);
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05302108
2109 qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
2110 sizeof(pipe_info->pipe_callbacks));
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002111 }
Houston Hoffman6666df72015-11-30 16:48:35 -08002112
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002113 A_TARGET_ACCESS_UNLIKELY(scn);
2114 return 0;
2115}
2116
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002117/*
2118 * Install pending msg callbacks.
2119 *
2120 * TBDXXX: This hack is needed because upper layers install msg callbacks
2121 * for use with HTC before BMI is done; yet this HIF implementation
2122 * needs to continue to use BMI msg callbacks. Really, upper layers
2123 * should not register HTC callbacks until AFTER BMI phase.
2124 */
Komal Seelam644263d2016-02-22 20:45:49 +05302125static void hif_msg_callbacks_install(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002126{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302127 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002128
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302129 qdf_mem_copy(&hif_state->msg_callbacks_current,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002130 &hif_state->msg_callbacks_pending,
2131 sizeof(hif_state->msg_callbacks_pending));
2132}
2133
Komal Seelam5584a7c2016-02-24 19:22:48 +05302134void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
2135 uint8_t *DLPipe)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002136{
2137 int ul_is_polled, dl_is_polled;
2138
Komal Seelam644263d2016-02-22 20:45:49 +05302139 (void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002140 ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
2141}
2142
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002143/**
2144 * hif_dump_pipe_debug_count() - Log error count
Komal Seelam644263d2016-02-22 20:45:49 +05302145 * @scn: hif_softc pointer.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002146 *
2147 * Output the pipe error counts of each pipe to log file
2148 *
2149 * Return: N/A
2150 */
Komal Seelam644263d2016-02-22 20:45:49 +05302151void hif_dump_pipe_debug_count(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002152{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302153 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002154 int pipe_num;
2155
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002156 if (hif_state == NULL) {
2157 HIF_ERROR("%s hif_state is NULL", __func__);
2158 return;
2159 }
2160 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2161 struct HIF_CE_pipe_info *pipe_info;
2162
2163 pipe_info = &hif_state->pipe_info[pipe_num];
2164
2165 if (pipe_info->nbuf_alloc_err_count > 0 ||
2166 pipe_info->nbuf_dma_err_count > 0 ||
2167 pipe_info->nbuf_ce_enqueue_err_count)
2168 HIF_ERROR(
2169 "%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
2170 __func__, pipe_info->pipe_num,
2171 atomic_read(&pipe_info->recv_bufs_needed),
2172 pipe_info->nbuf_alloc_err_count,
2173 pipe_info->nbuf_dma_err_count,
2174 pipe_info->nbuf_ce_enqueue_err_count);
2175 }
2176}
2177
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002178static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
2179 void *nbuf, uint32_t *error_cnt,
2180 enum hif_ce_event_type failure_type,
2181 const char *failure_type_string)
2182{
2183 int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
2184 struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
2185 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2186 int ce_id = CE_state->id;
2187 uint32_t error_cnt_tmp;
2188
2189 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2190 error_cnt_tmp = ++(*error_cnt);
2191 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Himanshu Agarwal38cea4a2017-03-30 19:02:52 +05302192 HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002193 __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
2194 failure_type_string);
2195 hif_record_ce_desc_event(scn, ce_id, failure_type,
c_cgodavfda96ad2017-09-07 16:16:00 +05302196 NULL, nbuf, bufs_needed_tmp, 0);
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002197 /* if we fail to allocate the last buffer for an rx pipe,
2198 * there is no trigger to refill the ce and we will
2199 * eventually crash
2200 */
Himanshu Agarwalbedeed92017-03-21 14:05:10 +05302201 if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08002202 qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
Himanshu Agarwalbedeed92017-03-21 14:05:10 +05302203
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002204}
2205
2206
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08002207
2208
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302209QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002210{
2211 struct CE_handle *ce_hdl;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302212 qdf_size_t buf_sz;
Komal Seelam644263d2016-02-22 20:45:49 +05302213 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302214 QDF_STATUS status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002215 uint32_t bufs_posted = 0;
2216
2217 buf_sz = pipe_info->buf_sz;
2218 if (buf_sz == 0) {
2219 /* Unused Copy Engine */
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302220 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002221 }
2222
2223 ce_hdl = pipe_info->ce_hdl;
2224
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302225 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002226 while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302227 qdf_dma_addr_t CE_data; /* CE space buffer address */
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302228 qdf_nbuf_t nbuf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002229
2230 atomic_dec(&pipe_info->recv_bufs_needed);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302231 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002232
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302233 nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002234 if (!nbuf) {
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002235 hif_post_recv_buffers_failure(pipe_info, nbuf,
2236 &pipe_info->nbuf_alloc_err_count,
2237 HIF_RX_NBUF_ALLOC_FAILURE,
2238 "HIF_RX_NBUF_ALLOC_FAILURE");
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302239 return QDF_STATUS_E_NOMEM;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002240 }
2241
2242 /*
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302243 * qdf_nbuf_peek_header(nbuf, &data, &unused);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002244 * CE_data = dma_map_single(dev, data, buf_sz, );
2245 * DMA_FROM_DEVICE);
2246 */
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302247 status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302248 QDF_DMA_FROM_DEVICE);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002249
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302250 if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002251 hif_post_recv_buffers_failure(pipe_info, nbuf,
2252 &pipe_info->nbuf_dma_err_count,
2253 HIF_RX_NBUF_MAP_FAILURE,
2254 "HIF_RX_NBUF_MAP_FAILURE");
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302255 qdf_nbuf_free(nbuf);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302256 return status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002257 }
2258
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302259 CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002260
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302261 qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002262 buf_sz, DMA_FROM_DEVICE);
2263 status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302264 if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002265 hif_post_recv_buffers_failure(pipe_info, nbuf,
2266 &pipe_info->nbuf_ce_enqueue_err_count,
2267 HIF_RX_NBUF_ENQUEUE_FAILURE,
2268 "HIF_RX_NBUF_ENQUEUE_FAILURE");
2269
Govind Singh4fcafd42016-08-08 12:37:31 +05302270 qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
2271 QDF_DMA_FROM_DEVICE);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302272 qdf_nbuf_free(nbuf);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302273 return status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002274 }
2275
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302276 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002277 bufs_posted++;
2278 }
2279 pipe_info->nbuf_alloc_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07002280 (pipe_info->nbuf_alloc_err_count > bufs_posted) ?
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002281 pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
2282 pipe_info->nbuf_dma_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07002283 (pipe_info->nbuf_dma_err_count > bufs_posted) ?
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002284 pipe_info->nbuf_dma_err_count - bufs_posted : 0;
2285 pipe_info->nbuf_ce_enqueue_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07002286 (pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
Houston Hoffmanc0c00a22017-02-24 17:37:46 -08002287 pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002288
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302289 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002290
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302291 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002292}
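
/*
 * Illustrative sketch, compiled out with #if 0: the error counters above are
 * reduced by the number of successfully posted buffers, saturating at zero,
 * equivalent to this hypothetical helper.
 */
#if 0
static uint32_t example_saturating_sub(uint32_t err_cnt, uint32_t posted)
{
	return err_cnt > posted ? err_cnt - posted : 0;
}
#endif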
2293
2294/*
2295 * Try to post all desired receive buffers for all pipes.
Govind Singhcaa850e2017-04-20 16:41:36 +05302296 * Returns QDF_STATUS_SUCCESS for a non-fastpath rx copy engine, as
2297 * oom_allocation_work will be scheduled to recover any
2298 * failures, and an error status if unable to completely replenish
2299 * receive buffers for a fastpath rx copy engine.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002300 */
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302301QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002302{
Komal Seelam02cf2f82016-02-22 20:44:25 +05302303 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302304 int pipe_num;
Aditya Sathish61f7fa32018-03-27 17:16:33 +05302305 struct CE_state *ce_state = NULL;
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302306 QDF_STATUS qdf_status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002307
2308 A_TARGET_ACCESS_LIKELY(scn);
2309 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2310 struct HIF_CE_pipe_info *pipe_info;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002311
Houston Hoffman85925072016-05-06 17:02:18 -07002312 ce_state = scn->ce_id_to_state[pipe_num];
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002313 pipe_info = &hif_state->pipe_info[pipe_num];
Houston Hoffman85925072016-05-06 17:02:18 -07002314
2315 if (hif_is_nss_wifi_enabled(scn) &&
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002316 ce_state && (ce_state->htt_rx_data))
Houston Hoffman85925072016-05-06 17:02:18 -07002317 continue;
Houston Hoffman85925072016-05-06 17:02:18 -07002318
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302319 qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
Aditya Sathish61f7fa32018-03-27 17:16:33 +05302320 if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state &&
Govind Singhcaa850e2017-04-20 16:41:36 +05302321 ce_state->htt_rx_data &&
2322 scn->fastpath_mode_on) {
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302323 A_TARGET_ACCESS_UNLIKELY(scn);
2324 return qdf_status;
Govind Singhcaa850e2017-04-20 16:41:36 +05302325 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002326 }
2327
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002328 A_TARGET_ACCESS_UNLIKELY(scn);
2329
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302330 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002331}
2332
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302333QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002334{
Komal Seelam644263d2016-02-22 20:45:49 +05302335 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05302336 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302337 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002338
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07002339 hif_update_fastpath_recv_bufs_cnt(scn);
2340
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07002341 hif_msg_callbacks_install(scn);
2342
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002343 if (hif_completion_thread_startup(hif_state))
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302344 return QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002345
Houston Hoffman271951f2016-11-12 15:24:27 -08002346 /* enable buffer cleanup */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002347 hif_state->started = true;
2348
Houston Hoffman271951f2016-11-12 15:24:27 -08002349 /* Post buffers once to start things off. */
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302350 qdf_status = hif_post_recv_buffers(scn);
2351 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
Houston Hoffman271951f2016-11-12 15:24:27 -08002352 /* cleanup is done in hif_ce_disable */
2353 HIF_ERROR("%s:failed to post buffers", __func__);
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302354 return qdf_status;
Houston Hoffman271951f2016-11-12 15:24:27 -08002355 }
2356
Nachiket Kukadee5738b52017-09-07 17:16:12 +05302357 return qdf_status;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002358}
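
/*
 * Illustrative usage sketch, compiled out with #if 0: a hypothetical caller
 * checking hif_start() for success before proceeding with target bring-up.
 */
#if 0
static int example_start_hif(struct hif_opaque_softc *hif_ctx)
{
	QDF_STATUS status = hif_start(hif_ctx);

	return QDF_IS_STATUS_SUCCESS(status) ? 0 : -EINVAL;
}
#endif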
2359
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002360static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002361{
Komal Seelam644263d2016-02-22 20:45:49 +05302362 struct hif_softc *scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002363 struct CE_handle *ce_hdl;
2364 uint32_t buf_sz;
2365 struct HIF_CE_state *hif_state;
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302366 qdf_nbuf_t netbuf;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302367 qdf_dma_addr_t CE_data;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002368 void *per_CE_context;
2369
2370 buf_sz = pipe_info->buf_sz;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002371 /* Unused Copy Engine */
2372 if (buf_sz == 0)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002373 return;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002374
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002375
2376 hif_state = pipe_info->HIF_CE_state;
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002377 if (!hif_state->started)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002378 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002379
Komal Seelam02cf2f82016-02-22 20:44:25 +05302380 scn = HIF_GET_SOFTC(hif_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002381 ce_hdl = pipe_info->ce_hdl;
2382
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002383 if (scn->qdf_dev == NULL)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002384 return;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002385 while (ce_revoke_recv_next
2386 (ce_hdl, &per_CE_context, (void **)&netbuf,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302387 &CE_data) == QDF_STATUS_SUCCESS) {
Govind Singhcaa850e2017-04-20 16:41:36 +05302388 if (netbuf) {
2389 qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
2390 QDF_DMA_FROM_DEVICE);
2391 qdf_nbuf_free(netbuf);
2392 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002393 }
2394}
2395
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002396static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002397{
2398 struct CE_handle *ce_hdl;
2399 struct HIF_CE_state *hif_state;
Komal Seelam644263d2016-02-22 20:45:49 +05302400 struct hif_softc *scn;
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05302401 qdf_nbuf_t netbuf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002402 void *per_CE_context;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302403 qdf_dma_addr_t CE_data;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002404 unsigned int nbytes;
2405 unsigned int id;
2406 uint32_t buf_sz;
2407 uint32_t toeplitz_hash_result;
2408
2409 buf_sz = pipe_info->buf_sz;
2410 if (buf_sz == 0) {
2411 /* Unused Copy Engine */
2412 return;
2413 }
2414
2415 hif_state = pipe_info->HIF_CE_state;
2416 if (!hif_state->started) {
2417 return;
2418 }
2419
Komal Seelam02cf2f82016-02-22 20:44:25 +05302420 scn = HIF_GET_SOFTC(hif_state);
2421
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002422 ce_hdl = pipe_info->ce_hdl;
2423
2424 while (ce_cancel_send_next
2425 (ce_hdl, &per_CE_context,
2426 (void **)&netbuf, &CE_data, &nbytes,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302427 &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002428 if (netbuf != CE_SENDLIST_ITEM_CTXT) {
2429 /*
2430 * Packets enqueued by htt_h2t_ver_req_msg() and
2431 * htt_h2t_rx_ring_cfg_msg_ll() have already been
2432 * freed in htt_htc_misc_pkt_pool_free() in
2433 * wlantl_close(), so do not free them here again
Houston Hoffman29573d92015-10-20 17:49:44 -07002434 * by checking whether it's the endpoint
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002435 * which they are queued in.
2436 */
Nirav Shahd7f91592016-04-21 14:18:43 +05302437 if (id == scn->htc_htt_tx_endpoint)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002438 return;
Nirav Shahd7f91592016-04-21 14:18:43 +05302439 /* Indicate the completion to higher
Manikandan Mohanafd6e882017-04-07 17:46:41 -07002440 * layer to free the buffer
2441 */
2442 if (pipe_info->pipe_callbacks.txCompletionHandler)
Venkateswara Swamy Bandaru26f6f1e2016-10-03 19:35:57 +05302443 pipe_info->pipe_callbacks.
2444 txCompletionHandler(pipe_info->
2445 pipe_callbacks.Context,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002446 netbuf, id, toeplitz_hash_result);
2447 }
2448 }
2449}
2450
2451/*
2452 * Cleanup residual buffers for device shutdown:
2453 * buffers that were enqueued for receive
2454 * buffers that were to be sent
2455 * Note: Buffers that had completed but which were
2456 * not yet processed are on a completion queue. They
2457 * are handled when the completion thread shuts down.
2458 */
Jeff Johnson6950fdb2016-10-07 13:00:59 -07002459static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002460{
2461 int pipe_num;
Komal Seelam644263d2016-02-22 20:45:49 +05302462 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Houston Hoffman85925072016-05-06 17:02:18 -07002463 struct CE_state *ce_state;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002464
Komal Seelam02cf2f82016-02-22 20:44:25 +05302465 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002466 struct HIF_CE_pipe_info *pipe_info;
2467
Houston Hoffman85925072016-05-06 17:02:18 -07002468 ce_state = scn->ce_id_to_state[pipe_num];
2469 if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2470 ((ce_state->htt_tx_data) ||
2471 (ce_state->htt_rx_data))) {
2472 continue;
2473 }
2474
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002475 pipe_info = &hif_state->pipe_info[pipe_num];
2476 hif_recv_buffer_cleanup_on_pipe(pipe_info);
2477 hif_send_buffer_cleanup_on_pipe(pipe_info);
2478 }
2479}
2480
Komal Seelam5584a7c2016-02-24 19:22:48 +05302481void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002482{
Komal Seelam644263d2016-02-22 20:45:49 +05302483 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05302484 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Komal Seelam644263d2016-02-22 20:45:49 +05302485
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002486 hif_buffer_cleanup(hif_state);
2487}
2488
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08002489static void hif_destroy_oom_work(struct hif_softc *scn)
2490{
2491 struct CE_state *ce_state;
2492 int ce_id;
2493
2494 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2495 ce_state = scn->ce_id_to_state[ce_id];
2496 if (ce_state)
2497 qdf_destroy_work(scn->qdf_dev,
2498 &ce_state->oom_allocation_work);
2499 }
2500}
2501
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05302502void hif_ce_stop(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002503{
Poddar, Siddarthe41943f2016-04-27 15:33:48 +05302504 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002505 int pipe_num;
2506
Houston Hoffmana69581e2016-11-14 18:03:19 -08002507 /*
2508 * before cleaning up any memory, ensure irq &
2509 * bottom half contexts will not be re-entered
2510 */
Houston Hoffman7622cd32017-04-06 14:17:49 -07002511 hif_disable_isr(&scn->osc);
Houston Hoffmanb12ccb72017-03-01 20:02:28 -08002512 hif_destroy_oom_work(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002513 scn->hif_init_done = false;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002514
2515 /*
2516 * At this point, asynchronous threads are stopped,
2517 * The Target should not DMA nor interrupt, Host code may
2518 * not initiate anything more. So we just need to clean
2519 * up Host-side state.
2520 */
2521
2522 if (scn->athdiag_procfs_inited) {
2523 athdiag_procfs_remove();
2524 scn->athdiag_procfs_inited = false;
2525 }
2526
2527 hif_buffer_cleanup(hif_state);
2528
2529 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2530 struct HIF_CE_pipe_info *pipe_info;
Poddar, Siddarth725e9f52017-07-19 15:18:28 +05302531 struct CE_attr attr;
2532 struct CE_handle *ce_diag = hif_state->ce_diag;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002533
2534 pipe_info = &hif_state->pipe_info[pipe_num];
2535 if (pipe_info->ce_hdl) {
Poddar, Siddarth725e9f52017-07-19 15:18:28 +05302536 if (pipe_info->ce_hdl != ce_diag) {
2537 attr = hif_state->host_ce_config[pipe_num];
2538 if (attr.src_nentries)
2539 qdf_spinlock_destroy(&pipe_info->
2540 completion_freeq_lock);
2541 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002542 ce_fini(pipe_info->ce_hdl);
2543 pipe_info->ce_hdl = NULL;
2544 pipe_info->buf_sz = 0;
Poddar, Siddarth725e9f52017-07-19 15:18:28 +05302545 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002546 }
2547 }
2548
2549 if (hif_state->sleep_timer_init) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302550 qdf_timer_stop(&hif_state->sleep_timer);
2551 qdf_timer_free(&hif_state->sleep_timer);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002552 hif_state->sleep_timer_init = false;
2553 }
2554
2555 hif_state->started = false;
2556}
2557
Nirav Shah4c8b78a2018-06-12 11:49:35 +05302558#ifdef QCN7605_SUPPORT
2559static inline
2560void hif_get_shadow_reg_cfg_qcn7605(struct shadow_reg_cfg
2561 **target_shadow_reg_cfg_ret,
2562 uint32_t *shadow_cfg_sz_ret)
2563{
2564 if (target_shadow_reg_cfg_ret)
2565 *target_shadow_reg_cfg_ret = target_shadow_reg_cfg_map_qcn7605;
2566 if (shadow_cfg_sz_ret)
2567 *shadow_cfg_sz_ret = sizeof(target_shadow_reg_cfg_map_qcn7605);
2568}
2569#else
2570static inline
2571void hif_get_shadow_reg_cfg_qcn7605(struct shadow_reg_cfg
2572 **target_shadow_reg_cfg_ret,
2573 uint32_t *shadow_cfg_sz_ret)
2574{
2575 HIF_ERROR("QCN7605 not supported");
2576}
2577#endif
2578
2579static void hif_get_shadow_reg_cfg(struct hif_softc *scn,
2580 struct shadow_reg_cfg
2581 **target_shadow_reg_cfg_ret,
2582 uint32_t *shadow_cfg_sz_ret)
2583{
2584 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2585 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
2586
2587 switch (tgt_info->target_type) {
2588 case TARGET_TYPE_QCN7605:
2589 hif_get_shadow_reg_cfg_qcn7605(target_shadow_reg_cfg_ret,
2590 shadow_cfg_sz_ret);
2591 break;
2592 default:
2593 if (target_shadow_reg_cfg_ret)
2594 *target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
2595 if (shadow_cfg_sz_ret)
2596 *shadow_cfg_sz_ret = shadow_cfg_sz;
2597 }
2598}
Houston Hoffman748e1a62017-03-30 17:20:42 -07002599
Houston Hoffman854e67f2016-03-14 21:11:39 -07002600/**
2601 * hif_get_target_ce_config() - get copy engine configuration
2602 * @target_ce_config_ret: basic copy engine configuration
2603 * @target_ce_config_sz_ret: size of the basic configuration in bytes
2604 * @target_service_to_ce_map_ret: service mapping for the copy engines
2605 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
2606 * @target_shadow_reg_cfg_ret: shadow register configuration
2607 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
2608 *
2609 * Provides accessors to these values outside of this file.
2610 * Currently these are stored in static pointers to const sections.
2611 * There are multiple configurations that are selected from at compile time.
2612 * Runtime selection would need to consider mode, target type and bus type.
2613 *
2614 * Return: return by parameter.
2615 */
void hif_get_target_ce_config(struct hif_softc *scn,
		struct CE_pipe_config **target_ce_config_ret,
		uint32_t *target_ce_config_sz_ret,
		struct service_to_pipe **target_service_to_ce_map_ret,
		uint32_t *target_service_to_ce_map_sz_ret,
		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
		uint32_t *shadow_cfg_sz_ret)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	*target_ce_config_ret = hif_state->target_ce_config;
	*target_ce_config_sz_ret = hif_state->target_ce_config_sz;

	hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
				       target_service_to_ce_map_sz_ret);
	hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret,
			       shadow_cfg_sz_ret);
}

#ifdef CONFIG_SHADOW_V2
static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
{
	int i;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "%s: num_config %d", __func__, cfg->num_shadow_reg_v2_cfg);

	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  "%s: i %d, val %x", __func__, i,
			  cfg->shadow_reg_v2_cfg[i].addr);
	}
}
#else
static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
{
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "%s: CONFIG_SHADOW_V2 not defined", __func__);
}
#endif

/**
 * hif_wlan_enable(): call the platform driver to enable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode and CE configuration to the
 * platform driver to enable wlan.
 *
 * Return: Linux error code
 */
int hif_wlan_enable(struct hif_softc *scn)
{
	struct pld_wlan_enable_cfg cfg;
	enum pld_driver_mode mode;
	uint32_t con_mode = hif_get_conparam(scn);

	hif_get_target_ce_config(scn,
			(struct CE_pipe_config **)&cfg.ce_tgt_cfg,
			&cfg.num_ce_tgt_cfg,
			(struct service_to_pipe **)&cfg.ce_svc_cfg,
			&cfg.num_ce_svc_pipe_cfg,
			(struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
			&cfg.num_shadow_reg_cfg);

	/* translate from structure size to array size */
	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);

	hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
					    &cfg.num_shadow_reg_v2_cfg);

	hif_print_hal_shadow_register_cfg(&cfg);

	if (QDF_GLOBAL_FTM_MODE == con_mode)
		mode = PLD_FTM;
	else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
		mode = PLD_COLDBOOT_CALIBRATION;
	else if (QDF_IS_EPPING_ENABLED(con_mode))
		mode = PLD_EPPING;
	else
		mode = PLD_MISSION;

	if (BYPASS_QMI)
		return 0;
	else
		return pld_wlan_enable(scn->qdf_dev->dev, &cfg,
				       mode, QWLAN_VERSIONSTR);
}

#ifdef WLAN_FEATURE_EPPING

#define CE_EPPING_USES_IRQ true

void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state)
{
	if (CE_EPPING_USES_IRQ)
		hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
	else
		hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
	hif_state->target_ce_config = target_ce_config_wlan_epping;
	hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
	target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
}
#endif

#ifdef QCN7605_SUPPORT
static inline
void hif_set_ce_config_qcn7605(struct hif_softc *scn,
			       struct HIF_CE_state *hif_state)
{
	hif_state->host_ce_config = host_ce_config_wlan_qcn7605;
	hif_state->target_ce_config = target_ce_config_wlan_qcn7605;
	hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_qcn7605);
	scn->ce_count = QCN7605_CE_COUNT;
}
#else
static inline
void hif_set_ce_config_qcn7605(struct hif_softc *scn,
			       struct HIF_CE_state *hif_state)
{
	HIF_ERROR("QCN7605 not supported");
}
#endif

/**
 * hif_ce_prepare_config() - load the correct static tables.
 * @scn: hif context
 *
 * Epping uses different static attribute tables than mission mode.
 */
void hif_ce_prepare_config(struct hif_softc *scn)
{
	uint32_t mode = hif_get_conparam(scn);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_state->ce_services = ce_services_attach(scn);

	scn->ce_count = HOST_CE_COUNT;
	/* if epping is enabled we need to use the epping configuration. */
	if (QDF_IS_EPPING_ENABLED(mode))
		hif_ce_prepare_epping_config(hif_state);

	switch (tgt_info->target_type) {
	default:
		hif_state->host_ce_config = host_ce_config_wlan;
		hif_state->target_ce_config = target_ce_config_wlan;
		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
		break;
	case TARGET_TYPE_QCN7605:
		hif_set_ce_config_qcn7605(scn, hif_state);
		break;
	case TARGET_TYPE_AR900B:
	case TARGET_TYPE_QCA9984:
	case TARGET_TYPE_IPQ4019:
	case TARGET_TYPE_QCA9888:
		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
			hif_state->host_ce_config =
				host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
		} else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
			hif_state->host_ce_config =
				host_lowdesc_ce_cfg_wlan_ar900b;
		} else {
			hif_state->host_ce_config = host_ce_config_wlan_ar900b;
		}

		hif_state->target_ce_config = target_ce_config_wlan_ar900b;
		hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_ar900b);

		break;

	case TARGET_TYPE_AR9888:
	case TARGET_TYPE_AR9888V2:
		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
			hif_state->host_ce_config =
				host_lowdesc_ce_cfg_wlan_ar9888;
		} else {
			hif_state->host_ce_config = host_ce_config_wlan_ar9888;
		}

		hif_state->target_ce_config = target_ce_config_wlan_ar9888;
		hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_ar9888);

		break;

	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
		if (scn->bus_type == QDF_BUS_TYPE_PCI) {
			hif_state->host_ce_config =
				host_ce_config_wlan_qca8074_pci;
			hif_state->target_ce_config =
				target_ce_config_wlan_qca8074_pci;
			hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_qca8074_pci);
		} else {
			hif_state->host_ce_config = host_ce_config_wlan_qca8074;
			hif_state->target_ce_config =
				target_ce_config_wlan_qca8074;
			hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_qca8074);
		}
		break;
	case TARGET_TYPE_QCA6290:
		hif_state->host_ce_config = host_ce_config_wlan_qca6290;
		hif_state->target_ce_config = target_ce_config_wlan_qca6290;
		hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_qca6290);

		scn->ce_count = QCA_6290_CE_COUNT;
		break;
	case TARGET_TYPE_QCA6390:
		hif_state->host_ce_config = host_ce_config_wlan_qca6390;
		hif_state->target_ce_config = target_ce_config_wlan_qca6390;
		hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_qca6390);

		scn->ce_count = QCA_6390_CE_COUNT;
		break;
	}
	QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
}
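
/*
 * Illustrative sketch only: a new target type would extend the switch in
 * hif_ce_prepare_config() with the same pattern used above. The names
 * below (TARGET_TYPE_FOO, host_ce_config_wlan_foo, FOO_CE_COUNT) are
 * assumptions for illustration, not symbols defined in this driver.
 *
 *	case TARGET_TYPE_FOO:
 *		hif_state->host_ce_config = host_ce_config_wlan_foo;
 *		hif_state->target_ce_config = target_ce_config_wlan_foo;
 *		hif_state->target_ce_config_sz =
 *				sizeof(target_ce_config_wlan_foo);
 *		scn->ce_count = FOO_CE_COUNT;
 *		break;
 */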

/**
 * hif_ce_open() - do ce specific allocations
 * @hif_sc: pointer to hif context
 *
 * Return: QDF_STATUS_SUCCESS for success or QDF_STATUS_E_NOMEM
 */
QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	qdf_spinlock_create(&hif_state->irq_reg_lock);
	qdf_spinlock_create(&hif_state->keep_awake_lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * hif_ce_close() - do ce specific free
 * @hif_sc: pointer to hif context
 */
void hif_ce_close(struct hif_softc *hif_sc)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
	qdf_spinlock_destroy(&hif_state->keep_awake_lock);
}
2870
2871/**
2872 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
2873 * @hif_sc: hif context
2874 *
2875 * uses state variables to support cleaning up when hif_config_ce fails.
2876 */
2877void hif_unconfig_ce(struct hif_softc *hif_sc)
2878{
2879 int pipe_num;
2880 struct HIF_CE_pipe_info *pipe_info;
2881 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
Manjunathappa Prakasha5a30862018-05-21 16:32:32 -07002882 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc);
Houston Hoffman108da402016-03-14 21:11:24 -07002883
2884 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
2885 pipe_info = &hif_state->pipe_info[pipe_num];
2886 if (pipe_info->ce_hdl) {
2887 ce_unregister_irq(hif_state, (1 << pipe_num));
jitiphile393cf42018-07-30 14:14:48 +05302888 }
2889 }
2890 deinit_tasklet_workers(hif_hdl);
2891 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
2892 pipe_info = &hif_state->pipe_info[pipe_num];
2893 if (pipe_info->ce_hdl) {
Houston Hoffman108da402016-03-14 21:11:24 -07002894 ce_fini(pipe_info->ce_hdl);
2895 pipe_info->ce_hdl = NULL;
2896 pipe_info->buf_sz = 0;
Houston Hoffman03f46572016-12-12 12:53:56 -08002897 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
Houston Hoffman108da402016-03-14 21:11:24 -07002898 }
2899 }
Houston Hoffman108da402016-03-14 21:11:24 -07002900 if (hif_sc->athdiag_procfs_inited) {
2901 athdiag_procfs_remove();
2902 hif_sc->athdiag_procfs_inited = false;
2903 }
2904}

#ifdef CONFIG_BYPASS_QMI
#define FW_SHARED_MEM (2 * 1024 * 1024)

/**
 * hif_post_static_buf_to_target() - post static buffer to WLAN FW
 * @scn: pointer to HIF structure
 *
 * WLAN FW needs 2MB memory from DDR when QMI is disabled.
 *
 * Return: void
 */
static void hif_post_static_buf_to_target(struct hif_softc *scn)
{
	void *target_va;
	phys_addr_t target_pa;

	target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					     FW_SHARED_MEM, &target_pa);
	if (!target_va) {
		HIF_TRACE("Memory allocation failed, could not post target buf");
		return;
	}
	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
	HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
}
#else
static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
{
}
#endif

static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
				       bool wait_for_it)
{
	/* todo */
	return 0;
}

/**
 * hif_config_ce() - configure copy engines
 * @scn: hif context
 *
 * Prepares fw, copy engine hardware and host sw according
 * to the attributes selected by hif_ce_prepare_config.
 *
 * Also calls athdiag_procfs_init.
 *
 * Return: 0 for success, nonzero for failure.
 */
int hif_config_ce(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct HIF_CE_pipe_info *pipe_info;
	int pipe_num;
	struct CE_state *ce_state = NULL;

#ifdef ADRASTEA_SHADOW_REGISTERS
	int i;
#endif
	QDF_STATUS rv = QDF_STATUS_SUCCESS;

	scn->notice_send = true;
	scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;

	hif_post_static_buf_to_target(scn);

	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;

	hif_config_rri_on_ddr(scn);

	if (ce_srng_based(scn))
		scn->bus_ops.hif_target_sleep_state_adjust =
			&hif_srng_sleep_state_adjust;

	/* Initialise the CE debug history sysfs interface inputs ce_id and
	 * index. Disable data storing
	 */
	reset_ce_debug_history(scn);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr *attr;

		pipe_info = &hif_state->pipe_info[pipe_num];
		pipe_info->pipe_num = pipe_num;
		pipe_info->HIF_CE_state = hif_state;
		attr = &hif_state->host_ce_config[pipe_num];

		pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
		ce_state = scn->ce_id_to_state[pipe_num];
		if (!ce_state) {
			A_TARGET_ACCESS_UNLIKELY(scn);
			goto err;
		}
		qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
		QDF_ASSERT(pipe_info->ce_hdl != NULL);
		if (pipe_info->ce_hdl == NULL) {
			rv = QDF_STATUS_E_FAILURE;
			A_TARGET_ACCESS_UNLIKELY(scn);
			goto err;
		}

		ce_state->lro_data = qdf_lro_init();

		if (attr->flags & CE_ATTR_DIAG) {
			/* Reserve the ultimate CE for
			 * Diagnostic Window support
			 */
			hif_state->ce_diag = pipe_info->ce_hdl;
			continue;
		}

		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
		    (ce_state->htt_rx_data))
			continue;

		pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
		if (attr->dest_nentries > 0) {
			atomic_set(&pipe_info->recv_bufs_needed,
				   init_buffer_count(attr->dest_nentries - 1));
			/* SRNG based CE has one entry less */
			if (ce_srng_based(scn))
				atomic_dec(&pipe_info->recv_bufs_needed);
		} else {
			atomic_set(&pipe_info->recv_bufs_needed, 0);
		}
		ce_tasklet_init(hif_state, (1 << pipe_num));
		ce_register_irq(hif_state, (1 << pipe_num));
	}

	if (athdiag_procfs_init(scn) != 0) {
		A_TARGET_ACCESS_UNLIKELY(scn);
		goto err;
	}
	scn->athdiag_procfs_inited = true;

	HIF_DBG("%s: ce_init done", __func__);

	init_tasklet_workers(hif_hdl);

	HIF_DBG("%s: X, ret = %d", __func__, rv);

#ifdef ADRASTEA_SHADOW_REGISTERS
	HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__);
	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
		HIF_DBG("%s Shadow Register%d is mapped to address %x",
			__func__, i,
			(A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
	}
#endif

	/* rv is QDF_STATUS_SUCCESS here, so this reports 0 to the caller */
	return rv != QDF_STATUS_SUCCESS;

err:
	/* Failure, so clean up */
	hif_unconfig_ce(scn);
	HIF_TRACE("%s: X, ret = %d", __func__, rv);
	/* always evaluates to nonzero: reports failure to the caller */
	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
}
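
/*
 * Lifecycle sketch (illustrative): a bus layer typically drives CE bring-up
 * in the order below; error handling is trimmed and the wrapper name is
 * hypothetical, not an API of this driver.
 *
 *	static int hif_example_ce_bring_up(struct hif_softc *scn)
 *	{
 *		if (hif_ce_open(scn) != QDF_STATUS_SUCCESS)
 *			return -ENOMEM;
 *		hif_ce_prepare_config(scn);
 *		if (hif_config_ce(scn)) {
 *			hif_ce_close(scn);
 *			return -EIO;
 *		}
 *		return 0;
 *	}
 *
 * Teardown reverses this: hif_unconfig_ce() followed by hif_ce_close().
 */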

#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
 * @hif_ctx: HIF opaque context
 * @handler: Callback function
 * @context: handle for callback function
 *
 * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
 */
int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
				fastpath_msg_handler handler,
				void *context)
{
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	int i;

	if (!scn) {
		HIF_ERROR("%s: scn is NULL", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	if (!scn->fastpath_mode_on) {
		HIF_WARN("%s: Fastpath mode disabled", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	for (i = 0; i < scn->ce_count; i++) {
		ce_state = scn->ce_id_to_state[i];
		if (ce_state->htt_rx_data) {
			ce_state->fastpath_handler = handler;
			ce_state->context = context;
		}
	}

	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(hif_ce_fastpath_cb_register);
#endif
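
/*
 * Usage sketch (illustrative): a data path client registering its fastpath
 * message handler when WLAN_FEATURE_FASTPATH is enabled. my_fastpath_rx and
 * my_ctx are assumed names, not symbols defined in this driver.
 *
 *	if (hif_ce_fastpath_cb_register(hif_ctx, my_fastpath_rx, my_ctx) !=
 *	    QDF_STATUS_SUCCESS)
 *		HIF_ERROR("%s: fastpath cb register failed", __func__);
 */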

#ifdef IPA_OFFLOAD
/**
 * hif_ce_ipa_get_ce_resource() - get uc resource on hif
 * @scn: bus context
 * @ce_sr: copy engine source ring shared memory info
 * @ce_sr_ring_size: copy engine source ring size
 * @ce_reg_paddr: copy engine register physical address
 *
 * With the IPA micro controller data path offload feature enabled,
 * HIF should release copy engine resource information to the IPA UC.
 * The IPA UC will access the hardware resource with the released
 * information.
 *
 * Return: None
 */
void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
				qdf_shared_mem_t **ce_sr,
				uint32_t *ce_sr_ring_size,
				qdf_dma_addr_t *ce_reg_paddr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;

	ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
			    ce_reg_paddr);
}
#endif /* IPA_OFFLOAD */

#ifdef ADRASTEA_SHADOW_REGISTERS

/*
 * Current shadow register config
 *
 * -----------------------------------------------------------
 * Shadow Register      |     CE   |    src/dst write index
 * -----------------------------------------------------------
 *         0            |     0    |           src
 *         1     No Config - Doesn't point to anything
 *         2     No Config - Doesn't point to anything
 *         3            |     3    |           src
 *         4            |     4    |           src
 *         5            |     5    |           src
 *         6     No Config - Doesn't point to anything
 *         7            |     7    |           src
 *         8     No Config - Doesn't point to anything
 *         9     No Config - Doesn't point to anything
 *         10    No Config - Doesn't point to anything
 *         11    No Config - Doesn't point to anything
 * -----------------------------------------------------------
 *         12    No Config - Doesn't point to anything
 *         13           |     1    |           dst
 *         14           |     2    |           dst
 *         15    No Config - Doesn't point to anything
 *         16    No Config - Doesn't point to anything
 *         17    No Config - Doesn't point to anything
 *         18    No Config - Doesn't point to anything
 *         19           |     7    |           dst
 *         20           |     8    |           dst
 *         21    No Config - Doesn't point to anything
 *         22    No Config - Doesn't point to anything
 *         23    No Config - Doesn't point to anything
 * -----------------------------------------------------------
 *
 *
 * ToDo - Move shadow register config to following in the future
 * This helps free up a block of shadow registers towards the end.
 * Can be used for other purposes
 *
 * -----------------------------------------------------------
 * Shadow Register      |     CE   |    src/dst write index
 * -----------------------------------------------------------
 *         0            |     0    |           src
 *         1            |     3    |           src
 *         2            |     4    |           src
 *         3            |     5    |           src
 *         4            |     7    |           src
 * -----------------------------------------------------------
 *         5            |     1    |           dst
 *         6            |     2    |           dst
 *         7            |     7    |           dst
 *         8            |     8    |           dst
 * -----------------------------------------------------------
 *         9     No Config - Doesn't point to anything
 *         12    No Config - Doesn't point to anything
 *         13    No Config - Doesn't point to anything
 *         14    No Config - Doesn't point to anything
 *         15    No Config - Doesn't point to anything
 *         16    No Config - Doesn't point to anything
 *         17    No Config - Doesn't point to anything
 *         18    No Config - Doesn't point to anything
 *         19    No Config - Doesn't point to anything
 *         20    No Config - Doesn't point to anything
 *         21    No Config - Doesn't point to anything
 *         22    No Config - Doesn't point to anything
 *         23    No Config - Doesn't point to anything
 * -----------------------------------------------------------
 */
#ifndef QCN7605_SUPPORT
u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 0:
		addr = SHADOW_VALUE0;
		break;
	case 3:
		addr = SHADOW_VALUE3;
		break;
	case 4:
		addr = SHADOW_VALUE4;
		break;
	case 5:
		addr = SHADOW_VALUE5;
		break;
	case 7:
		addr = SHADOW_VALUE7;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}
	return addr;
}

u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 1:
		addr = SHADOW_VALUE13;
		break;
	case 2:
		addr = SHADOW_VALUE14;
		break;
	case 5:
		addr = SHADOW_VALUE17;
		break;
	case 7:
		addr = SHADOW_VALUE19;
		break;
	case 8:
		addr = SHADOW_VALUE20;
		break;
	case 9:
		addr = SHADOW_VALUE21;
		break;
	case 10:
		addr = SHADOW_VALUE22;
		break;
	case 11:
		addr = SHADOW_VALUE23;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}

	return addr;
}
#else
u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 0:
		addr = SHADOW_VALUE0;
		break;
	case 4:
		addr = SHADOW_VALUE4;
		break;
	case 5:
		addr = SHADOW_VALUE5;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}
	return addr;
}

u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 1:
		addr = SHADOW_VALUE13;
		break;
	case 2:
		addr = SHADOW_VALUE14;
		break;
	case 3:
		addr = SHADOW_VALUE15;
		break;
	case 5:
		addr = SHADOW_VALUE17;
		break;
	case 7:
		addr = SHADOW_VALUE19;
		break;
	case 8:
		addr = SHADOW_VALUE20;
		break;
	case 9:
		addr = SHADOW_VALUE21;
		break;
	case 10:
		addr = SHADOW_VALUE22;
		break;
	case 11:
		addr = SHADOW_VALUE23;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}

	return addr;
}
#endif
#endif

#if defined(FEATURE_LRO)
void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
{
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	ce_state = scn->ce_id_to_state[ctx_id];

	return ce_state->lro_data;
}
#endif

/**
 * hif_map_service_to_pipe() - returns the ce ids pertaining to
 * this service
 * @hif_hdl: hif_opaque_softc pointer.
 * @svc_id: Service ID for which the mapping is needed.
 * @ul_pipe: address of the container in which ul pipe is returned.
 * @dl_pipe: address of the container in which dl pipe is returned.
 * @ul_is_polled: address of the container in which a bool
 *			indicating if the UL CE for this service
 *			is polled is returned.
 * @dl_is_polled: address of the container in which a bool
 *			indicating if the DL CE for this service
 *			is polled is returned.
 *
 * Return: Indicates whether the service has been found in the table.
 *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
 *         There will be warning logs if either leg has not been updated
 *         because it missed the entry in the table (but this is not an err).
 */
int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
			    uint8_t *ul_pipe, uint8_t *dl_pipe,
			    int *ul_is_polled, int *dl_is_polled)
{
	int status = QDF_STATUS_E_INVAL;
	unsigned int i;
	struct service_to_pipe element;
	struct service_to_pipe *tgt_svc_map_to_use;
	uint32_t sz_tgt_svc_map_to_use;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	bool dl_updated = false;
	bool ul_updated = false;

	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
				       &sz_tgt_svc_map_to_use);

	*dl_is_polled = 0;  /* polling for received messages not supported */

	for (i = 0; i < (sz_tgt_svc_map_to_use / sizeof(element)); i++) {
		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
		if (element.service_id == svc_id) {
			if (element.pipedir == PIPEDIR_OUT) {
				*ul_pipe = element.pipenum;
				*ul_is_polled =
					(hif_state->host_ce_config[*ul_pipe].flags &
					 CE_ATTR_DISABLE_INTR) != 0;
				ul_updated = true;
			} else if (element.pipedir == PIPEDIR_IN) {
				*dl_pipe = element.pipenum;
				dl_updated = true;
			}
			status = QDF_STATUS_SUCCESS;
		}
	}
	if (!ul_updated)
		HIF_DBG("ul pipe is NOT updated for service %d", svc_id);
	if (!dl_updated)
		HIF_DBG("dl pipe is NOT updated for service %d", svc_id);

	return status;
}

#ifdef SHADOW_REG_DEBUG
inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, srri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);

	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != srri_from_ddr) {
		HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
			  __func__, srri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return srri_from_ddr;
}

inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, drri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);

	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != drri_from_ddr) {
		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
			  drri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return drri_from_ddr;
}
#endif

#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_get_src_ring_read_index(): Called to get the SRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the SRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based SRRI.
 *
 * Return: SRRI
 */
inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
	if (attr.flags & CE_ATTR_DISABLE_INTR) {
		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	} else {
		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
			return A_TARGET_READ(scn,
					(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
		else
			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
					CE_ctrl_addr);
	}
}

/**
 * hif_get_dst_ring_read_index(): Called to get the DRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the DRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based DRRI.
 *
 * Return: DRRI
 */
inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];

	if (attr.flags & CE_ATTR_DISABLE_INTR) {
		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	} else {
		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
			return A_TARGET_READ(scn,
					(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
		else
			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
					CE_ctrl_addr);
	}
}

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This function allocates non cached memory on ddr and sends
 * the physical address of this memory to the CE hardware. The
 * hardware updates the RRI on this particular location.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
	unsigned int i;
	qdf_dma_addr_t paddr_rri_on_ddr;
	uint32_t high_paddr, low_paddr;

	scn->vaddr_rri_on_ddr =
		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
		scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
		&paddr_rri_on_ddr);

	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
	low_paddr  = BITS0_TO_31(paddr_rri_on_ddr);
	high_paddr = BITS32_TO_35(paddr_rri_on_ddr);

	HIF_DBG("%s using srri and drri from DDR", __func__);

	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);

	for (i = 0; i < CE_COUNT; i++)
		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));

	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));
}
#else

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This is a dummy implementation for platforms that don't
 * support this functionality.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
}
#endif

/**
 * hif_dump_ce_registers() - dump ce registers
 * @scn: hif_softc pointer.
 *
 * Output the copy engine registers
 *
 * Return: 0 for success or error code
 */
int hif_dump_ce_registers(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
	uint16_t i;
	QDF_STATUS status;

	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
		if (scn->ce_id_to_state[i] == NULL) {
			HIF_DBG("CE%d not used.", i);
			continue;
		}

		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
					   (uint8_t *) &ce_reg_values[0],
					   ce_reg_word_size * sizeof(uint32_t));

		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("Dumping CE register failed!");
			return -EACCES;
		}
		HIF_ERROR("CE%d=>\n", i);
		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
				   (uint8_t *) &ce_reg_values[0],
				   ce_reg_word_size * sizeof(uint32_t));
		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address
			  + SR_WR_INDEX_ADDRESS),
			  ce_reg_values[SR_WR_INDEX_ADDRESS / 4]);
		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address
			  + CURRENT_SRRI_ADDRESS),
			  ce_reg_values[CURRENT_SRRI_ADDRESS / 4]);
		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address
			  + DST_WR_INDEX_ADDRESS),
			  ce_reg_values[DST_WR_INDEX_ADDRESS / 4]);
		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address
			  + CURRENT_DRRI_ADDRESS),
			  ce_reg_values[CURRENT_DRRI_ADDRESS / 4]);
		qdf_print("---");
	}
	return 0;
}
qdf_export_symbol(hif_dump_ce_registers);
#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct CE_ring_state *dest_ring = ce_state->dest_ring;

	if (src_ring) {
		hif_info->ul_pipe.nentries = src_ring->nentries;
		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
		hif_info->ul_pipe.sw_index = src_ring->sw_index;
		hif_info->ul_pipe.write_index = src_ring->write_index;
		hif_info->ul_pipe.hw_index = src_ring->hw_index;
		hif_info->ul_pipe.base_addr_CE_space =
			src_ring->base_addr_CE_space;
		hif_info->ul_pipe.base_addr_owner_space =
			src_ring->base_addr_owner_space;
	}

	if (dest_ring) {
		hif_info->dl_pipe.nentries = dest_ring->nentries;
		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
		hif_info->dl_pipe.write_index = dest_ring->write_index;
		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
		hif_info->dl_pipe.base_addr_CE_space =
			dest_ring->base_addr_CE_space;
		hif_info->dl_pipe.base_addr_owner_space =
			dest_ring->base_addr_owner_space;
	}

	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
	hif_info->ctrl_addr = ce_state->ctrl_addr;

	return hif_info;
}
qdf_export_symbol(hif_get_addl_pipe_info);

uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->nss_wifi_ol_mode = mode;
	return 0;
}
qdf_export_symbol(hif_set_nss_wifiol_mode);
#endif

void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->hif_attribute = hif_attrib;
}

/* disable interrupts (only applicable for legacy copy engine currently) */
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
	uint32_t ctrl_addr = CE_state->ctrl_addr;

	Q_TARGET_ACCESS_BEGIN(scn);
	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
	Q_TARGET_ACCESS_END(scn);
}
qdf_export_symbol(hif_disable_interrupt);

/**
 * hif_fw_event_handler() - hif fw event handler
 * @hif_state: pointer to hif ce state structure
 *
 * Process fw events and raise HTC callback to process fw events.
 *
 * Return: none
 */
static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
{
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	if (!msg_callbacks->fwEventHandler)
		return;

	msg_callbacks->fwEventHandler(msg_callbacks->Context,
				      QDF_STATUS_E_FAILURE);
}

#ifndef QCA_WIFI_3_0
/**
 * hif_fw_interrupt_handler() - FW interrupt handler
 * @irq: irq number
 * @arg: the user pointer
 *
 * Called from the PCI interrupt handler when the firmware raises
 * an interrupt to the Host.
 *
 * Only registered for legacy ce devices.
 *
 * Return: status of handled irq
 */
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	struct hif_softc *scn = arg;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	uint32_t fw_indicator_address, fw_indicator;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return ATH_ISR_NOSCHED;

	fw_indicator_address = hif_state->fw_indicator_address;
	/* For sudden unplug this will return ~0 */
	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);

	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
		/* ACK: clear Target-side pending event */
		A_TARGET_WRITE(scn, fw_indicator_address,
			       fw_indicator & ~FW_IND_EVENT_PENDING);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;

		if (hif_state->started) {
			hif_fw_event_handler(hif_state);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 * fw_indicator used as bitmap, and defined as below:
			 *     FW_IND_EVENT_PENDING    0x1
			 *     FW_IND_INITIALIZED      0x2
			 *     FW_IND_NEEDRECOVER      0x4
			 */
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
				("%s: Early firmware event indicated 0x%x\n",
				 __func__, fw_indicator));
		}
	} else {
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;
	}

	return ATH_ISR_SCHED;
}
#else
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	return ATH_ISR_SCHED;
}
#endif /* #ifdef QCA_WIFI_3_0 */

/**
 * hif_wlan_disable(): call the platform driver to disable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode to the platform driver to
 * disable wlan.
 *
 * Return: void
 */
void hif_wlan_disable(struct hif_softc *scn)
{
	enum pld_driver_mode mode;
	uint32_t con_mode = hif_get_conparam(scn);

	if (scn->target_status == TARGET_STATUS_RESET)
		return;

	if (QDF_GLOBAL_FTM_MODE == con_mode)
		mode = PLD_FTM;
	else if (QDF_IS_EPPING_ENABLED(con_mode))
		mode = PLD_EPPING;
	else
		mode = PLD_MISSION;

	pld_wlan_disable(scn->qdf_dev->dev, mode);
}

int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
{
	QDF_STATUS status;
	uint8_t ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;

	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
					 HTC_CTRL_RSVD_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
		return qdf_status_to_os_return(status);
	}

	*ce_id = dl_pipe;

	return 0;
}
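
/*
 * Usage sketch (illustrative): a bus layer that must keep exactly one CE
 * interrupt armed across suspend can resolve the wake CE first; the
 * enable-wake helper named here is hypothetical, not an API of this driver.
 *
 *	uint8_t wake_ce_id;
 *
 *	if (hif_get_wake_ce_id(scn, &wake_ce_id) == 0)
 *		hif_example_enable_irq_wake(scn, wake_ce_id);
 */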