/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */
#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#ifdef CONFIG_CNSS
#include <net/cnss.h>
#endif
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#include "platform_icnss.h"
#ifndef CONFIG_WIN
#include "qwlan_version.h"
#endif

#define CE_POLL_TIMEOUT 10      /* ms */

/* Forward references */
static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix EV118783: poll to check whether a BMI response has arrived, rather
 * than waiting only for the interrupt, which may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC  1000

#ifdef CONFIG_BYPASS_QMI
#define BYPASS_QMI 1
#else
#define BYPASS_QMI 0
#endif


static int hif_post_recv_buffers(struct hif_softc *scn);
static void hif_config_rri_on_ddr(struct hif_softc *scn);

static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;

	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}

static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	QDF_ASSERT(0); /* n too large */
	return 0;
}
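
/*
 * Illustrative examples (not part of the driver) of roundup_pwr2():
 *
 *	roundup_pwr2(512) == 512	(already a power of 2)
 *	roundup_pwr2(6)   == 8
 *	roundup_pwr2(100) == 128
 *
 * Note that the n == 0 edge case also passes the (n & (n - 1)) == 0
 * power-of-2 test and is returned unchanged as 0.
 */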

#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40

static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
#ifdef QCA_WIFI_3_0_ADRASTEA
	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
#endif
};

static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};

/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
 * mapping of HTC services to HIF pipes.
 */
/*
 * This authoritative table defines Copy Engine configuration and the mapping
 * of services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
   ============================================================================
   Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
              |                      |      | ctio | Size     | Frequency
              |                      |      | n    |          |
   ============================================================================
   tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
   descriptor |                      |      |      | O(100B)  | and regular
   download   |                      |      |      |          |
   ----------------------------------------------------------------------------
   rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
   indication |                      |      |      | O(10B)   | regular
   upload     |                      |      |      |          |
   ----------------------------------------------------------------------------
   MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
   upload     |                      |      |      | O(1000B) | (frequent
   e.g. noise |                      |      |      |          | during IP1.0
   packets    |                      |      |      |          | testing)
   ----------------------------------------------------------------------------
   MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
   download   |                      |      |      | O(1000B) | (frequent
   e.g.       |                      |      |      |          | during IP1.0
   misdirecte |                      |      |      |          | testing)
   d EAPOL    |                      |      |      |          |
   packets    |                      |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
              | DATA_VO (uplink)     |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
              | DATA_VO (downlink)   |      |      |          |
   ----------------------------------------------------------------------------
   WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
              |                      |      |      | O(100B)  |
   ----------------------------------------------------------------------------
   WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
   messages   | (downlink)           |      |      | O(100B)  |
              |                      |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
              | HTC_RAW_STREAMS      |      |      |          |
              | (uplink)             |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
              | HTC_RAW_STREAMS      |      |      |          |
              | (downlink)           |      |      |          |
   ----------------------------------------------------------------------------
   diag       | none (raw CE)        | CE 7 | t<>h | 4        | Diag Window
              |                      |      |      |          | infrequent
   ============================================================================
 */

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC,    /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC,    /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		5,
	},
#if defined(QCA_WIFI_3_0_ADRASTEA)
	{
		HTT_DATA2_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		9,
	},
	{
		HTT_DATA3_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		10,
	},
#endif
	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTC_RAW_STREAMS_SVC,    /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC,    /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
#if WLAN_FEATURE_FASTPATH
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		5,
	},
#else /* WLAN_FEATURE_FASTPATH */
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
#endif /* WLAN_FEATURE_FASTPATH */

	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

static struct service_to_pipe *target_service_to_ce_map =
	target_service_to_ce_map_wlan;
static int target_service_to_ce_map_sz = sizeof(target_service_to_ce_map_wlan);

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
	{0, 0, 0,},             /* Must be last */
};

/**
 * ce_mark_datapath() - marks ce_state->htt_rx_data/htt_tx_data accordingly
 * @ce_state: pointer to the state context of the CE
 *
 * Description:
 *   Sets the htt_rx_data or htt_tx_data attribute of the state structure
 *   if the CE serves one of the HTT DATA services.
 *
 * Return: true if the CE serves an HTT DATA service (an attribute was set),
 *         false otherwise
 */
bool ce_mark_datapath(struct CE_state *ce_state)
{
	struct service_to_pipe *svc_map;
	size_t map_sz;
	int i;
	bool rc = false;
	struct hif_opaque_softc *hif_hdl;
	struct hif_target_info *tgt_info;

	if (ce_state == NULL)
		return rc;

	/* Only dereference ce_state after the NULL check above */
	hif_hdl = GET_HIF_OPAQUE_HDL(ce_state->scn);
	tgt_info = hif_get_target_info_handle(hif_hdl);

	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(ce_state->scn))) {
		svc_map = target_service_to_ce_map_wlan_epping;
		map_sz = sizeof(target_service_to_ce_map_wlan_epping) /
			sizeof(struct service_to_pipe);
	} else {
		switch (tgt_info->target_type) {
		default:
			svc_map = target_service_to_ce_map_wlan;
			map_sz = sizeof(target_service_to_ce_map_wlan) /
				sizeof(struct service_to_pipe);
			break;
		case TARGET_TYPE_AR900B:
		case TARGET_TYPE_QCA9984:
		case TARGET_TYPE_IPQ4019:
		case TARGET_TYPE_QCA9888:
		case TARGET_TYPE_AR9888:
		case TARGET_TYPE_AR9888V2:
			svc_map = target_service_to_ce_map_ar900b;
			map_sz = sizeof(target_service_to_ce_map_ar900b) /
				sizeof(struct service_to_pipe);
			break;
		}
	}
	for (i = 0; i < map_sz; i++) {
		if ((svc_map[i].pipenum == ce_state->id) &&
		    ((svc_map[i].service_id == HTT_DATA_MSG_SVC) ||
		     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
		     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
			/* HTT CEs are unidirectional */
			if (svc_map[i].pipedir == PIPEDIR_IN)
				ce_state->htt_rx_data = true;
			else
				ce_state->htt_tx_data = true;
			rc = true;
		}
	}
	return rc;
}
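
/*
 * Worked example (illustrative, using target_service_to_ce_map_wlan above):
 * for CE 1 the map contains { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1 }, so
 * ce_mark_datapath() sets htt_rx_data and returns true; for CE 4 the match
 * is { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4 }, so htt_tx_data is set instead.
 * CE 5 (WDI_IPA_TX_SVC) matches no HTT DATA service, so the function
 * returns false with neither flag set.
 */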

/**
 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
 * @ce_id: ce in question
 * @ring: ring state being examined
 * @type: "src_ring" or "dest_ring" string for identifying the ring
 *
 * Warns on non-zero index values.
 * Causes a kernel panic if the ring is not empty during initialization.
 */
static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
					 char *type)
{
	if (ring->write_index != 0 || ring->sw_index != 0)
		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
			  ce_id, type, ring->sw_index, ring->write_index);
	if (ring->write_index != ring->sw_index)
		QDF_BUG(0);
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 *
 * This should be called during the initialization sequence before
 * interrupts are enabled, so we don't have to worry about thread safety.
 */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr)
{
	struct CE_state *CE_state;
	uint32_t ctrl_addr;
	unsigned int nentries;
	qdf_dma_addr_t base_addr;
	bool malloc_CE_state = false;
	bool malloc_src_ring = false;

	QDF_ASSERT(CE_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(CE_id);
	CE_state = scn->ce_id_to_state[CE_id];

	if (attr == NULL) {
		/*
		 * Already initialized; the caller just wants the handle.
		 * Check this before the first attr dereference below, so a
		 * NULL attr cannot be dereferenced for a not-yet-created CE.
		 */
		return (struct CE_handle *)CE_state;
	}

	if (!CE_state) {
		CE_state =
			(struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
		if (!CE_state) {
			HIF_ERROR("%s: CE_state has no mem", __func__);
			return NULL;
		}
		malloc_CE_state = true;
		qdf_mem_zero(CE_state, sizeof(*CE_state));
		scn->ce_id_to_state[CE_id] = CE_state;
		qdf_spinlock_create(&CE_state->ce_index_lock);

		CE_state->id = CE_id;
		CE_state->ctrl_addr = ctrl_addr;
		CE_state->state = CE_RUNNING;
		CE_state->attr_flags = attr->flags;
	}
	CE_state->scn = scn;

	qdf_atomic_init(&CE_state->rx_pending);

#ifdef ADRASTEA_SHADOW_REGISTERS
	HIF_ERROR("%s: Using Shadow Registers instead of CE Registers\n",
		  __func__);
#endif

	if (CE_state->src_sz_max)
		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
	else
		CE_state->src_sz_max = attr->src_sz_max;

	ce_init_ce_desc_event_log(CE_id,
				  attr->src_nentries + attr->dest_nentries);

	/* source ring setup */
	nentries = attr->src_nentries;
	if (nentries) {
		struct CE_ring_state *src_ring;
		unsigned CE_nbytes;
		char *ptr;
		uint64_t dma_addr;

		nentries = roundup_pwr2(nentries);
		if (CE_state->src_ring) {
			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
		} else {
			CE_nbytes = sizeof(struct CE_ring_state)
				    + (nentries * sizeof(void *));
			ptr = qdf_mem_malloc(CE_nbytes);
			if (!ptr) {
				/* cannot allocate src ring. If the
				 * CE_state is allocated locally free
				 * CE_State and return error.
				 */
				HIF_ERROR("%s: src ring has no mem", __func__);
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					scn->ce_id_to_state[CE_id] = NULL;
					qdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			} else {
				/* we can allocate src ring.
				 * Mark that the src ring is
				 * allocated locally
				 */
				malloc_src_ring = true;
			}
			qdf_mem_zero(ptr, CE_nbytes);

			src_ring = CE_state->src_ring =
				(struct CE_ring_state *)ptr;
			ptr += sizeof(struct CE_ring_state);
			src_ring->nentries = nentries;
			src_ring->nentries_mask = nentries - 1;
			if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
				goto error_target_access;
			src_ring->hw_index =
				CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn,
						ctrl_addr);
			src_ring->sw_index = src_ring->hw_index;
			src_ring->write_index =
				CE_SRC_RING_WRITE_IDX_GET_FROM_REGISTER(scn,
						ctrl_addr);

			ce_ring_test_initial_indexes(CE_id, src_ring,
						     "src_ring");

			if (Q_TARGET_ACCESS_END(scn) < 0)
				goto error_target_access;

			src_ring->low_water_mark_nentries = 0;
			src_ring->high_water_mark_nentries = nentries;
			src_ring->per_transfer_context = (void **)ptr;

			/* Legacy platforms that do not support cache
			 * coherent DMA are unsupported
			 */
			src_ring->base_addr_owner_space_unaligned =
				qdf_mem_alloc_consistent(scn->qdf_dev,
						scn->qdf_dev->dev,
						(nentries *
						 sizeof(struct CE_src_desc) +
						 CE_DESC_RING_ALIGN),
						&base_addr);
			if (src_ring->base_addr_owner_space_unaligned
			    == NULL) {
				HIF_ERROR("%s: src ring has no DMA mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->base_addr_CE_space_unaligned = base_addr;

			if (src_ring->base_addr_CE_space_unaligned &
			    (CE_DESC_RING_ALIGN - 1)) {
				src_ring->base_addr_CE_space =
					(src_ring->base_addr_CE_space_unaligned
					 + CE_DESC_RING_ALIGN - 1)
					& ~(CE_DESC_RING_ALIGN - 1);

				src_ring->base_addr_owner_space =
					(void *)(((size_t)src_ring->
					 base_addr_owner_space_unaligned +
					 CE_DESC_RING_ALIGN - 1)
					 & ~(CE_DESC_RING_ALIGN - 1));
			} else {
				src_ring->base_addr_CE_space =
					src_ring->base_addr_CE_space_unaligned;
				src_ring->base_addr_owner_space =
					src_ring->
					base_addr_owner_space_unaligned;
			}
			/*
			 * Also allocate a shadow src ring in
			 * regular mem to use for faster access.
			 */
			src_ring->shadow_base_unaligned =
				qdf_mem_malloc(nentries *
					       sizeof(struct CE_src_desc) +
					       CE_DESC_RING_ALIGN);
			if (src_ring->shadow_base_unaligned == NULL) {
				HIF_ERROR("%s: src ring no shadow_base mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->shadow_base = (struct CE_src_desc *)
				(((size_t)src_ring->shadow_base_unaligned +
				  CE_DESC_RING_ALIGN - 1) &
				 ~(CE_DESC_RING_ALIGN - 1));

			if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
				goto error_target_access;
			dma_addr = src_ring->base_addr_CE_space;
			CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
				(uint32_t)(dma_addr & 0xFFFFFFFF));

			/* if SR_BA_ADDRESS_HIGH register exists */
			if (SR_BA_ADDRESS_HIGH) {
				uint32_t tmp;

				tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(
						scn, ctrl_addr);
				tmp &= ~0x1F;
				dma_addr = ((dma_addr >> 32) & 0x1F) | tmp;
				CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
					ctrl_addr, (uint32_t)dma_addr);
			}
			CE_SRC_RING_SZ_SET(scn, ctrl_addr, nentries);
			CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
#ifdef BIG_ENDIAN_HOST
			/* Enable source ring byte swap for big endian host */
			CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
			CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
			CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
			if (Q_TARGET_ACCESS_END(scn) < 0)
				goto error_target_access;
		}
	}

	/* destination ring setup */
	nentries = attr->dest_nentries;
	if (nentries) {
		struct CE_ring_state *dest_ring;
		unsigned CE_nbytes;
		char *ptr;
		uint64_t dma_addr;

		nentries = roundup_pwr2(nentries);
		if (CE_state->dest_ring) {
			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
		} else {
			CE_nbytes = sizeof(struct CE_ring_state)
				    + (nentries * sizeof(void *));
			ptr = qdf_mem_malloc(CE_nbytes);
			if (!ptr) {
				/* cannot allocate dst ring. If the CE_state
				 * or src ring is allocated locally free
				 * CE_State and src ring and return error.
				 */
				HIF_ERROR("%s: dest ring has no mem",
					  __func__);
				if (malloc_src_ring) {
					qdf_mem_free(CE_state->src_ring);
					CE_state->src_ring = NULL;
					malloc_src_ring = false;
				}
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					scn->ce_id_to_state[CE_id] = NULL;
					qdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			}
			qdf_mem_zero(ptr, CE_nbytes);

			dest_ring = CE_state->dest_ring =
				(struct CE_ring_state *)ptr;
			ptr += sizeof(struct CE_ring_state);
			dest_ring->nentries = nentries;
			dest_ring->nentries_mask = nentries - 1;
			if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
				goto error_target_access;
			dest_ring->sw_index =
				CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn,
						ctrl_addr);
			dest_ring->write_index =
				CE_DEST_RING_WRITE_IDX_GET_FROM_REGISTER(scn,
						ctrl_addr);

			ce_ring_test_initial_indexes(CE_id, dest_ring,
						     "dest_ring");

			if (Q_TARGET_ACCESS_END(scn) < 0)
				goto error_target_access;

			dest_ring->low_water_mark_nentries = 0;
			dest_ring->high_water_mark_nentries = nentries;
			dest_ring->per_transfer_context = (void **)ptr;

			/* Legacy platforms that do not support cache
			 * coherent DMA are unsupported */
			dest_ring->base_addr_owner_space_unaligned =
				qdf_mem_alloc_consistent(scn->qdf_dev,
						scn->qdf_dev->dev,
						(nentries *
						 sizeof(struct CE_dest_desc) +
						 CE_DESC_RING_ALIGN),
						&base_addr);
			if (dest_ring->base_addr_owner_space_unaligned
			    == NULL) {
				HIF_ERROR("%s: dest ring has no DMA mem",
					  __func__);
				goto error_no_dma_mem;
			}
			dest_ring->base_addr_CE_space_unaligned = base_addr;

			/* Correctly initialize memory to 0 to
			 * prevent garbage data crashing system
			 * when download firmware
			 */
			qdf_mem_zero(dest_ring->base_addr_owner_space_unaligned,
				     nentries * sizeof(struct CE_dest_desc) +
				     CE_DESC_RING_ALIGN);

			if (dest_ring->base_addr_CE_space_unaligned &
			    (CE_DESC_RING_ALIGN - 1)) {
				dest_ring->base_addr_CE_space =
					(dest_ring->
					 base_addr_CE_space_unaligned +
					 CE_DESC_RING_ALIGN - 1)
					& ~(CE_DESC_RING_ALIGN - 1);

				dest_ring->base_addr_owner_space =
					(void *)(((size_t)dest_ring->
					 base_addr_owner_space_unaligned +
					 CE_DESC_RING_ALIGN - 1)
					 & ~(CE_DESC_RING_ALIGN - 1));
			} else {
				dest_ring->base_addr_CE_space =
					dest_ring->base_addr_CE_space_unaligned;
				dest_ring->base_addr_owner_space =
					dest_ring->
					base_addr_owner_space_unaligned;
			}

			if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
				goto error_target_access;
			dma_addr = dest_ring->base_addr_CE_space;
			CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
				(uint32_t)(dma_addr & 0xFFFFFFFF));

			/* if DR_BA_ADDRESS_HIGH exists */
			if (DR_BA_ADDRESS_HIGH) {
				uint32_t tmp;

				tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn,
						ctrl_addr);
				tmp &= ~0x1F;
				dma_addr = ((dma_addr >> 32) & 0x1F) | tmp;
				CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
					ctrl_addr, (uint32_t)dma_addr);
			}

			CE_DEST_RING_SZ_SET(scn, ctrl_addr, nentries);
#ifdef BIG_ENDIAN_HOST
			/* Enable Dest ring byte swap for big endian host */
			CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
			CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
			CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
			if (Q_TARGET_ACCESS_END(scn) < 0)
				goto error_target_access;

			/* epping */
			/* poll timer */
			if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL)) {
				qdf_timer_init(scn->qdf_dev,
					       &CE_state->poll_timer,
					       ce_poll_timeout,
					       CE_state,
					       QDF_TIMER_TYPE_SW);
				CE_state->timer_inited = true;
				qdf_timer_mod(&CE_state->poll_timer,
					      CE_POLL_TIMEOUT);
			}
		}
	}

	/* Enable CE error interrupts */
	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		goto error_target_access;
	CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
	if (Q_TARGET_ACCESS_END(scn) < 0)
		goto error_target_access;

	/* update the htt_data attribute */
	ce_mark_datapath(CE_state);

	return (struct CE_handle *)CE_state;

error_target_access:
error_no_dma_mem:
	ce_fini((struct CE_handle *)CE_state);
	return NULL;
}
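
/*
 * Usage sketch (illustrative only; real callers derive the attributes from
 * host_ce_config rather than hard-coding them):
 *
 *	struct CE_attr attr = host_ce_config[CE_id];
 *	struct CE_handle *ce_hdl = ce_init(scn, CE_id, &attr);
 *
 *	if (ce_hdl == NULL)
 *		return;		// ce_init() already cleaned up partial state
 *
 * A second call for the same CE_id with attr == NULL simply returns the
 * existing handle.
 */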

#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_enable_fastpath() - flag that fastpath mode is enabled
 * @hif_ctx: HIF context
 *
 * For use in data path
 *
 * Return: void
 */
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	HIF_INFO("Enabling fastpath mode\n");
	scn->fastpath_mode_on = true;
}

/**
 * hif_is_fastpath_mode_enabled() - API to query if fastpath mode is enabled
 * @hif_ctx: HIF Context
 *
 * For use in data path to skip HTC
 *
 * Return: bool
 */
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->fastpath_mode_on;
}

/**
 * hif_get_ce_handle() - API to get CE handle for FastPath mode
 * @hif_ctx: HIF Context
 * @id: CopyEngine Id
 *
 * API to return the CE handle for fastpath mode
 *
 * Return: opaque CE handle (a struct CE_state pointer) for the given id
 */
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->ce_id_to_state[id];
}
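
/*
 * Fastpath usage sketch (illustrative only): a datapath client would
 * typically enable fastpath before the CEs are brought up and then look
 * up the HTT tx CE handle directly, e.g.
 *
 *	hif_enable_fastpath(hif_ctx);
 *	...
 *	if (hif_is_fastpath_mode_enabled(hif_ctx))
 *		ce_tx_hdl = hif_get_ce_handle(hif_ctx, ce_id);
 *
 * where ce_id would come from the service-to-CE maps above (e.g. CE 4 for
 * the HTT_DATA_MSG_SVC uplink in target_service_to_ce_map_wlan).
 */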

/**
 * ce_h2t_tx_ce_cleanup() - Place holder function for H2T CE cleanup.
 * @ce_hdl: Copy engine handle
 *
 * No processing is required inside this function; using an assert, it
 * makes sure that the TX CE has been processed completely.
 *
 * This is called while dismantling CE structures. No other thread
 * should be using these structures while dismantling is occurring,
 * therefore no locking is needed.
 *
 * Return: none
 */
void
ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct hif_softc *sc = ce_state->scn;
	uint32_t sw_index, write_index;

	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
		HIF_INFO("%s %d Fastpath mode ON, Cleaning up HTT Tx CE\n",
			 __func__, __LINE__);
		sw_index = src_ring->sw_index;
		write_index = src_ring->write_index;

		/* At this point Tx CE should be clean */
		qdf_assert_always(sw_index == write_index);
	}
}

/**
 * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
 * @ce_hdl: Handle to CE
 *
 * These buffers are never allocated on the fly, but are allocated only
 * once during HIF start and freed only once during HIF stop.
 * NOTE:
 * The assumption here is that there is no in-flight DMA in progress
 * currently, so that buffers can be freed up safely.
 *
 * Return: NONE
 */
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *dst_ring = ce_state->dest_ring;
	qdf_nbuf_t nbuf;
	int i;

	if (!ce_state->fastpath_handler)
		return;
	/*
	 * When fastpath_mode is on, and for datapath CEs: unlike other CEs,
	 * this CE is completely full (it does not leave one blank space to
	 * distinguish between an empty queue and a full queue), so free all
	 * the entries.
	 */
	for (i = 0; i < dst_ring->nentries; i++) {
		nbuf = dst_ring->per_transfer_context[i];

		/*
		 * The reasons for doing this check are:
		 * 1) Protect against calling cleanup before allocating buffers
		 * 2) In a corner case, fastpath_mode_on may be set, but we
		 *    could have a partially filled ring, because of a memory
		 *    allocation failure in the middle of allocating the ring.
		 *    This check accounts for that case; checking the
		 *    fastpath_mode_on flag or the started flag would not have
		 *    covered that case. This is not in the performance path,
		 *    so it is OK to do this.
		 */
		if (nbuf)
			qdf_nbuf_free(nbuf);
	}
}

/**
 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
 * @scn: HIF handle
 *
 * Datapath Rx CEs are a special case, where we reuse all the message buffers.
 * Hence we have to post all the entries in the pipe, even at the beginning,
 * unlike for other CE pipes where one less than dest_nentries are filled in
 * the beginning.
 *
 * Return: None
 */
static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
	int pipe_num;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (scn->fastpath_mode_on == false)
		return;

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info =
			&hif_state->pipe_info[pipe_num];
		struct CE_state *ce_state =
			scn->ce_id_to_state[pipe_info->pipe_num];

		if (ce_state->htt_rx_data)
			atomic_inc(&pipe_info->recv_bufs_needed);
	}
}
#else
static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
}

static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return false;
}

static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
{
	return false;
}
#endif /* WLAN_FEATURE_FASTPATH */

void ce_fini(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int CE_id = CE_state->id;
	struct hif_softc *scn = CE_state->scn;

	CE_state->state = CE_UNUSED;
	scn->ce_id_to_state[CE_id] = NULL;
	if (CE_state->src_ring) {
		/* Cleanup the datapath Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		if (CE_state->src_ring->shadow_base_unaligned)
			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
		if (CE_state->src_ring->base_addr_owner_space_unaligned)
			qdf_mem_free_consistent(scn->qdf_dev,
					scn->qdf_dev->dev,
					(CE_state->src_ring->nentries *
					 sizeof(struct CE_src_desc) +
					 CE_DESC_RING_ALIGN),
					CE_state->src_ring->
					base_addr_owner_space_unaligned,
					CE_state->src_ring->
					base_addr_CE_space, 0);
		qdf_mem_free(CE_state->src_ring);
	}
	if (CE_state->dest_ring) {
		/* Cleanup the datapath Rx ring */
		ce_t2h_msg_ce_cleanup(copyeng);

		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
			qdf_mem_free_consistent(scn->qdf_dev,
					scn->qdf_dev->dev,
					(CE_state->dest_ring->nentries *
					 sizeof(struct CE_dest_desc) +
					 CE_DESC_RING_ALIGN),
					CE_state->dest_ring->
					base_addr_owner_space_unaligned,
					CE_state->dest_ring->
					base_addr_CE_space, 0);
		qdf_mem_free(CE_state->dest_ring);

		/* epping */
		if (CE_state->timer_inited) {
			CE_state->timer_inited = false;
			qdf_timer_free(&CE_state->poll_timer);
		}
	}
	qdf_mem_free(CE_state);
}

void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	qdf_mem_zero(&hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
	qdf_mem_zero(&hif_state->msg_callbacks_current,
		     sizeof(hif_state->msg_callbacks_current));
}

/* Send the first nbytes bytes of the buffer */
QDF_STATUS
hif_send_head(struct hif_opaque_softc *hif_ctx,
	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
	      qdf_nbuf_t nbuf, unsigned int data_attr)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	int bytes = nbytes, nfrags = 0;
	struct ce_sendlist sendlist;
	int status, i = 0;
	unsigned int mux_id = 0;

	QDF_ASSERT(nbytes <= qdf_nbuf_len(nbuf));

	transfer_id =
		(mux_id & MUX_ID_MASK) |
		(transfer_id & TRANSACTION_ID_MASK);
	data_attr &= DESC_DATA_FLAG_MASK;
	/*
	 * The common case involves sending multiple fragments within a
	 * single download (the tx descriptor and the tx frame header).
	 * So, optimize for the case of multiple fragments by not even
	 * checking whether it's necessary to use a sendlist.
	 * The overhead of using a sendlist for a single buffer download
	 * is not a big deal, since it happens rarely (for WMI messages).
	 */
	ce_sendlist_init(&sendlist);
	do {
		qdf_dma_addr_t frag_paddr;
		int frag_bytes;

		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
		/*
		 * Clear the packet offset for all but the first CE desc.
		 */
		if (i++ > 0)
			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;

		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
					     frag_bytes >
					     bytes ? bytes : frag_bytes,
					     qdf_nbuf_get_frag_is_wordstream
					     (nbuf,
					      nfrags) ? 0 :
					     CE_SEND_FLAG_SWAP_DISABLE,
					     data_attr);
		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("%s: error, frag_num %d larger than limit",
				  __func__, nfrags);
			return status;
		}
		bytes -= frag_bytes;
		nfrags++;
	} while (bytes > 0);

	/* Make sure we have resources to handle this request */
	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	if (pipe_info->num_sends_allowed < nfrags) {
		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
		return QDF_STATUS_E_RESOURCES;
	}
	pipe_info->num_sends_allowed -= nfrags;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);

	if (qdf_unlikely(ce_hdl == NULL)) {
		HIF_ERROR("%s: error CE handle is null", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
			     (uint8_t *)(qdf_nbuf_data(nbuf)),
			     sizeof(qdf_nbuf_data(nbuf))));
	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
	QDF_ASSERT(status == QDF_STATUS_SUCCESS);

	return status;
}
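
/*
 * Send-path sketch (illustrative; the variable names are assumptions for
 * the example, not part of the API): a caller such as HTC DMA-maps the
 * nbuf and hands it in, and the fragment loop above turns the tx
 * descriptor + frame header into a two-entry sendlist:
 *
 *	status = hif_send_head(hif_ctx, pipe, transfer_id,
 *			       qdf_nbuf_len(nbuf), nbuf, data_attr);
 *	if (status != QDF_STATUS_SUCCESS)
 *		... requeue or free; no send slots were consumed ...
 */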

void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
			     int force)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance. If there seem to be plenty of
		 * resources left, then just wait, since checking involves
		 * reading a CE register, which is a relatively expensive
		 * operation.
		 */
		resources = hif_get_free_queue_number(hif_ctx, pipe);
		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config[pipe].src_nentries >> 1))
			return;
	}
#if ATH_11AC_TXCOMPACT
	ce_per_engine_servicereap(scn, pipe);
#else
	ce_per_engine_service(scn, pipe);
#endif
}
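
/*
 * Example of the 50% heuristic above (illustrative): with
 * host_ce_config[pipe].src_nentries == 32, completion reaping is skipped
 * while hif_get_free_queue_number() reports more than 16 free send slots;
 * only once more than half the slots are in flight is the (relatively
 * expensive) CE register read and reap performed.
 */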

uint16_t
hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	uint16_t rv;

	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	rv = pipe_info->num_sends_allowed;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
	return rv;
}

/* Called by lower (CE) layer when a send to Target completes. */
void
hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int sw_index, unsigned int hw_index,
		     unsigned int toeplitz_hash_result)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	unsigned int sw_idx = sw_index, hw_idx = hw_index;
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	do {
		/*
		 * The upper layer callback will be triggered
		 * when the last fragment is completed.
		 */
		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
			if (scn->target_status == TARGET_STATUS_RESET)
				qdf_nbuf_free(transfer_context);
			else
				msg_callbacks->txCompletionHandler(
					msg_callbacks->Context,
					transfer_context, transfer_id,
					toeplitz_hash_result);
		}

		qdf_spin_lock(&pipe_info->completion_freeq_lock);
		pipe_info->num_sends_allowed++;
		qdf_spin_unlock(&pipe_info->completion_freeq_lock);
	} while (ce_completed_send_next(copyeng,
			&ce_context, &transfer_context,
			&CE_data, &nbytes, &transfer_id,
			&sw_idx, &hw_idx,
			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
}

/**
 * hif_ce_do_recv(): send message from copy engine to upper layers
 * @msg_callbacks: structure containing callback and callback context
 * @netbuf: skb containing message
 * @nbytes: number of bytes in the message
 * @pipe_info: used for the pipe_number info
 *
 * Checks the packet length, configures the length in the netbuf,
 * and calls the upper layer callback.
 *
 * Return: None
 */
static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
				  qdf_nbuf_t netbuf, int nbytes,
				  struct HIF_CE_pipe_info *pipe_info)
{
	if (nbytes <= pipe_info->buf_sz) {
		qdf_nbuf_set_pktlen(netbuf, nbytes);
		msg_callbacks->
			rxCompletionHandler(msg_callbacks->Context,
					    netbuf, pipe_info->pipe_num);
	} else {
		HIF_ERROR("%s: Invalid Rx msg buf:%p nbytes:%d",
			  __func__, netbuf, nbytes);
		qdf_nbuf_free(netbuf);
	}
}

/* Called by lower (CE) layer when data is received from the Target. */
void
hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int flags)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct CE_state *ce_state = (struct CE_state *) copyeng;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
#ifdef HIF_PCI
	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
#endif
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;
	uint32_t count;

	do {
#ifdef HIF_PCI
		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
#endif
		qdf_nbuf_unmap_single(scn->qdf_dev,
				      (qdf_nbuf_t) transfer_context,
				      QDF_DMA_FROM_DEVICE);

		atomic_inc(&pipe_info->recv_bufs_needed);
		hif_post_recv_buffers_for_pipe(pipe_info);
		if (scn->target_status == TARGET_STATUS_RESET)
			qdf_nbuf_free(transfer_context);
		else
			hif_ce_do_recv(msg_callbacks, transfer_context,
				       nbytes, pipe_info);

		/*
		 * Set up the force_break flag if the number of receives
		 * reaches MAX_NUM_OF_RECEIVES.
		 */
		ce_state->receive_count++;
		count = ce_state->receive_count;
		if (qdf_unlikely(hif_max_num_receives_reached(scn, count))) {
			ce_state->force_break = 1;
			break;
		}
	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
					&CE_data, &nbytes, &transfer_id,
					&flags) == QDF_STATUS_SUCCESS);
}
1378
1379/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
1380
1381void
Komal Seelam5584a7c2016-02-24 19:22:48 +05301382hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001383 struct hif_msg_callbacks *callbacks)
1384{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301385 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001386
1387#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
1388 spin_lock_init(&pcie_access_log_lock);
1389#endif
1390 /* Save callbacks for later installation */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301391 qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001392 sizeof(hif_state->msg_callbacks_pending));
1393
1394}
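
/*
 * Usage sketch (illustrative only): a caller that owns HTC-style
 * completion handlers would stash them here before hif_start()
 * installs them. The handler and context names are hypothetical.
 *
 *	struct hif_msg_callbacks cbs = {
 *		.Context = my_ctx,			// hypothetical
 *		.rxCompletionHandler = my_rx_done,	// hypothetical
 *		.txCompletionHandler = my_tx_done,	// hypothetical
 *	};
 *	hif_post_init(hif_ctx, NULL, &cbs);
 */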

int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
{
	struct CE_handle *ce_diag = hif_state->ce_diag;
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	struct hif_msg_callbacks *hif_msg_callbacks =
		&hif_state->msg_callbacks_current;

	/* daemonize("hif_compl_thread"); */

	if (scn->ce_count == 0) {
		HIF_ERROR("%s: Invalid ce_count\n", __func__);
		return -EINVAL;
	}

	if (!hif_msg_callbacks ||
	    !hif_msg_callbacks->rxCompletionHandler ||
	    !hif_msg_callbacks->txCompletionHandler) {
		HIF_ERROR("%s: no completion handler registered", __func__);
		return -EFAULT;
	}

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr attr;
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl == ce_diag)
			continue;	/* Handle Diagnostic CE specially */
		attr = host_ce_config[pipe_num];
		if (attr.src_nentries) {
			/* pipe used to send to target */
			HIF_INFO_MED("%s: pipe_num:%d pipe_info:0x%p",
				     __func__, pipe_num, pipe_info);
			ce_send_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_send_done, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
			pipe_info->num_sends_allowed = attr.src_nentries - 1;
		}
		if (attr.dest_nentries) {
			/* pipe used to receive from target */
			ce_recv_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_recv_data, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
		}

		if (attr.src_nentries)
			qdf_spinlock_create(&pipe_info->completion_freeq_lock);
	}

	A_TARGET_ACCESS_UNLIKELY(scn);
	return 0;
}
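
/*
 * Worked example (sketch, derived from the loop above): a pipe whose
 * host_ce_config entry has src_nentries = 32 gets hif_pci_ce_send_done
 * registered and num_sends_allowed = 31; one source ring entry is kept
 * in reserve so a full ring can be told apart from an empty one.
 */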

/*
 * Install pending msg callbacks.
 *
 * TBDXXX: This hack is needed because upper layers install msg callbacks
 * for use with HTC before BMI is done; yet this HIF implementation
 * needs to continue to use BMI msg callbacks. Really, upper layers
 * should not register HTC callbacks until AFTER BMI phase.
 */
static void hif_msg_callbacks_install(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	qdf_mem_copy(&hif_state->msg_callbacks_current,
		     &hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
}

void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
			  uint8_t *DLPipe)
{
	int ul_is_polled, dl_is_polled;

	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
}

/**
 * hif_dump_pipe_debug_count() - Log error count
 * @scn: hif_softc pointer.
 *
 * Output the pipe error counts of each pipe to log file
 *
 * Return: N/A
 */
void hif_dump_pipe_debug_count(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num;

	if (hif_state == NULL) {
		HIF_ERROR("%s hif_state is NULL", __func__);
		return;
	}
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];

		if (pipe_info->nbuf_alloc_err_count > 0 ||
		    pipe_info->nbuf_dma_err_count > 0 ||
		    pipe_info->nbuf_ce_enqueue_err_count)
			HIF_ERROR(
				"%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_alloc_err_count,
				pipe_info->nbuf_dma_err_count,
				pipe_info->nbuf_ce_enqueue_err_count);
	}
}

static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	qdf_size_t buf_sz;
	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
	QDF_STATUS ret;
	uint32_t bufs_posted = 0;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return 0;
	}

	ce_hdl = pipe_info->ce_hdl;

	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
		qdf_dma_addr_t CE_data;	/* CE space buffer address */
		qdf_nbuf_t nbuf;
		int status;

		atomic_dec(&pipe_info->recv_bufs_needed);
		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
		if (!nbuf) {
			qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
			pipe_info->nbuf_alloc_err_count++;
			qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
			HIF_ERROR(
				"%s buf alloc error [%d] needed %d, nbuf_alloc_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_alloc_err_count);
			atomic_inc(&pipe_info->recv_bufs_needed);
			return 1;
		}

		/*
		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
		 * CE_data = dma_map_single(dev, data, buf_sz,
		 *			    DMA_FROM_DEVICE);
		 */
		ret = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
					  QDF_DMA_FROM_DEVICE);

		if (unlikely(ret != QDF_STATUS_SUCCESS)) {
			qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
			pipe_info->nbuf_dma_err_count++;
			qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
			HIF_ERROR(
				"%s buf map error [%d] needed %d, nbuf_dma_err_count = %u",
1568 __func__, pipe_info->pipe_num,
1569 atomic_read(&pipe_info->recv_bufs_needed),
1570 pipe_info->nbuf_dma_err_count);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301571 qdf_nbuf_free(nbuf);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001572 atomic_inc(&pipe_info->recv_bufs_needed);
1573 return 1;
1574 }
1575
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301576 CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001577
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301578 qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001579 buf_sz, DMA_FROM_DEVICE);
1580 status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301581 QDF_ASSERT(status == QDF_STATUS_SUCCESS);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001582 if (status != EOK) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301583 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001584 pipe_info->nbuf_ce_enqueue_err_count++;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301585 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001586 HIF_ERROR(
				"%s buf enqueue error [%d] needed %d, nbuf_ce_enqueue_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_ce_enqueue_err_count);
			atomic_inc(&pipe_info->recv_bufs_needed);
			qdf_nbuf_free(nbuf);
			return 1;
		}

		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
		bufs_posted++;
	}
	pipe_info->nbuf_alloc_err_count =
		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
	pipe_info->nbuf_dma_err_count =
		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
	pipe_info->nbuf_ce_enqueue_err_count =
		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
		pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;

	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

	return 0;
}
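
/*
 * Replenish pattern in brief (sketch of the function above): each
 * buffer goes alloc -> DMA map -> ring enqueue, and any failure
 * re-increments recv_bufs_needed so the deficit survives for a retry:
 *
 *	atomic_inc(&pipe_info->recv_bufs_needed);
 *	if (hif_post_recv_buffers_for_pipe(pipe_info))
 *		;	// nonzero: a buffer could not be posted; the
 *			// need stays recorded for the next attempt
 */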

/*
 * Try to post all desired receive buffers for all pipes.
 * Returns 0 if all desired buffers are posted,
 * non-zero if we were unable to completely
 * replenish receive buffers.
 */
static int hif_post_recv_buffers(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num, rv = 0;

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (hif_post_recv_buffers_for_pipe(pipe_info)) {
			rv = 1;
			goto done;
		}
	}

done:
	A_TARGET_ACCESS_UNLIKELY(scn);

	return rv;
}

QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_update_fastpath_recv_bufs_cnt(scn);

	hif_msg_callbacks_install(scn);

	if (hif_completion_thread_startup(hif_state))
		return QDF_STATUS_E_FAILURE;

	/* Post buffers once to start things off. */
	(void)hif_post_recv_buffers(scn);

	hif_state->started = true;

	return QDF_STATUS_SUCCESS;
}
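
/*
 * Bring-up ordering sketch (illustrative; bus-layer glue elided):
 *
 *	hif_post_init(hif_ctx, NULL, &cbs);	// queue the callbacks
 *	if (hif_start(hif_ctx) != QDF_STATUS_SUCCESS)
 *		;	// handle failure (hypothetical caller policy)
 *	// on success, callbacks are installed and rx buffers posted
 */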

void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct hif_softc *scn;
	struct CE_handle *ce_hdl;
	uint32_t buf_sz;
	struct HIF_CE_state *hif_state;
	qdf_nbuf_t netbuf;
	qdf_dma_addr_t CE_data;
	void *per_CE_context;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started)
		return;

	scn = HIF_GET_SOFTC(hif_state);
	ce_hdl = pipe_info->ce_hdl;

	if (scn->qdf_dev == NULL)
		return;
	while (ce_revoke_recv_next
		       (ce_hdl, &per_CE_context, (void **)&netbuf,
			&CE_data) == QDF_STATUS_SUCCESS) {
		qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
				      QDF_DMA_FROM_DEVICE);
		qdf_nbuf_free(netbuf);
	}
}

void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	struct HIF_CE_state *hif_state;
	struct hif_softc *scn;
	qdf_nbuf_t netbuf;
	void *per_CE_context;
	qdf_dma_addr_t CE_data;
	unsigned int nbytes;
	unsigned int id;
	uint32_t buf_sz;
	uint32_t toeplitz_hash_result;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started)
		return;

	scn = HIF_GET_SOFTC(hif_state);

	ce_hdl = pipe_info->ce_hdl;

	while (ce_cancel_send_next
		       (ce_hdl, &per_CE_context,
			(void **)&netbuf, &CE_data, &nbytes,
			&id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
			/*
			 * Packets enqueued by htt_h2t_ver_req_msg() and
			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
			 * freed in htt_htc_misc_pkt_pool_free() in
			 * wlantl_close(), so do not free them here again
			 * by checking whether it's the endpoint
			 * which they are queued in.
			 */
			if (id == scn->htc_htt_tx_endpoint)
				return;
			/* Indicate the completion to higher
			 * layer to free the buffer */
			hif_state->msg_callbacks_current.
				txCompletionHandler(hif_state->
					msg_callbacks_current.Context,
					netbuf, id, toeplitz_hash_result);
		}
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
{
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		hif_recv_buffer_cleanup_on_pipe(pipe_info);
		hif_send_buffer_cleanup_on_pipe(pipe_info);
	}
}

void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_buffer_cleanup(hif_state);
}

void hif_stop(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	int pipe_num;

	scn->hif_init_done = false;

	/*
	 * At this point, asynchronous threads are stopped; the Target
	 * should not DMA nor interrupt. Host code may not initiate
	 * anything more, so we just need to clean up Host-side state.
	 */

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	hif_buffer_cleanup(hif_state);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}

	if (hif_state->sleep_timer_init) {
		qdf_timer_stop(&hif_state->sleep_timer);
		qdf_timer_free(&hif_state->sleep_timer);
		hif_state->sleep_timer_init = false;
	}

	hif_state->started = false;
}
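
/*
 * Teardown ordering sketch (illustrative): hif_stop() is the inverse
 * of hif_start() plus CE shutdown; buffers are revoked before each
 * pipe's ce_fini(), so a bus layer simply does:
 *
 *	hif_stop(hif_ctx);
 *	// no CE access is legal past this point
 */
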
1822
Houston Hoffman854e67f2016-03-14 21:11:39 -07001823/**
1824 * hif_get_target_ce_config() - get copy engine configuration
1825 * @target_ce_config_ret: basic copy engine configuration
1826 * @target_ce_config_sz_ret: size of the basic configuration in bytes
1827 * @target_service_to_ce_map_ret: service mapping for the copy engines
1828 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
1829 * @target_shadow_reg_cfg_ret: shadow register configuration
1830 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
1831 *
1832 * providing accessor to these values outside of this file.
1833 * currently these are stored in static pointers to const sections.
1834 * there are multiple configurations that are selected from at compile time.
1835 * Runtime selection would need to consider mode, target type and bus type.
1836 *
1837 * Return: return by parameter.
1838 */
void hif_get_target_ce_config(struct CE_pipe_config **target_ce_config_ret,
		int *target_ce_config_sz_ret,
		struct service_to_pipe **target_service_to_ce_map_ret,
		int *target_service_to_ce_map_sz_ret,
		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
		int *shadow_cfg_sz_ret)
{
	*target_ce_config_ret = target_ce_config;
	*target_ce_config_sz_ret = target_ce_config_sz;
	*target_service_to_ce_map_ret = target_service_to_ce_map;
	*target_service_to_ce_map_sz_ret = target_service_to_ce_map_sz;

	if (target_shadow_reg_cfg_ret)
		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;

	if (shadow_cfg_sz_ret)
		*shadow_cfg_sz_ret = shadow_cfg_sz;
}

/**
 * hif_wlan_enable(): call the platform driver to enable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode and CE configuration to
 * platform driver to enable wlan.
 *
 * Return: linux error code
 */
int hif_wlan_enable(struct hif_softc *scn)
{
	struct icnss_wlan_enable_cfg cfg;
	enum icnss_driver_mode mode;
	uint32_t con_mode = hif_get_conparam(scn);

	hif_get_target_ce_config((struct CE_pipe_config **)&cfg.ce_tgt_cfg,
				 &cfg.num_ce_tgt_cfg,
				 (struct service_to_pipe **)&cfg.ce_svc_cfg,
				 &cfg.num_ce_svc_pipe_cfg,
				 (struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
				 &cfg.num_shadow_reg_cfg);

	/* translate from structure size to array size */
	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);

	if (QDF_GLOBAL_FTM_MODE == con_mode)
		mode = ICNSS_FTM;
	else if (QDF_IS_EPPING_ENABLED(con_mode))
		mode = ICNSS_EPPING;
	else
		mode = ICNSS_MISSION;

	if (BYPASS_QMI)
		return 0;
	else
		return icnss_wlan_enable(&cfg, mode, QWLAN_VERSIONSTR);
}
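
/*
 * Worked example for the size-to-count translation above (numbers are
 * illustrative): if target_ce_config_sz is 12 * sizeof(struct
 * CE_pipe_config), the division leaves cfg.num_ce_tgt_cfg == 12, i.e.
 * icnss_wlan_enable() receives entry counts rather than byte counts.
 */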

#define CE_EPPING_USES_IRQ true

/**
 * hif_ce_prepare_config() - load the correct static tables.
 * @scn: hif context
 *
 * Epping uses different static attribute tables than mission mode.
 */
void hif_ce_prepare_config(struct hif_softc *scn)
{
	uint32_t mode = hif_get_conparam(scn);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);

	/* if epping is enabled we need to use the epping configuration. */
	if (QDF_IS_EPPING_ENABLED(mode)) {
		if (CE_EPPING_USES_IRQ)
			host_ce_config = host_ce_config_wlan_epping_irq;
		else
			host_ce_config = host_ce_config_wlan_epping_poll;
		target_ce_config = target_ce_config_wlan_epping;
		target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
		target_service_to_ce_map =
			target_service_to_ce_map_wlan_epping;
		target_service_to_ce_map_sz =
			sizeof(target_service_to_ce_map_wlan_epping);
		target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
		shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
	}

	switch (tgt_info->target_type) {
	default:
		break;
	case TARGET_TYPE_AR900B:
	case TARGET_TYPE_QCA9984:
	case TARGET_TYPE_IPQ4019:
	case TARGET_TYPE_QCA9888:
		host_ce_config = host_ce_config_wlan_ar900b;
		target_ce_config = target_ce_config_wlan_ar900b;
		target_ce_config_sz = sizeof(target_ce_config_wlan_ar900b);

		target_service_to_ce_map = target_service_to_ce_map_ar900b;
		target_service_to_ce_map_sz =
			sizeof(target_service_to_ce_map_ar900b);
		break;

	case TARGET_TYPE_AR9888:
	case TARGET_TYPE_AR9888V2:
		host_ce_config = host_ce_config_wlan_ar9888;
		target_ce_config = target_ce_config_wlan_ar9888;
		target_ce_config_sz = sizeof(target_ce_config_wlan_ar9888);

		target_service_to_ce_map = target_service_to_ce_map_ar900b;
		target_service_to_ce_map_sz =
			sizeof(target_service_to_ce_map_ar900b);
		break;
	}
}
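
/*
 * Extension sketch (hypothetical): supporting another target type
 * means adding a case with its own static tables; the FOO names below
 * do not exist in this tree.
 *
 *	case TARGET_TYPE_FOO:
 *		host_ce_config = host_ce_config_wlan_foo;
 *		target_ce_config = target_ce_config_wlan_foo;
 *		target_ce_config_sz = sizeof(target_ce_config_wlan_foo);
 *		break;
 */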

/**
 * hif_ce_open() - do ce specific allocations
 * @hif_sc: pointer to hif context
 *
 * Return: 0 for success or QDF_STATUS_E_NOMEM
 */
QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	qdf_spinlock_create(&hif_state->keep_awake_lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * hif_ce_close() - do ce specific free
 * @hif_sc: pointer to hif context
 */
void hif_ce_close(struct hif_softc *hif_sc)
{
}

/**
 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
 * @hif_sc: hif context
 *
 * Uses state variables to support cleaning up when hif_config_ce fails.
 */
void hif_unconfig_ce(struct hif_softc *hif_sc)
{
	int pipe_num;
	struct HIF_CE_pipe_info *pipe_info;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ce_unregister_irq(hif_state, (1 << pipe_num));
			hif_sc->request_irq_done = false;
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}
	if (hif_sc->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		hif_sc->athdiag_procfs_inited = false;
	}
}

#ifdef CONFIG_BYPASS_QMI
#define FW_SHARED_MEM (2 * 1024 * 1024)

/**
 * hif_post_static_buf_to_target() - post static buffer to WLAN FW
 * @scn: pointer to HIF structure
 *
 * WLAN FW needs 2MB memory from DDR when QMI is disabled.
 *
 * Return: void
 */
static void hif_post_static_buf_to_target(struct hif_softc *scn)
{
	void *target_va;
	phys_addr_t target_pa;

	target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					     FW_SHARED_MEM, &target_pa);
	if (NULL == target_va) {
		HIF_TRACE("Memory allocation failed could not post target buf");
		return;
	}
	hif_write32_mb(scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
	HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
}
#else
static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
{
	return;
}
#endif

/**
 * hif_config_ce() - configure copy engines
 * @scn: hif context
 *
 * Prepares fw, copy engine hardware and host sw according
 * to the attributes selected by hif_ce_prepare_config.
 *
 * Also calls athdiag_procfs_init.
 *
 * Return: 0 for success, nonzero for failure.
 */
int hif_config_ce(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct HIF_CE_pipe_info *pipe_info;
	int pipe_num;
#ifdef ADRASTEA_SHADOW_REGISTERS
	int i;
#endif
	QDF_STATUS rv = QDF_STATUS_SUCCESS;

	scn->notice_send = true;

	hif_post_static_buf_to_target(scn);

	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;

	hif_config_rri_on_ddr(scn);

	/* During CE initialization */
	scn->ce_count = HOST_CE_COUNT;
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr *attr;

		pipe_info = &hif_state->pipe_info[pipe_num];
		pipe_info->pipe_num = pipe_num;
		pipe_info->HIF_CE_state = hif_state;
		attr = &host_ce_config[pipe_num];
		pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
		QDF_ASSERT(pipe_info->ce_hdl != NULL);
		if (pipe_info->ce_hdl == NULL) {
			rv = QDF_STATUS_E_FAILURE;
			A_TARGET_ACCESS_UNLIKELY(scn);
			goto err;
		}

		if (pipe_num == DIAG_CE_ID) {
			/* Reserve the ultimate CE for
			 * Diagnostic Window support */
			hif_state->ce_diag = pipe_info->ce_hdl;
			continue;
		}

		pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
		qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
		if (attr->dest_nentries > 0) {
			atomic_set(&pipe_info->recv_bufs_needed,
				   init_buffer_count(attr->dest_nentries - 1));
		} else {
			atomic_set(&pipe_info->recv_bufs_needed, 0);
		}
		ce_tasklet_init(hif_state, (1 << pipe_num));
		ce_register_irq(hif_state, (1 << pipe_num));
		scn->request_irq_done = true;
	}

	if (athdiag_procfs_init(scn) != 0) {
		A_TARGET_ACCESS_UNLIKELY(scn);
		goto err;
	}
	scn->athdiag_procfs_inited = true;

	HIF_INFO_MED("%s: ce_init done", __func__);

	init_tasklet_workers(hif_hdl);

	HIF_TRACE("%s: X, ret = %d\n", __func__, rv);

#ifdef ADRASTEA_SHADOW_REGISTERS
	HIF_ERROR("Using Shadow Registers instead of CE Registers\n");
	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
		HIF_ERROR("%s Shadow Register%d is mapped to address %x\n",
			  __func__, i,
			  (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
	}
#endif

	return rv != QDF_STATUS_SUCCESS;

err:
	/* Failure, so clean up */
	hif_unconfig_ce(scn);
	HIF_TRACE("%s: X, ret = %d\n", __func__, rv);
	return 1;	/* nonzero indicates failure */
}
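
/*
 * Configuration sequence sketch (illustrative): the CE layer is
 * brought up in three steps; on failure hif_config_ce() has already
 * unwound itself through hif_unconfig_ce().
 *
 *	hif_ce_open(scn);		// allocate CE-layer locks/state
 *	hif_ce_prepare_config(scn);	// pick static tables per target
 *	if (hif_config_ce(scn))
 *		;	// nonzero: rings/irqs were cleaned up already
 */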

#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
 * @hif_ctx: HIF opaque context
 * @handler: Callback function
 * @context: handle for callback function
 *
 * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
 */
int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
				fastpath_msg_handler handler,
				void *context)
{
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	int i;

	QDF_ASSERT(scn != NULL);

	if (!scn->fastpath_mode_on) {
		HIF_WARN("Fastpath mode disabled\n");
		return QDF_STATUS_E_FAILURE;
	}

	for (i = 0; i < scn->ce_count; i++) {
		ce_state = scn->ce_id_to_state[i];
		if (ce_state->htt_rx_data) {
			ce_state->fastpath_handler = handler;
			ce_state->context = context;
		}
	}

	return QDF_STATUS_SUCCESS;
}
#endif
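
/*
 * Usage sketch (illustrative): a datapath client registers its rx
 * handler once fastpath mode is on; the handler and pdev names are
 * hypothetical.
 *
 *	if (hif_ce_fastpath_cb_register(hif_ctx, my_htt_rx_fp, pdev))
 *		;	// nonzero status: fastpath mode is disabled
 */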

#ifdef IPA_OFFLOAD
/**
 * hif_ipa_get_ce_resource() - get uc resource on hif
 * @hif_ctx: bus context
 * @ce_sr_base_paddr: copyengine source ring base physical address
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * IPA micro controller data path offload feature enabled,
 * HIF should release copy engine related resource information to IPA UC.
 * IPA UC will access hardware resource with released information.
 *
 * Return: None
 */
void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
			     qdf_dma_addr_t *ce_sr_base_paddr,
			     uint32_t *ce_sr_ring_size,
			     qdf_dma_addr_t *ce_reg_paddr)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;

	ce_ipa_get_resource(ce_hdl, ce_sr_base_paddr, ce_sr_ring_size,
			    ce_reg_paddr);
}
#endif /* IPA_OFFLOAD */

#ifdef ADRASTEA_SHADOW_REGISTERS

/*
 * Current shadow register config
 *
 * -----------------------------------------------------------
 * Shadow Register      |     CE   |    src/dst write index
 * -----------------------------------------------------------
 *         0            |     0    |           src
 *         1     No Config - Doesn't point to anything
 *         2     No Config - Doesn't point to anything
 *         3            |     3    |           src
 *         4            |     4    |           src
 *         5            |     5    |           src
 *         6     No Config - Doesn't point to anything
 *         7            |     7    |           src
 *         8     No Config - Doesn't point to anything
 *         9     No Config - Doesn't point to anything
 *        10     No Config - Doesn't point to anything
 *        11     No Config - Doesn't point to anything
 * -----------------------------------------------------------
 *        12     No Config - Doesn't point to anything
 *        13            |     1    |           dst
 *        14            |     2    |           dst
 *        15     No Config - Doesn't point to anything
 *        16     No Config - Doesn't point to anything
 *        17     No Config - Doesn't point to anything
 *        18     No Config - Doesn't point to anything
 *        19            |     7    |           dst
 *        20            |     8    |           dst
 *        21     No Config - Doesn't point to anything
 *        22     No Config - Doesn't point to anything
 *        23     No Config - Doesn't point to anything
 * -----------------------------------------------------------
 *
 * ToDo - Move shadow register config to following in the future.
 * This helps free up a block of shadow registers towards the end.
 * Can be used for other purposes.
 *
 * -----------------------------------------------------------
 * Shadow Register      |     CE   |    src/dst write index
 * -----------------------------------------------------------
 *         0            |     0    |           src
 *         1            |     3    |           src
 *         2            |     4    |           src
 *         3            |     5    |           src
 *         4            |     7    |           src
 * -----------------------------------------------------------
 *         5            |     1    |           dst
 *         6            |     2    |           dst
 *         7            |     7    |           dst
 *         8            |     8    |           dst
 * -----------------------------------------------------------
 *         9     No Config - Doesn't point to anything
 *        12     No Config - Doesn't point to anything
 *        13     No Config - Doesn't point to anything
 *        14     No Config - Doesn't point to anything
 *        15     No Config - Doesn't point to anything
 *        16     No Config - Doesn't point to anything
 *        17     No Config - Doesn't point to anything
 *        18     No Config - Doesn't point to anything
 *        19     No Config - Doesn't point to anything
 *        20     No Config - Doesn't point to anything
 *        21     No Config - Doesn't point to anything
 *        22     No Config - Doesn't point to anything
 *        23     No Config - Doesn't point to anything
 * -----------------------------------------------------------
 */

u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 0:
		addr = SHADOW_VALUE0;
		break;
	case 3:
		addr = SHADOW_VALUE3;
		break;
	case 4:
		addr = SHADOW_VALUE4;
		break;
	case 5:
		addr = SHADOW_VALUE5;
		break;
	case 7:
		addr = SHADOW_VALUE7;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}
	return addr;
}

u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 1:
		addr = SHADOW_VALUE13;
		break;
	case 2:
		addr = SHADOW_VALUE14;
		break;
	case 5:
		addr = SHADOW_VALUE17;
		break;
	case 7:
		addr = SHADOW_VALUE19;
		break;
	case 8:
		addr = SHADOW_VALUE20;
		break;
	case 9:
		addr = SHADOW_VALUE21;
		break;
	case 10:
		addr = SHADOW_VALUE22;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}

	return addr;
}
#endif

#if defined(FEATURE_LRO)
/**
 * ce_lro_flush_cb_register() - register the LRO flush
 * callback
 * @hif_hdl: HIF context
 * @handler: callback function
 * @data: opaque data pointer to be passed back
 *
 * Store the LRO flush callback provided
 *
 * Return: Number of instances the callback is registered for
 */
int ce_lro_flush_cb_register(struct hif_opaque_softc *hif_hdl,
			     void (handler)(void *), void *data)
{
	int rc = 0;
	int i;
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	QDF_ASSERT(scn != NULL);

	if (scn != NULL) {
		for (i = 0; i < scn->ce_count; i++) {
			ce_state = scn->ce_id_to_state[i];
			if ((ce_state != NULL) && (ce_state->htt_rx_data)) {
				ce_state->lro_flush_cb = handler;
				ce_state->lro_data = data;
				rc++;
			}
		}
	} else {
		HIF_ERROR("%s: hif_state NULL!", __func__);
	}
	return rc;
}

/**
 * ce_lro_flush_cb_deregister() - deregister the LRO flush
 * callback
 * @hif_hdl: HIF context
 *
 * Remove the LRO flush callback
 *
 * Return: Number of instances the callback is de-registered
 */
int ce_lro_flush_cb_deregister(struct hif_opaque_softc *hif_hdl)
{
	int rc = 0;
	int i;
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	QDF_ASSERT(scn != NULL);
	if (scn != NULL) {
		for (i = 0; i < scn->ce_count; i++) {
			ce_state = scn->ce_id_to_state[i];
			if ((ce_state != NULL) && (ce_state->htt_rx_data)) {
				ce_state->lro_flush_cb = NULL;
				ce_state->lro_data = NULL;
				rc++;
			}
		}
	} else {
		HIF_ERROR("%s: hif_state NULL!", __func__);
	}
	return rc;
}
#endif
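
/*
 * Usage sketch (illustrative): an LRO-capable rx layer attaches one
 * flush callback to every HTT rx CE and gets the attach count back;
 * my_lro_flush and lro_ctx are hypothetical.
 *
 *	int n = ce_lro_flush_cb_register(hif_hdl, my_lro_flush, lro_ctx);
 *	...
 *	ce_lro_flush_cb_deregister(hif_hdl);	// detach from the same CEs
 */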

/**
 * hif_map_service_to_pipe() - returns the ce ids pertaining to
 * this service
 * @hif_hdl: hif_opaque_softc pointer.
 * @svc_id: Service ID for which the mapping is needed.
 * @ul_pipe: address of the container in which ul pipe is returned.
 * @dl_pipe: address of the container in which dl pipe is returned.
 * @ul_is_polled: address of the container in which a bool
 *		indicating if the UL CE for this service
 *		is polled is returned.
 * @dl_is_polled: address of the container in which a bool
 *		indicating if the DL CE for this service
 *		is polled is returned.
 *
 * Return: Indicates whether the service has been found in the table.
 *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
 *         There will be warning logs if either leg has not been updated
 *         because it missed the entry in the table (but this is not an err).
 */
int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
			    uint8_t *ul_pipe, uint8_t *dl_pipe,
			    int *ul_is_polled, int *dl_is_polled)
{
	int status = QDF_STATUS_E_INVAL;
	unsigned int i;
	struct service_to_pipe element;
	struct service_to_pipe *tgt_svc_map_to_use;
	size_t sz_tgt_svc_map_to_use;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	uint32_t mode = hif_get_conparam(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
	bool dl_updated = false;
	bool ul_updated = false;

	if (QDF_IS_EPPING_ENABLED(mode)) {
		tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
		sz_tgt_svc_map_to_use =
			sizeof(target_service_to_ce_map_wlan_epping);
	} else {
		switch (tgt_info->target_type) {
		default:
			tgt_svc_map_to_use = target_service_to_ce_map_wlan;
			sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_wlan);
			break;
		case TARGET_TYPE_AR900B:
		case TARGET_TYPE_QCA9984:
		case TARGET_TYPE_IPQ4019:
		case TARGET_TYPE_QCA9888:
		case TARGET_TYPE_AR9888:
		case TARGET_TYPE_AR9888V2:
			tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
			sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_ar900b);
			break;
		}
	}

	*dl_is_polled = 0;  /* polling for received messages not supported */

	for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {

		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
		if (element.service_id == svc_id) {
			if (element.pipedir == PIPEDIR_OUT) {
				*ul_pipe = element.pipenum;
				*ul_is_polled =
					(host_ce_config[*ul_pipe].flags &
					 CE_ATTR_DISABLE_INTR) != 0;
				ul_updated = true;
			} else if (element.pipedir == PIPEDIR_IN) {
				*dl_pipe = element.pipenum;
				dl_updated = true;
			}
			status = QDF_STATUS_SUCCESS;
		}
	}
	if (ul_updated == false)
		HIF_WARN("%s: ul pipe is NOT updated for service %d",
			 __func__, svc_id);
	if (dl_updated == false)
		HIF_WARN("%s: dl pipe is NOT updated for service %d",
			 __func__, svc_id);

	return status;
}
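
/*
 * Usage sketch (illustrative): resolving the control service to its
 * pipes, the same way hif_get_default_pipe() does earlier in this file:
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	if (hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
 *				    &ul_pipe, &dl_pipe,
 *				    &ul_polled, &dl_polled) !=
 *	    QDF_STATUS_SUCCESS)
 *		;	// service missing from the map; warnings logged
 */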

#ifdef SHADOW_REG_DEBUG
inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, srri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);

	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != srri_from_ddr) {
		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x\n",
			  srri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return srri_from_ddr;
}


inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, drri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);

	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != drri_from_ddr) {
		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x\n",
			  drri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return drri_from_ddr;
}

#endif

#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_get_src_ring_read_index(): Called to get the SRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the SRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based SRRI.
 *
 * Return: SRRI
 */
inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;

	attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
	if (attr.flags & CE_ATTR_DISABLE_INTR)
		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	else
		return A_TARGET_READ(scn,
				     (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
}

/**
 * hif_get_dst_ring_read_index(): Called to get the DRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the DRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based DRRI.
 *
 * Return: DRRI
 */
inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;

	attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];

	if (attr.flags & CE_ATTR_DISABLE_INTR)
		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	else
		return A_TARGET_READ(scn,
				     (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
}

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This function allocates non cached memory on ddr and sends
 * the physical address of this memory to the CE hardware. The
 * hardware updates the RRI on this particular location.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
	unsigned int i;
	qdf_dma_addr_t paddr_rri_on_ddr;
	uint32_t high_paddr, low_paddr;

	scn->vaddr_rri_on_ddr =
		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
			scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
			&paddr_rri_on_ddr);

	low_paddr = BITS0_TO_31(paddr_rri_on_ddr);
	high_paddr = BITS32_TO_35(paddr_rri_on_ddr);

	HIF_ERROR("%s using srri and drri from DDR\n", __func__);

	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);

	for (i = 0; i < CE_COUNT; i++)
		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));

	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));
}
#else

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This is a dummy implementation for platforms that don't
 * support this functionality.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
}
#endif

/**
 * hif_dump_ce_registers() - dump ce registers
 * @scn: hif_softc pointer.
 *
 * Output the copy engine registers
 *
 * Return: 0 for success or error code
 */
int hif_dump_ce_registers(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
	uint32_t ce_reg_values[CE_COUNT_MAX][CE_USEFUL_SIZE >> 2];
	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
	uint16_t i;
	QDF_STATUS status;

	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
		if (scn->ce_id_to_state[i] == NULL) {
			HIF_DBG("CE%d not used.", i);
			continue;
		}

		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
					   (uint8_t *) &ce_reg_values[i][0],
					   ce_reg_word_size * sizeof(uint32_t));

		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("Dumping CE register failed!");
			return -EACCES;
		}
		HIF_ERROR("CE%d Registers:", i);
		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
				   (uint8_t *) &ce_reg_values[i][0],
				   ce_reg_word_size * sizeof(uint32_t));
	}
	return 0;
}
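
/*
 * Usage sketch (illustrative): typically called from error-recovery
 * paths to snapshot CE state before a reset attempt:
 *
 *	if (hif_dump_ce_registers(scn))
 *		;	// -EACCES: diag read failed, dump unavailable
 */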