/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */
#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include <bmi_msg.h>
#include "hif_io32.h"
#include <hif.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#ifdef CONFIG_CNSS
#include <net/cnss.h>
#endif
#include "epping_main.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#include "platform_icnss.h"
#include "qwlan_version.h"
#include <cds_api.h>

#define CE_POLL_TIMEOUT 10      /* ms */

/* Forward references */
static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix EV118783: poll for the BMI response instead of relying solely on
 * the completion interrupt, which may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC  1000

#ifdef CONFIG_BYPASS_QMI
#define BYPASS_QMI 1
#else
#define BYPASS_QMI 0
#endif


static int hif_post_recv_buffers(struct hif_softc *scn);
static void hif_config_rri_on_ddr(struct hif_softc *scn);

static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;

	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}

static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	QDF_ASSERT(0); /* n too large */
	return 0;
}
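
/*
 * Example (illustrative): roundup_pwr2(5) returns 8 and roundup_pwr2(64)
 * returns 64. Ring sizes below are rounded up this way so that
 * nentries - 1 can double as an index mask (see nentries_mask in ce_init).
 */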

#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40

static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};

static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};

/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
 * mapping of HTC services to HIF pipes.
 */
/*
 * This authoritative table defines Copy Engine configuration and the mapping
 * of services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
 ============================================================================
 Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
            |                      |      | ctio | Size     | Frequency
            |                      |      | n    |          |
 ============================================================================
 tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
 descriptor |                      |      |      | O(100B)  | and regular
 download   |                      |      |      |          |
 ----------------------------------------------------------------------------
 rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
 indication |                      |      |      | O(10B)   | regular
 upload     |                      |      |      |          |
 ----------------------------------------------------------------------------
 MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
 upload     |                      |      |      | O(1000B) | (frequent
 e.g. noise |                      |      |      |          | during IP1.0
 packets    |                      |      |      |          | testing)
 ----------------------------------------------------------------------------
 MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
 download   |                      |      |      | O(1000B) | (frequent
 e.g.       |                      |      |      |          | during IP1.0
 misdirecte |                      |      |      |          | testing)
 d EAPOL    |                      |      |      |          |
 packets    |                      |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
            | DATA_VO (uplink)     |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
            | DATA_VO (downlink)   |      |      |          |
 ----------------------------------------------------------------------------
 WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
            |                      |      |      | O(100B)  |
 ----------------------------------------------------------------------------
 WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
 messages   | (downlink)           |      |      | O(100B)  |
            |                      |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
            | HTC_RAW_STREAMS      |      |      |          |
            | (uplink)             |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
            | HTC_RAW_STREAMS      |      |      |          |
            | (downlink)           |      |      |          |
 ----------------------------------------------------------------------------
 diag       | none (raw CE)        | CE 7 | t<>h |    4     | Diag Window
            |                      |      |      |          | infrequent
 ============================================================================
 */

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		5,
	},
	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};
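
/*
 * Illustrative reading of the table above: each entry is a
 * {service_id, pipedir, pipenum} triple (the service_to_pipe fields used
 * by ce_mark_datapath() below). For example, HTT uplink data
 * (HTT_DATA_MSG_SVC, PIPEDIR_IN) is delivered on CE/pipe 1.
 */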

static struct service_to_pipe *target_service_to_ce_map =
	target_service_to_ce_map_wlan;
static int target_service_to_ce_map_sz = sizeof(target_service_to_ce_map_wlan);

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
	{0, 0, 0,},             /* Must be last */
};

/**
 * ce_mark_datapath() - mark a datapath CE by setting its HTT attributes
 * @ce_state: pointer to the state context of the CE
 *
 * Description:
 *   Sets the htt_rx_data or htt_tx_data attribute of the state structure,
 *   depending on the pipe direction, if the CE serves one of the HTT DATA
 *   services.
 *
 * Return:
 *   true if the CE serves an HTT DATA service, false otherwise
 */
bool ce_mark_datapath(struct CE_state *ce_state)
{
	struct service_to_pipe *svc_map;
	size_t map_sz;
	int i;
	bool rc = false;

	if (ce_state != NULL) {
		if (WLAN_IS_EPPING_ENABLED(hif_get_conparam(ce_state->scn))) {
			svc_map = target_service_to_ce_map_wlan_epping;
			map_sz = sizeof(target_service_to_ce_map_wlan_epping) /
				sizeof(struct service_to_pipe);
		} else {
			svc_map = target_service_to_ce_map_wlan;
			map_sz = sizeof(target_service_to_ce_map_wlan) /
				sizeof(struct service_to_pipe);
		}
		for (i = 0; i < map_sz; i++) {
			if ((svc_map[i].pipenum == ce_state->id) &&
			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
				/* HTT CEs are unidirectional */
				if (svc_map[i].pipedir == PIPEDIR_IN)
					ce_state->htt_rx_data = true;
				else
					ce_state->htt_tx_data = true;
				rc = true;
			}
		}
	}
	return rc;
}
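
/*
 * Illustrative: with the default target_service_to_ce_map_wlan above,
 * ce_mark_datapath() flags CE 1 as htt_rx_data (HTT_DATA_MSG_SVC,
 * PIPEDIR_IN) and CE 4 as htt_tx_data (HTT_DATA_MSG_SVC, PIPEDIR_OUT).
 * ce_init() below invokes it once per CE.
 */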

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 *
 * This should be called during the initialization sequence before
 * interrupts are enabled, so we don't have to worry about thread safety.
 */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr)
{
	struct CE_state *CE_state;
	uint32_t ctrl_addr;
	unsigned int nentries;
	qdf_dma_addr_t base_addr;
	bool malloc_CE_state = false;
	bool malloc_src_ring = false;

	QDF_ASSERT(CE_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(CE_id);
	CE_state = scn->ce_id_to_state[CE_id];

	if (!CE_state) {
		CE_state =
		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
		if (!CE_state) {
			HIF_ERROR("%s: CE_state has no mem", __func__);
			return NULL;
		}
		malloc_CE_state = true;
		qdf_mem_zero(CE_state, sizeof(*CE_state));
		scn->ce_id_to_state[CE_id] = CE_state;
		qdf_spinlock_create(&CE_state->ce_index_lock);

		CE_state->id = CE_id;
		CE_state->ctrl_addr = ctrl_addr;
		CE_state->state = CE_RUNNING;
		CE_state->attr_flags = attr->flags;
	}
	CE_state->scn = scn;

	qdf_atomic_init(&CE_state->rx_pending);
	if (attr == NULL) {
		/* Already initialized; caller wants the handle */
		return (struct CE_handle *)CE_state;
	}

#ifdef ADRASTEA_SHADOW_REGISTERS
	HIF_ERROR("%s: Using Shadow Registers instead of CE Registers\n",
		  __func__);
#endif

	if (CE_state->src_sz_max)
		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
	else
		CE_state->src_sz_max = attr->src_sz_max;

	ce_init_ce_desc_event_log(CE_id,
				  attr->src_nentries + attr->dest_nentries);

	/* source ring setup */
	nentries = attr->src_nentries;
	if (nentries) {
		struct CE_ring_state *src_ring;
		unsigned CE_nbytes;
		char *ptr;
		uint64_t dma_addr;

		nentries = roundup_pwr2(nentries);
		if (CE_state->src_ring) {
			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
		} else {
			CE_nbytes = sizeof(struct CE_ring_state)
				    + (nentries * sizeof(void *));
			ptr = qdf_mem_malloc(CE_nbytes);
			if (!ptr) {
				/* cannot allocate src ring. If the
				 * CE_state is allocated locally free
				 * CE_State and return error.
				 */
				HIF_ERROR("%s: src ring has no mem", __func__);
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					scn->ce_id_to_state[CE_id] = NULL;
					qdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			} else {
				/* we can allocate src ring.
				 * Mark that the src ring is
				 * allocated locally
				 */
				malloc_src_ring = true;
			}
			qdf_mem_zero(ptr, CE_nbytes);

			src_ring = CE_state->src_ring =
				(struct CE_ring_state *)ptr;
			ptr += sizeof(struct CE_ring_state);
			src_ring->nentries = nentries;
			src_ring->nentries_mask = nentries - 1;
			if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
				goto error_target_access;
			src_ring->hw_index =
				CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
			src_ring->sw_index = src_ring->hw_index;
			src_ring->write_index =
				CE_SRC_RING_WRITE_IDX_GET(scn, ctrl_addr);
			if (Q_TARGET_ACCESS_END(scn) < 0)
				goto error_target_access;

			src_ring->low_water_mark_nentries = 0;
			src_ring->high_water_mark_nentries = nentries;
			src_ring->per_transfer_context = (void **)ptr;

			/* Legacy platforms that do not support cache
			 * coherent DMA are unsupported
			 */
			src_ring->base_addr_owner_space_unaligned =
				qdf_mem_alloc_consistent(scn->qdf_dev,
						scn->qdf_dev->dev,
						(nentries *
						 sizeof(struct CE_src_desc) +
						 CE_DESC_RING_ALIGN),
						&base_addr);
			if (src_ring->base_addr_owner_space_unaligned
					== NULL) {
				HIF_ERROR("%s: src ring has no DMA mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->base_addr_CE_space_unaligned = base_addr;

			if (src_ring->base_addr_CE_space_unaligned &
			    (CE_DESC_RING_ALIGN - 1)) {
				src_ring->base_addr_CE_space =
					(src_ring->base_addr_CE_space_unaligned
					 + CE_DESC_RING_ALIGN - 1) &
					~(CE_DESC_RING_ALIGN - 1);

				src_ring->base_addr_owner_space =
					(void *)(((size_t) src_ring->
					 base_addr_owner_space_unaligned +
					 CE_DESC_RING_ALIGN - 1) &
					~(CE_DESC_RING_ALIGN - 1));
			} else {
				src_ring->base_addr_CE_space =
					src_ring->base_addr_CE_space_unaligned;
				src_ring->base_addr_owner_space =
					src_ring->
					base_addr_owner_space_unaligned;
			}
			/*
			 * Also allocate a shadow src ring in
			 * regular mem to use for faster access.
			 */
			src_ring->shadow_base_unaligned =
				qdf_mem_malloc(nentries *
					       sizeof(struct CE_src_desc) +
					       CE_DESC_RING_ALIGN);
			if (src_ring->shadow_base_unaligned == NULL) {
				HIF_ERROR("%s: src ring no shadow_base mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->shadow_base = (struct CE_src_desc *)
				(((size_t) src_ring->shadow_base_unaligned +
				  CE_DESC_RING_ALIGN - 1) &
				 ~(CE_DESC_RING_ALIGN - 1));

			if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
				goto error_target_access;
			dma_addr = src_ring->base_addr_CE_space;
			CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
					(uint32_t)(dma_addr & 0xFFFFFFFF));
#ifdef WLAN_ENABLE_QCA6180
			{
				uint32_t tmp;

				tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(
						scn, ctrl_addr);
				tmp &= ~0x1F;
				dma_addr = ((dma_addr >> 32) & 0x1F) | tmp;
				CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
						ctrl_addr, (uint32_t)dma_addr);
			}
#endif
			CE_SRC_RING_SZ_SET(scn, ctrl_addr, nentries);
			CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
#ifdef BIG_ENDIAN_HOST
			/* Enable source ring byte swap for big endian host */
			CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
			CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
			CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
			if (Q_TARGET_ACCESS_END(scn) < 0)
				goto error_target_access;
		}
	}
574 /* destination ring setup */
575 nentries = attr->dest_nentries;
576 if (nentries) {
577 struct CE_ring_state *dest_ring;
578 unsigned CE_nbytes;
579 char *ptr;
580 uint64_t dma_addr;
581
582 nentries = roundup_pwr2(nentries);
583 if (CE_state->dest_ring) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530584 QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800585 } else {
586 CE_nbytes = sizeof(struct CE_ring_state)
587 + (nentries * sizeof(void *));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530588 ptr = qdf_mem_malloc(CE_nbytes);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800589 if (!ptr) {
590 /* cannot allocate dst ring. If the CE_state
591 * or src ring is allocated locally free
592 * CE_State and src ring and return error.
593 */
594 HIF_ERROR("%s: dest ring has no mem",
595 __func__);
596 if (malloc_src_ring) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530597 qdf_mem_free(CE_state->src_ring);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800598 CE_state->src_ring = NULL;
599 malloc_src_ring = false;
600 }
601 if (malloc_CE_state) {
602 /* allocated CE_state locally */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800603 scn->ce_id_to_state[CE_id] = NULL;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530604 qdf_mem_free(CE_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800605 malloc_CE_state = false;
606 }
607 return NULL;
608 }
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530609 qdf_mem_zero(ptr, CE_nbytes);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800610
611 dest_ring = CE_state->dest_ring =
612 (struct CE_ring_state *)ptr;
613 ptr += sizeof(struct CE_ring_state);
614 dest_ring->nentries = nentries;
615 dest_ring->nentries_mask = nentries - 1;
Houston Hoffman4411ad42016-03-14 21:12:04 -0700616 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
617 goto error_target_access;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800618 dest_ring->sw_index =
619 CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
620 dest_ring->write_index =
621 CE_DEST_RING_WRITE_IDX_GET(scn, ctrl_addr);
Houston Hoffman4411ad42016-03-14 21:12:04 -0700622 if (Q_TARGET_ACCESS_END(scn) < 0)
623 goto error_target_access;
624
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800625 dest_ring->low_water_mark_nentries = 0;
626 dest_ring->high_water_mark_nentries = nentries;
627 dest_ring->per_transfer_context = (void **)ptr;
628
629 /* Legacy platforms that do not support cache
630 * coherent DMA are unsupported */
631 dest_ring->base_addr_owner_space_unaligned =
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530632 qdf_mem_alloc_consistent(scn->qdf_dev,
633 scn->qdf_dev->dev,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800634 (nentries *
635 sizeof(struct CE_dest_desc) +
636 CE_DESC_RING_ALIGN),
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530637 &base_addr);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800638 if (dest_ring->base_addr_owner_space_unaligned
639 == NULL) {
640 HIF_ERROR("%s: dest ring has no DMA mem",
641 __func__);
642 goto error_no_dma_mem;
643 }
644 dest_ring->base_addr_CE_space_unaligned = base_addr;
645
646 /* Correctly initialize memory to 0 to
647 * prevent garbage data crashing system
648 * when download firmware
649 */
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530650 qdf_mem_zero(dest_ring->base_addr_owner_space_unaligned,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800651 nentries * sizeof(struct CE_dest_desc) +
652 CE_DESC_RING_ALIGN);
653
654 if (dest_ring->
655 base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN -
656 1)) {
657
658 dest_ring->base_addr_CE_space =
659 (dest_ring->
660 base_addr_CE_space_unaligned +
661 CE_DESC_RING_ALIGN -
662 1) & ~(CE_DESC_RING_ALIGN - 1);
663
664 dest_ring->base_addr_owner_space =
665 (void
666 *)(((size_t) dest_ring->
667 base_addr_owner_space_unaligned +
668 CE_DESC_RING_ALIGN -
669 1) & ~(CE_DESC_RING_ALIGN - 1));
670 } else {
671 dest_ring->base_addr_CE_space =
672 dest_ring->base_addr_CE_space_unaligned;
673 dest_ring->base_addr_owner_space =
674 dest_ring->
675 base_addr_owner_space_unaligned;
676 }
677
Houston Hoffman4411ad42016-03-14 21:12:04 -0700678 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
679 goto error_target_access;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800680 dma_addr = dest_ring->base_addr_CE_space;
681 CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
682 (uint32_t)(dma_addr & 0xFFFFFFFF));
683#ifdef WLAN_ENABLE_QCA6180
684 {
685 uint32_t tmp;
686 tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn,
687 ctrl_addr);
688 tmp &= ~0x1F;
689 dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
690 CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
691 ctrl_addr, (uint32_t)dma_addr);
692 }
693#endif
694 CE_DEST_RING_SZ_SET(scn, ctrl_addr, nentries);
695#ifdef BIG_ENDIAN_HOST
696 /* Enable Dest ring byte swap for big endian host */
697 CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
698#endif
699 CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
700 CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
Houston Hoffman4411ad42016-03-14 21:12:04 -0700701 if (Q_TARGET_ACCESS_END(scn) < 0)
702 goto error_target_access;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800703
704 /* epping */
705 /* poll timer */
706 if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL)) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530707 qdf_timer_init(scn->qdf_dev,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800708 &CE_state->poll_timer,
709 ce_poll_timeout,
710 CE_state,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530711 QDF_TIMER_TYPE_SW);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800712 CE_state->timer_inited = true;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +0530713 qdf_timer_mod(&CE_state->poll_timer,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -0800714 CE_POLL_TIMEOUT);
715 }
716 }
717 }

	/* Enable CE error interrupts */
	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		goto error_target_access;
	CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
	if (Q_TARGET_ACCESS_END(scn) < 0)
		goto error_target_access;

	/* update the htt_data attribute */
	ce_mark_datapath(CE_state);

	return (struct CE_handle *)CE_state;

error_target_access:
error_no_dma_mem:
	ce_fini((struct CE_handle *)CE_state);
	return NULL;
}
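
/*
 * Illustrative usage sketch (assumes host_ce_config from ce_assignment.h,
 * which this file indexes by pipe number elsewhere):
 *
 *	struct CE_attr *attr = &host_ce_config[CE_id];
 *	struct CE_handle *ce_hdl = ce_init(scn, CE_id, attr);
 *
 * A NULL return means an allocation or target-access failure; ce_init()
 * has already torn down any partially constructed state via ce_fini().
 */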

#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_enable_fastpath() - enable fastpath mode
 * @hif_ctx: HIF context
 *
 * For use in data path
 *
 * Return: void
 */
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	HIF_INFO("Enabling fastpath mode\n");
	scn->fastpath_mode_on = true;
}

/**
 * hif_is_fastpath_mode_enabled() - API to query if fastpath mode is enabled
 * @hif_ctx: HIF Context
 *
 * For use in data path to skip HTC
 *
 * Return: bool
 */
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->fastpath_mode_on;
}

/**
 * hif_get_ce_handle() - API to get CE handle for fastpath mode
 * @hif_ctx: HIF Context
 * @id: CopyEngine Id
 *
 * API to return CE handle for fastpath mode
 *
 * Return: CE handle as an opaque void pointer
 */
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->ce_id_to_state[id];
}

/**
 * ce_h2t_tx_ce_cleanup() - placeholder function for H2T CE cleanup
 * @ce_hdl: Copy engine handle
 *
 * No processing is required inside this function. Using an assert,
 * this function makes sure that the TX CE has been processed completely.
 *
 * This is called while dismantling CE structures. No other thread
 * should be using these structures while dismantling is occurring,
 * therefore no locking is needed.
 *
 * Return: none
 */
void
ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct hif_softc *sc = ce_state->scn;
	uint32_t sw_index, write_index;

	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
		HIF_INFO("%s %d Fastpath mode ON, Cleaning up HTT Tx CE\n",
			 __func__, __LINE__);
		sw_index = src_ring->sw_index;
		write_index = src_ring->sw_index;

		/* At this point Tx CE should be clean */
		qdf_assert_always(sw_index == write_index);
	}
}

/**
 * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
 * @ce_hdl: Handle to CE
 *
 * These buffers are never allocated on the fly, but
 * are allocated only once during HIF start and freed
 * only once during HIF stop.
 * NOTE:
 * The assumption here is that there is no in-flight DMA in progress
 * currently, so that buffers can be freed up safely.
 *
 * Return: NONE
 */
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *dst_ring = ce_state->dest_ring;
	qdf_nbuf_t nbuf;
	int i;

	if (!ce_state->fastpath_handler)
		return;
	/*
	 * When fastpath_mode is on, a datapath CE is used completely full:
	 * unlike other CEs, it does not leave one blank space to
	 * distinguish between an empty queue and a full queue. So free all
	 * the entries.
	 */
	for (i = 0; i < dst_ring->nentries; i++) {
		nbuf = dst_ring->per_transfer_context[i];

		/*
		 * The reasons for doing this check are:
		 * 1) Protect against calling cleanup before allocating buffers
		 * 2) In a corner case, fastpath_mode_on may be set, but we
		 *    could have a partially filled ring, because of a memory
		 *    allocation failure in the middle of allocating ring.
		 *    This check accounts for that case; checking the
		 *    fastpath_mode_on flag or started flag would not have
		 *    covered that case. This is not in the performance path,
		 *    so it is OK to do this.
		 */
		if (nbuf)
			qdf_nbuf_free(nbuf);
	}
}
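
/*
 * Illustrative: this mirrors hif_update_fastpath_recv_bufs_cnt() below,
 * which posts a buffer for every one of the dest_nentries slots on
 * datapath Rx CEs; cleanup therefore walks all nentries slots instead of
 * nentries - 1.
 */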

/**
 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count of
 *	each datapath pipe by 1
 * @scn: HIF handle
 *
 * Datapath Rx CEs are a special case, where we reuse all the message
 * buffers. Hence we have to post all the entries in the pipe, even at the
 * beginning, unlike other CE pipes where one less than dest_nentries is
 * filled at the start.
 *
 * Return: None
 */
static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
	int pipe_num;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (scn->fastpath_mode_on == false)
		return;

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info =
			&hif_state->pipe_info[pipe_num];
		struct CE_state *ce_state =
			scn->ce_id_to_state[pipe_info->pipe_num];

		if (ce_state->htt_rx_data)
			atomic_inc(&pipe_info->recv_bufs_needed);
	}
}
#else
static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
}

static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return false;
}

static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
{
	return false;
}
#endif /* WLAN_FEATURE_FASTPATH */

void ce_fini(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int CE_id = CE_state->id;
	struct hif_softc *scn = CE_state->scn;

	CE_state->state = CE_UNUSED;
	scn->ce_id_to_state[CE_id] = NULL;
	if (CE_state->src_ring) {
		/* Cleanup the datapath Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		if (CE_state->src_ring->shadow_base_unaligned)
			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
		if (CE_state->src_ring->base_addr_owner_space_unaligned)
			qdf_mem_free_consistent(scn->qdf_dev,
						scn->qdf_dev->dev,
						(CE_state->src_ring->nentries *
						 sizeof(struct CE_src_desc) +
						 CE_DESC_RING_ALIGN),
						CE_state->src_ring->
						base_addr_owner_space_unaligned,
						CE_state->src_ring->
						base_addr_CE_space, 0);
		qdf_mem_free(CE_state->src_ring);
	}
	if (CE_state->dest_ring) {
		/* Cleanup the datapath Rx ring */
		ce_t2h_msg_ce_cleanup(copyeng);

		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
			qdf_mem_free_consistent(scn->qdf_dev,
						scn->qdf_dev->dev,
						(CE_state->dest_ring->nentries *
						 sizeof(struct CE_dest_desc) +
						 CE_DESC_RING_ALIGN),
						CE_state->dest_ring->
						base_addr_owner_space_unaligned,
						CE_state->dest_ring->
						base_addr_CE_space, 0);
		qdf_mem_free(CE_state->dest_ring);

		/* epping */
		if (CE_state->timer_inited) {
			CE_state->timer_inited = false;
			qdf_timer_free(&CE_state->poll_timer);
		}
	}
	qdf_mem_free(CE_state);
}
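
/*
 * Note: ce_fini() also serves as the error path of ce_init() (see the
 * error_target_access/error_no_dma_mem labels above), so it must tolerate
 * a partially constructed CE_state in which src_ring, dest_ring, or the
 * poll timer were never set up.
 */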

void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	qdf_mem_zero(&hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
	qdf_mem_zero(&hif_state->msg_callbacks_current,
		     sizeof(hif_state->msg_callbacks_current));
}

/* Send the first nbytes bytes of the buffer */
QDF_STATUS
hif_send_head(struct hif_opaque_softc *hif_ctx,
	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
	      qdf_nbuf_t nbuf, unsigned int data_attr)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	int bytes = nbytes, nfrags = 0;
	struct ce_sendlist sendlist;
	int status, i = 0;
	unsigned int mux_id = 0;

	QDF_ASSERT(nbytes <= qdf_nbuf_len(nbuf));

	transfer_id =
		(mux_id & MUX_ID_MASK) |
		(transfer_id & TRANSACTION_ID_MASK);
	data_attr &= DESC_DATA_FLAG_MASK;
	/*
	 * The common case involves sending multiple fragments within a
	 * single download (the tx descriptor and the tx frame header).
	 * So, optimize for the case of multiple fragments by not even
	 * checking whether it's necessary to use a sendlist.
	 * The overhead of using a sendlist for a single buffer download
	 * is not a big deal, since it happens rarely (for WMI messages).
	 */
	ce_sendlist_init(&sendlist);
	do {
		qdf_dma_addr_t frag_paddr;
		int frag_bytes;

		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
		/*
		 * Clear the packet offset for all but the first CE desc.
		 */
		if (i++ > 0)
			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;

		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
					     frag_bytes >
					     bytes ? bytes : frag_bytes,
					     qdf_nbuf_get_frag_is_wordstream
					     (nbuf,
					      nfrags) ? 0 :
					     CE_SEND_FLAG_SWAP_DISABLE,
					     data_attr);
		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("%s: error, frag_num %d larger than limit",
				  __func__, nfrags);
			return status;
		}
		bytes -= frag_bytes;
		nfrags++;
	} while (bytes > 0);

	/* Make sure we have resources to handle this request */
	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	if (pipe_info->num_sends_allowed < nfrags) {
		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
		return QDF_STATUS_E_RESOURCES;
	}
	pipe_info->num_sends_allowed -= nfrags;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);

	if (qdf_unlikely(ce_hdl == NULL)) {
		HIF_ERROR("%s: error CE handle is null", __func__);
		return A_ERROR;
	}

	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
			     (uint8_t *)(qdf_nbuf_data(nbuf)),
			     sizeof(qdf_nbuf_data(nbuf))));
	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
	QDF_ASSERT(status == QDF_STATUS_SUCCESS);

	return status;
}
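
/*
 * Illustrative: each nbuf fragment becomes one CE descriptor, so the
 * common two-fragment send (tx descriptor + frame header) consumes two
 * units of num_sends_allowed; hif_pci_ce_send_done() below returns one
 * unit per completed fragment.
 */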

void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
			     int force)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance. If there seem to be plenty of
		 * resources left, then just wait, since checking involves
		 * reading a CE register, which is a relatively expensive
		 * operation.
		 */
		resources = hif_get_free_queue_number(hif_ctx, pipe);
		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config[pipe].src_nentries >> 1)) {
			return;
		}
	}
#ifdef ATH_11AC_TXCOMPACT
	ce_per_engine_servicereap(scn, pipe);
#else
	ce_per_engine_service(scn, pipe);
#endif
}

uint16_t
hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	uint16_t rv;

	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	rv = pipe_info->num_sends_allowed;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
	return rv;
}

/* Called by lower (CE) layer when a send to Target completes. */
void
hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int sw_index, unsigned int hw_index,
		     unsigned int toeplitz_hash_result)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	unsigned int sw_idx = sw_index, hw_idx = hw_index;
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	do {
		/*
		 * The upper layer callback will be triggered
		 * when the last fragment is completed.
		 */
		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
			if (scn->target_status
			    == OL_TRGET_STATUS_RESET)
				qdf_nbuf_free(transfer_context);
			else
				msg_callbacks->txCompletionHandler(
					msg_callbacks->Context,
					transfer_context, transfer_id,
					toeplitz_hash_result);
		}

		qdf_spin_lock(&pipe_info->completion_freeq_lock);
		pipe_info->num_sends_allowed++;
		qdf_spin_unlock(&pipe_info->completion_freeq_lock);
	} while (ce_completed_send_next(copyeng,
			&ce_context, &transfer_context,
			&CE_data, &nbytes, &transfer_id,
			&sw_idx, &hw_idx,
			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
}

/**
 * hif_ce_do_recv(): send message from copy engine to upper layers
 * @msg_callbacks: structure containing callback and callback context
 * @netbuf: skb containing message
 * @nbytes: number of bytes in the message
 * @pipe_info: used for the pipe_number info
 *
 * Checks the packet length, configures the length in the netbuf,
 * and calls the upper layer callback.
 *
 * return: None
 */
static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
		qdf_nbuf_t netbuf, int nbytes,
		struct HIF_CE_pipe_info *pipe_info) {
	if (nbytes <= pipe_info->buf_sz) {
		qdf_nbuf_set_pktlen(netbuf, nbytes);
		msg_callbacks->
			rxCompletionHandler(msg_callbacks->Context,
					    netbuf, pipe_info->pipe_num);
	} else {
		HIF_ERROR("%s: Invalid Rx msg buf:%p nbytes:%d",
			  __func__, netbuf, nbytes);
		qdf_nbuf_free(netbuf);
	}
}

/* Called by lower (CE) layer when data is received from the Target. */
void
hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int flags)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct CE_state *ce_state = (struct CE_state *) copyeng;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
#ifdef HIF_PCI
	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
#endif
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;
	uint32_t count;

	do {
#ifdef HIF_PCI
		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
#endif
		qdf_nbuf_unmap_single(scn->qdf_dev,
				      (qdf_nbuf_t) transfer_context,
				      QDF_DMA_FROM_DEVICE);

		atomic_inc(&pipe_info->recv_bufs_needed);
		hif_post_recv_buffers_for_pipe(pipe_info);
		if (scn->target_status == OL_TRGET_STATUS_RESET)
			qdf_nbuf_free(transfer_context);
		else
			hif_ce_do_recv(msg_callbacks, transfer_context,
				       nbytes, pipe_info);

		/* Set up force_break flag if num of receives reaches
		 * MAX_NUM_OF_RECEIVES */
		ce_state->receive_count++;
		count = ce_state->receive_count;
		if (qdf_unlikely(hif_max_num_receives_reached(scn, count))) {
			ce_state->force_break = 1;
			break;
		}
	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
					&CE_data, &nbytes, &transfer_id,
					&flags) == QDF_STATUS_SUCCESS);

}

/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */

void
hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
	      struct hif_msg_callbacks *callbacks)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	spin_lock_init(&pcie_access_log_lock);
#endif
	/* Save callbacks for later installation */
	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
		     sizeof(hif_state->msg_callbacks_pending));

}

int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
{
	struct CE_handle *ce_diag = hif_state->ce_diag;
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	struct hif_msg_callbacks *hif_msg_callbacks =
		&hif_state->msg_callbacks_current;

	/* daemonize("hif_compl_thread"); */

	if (scn->ce_count == 0) {
		HIF_ERROR("%s: Invalid ce_count\n", __func__);
		return -EINVAL;
	}

	if (!hif_msg_callbacks ||
	    !hif_msg_callbacks->rxCompletionHandler ||
	    !hif_msg_callbacks->txCompletionHandler) {
		HIF_ERROR("%s: no completion handler registered", __func__);
		return -EFAULT;
	}

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr attr;
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl == ce_diag) {
			continue;       /* Handle Diagnostic CE specially */
		}
		attr = host_ce_config[pipe_num];
		if (attr.src_nentries) {
			/* pipe used to send to target */
			HIF_INFO_MED("%s: pipe_num:%d pipe_info:0x%p",
				     __func__, pipe_num, pipe_info);
			ce_send_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_send_done, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
			pipe_info->num_sends_allowed = attr.src_nentries - 1;
		}
		if (attr.dest_nentries) {
			/* pipe used to receive from target */
			ce_recv_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_recv_data, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
		}

		if (attr.src_nentries)
			qdf_spinlock_create(&pipe_info->completion_freeq_lock);
	}

	A_TARGET_ACCESS_UNLIKELY(scn);
	return 0;
}
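
/*
 * Note (inferred): num_sends_allowed starts at src_nentries - 1 because a
 * CE ring normally keeps one slot empty to distinguish a full ring from
 * an empty one (cf. the fastpath exception described in
 * ce_t2h_msg_ce_cleanup() above).
 */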

/*
 * Install pending msg callbacks.
 *
 * TBDXXX: This hack is needed because upper layers install msg callbacks
 * for use with HTC before BMI is done; yet this HIF implementation
 * needs to continue to use BMI msg callbacks. Really, upper layers
 * should not register HTC callbacks until AFTER BMI phase.
 */
static void hif_msg_callbacks_install(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	qdf_mem_copy(&hif_state->msg_callbacks_current,
		     &hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
}

void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
			  uint8_t *DLPipe)
{
	int ul_is_polled, dl_is_polled;

	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
}

/**
 * hif_dump_pipe_debug_count() - Log error count
 * @scn: hif_softc pointer.
 *
 * Output the pipe error counts of each pipe to log file
 *
 * Return: N/A
 */
void hif_dump_pipe_debug_count(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num;

	if (hif_state == NULL) {
		HIF_ERROR("%s hif_state is NULL", __func__);
		return;
	}
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];

		if (pipe_info->nbuf_alloc_err_count > 0 ||
		    pipe_info->nbuf_dma_err_count > 0 ||
		    pipe_info->nbuf_ce_enqueue_err_count)
			HIF_ERROR(
				"%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_alloc_err_count,
				pipe_info->nbuf_dma_err_count,
				pipe_info->nbuf_ce_enqueue_err_count);
	}
}
1348
1349static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
1350{
1351 struct CE_handle *ce_hdl;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301352 qdf_size_t buf_sz;
Komal Seelam644263d2016-02-22 20:45:49 +05301353 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301354 QDF_STATUS ret;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001355 uint32_t bufs_posted = 0;
1356
1357 buf_sz = pipe_info->buf_sz;
1358 if (buf_sz == 0) {
1359 /* Unused Copy Engine */
1360 return 0;
1361 }
1362
1363 ce_hdl = pipe_info->ce_hdl;
1364
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301365 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001366 while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301367 qdf_dma_addr_t CE_data; /* CE space buffer address */
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301368 qdf_nbuf_t nbuf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001369 int status;
1370
1371 atomic_dec(&pipe_info->recv_bufs_needed);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301372 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001373
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301374 nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001375 if (!nbuf) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301376 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001377 pipe_info->nbuf_alloc_err_count++;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301378 qdf_spin_unlock_bh(
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001379 &pipe_info->recv_bufs_needed_lock);
1380 HIF_ERROR(
1381 "%s buf alloc error [%d] needed %d, nbuf_alloc_err_count = %u",
1382 __func__, pipe_info->pipe_num,
1383 atomic_read(&pipe_info->recv_bufs_needed),
1384 pipe_info->nbuf_alloc_err_count);
1385 atomic_inc(&pipe_info->recv_bufs_needed);
1386 return 1;
1387 }
1388
1389 /*
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301390 * qdf_nbuf_peek_header(nbuf, &data, &unused);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001391 	 * CE_data = dma_map_single(dev, data, buf_sz,
 1392 	 *                          DMA_FROM_DEVICE);
1393 */
1394 ret =
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301395 qdf_nbuf_map_single(scn->qdf_dev, nbuf,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301396 QDF_DMA_FROM_DEVICE);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001397
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301398 if (unlikely(ret != QDF_STATUS_SUCCESS)) {
1399 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001400 pipe_info->nbuf_dma_err_count++;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301401 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001402 HIF_ERROR(
 1403 				"%s buf map error [%d] needed %d, nbuf_dma_err_count = %u",
1404 __func__, pipe_info->pipe_num,
1405 atomic_read(&pipe_info->recv_bufs_needed),
1406 pipe_info->nbuf_dma_err_count);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301407 qdf_nbuf_free(nbuf);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001408 atomic_inc(&pipe_info->recv_bufs_needed);
1409 return 1;
1410 }
1411
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301412 CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001413
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301414 qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001415 buf_sz, DMA_FROM_DEVICE);
1416 status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301417 QDF_ASSERT(status == QDF_STATUS_SUCCESS);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001418 		if (status != QDF_STATUS_SUCCESS) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301419 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001420 pipe_info->nbuf_ce_enqueue_err_count++;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301421 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001422 HIF_ERROR(
 1423 				"%s buf enqueue error [%d] needed %d, nbuf_ce_enqueue_err_count = %u",
1424 __func__, pipe_info->pipe_num,
1425 atomic_read(&pipe_info->recv_bufs_needed),
1426 pipe_info->nbuf_ce_enqueue_err_count);
1427 atomic_inc(&pipe_info->recv_bufs_needed);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301428 qdf_nbuf_free(nbuf);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001429 return 1;
1430 }
1431
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301432 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001433 bufs_posted++;
1434 }
1435 pipe_info->nbuf_alloc_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07001436 (pipe_info->nbuf_alloc_err_count > bufs_posted) ?
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001437 pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
1438 pipe_info->nbuf_dma_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07001439 (pipe_info->nbuf_dma_err_count > bufs_posted) ?
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001440 pipe_info->nbuf_dma_err_count - bufs_posted : 0;
1441 pipe_info->nbuf_ce_enqueue_err_count =
Houston Hoffman56936832016-03-16 12:16:24 -07001442 (pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001443 pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
1444
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301445 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001446
1447 return 0;
1448}
1449
1450/*
1451 * Try to post all desired receive buffers for all pipes.
1452 * Returns 0 if all desired buffers are posted,
 1453 * non-zero if we were unable to completely
1454 * replenish receive buffers.
1455 */
Komal Seelam644263d2016-02-22 20:45:49 +05301456static int hif_post_recv_buffers(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001457{
Komal Seelam02cf2f82016-02-22 20:44:25 +05301458 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001459 int pipe_num, rv = 0;
1460
1461 A_TARGET_ACCESS_LIKELY(scn);
1462 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1463 struct HIF_CE_pipe_info *pipe_info;
1464
1465 pipe_info = &hif_state->pipe_info[pipe_num];
1466 if (hif_post_recv_buffers_for_pipe(pipe_info)) {
1467 rv = 1;
1468 goto done;
1469 }
1470 }
1471
1472done:
1473 A_TARGET_ACCESS_UNLIKELY(scn);
1474
1475 return rv;
1476}
1477
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301478QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001479{
Komal Seelam644263d2016-02-22 20:45:49 +05301480 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05301481 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001482
Manjunathappa Prakash4a9c3a82016-04-14 01:12:14 -07001483 hif_update_fastpath_recv_bufs_cnt(scn);
1484
Houston Hoffman9c12f7f2015-09-28 16:52:14 -07001485 hif_msg_callbacks_install(scn);
1486
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001487 if (hif_completion_thread_startup(hif_state))
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301488 return QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001489
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001490 /* Post buffers once to start things off. */
1491 (void)hif_post_recv_buffers(scn);
1492
1493 hif_state->started = true;
1494
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301495 return QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001496}
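
/*
 * Usage sketch (illustrative only, not part of the driver): a caller that
 * has installed its message callbacks while HIF was idle brings the pipes
 * up with hif_start() and checks the QDF status.  The example_* name below
 * is hypothetical.
 */
static QDF_STATUS example_start_hif(struct hif_opaque_softc *hif_ctx)
{
	QDF_STATUS status = hif_start(hif_ctx);

	if (status != QDF_STATUS_SUCCESS)
		HIF_ERROR("%s: hif_start failed", __func__);
	return status;
}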
1497
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001498void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
1499{
Komal Seelam644263d2016-02-22 20:45:49 +05301500 struct hif_softc *scn;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001501 struct CE_handle *ce_hdl;
1502 uint32_t buf_sz;
1503 struct HIF_CE_state *hif_state;
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301504 qdf_nbuf_t netbuf;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301505 qdf_dma_addr_t CE_data;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001506 void *per_CE_context;
1507
1508 buf_sz = pipe_info->buf_sz;
1509 if (buf_sz == 0) {
1510 /* Unused Copy Engine */
1511 return;
1512 }
1513
1514 hif_state = pipe_info->HIF_CE_state;
1515 if (!hif_state->started) {
1516 return;
1517 }
1518
Komal Seelam02cf2f82016-02-22 20:44:25 +05301519 scn = HIF_GET_SOFTC(hif_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001520 ce_hdl = pipe_info->ce_hdl;
1521
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301522 if (scn->qdf_dev == NULL) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001523 return;
1524 }
1525 while (ce_revoke_recv_next
1526 (ce_hdl, &per_CE_context, (void **)&netbuf,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301527 &CE_data) == QDF_STATUS_SUCCESS) {
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301528 qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301529 QDF_DMA_FROM_DEVICE);
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301530 qdf_nbuf_free(netbuf);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001531 }
1532}
1533
1534void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
1535{
1536 struct CE_handle *ce_hdl;
1537 struct HIF_CE_state *hif_state;
Komal Seelam644263d2016-02-22 20:45:49 +05301538 struct hif_softc *scn;
Vishwajith Upendra70f8b6e2016-03-01 16:28:23 +05301539 qdf_nbuf_t netbuf;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001540 void *per_CE_context;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301541 qdf_dma_addr_t CE_data;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001542 unsigned int nbytes;
1543 unsigned int id;
1544 uint32_t buf_sz;
1545 uint32_t toeplitz_hash_result;
1546
1547 buf_sz = pipe_info->buf_sz;
1548 if (buf_sz == 0) {
1549 /* Unused Copy Engine */
1550 return;
1551 }
1552
1553 hif_state = pipe_info->HIF_CE_state;
1554 if (!hif_state->started) {
1555 return;
1556 }
1557
Komal Seelam02cf2f82016-02-22 20:44:25 +05301558 scn = HIF_GET_SOFTC(hif_state);
1559
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001560 ce_hdl = pipe_info->ce_hdl;
1561
1562 while (ce_cancel_send_next
1563 (ce_hdl, &per_CE_context,
1564 (void **)&netbuf, &CE_data, &nbytes,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301565 &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001566 if (netbuf != CE_SENDLIST_ITEM_CTXT) {
1567 /*
1568 * Packets enqueued by htt_h2t_ver_req_msg() and
1569 * htt_h2t_rx_ring_cfg_msg_ll() have already been
1570 * freed in htt_htc_misc_pkt_pool_free() in
1571 * wlantl_close(), so do not free them here again
Houston Hoffman29573d92015-10-20 17:49:44 -07001572 * by checking whether it's the endpoint
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001573 * which they are queued in.
1574 */
Komal Seelam02cf2f82016-02-22 20:44:25 +05301575 if (id == scn->htc_endpoint)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001576 return;
 1577 			/* Indicate the completion to the higher
 1578 			 * layer to free the buffer */
1579 hif_state->msg_callbacks_current.
1580 txCompletionHandler(hif_state->
1581 msg_callbacks_current.Context,
1582 netbuf, id, toeplitz_hash_result);
1583 }
1584 }
1585}
1586
1587/*
1588 * Cleanup residual buffers for device shutdown:
1589 * buffers that were enqueued for receive
1590 * buffers that were to be sent
1591 * Note: Buffers that had completed but which were
1592 * not yet processed are on a completion queue. They
1593 * are handled when the completion thread shuts down.
1594 */
1595void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
1596{
1597 int pipe_num;
Komal Seelam644263d2016-02-22 20:45:49 +05301598 struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001599
Komal Seelam02cf2f82016-02-22 20:44:25 +05301600 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001601 struct HIF_CE_pipe_info *pipe_info;
1602
1603 pipe_info = &hif_state->pipe_info[pipe_num];
1604 hif_recv_buffer_cleanup_on_pipe(pipe_info);
1605 hif_send_buffer_cleanup_on_pipe(pipe_info);
1606 }
1607}
1608
Komal Seelam5584a7c2016-02-24 19:22:48 +05301609void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001610{
Komal Seelam644263d2016-02-22 20:45:49 +05301611 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05301612 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Komal Seelam644263d2016-02-22 20:45:49 +05301613
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001614 hif_buffer_cleanup(hif_state);
1615}
1616
Komal Seelam5584a7c2016-02-24 19:22:48 +05301617void hif_stop(struct hif_opaque_softc *hif_ctx)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001618{
Komal Seelam644263d2016-02-22 20:45:49 +05301619 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05301620 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001621 int pipe_num;
1622
1623 scn->hif_init_done = false;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001624
1625 /*
1626 * At this point, asynchronous threads are stopped,
1627 * The Target should not DMA nor interrupt, Host code may
1628 * not initiate anything more. So we just need to clean
1629 * up Host-side state.
1630 */
1631
1632 if (scn->athdiag_procfs_inited) {
1633 athdiag_procfs_remove();
1634 scn->athdiag_procfs_inited = false;
1635 }
1636
1637 hif_buffer_cleanup(hif_state);
1638
1639 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1640 struct HIF_CE_pipe_info *pipe_info;
1641
1642 pipe_info = &hif_state->pipe_info[pipe_num];
1643 if (pipe_info->ce_hdl) {
1644 ce_fini(pipe_info->ce_hdl);
1645 pipe_info->ce_hdl = NULL;
1646 pipe_info->buf_sz = 0;
1647 }
1648 }
1649
1650 if (hif_state->sleep_timer_init) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301651 qdf_timer_stop(&hif_state->sleep_timer);
1652 qdf_timer_free(&hif_state->sleep_timer);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001653 hif_state->sleep_timer_init = false;
1654 }
1655
1656 hif_state->started = false;
1657}
1658
Houston Hoffman854e67f2016-03-14 21:11:39 -07001659/**
1660 * hif_get_target_ce_config() - get copy engine configuration
1661 * @target_ce_config_ret: basic copy engine configuration
1662 * @target_ce_config_sz_ret: size of the basic configuration in bytes
1663 * @target_service_to_ce_map_ret: service mapping for the copy engines
1664 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
1665 * @target_shadow_reg_cfg_ret: shadow register configuration
1666 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
1667 *
 1668 * Provides accessors to these values outside of this file.
 1669 * Currently these are stored in static pointers to const sections.
 1670 * There are multiple configurations to select from at compile time.
1671 * Runtime selection would need to consider mode, target type and bus type.
1672 *
1673 * Return: return by parameter.
1674 */
1675void hif_get_target_ce_config(struct CE_pipe_config **target_ce_config_ret,
1676 int *target_ce_config_sz_ret,
1677 struct service_to_pipe **target_service_to_ce_map_ret,
1678 int *target_service_to_ce_map_sz_ret,
1679 struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
1680 int *shadow_cfg_sz_ret)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001681{
Houston Hoffman854e67f2016-03-14 21:11:39 -07001682 *target_ce_config_ret = target_ce_config;
1683 *target_ce_config_sz_ret = target_ce_config_sz;
1684 *target_service_to_ce_map_ret = target_service_to_ce_map;
1685 *target_service_to_ce_map_sz_ret = target_service_to_ce_map_sz;
1686
1687 if (target_shadow_reg_cfg_ret)
1688 *target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
1689
1690 if (shadow_cfg_sz_ret)
1691 *shadow_cfg_sz_ret = shadow_cfg_sz;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001692}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001693
1694/**
1695 * hif_wlan_enable(): call the platform driver to enable wlan
Komal Seelambd7c51d2016-02-24 10:27:30 +05301696 * @scn: HIF Context
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001697 *
1698 * This function passes the con_mode and CE configuration to
1699 * platform driver to enable wlan.
1700 *
Houston Hoffman108da402016-03-14 21:11:24 -07001701 * Return: linux error code
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001702 */
Houston Hoffman108da402016-03-14 21:11:24 -07001703int hif_wlan_enable(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001704{
1705 struct icnss_wlan_enable_cfg cfg;
1706 enum icnss_driver_mode mode;
Komal Seelambd7c51d2016-02-24 10:27:30 +05301707 uint32_t con_mode = hif_get_conparam(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001708
Houston Hoffman854e67f2016-03-14 21:11:39 -07001709 hif_get_target_ce_config((struct CE_pipe_config **)&cfg.ce_tgt_cfg,
1710 &cfg.num_ce_tgt_cfg,
1711 (struct service_to_pipe **)&cfg.ce_svc_cfg,
1712 &cfg.num_ce_svc_pipe_cfg,
1713 (struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
1714 &cfg.num_shadow_reg_cfg);
1715
1716 /* translate from structure size to array size */
1717 cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
1718 cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
1719 cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001720
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301721 if (QDF_GLOBAL_FTM_MODE == con_mode)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001722 mode = ICNSS_FTM;
Komal Seelambd7c51d2016-02-24 10:27:30 +05301723 else if (WLAN_IS_EPPING_ENABLED(con_mode))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001724 mode = ICNSS_EPPING;
Peng Xu7b962532015-10-02 17:17:03 -07001725 else
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001726 mode = ICNSS_MISSION;
Peng Xu7b962532015-10-02 17:17:03 -07001727
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07001728 if (BYPASS_QMI)
1729 return 0;
1730 else
1731 return icnss_wlan_enable(&cfg, mode, QWLAN_VERSIONSTR);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001732}
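
/*
 * Note on the size-to-count translation above (illustrative only):
 * hif_get_target_ce_config() reports table sizes in bytes, so
 * hif_wlan_enable() divides by the element size to recover the number of
 * entries.  The hypothetical helper below shows the same pattern; it is
 * not used by the driver.
 */
static inline int example_ce_cfg_entries(int cfg_sz_bytes)
{
	/* e.g. a 12-entry table: 12 * sizeof(struct CE_pipe_config)
	 * bytes in, 12 out */
	return (int)(cfg_sz_bytes / sizeof(struct CE_pipe_config));
}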
1733
Houston Hoffman108da402016-03-14 21:11:24 -07001734/**
1735 * hif_ce_prepare_config() - load the correct static tables.
1736 * @scn: hif context
1737 *
1738 * Epping uses different static attribute tables than mission mode.
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001739 */
Houston Hoffman108da402016-03-14 21:11:24 -07001740void hif_ce_prepare_config(struct hif_softc *scn)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001741{
Komal Seelambd7c51d2016-02-24 10:27:30 +05301742 uint32_t mode = hif_get_conparam(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001743 /* if epping is enabled we need to use the epping configuration. */
Komal Seelambd7c51d2016-02-24 10:27:30 +05301744 if (WLAN_IS_EPPING_ENABLED(mode)) {
1745 if (WLAN_IS_EPPING_IRQ(mode))
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001746 host_ce_config = host_ce_config_wlan_epping_irq;
1747 else
1748 host_ce_config = host_ce_config_wlan_epping_poll;
1749 target_ce_config = target_ce_config_wlan_epping;
1750 target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
1751 target_service_to_ce_map =
1752 target_service_to_ce_map_wlan_epping;
1753 target_service_to_ce_map_sz =
1754 sizeof(target_service_to_ce_map_wlan_epping);
Vishwajith Upendra70efc752016-04-18 11:23:49 -07001755 target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
1756 shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001757 }
Houston Hoffman108da402016-03-14 21:11:24 -07001758}
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001759
Houston Hoffman108da402016-03-14 21:11:24 -07001760/**
1761 * hif_ce_open() - do ce specific allocations
1762 * @hif_sc: pointer to hif context
1763 *
 1764 * Return: QDF_STATUS_SUCCESS for success or QDF_STATUS_E_NOMEM
1765 */
1766QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
1767{
1768 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001769
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301770 qdf_spinlock_create(&hif_state->keep_awake_lock);
Houston Hoffman108da402016-03-14 21:11:24 -07001771 return QDF_STATUS_SUCCESS;
1772}
1773
1774/**
1775 * hif_ce_close() - do ce specific free
1776 * @hif_sc: pointer to hif context
1777 */
1778void hif_ce_close(struct hif_softc *hif_sc)
1779{
1780}
1781
1782/**
1783 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
1784 * @hif_sc: hif context
1785 *
1786 * uses state variables to support cleaning up when hif_config_ce fails.
1787 */
1788void hif_unconfig_ce(struct hif_softc *hif_sc)
1789{
1790 int pipe_num;
1791 struct HIF_CE_pipe_info *pipe_info;
1792 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
1793
1794 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
1795 pipe_info = &hif_state->pipe_info[pipe_num];
1796 if (pipe_info->ce_hdl) {
1797 ce_unregister_irq(hif_state, (1 << pipe_num));
1798 hif_sc->request_irq_done = false;
1799 ce_fini(pipe_info->ce_hdl);
1800 pipe_info->ce_hdl = NULL;
1801 pipe_info->buf_sz = 0;
1802 }
1803 }
Houston Hoffman108da402016-03-14 21:11:24 -07001804 if (hif_sc->athdiag_procfs_inited) {
1805 athdiag_procfs_remove();
1806 hif_sc->athdiag_procfs_inited = false;
1807 }
1808}
1809
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07001810#ifdef CONFIG_BYPASS_QMI
1811#define FW_SHARED_MEM (2 * 1024 * 1024)
1812
1813/**
1814 * hif_post_static_buf_to_target() - post static buffer to WLAN FW
1815 * @scn: pointer to HIF structure
1816 *
1817 * WLAN FW needs 2MB memory from DDR when QMI is disabled.
1818 *
1819 * Return: void
1820 */
1821static void hif_post_static_buf_to_target(struct hif_softc *scn)
1822{
1823 uint32_t CE_data;
1824 uint8_t *g_fw_mem;
1825 uint32_t phys_addr;
1826
 1827 	g_fw_mem = kzalloc(FW_SHARED_MEM, GFP_KERNEL);
	if (!g_fw_mem) {
		pr_err("%s: memory allocation failed\n", __func__);
		return;
	}
 1828
 1829 	CE_data = dma_map_single(scn->qdf_dev->dev, g_fw_mem,
 1830 				 FW_SHARED_MEM, DMA_FROM_DEVICE);
 1831 	HIF_TRACE("g_fw_mem %p physical 0x%x\n", g_fw_mem, CE_data);
 1832
 1833 	if (dma_mapping_error(scn->qdf_dev->dev, CE_data)) {
 1834 		pr_err("DMA map failed\n");
		kfree(g_fw_mem);
 1835 		return;
 1836 	}
1837
1838 phys_addr = virt_to_phys((scn->mem + BYPASS_QMI_TEMP_REGISTER));
1839 hif_write32_mb(scn->mem + BYPASS_QMI_TEMP_REGISTER, CE_data);
1840 HIF_TRACE("Write phy address 0x%x into scratch reg %p phy add 0x%x",
1841 CE_data, (scn->mem + BYPASS_QMI_TEMP_REGISTER), phys_addr);
1842}
1843#else
1844static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
1845{
1846 return;
1847}
1848#endif
1849
Houston Hoffman108da402016-03-14 21:11:24 -07001850/**
1851 * hif_config_ce() - configure copy engines
1852 * @scn: hif context
1853 *
1854 * Prepares fw, copy engine hardware and host sw according
1855 * to the attributes selected by hif_ce_prepare_config.
1856 *
1857 * also calls athdiag_procfs_init
1858 *
 1859 * Return: 0 for success, nonzero for failure.
1860 */
1861int hif_config_ce(struct hif_softc *scn)
1862{
1863 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1864 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1865 struct HIF_CE_pipe_info *pipe_info;
1866 int pipe_num;
1867#ifdef ADRASTEA_SHADOW_REGISTERS
1868 int i;
1869#endif
1870 QDF_STATUS rv = QDF_STATUS_SUCCESS;
1871
1872 scn->notice_send = true;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001873
Yuanyuan Liua7a282f2016-04-15 12:55:04 -07001874 hif_post_static_buf_to_target(scn);
1875
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001876 hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
Houston Hoffman108da402016-03-14 21:11:24 -07001877
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08001878 hif_config_rri_on_ddr(scn);
1879
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001880 /* During CE initializtion */
1881 scn->ce_count = HOST_CE_COUNT;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001882 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1883 struct CE_attr *attr;
1884
1885 pipe_info = &hif_state->pipe_info[pipe_num];
1886 pipe_info->pipe_num = pipe_num;
1887 pipe_info->HIF_CE_state = hif_state;
1888 attr = &host_ce_config[pipe_num];
1889 pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301890 QDF_ASSERT(pipe_info->ce_hdl != NULL);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001891 if (pipe_info->ce_hdl == NULL) {
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301892 rv = QDF_STATUS_E_FAILURE;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001893 A_TARGET_ACCESS_UNLIKELY(scn);
1894 goto err;
1895 }
1896
1897 if (pipe_num == DIAG_CE_ID) {
 1898 			/* Reserve the last CE for
 1899 			 * Diagnostic Window support */
Houston Hoffmanc1d9a412016-03-30 21:07:57 -07001900 hif_state->ce_diag = pipe_info->ce_hdl;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001901 continue;
1902 }
1903
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301904 pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
1905 qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001906 if (attr->dest_nentries > 0) {
1907 atomic_set(&pipe_info->recv_bufs_needed,
1908 init_buffer_count(attr->dest_nentries - 1));
1909 } else {
1910 atomic_set(&pipe_info->recv_bufs_needed, 0);
1911 }
1912 ce_tasklet_init(hif_state, (1 << pipe_num));
1913 ce_register_irq(hif_state, (1 << pipe_num));
1914 scn->request_irq_done = true;
1915 }
1916
1917 if (athdiag_procfs_init(scn) != 0) {
1918 A_TARGET_ACCESS_UNLIKELY(scn);
1919 goto err;
1920 }
1921 scn->athdiag_procfs_inited = true;
1922
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001923 HIF_INFO_MED("%s: ce_init done", __func__);
1924
Houston Hoffman108da402016-03-14 21:11:24 -07001925 init_tasklet_workers(hif_hdl);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001926
1927 HIF_TRACE("%s: X, ret = %d\n", __func__, rv);
1928
1929#ifdef ADRASTEA_SHADOW_REGISTERS
1930 HIF_ERROR("Using Shadow Registers instead of CE Registers\n");
1931 for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
1932 HIF_ERROR("%s Shadow Register%d is mapped to address %x\n",
1933 __func__, i,
1934 (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
1935 }
1936#endif
1937
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301938 return rv != QDF_STATUS_SUCCESS;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001939
1940err:
1941 /* Failure, so clean up */
Houston Hoffman108da402016-03-14 21:11:24 -07001942 hif_unconfig_ce(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001943 HIF_TRACE("%s: X, ret = %d\n", __func__, rv);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301944 	return 1; /* nonzero indicates failure */
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001945}
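
/*
 * Lifecycle sketch (illustrative only, error handling trimmed): the CE
 * layer is brought up with the open/prepare/config functions defined in
 * this file; hif_config_ce() already calls hif_unconfig_ce() on its own
 * error path.  A hypothetical bus probe path might look like:
 */
static int example_ce_bringup(struct hif_softc *scn)
{
	if (hif_ce_open(scn) != QDF_STATUS_SUCCESS)
		return -ENOMEM;
	hif_ce_prepare_config(scn);
	if (hif_config_ce(scn)) {	/* nonzero indicates failure */
		hif_ce_close(scn);
		return -EIO;
	}
	return 0;
}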
1946
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001947#ifdef WLAN_FEATURE_FASTPATH
1948/**
1949 * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
 1950 * @handler: Callback function
1951 * @context: handle for callback function
1952 *
1953 * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
1954 */
1955int hif_ce_fastpath_cb_register(fastpath_msg_handler handler, void *context)
1956{
1957 struct hif_softc *scn =
1958 (struct hif_softc *)cds_get_context(QDF_MODULE_ID_HIF);
1959 struct CE_state *ce_state;
1960 int i;
1961
1962 QDF_ASSERT(scn != NULL);
1963
1964 if (!scn->fastpath_mode_on) {
1965 HIF_WARN("Fastpath mode disabled\n");
1966 return QDF_STATUS_E_FAILURE;
1967 }
1968
1969 for (i = 0; i < CE_COUNT_MAX; i++) {
1970 ce_state = scn->ce_id_to_state[i];
 1971 		if (ce_state && ce_state->htt_rx_data) {
1972 ce_state->fastpath_handler = handler;
1973 ce_state->context = context;
1974 }
1975 }
1976
1977 return QDF_STATUS_SUCCESS;
1978}
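
/*
 * Usage sketch (illustrative only): registering a datapath-supplied
 * fastpath handler.  The handler and context come from the caller and
 * must match the fastpath_msg_handler typedef; the example_* name is
 * hypothetical.
 */
static void example_enable_fastpath(fastpath_msg_handler handler, void *ctx)
{
	if (hif_ce_fastpath_cb_register(handler, ctx) != QDF_STATUS_SUCCESS)
		HIF_ERROR("%s: fastpath callback registration failed",
			  __func__);
}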
Manjunathappa Prakash7399f142016-04-13 23:38:16 -07001979#endif
1980
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001981#ifdef IPA_OFFLOAD
Leo Changd85f78d2015-11-13 10:55:34 -08001982/**
1983 * hif_ipa_get_ce_resource() - get uc resource on hif
1984 * @scn: bus context
1985 * @ce_sr_base_paddr: copyengine source ring base physical address
1986 * @ce_sr_ring_size: copyengine source ring size
1987 * @ce_reg_paddr: copyengine register physical address
1988 *
1989 * IPA micro controller data path offload feature enabled,
1990 * HIF should release copy engine related resource information to IPA UC
1991 * IPA UC will access hardware resource with released information
1992 *
1993 * Return: None
1994 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05301995void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301996 qdf_dma_addr_t *ce_sr_base_paddr,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001997 uint32_t *ce_sr_ring_size,
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05301998 qdf_dma_addr_t *ce_reg_paddr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08001999{
Komal Seelam644263d2016-02-22 20:45:49 +05302000 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
Komal Seelam02cf2f82016-02-22 20:44:25 +05302001 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002002 struct HIF_CE_pipe_info *pipe_info =
2003 &(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
2004 struct CE_handle *ce_hdl = pipe_info->ce_hdl;
2005
2006 ce_ipa_get_resource(ce_hdl, ce_sr_base_paddr, ce_sr_ring_size,
2007 ce_reg_paddr);
2008 return;
2009}
2010#endif /* IPA_OFFLOAD */
2011
2012
2013#ifdef ADRASTEA_SHADOW_REGISTERS
2014
2015/*
2016 Current shadow register config
2017
2018 -----------------------------------------------------------
2019 Shadow Register | CE | src/dst write index
2020 -----------------------------------------------------------
2021 0 | 0 | src
2022 1 No Config - Doesn't point to anything
2023 2 No Config - Doesn't point to anything
2024 3 | 3 | src
2025 4 | 4 | src
2026 5 | 5 | src
2027 6 No Config - Doesn't point to anything
2028 7 | 7 | src
2029 8 No Config - Doesn't point to anything
2030 9 No Config - Doesn't point to anything
2031 10 No Config - Doesn't point to anything
2032 11 No Config - Doesn't point to anything
2033 -----------------------------------------------------------
2034 12 No Config - Doesn't point to anything
2035 13 | 1 | dst
2036 14 | 2 | dst
2037 15 No Config - Doesn't point to anything
2038 16 No Config - Doesn't point to anything
2039 17 No Config - Doesn't point to anything
2040 18 No Config - Doesn't point to anything
2041 19 | 7 | dst
2042 20 | 8 | dst
2043 21 No Config - Doesn't point to anything
2044 22 No Config - Doesn't point to anything
2045 23 No Config - Doesn't point to anything
2046 -----------------------------------------------------------
2047
2048
 2049 ToDo - Move the shadow register config to the following layout in the future.
 2050 This frees up a block of shadow registers towards the end,
 2051 which can be used for other purposes.
2052
2053 -----------------------------------------------------------
2054 Shadow Register | CE | src/dst write index
2055 -----------------------------------------------------------
2056 0 | 0 | src
2057 1 | 3 | src
2058 2 | 4 | src
2059 3 | 5 | src
2060 4 | 7 | src
2061 -----------------------------------------------------------
2062 5 | 1 | dst
2063 6 | 2 | dst
2064 7 | 7 | dst
2065 8 | 8 | dst
2066 -----------------------------------------------------------
2067 9 No Config - Doesn't point to anything
2068 12 No Config - Doesn't point to anything
2069 13 No Config - Doesn't point to anything
2070 14 No Config - Doesn't point to anything
2071 15 No Config - Doesn't point to anything
2072 16 No Config - Doesn't point to anything
2073 17 No Config - Doesn't point to anything
2074 18 No Config - Doesn't point to anything
2075 19 No Config - Doesn't point to anything
2076 20 No Config - Doesn't point to anything
2077 21 No Config - Doesn't point to anything
2078 22 No Config - Doesn't point to anything
2079 23 No Config - Doesn't point to anything
2080 -----------------------------------------------------------
2081*/
2082
Komal Seelam644263d2016-02-22 20:45:49 +05302083u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002084{
2085 u32 addr = 0;
2086
2087 switch (COPY_ENGINE_ID(ctrl_addr)) {
2088 case 0:
2089 addr = SHADOW_VALUE0;
2090 break;
2091 case 3:
2092 addr = SHADOW_VALUE3;
2093 break;
2094 case 4:
2095 addr = SHADOW_VALUE4;
2096 break;
2097 case 5:
2098 addr = SHADOW_VALUE5;
2099 break;
2100 case 7:
2101 addr = SHADOW_VALUE7;
2102 break;
2103 default:
Rajeev Kumar74f77642016-04-14 16:54:32 -07002104 		HIF_ERROR("invalid CE id %d for srri",
2105 COPY_ENGINE_ID(ctrl_addr));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302106 QDF_ASSERT(0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002107
2108 }
2109 return addr;
2110
2111}
2112
Komal Seelam644263d2016-02-22 20:45:49 +05302113u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002114{
2115 u32 addr = 0;
2116
2117 switch (COPY_ENGINE_ID(ctrl_addr)) {
2118 case 1:
2119 addr = SHADOW_VALUE13;
2120 break;
2121 case 2:
2122 addr = SHADOW_VALUE14;
2123 break;
Vishwajith Upendra70efc752016-04-18 11:23:49 -07002124 case 5:
2125 addr = SHADOW_VALUE17;
2126 break;
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002127 case 7:
2128 addr = SHADOW_VALUE19;
2129 break;
2130 case 8:
2131 addr = SHADOW_VALUE20;
2132 break;
2133 default:
Rajeev Kumar74f77642016-04-14 16:54:32 -07002134 		HIF_ERROR("invalid CE id %d for drri",
2135 COPY_ENGINE_ID(ctrl_addr));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302136 QDF_ASSERT(0);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002137 }
2138
2139 return addr;
2140
2141}
2142#endif
2143
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002144#if defined(FEATURE_LRO)
2145/**
2146 * ce_lro_flush_cb_register() - register the LRO flush
2147 * callback
2148 * @scn: HIF context
2149 * @handler: callback function
2150 * @data: opaque data pointer to be passed back
2151 *
2152 * Store the LRO flush callback provided
2153 *
2154 * Return: none
2155 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05302156void ce_lro_flush_cb_register(struct hif_opaque_softc *hif_hdl,
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002157 void (handler)(void *), void *data)
2158{
Houston Hoffmanc7d54292016-04-13 18:55:37 -07002159 int i;
2160 struct CE_state *ce_state;
Komal Seelam5584a7c2016-02-24 19:22:48 +05302161 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002162
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302163 QDF_ASSERT(scn != NULL);
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002164
Houston Hoffmanc7d54292016-04-13 18:55:37 -07002165 for (i = 0; i < CE_COUNT_MAX; i++) {
2166 ce_state = scn->ce_id_to_state[i];
 2167 		if (ce_state && ce_state->htt_rx_data) {
2168 ce_state->lro_flush_cb = handler;
2169 ce_state->lro_data = data;
2170 }
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002171 }
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -08002172}
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002173
2174/**
2175 * ce_lro_flush_cb_deregister() - deregister the LRO flush
2176 * callback
2177 * @scn: HIF context
2178 *
2179 * Remove the LRO flush callback
2180 *
2181 * Return: none
2182 */
Komal Seelam5584a7c2016-02-24 19:22:48 +05302183void ce_lro_flush_cb_deregister(struct hif_opaque_softc *hif_hdl)
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002184{
Houston Hoffmanc7d54292016-04-13 18:55:37 -07002185 int i;
2186 struct CE_state *ce_state;
Komal Seelam5584a7c2016-02-24 19:22:48 +05302187 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002188
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302189 QDF_ASSERT(scn != NULL);
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002190
Houston Hoffmanc7d54292016-04-13 18:55:37 -07002191 for (i = 0; i < CE_COUNT_MAX; i++) {
2192 ce_state = scn->ce_id_to_state[i];
 2193 		if (ce_state && ce_state->htt_rx_data) {
2194 ce_state->lro_flush_cb = NULL;
2195 ce_state->lro_data = NULL;
2196 }
Dhanashri Atre65b674f2015-10-30 15:12:03 -07002197 }
2198}
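
/*
 * Usage sketch (illustrative only): an LRO flush handler and its
 * registration.  The handler signature matches the one accepted by
 * ce_lro_flush_cb_register() above; the example_* names are hypothetical.
 */
static void example_lro_flush(void *data)
{
	/* flush any LRO aggregation state held in 'data' here */
}

static void example_lro_setup(struct hif_opaque_softc *hif_hdl,
			      void *lro_ctx)
{
	ce_lro_flush_cb_register(hif_hdl, example_lro_flush, lro_ctx);
	/* ... rx datapath runs, the CE layer invokes the callback ... */
	ce_lro_flush_cb_deregister(hif_hdl);
}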
2199#endif
Sanjay Devnanic319c822015-11-06 16:44:28 -08002200
2201/**
2202 * hif_map_service_to_pipe() - returns the ce ids pertaining to
2203 * this service
Komal Seelam644263d2016-02-22 20:45:49 +05302204 * @scn: hif_softc pointer.
Sanjay Devnanic319c822015-11-06 16:44:28 -08002205 * @svc_id: Service ID for which the mapping is needed.
2206 * @ul_pipe: address of the container in which ul pipe is returned.
2207 * @dl_pipe: address of the container in which dl pipe is returned.
2208 * @ul_is_polled: address of the container in which a bool
2209 * indicating if the UL CE for this service
2210 * is polled is returned.
2211 * @dl_is_polled: address of the container in which a bool
2212 * indicating if the DL CE for this service
2213 * is polled is returned.
2214 *
 2215 * Return: QDF_STATUS_SUCCESS; unmapped services leave the outputs untouched.
2216 */
2217
Komal Seelam5584a7c2016-02-24 19:22:48 +05302218int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
Sanjay Devnanic319c822015-11-06 16:44:28 -08002219 uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
2220 int *dl_is_polled)
2221{
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302222 int status = QDF_STATUS_SUCCESS;
Sanjay Devnanic319c822015-11-06 16:44:28 -08002223 unsigned int i;
2224 struct service_to_pipe element;
Sanjay Devnanic319c822015-11-06 16:44:28 -08002225 struct service_to_pipe *tgt_svc_map_to_use;
2226 size_t sz_tgt_svc_map_to_use;
Komal Seelambd7c51d2016-02-24 10:27:30 +05302227 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
2228 uint32_t mode = hif_get_conparam(scn);
Sanjay Devnanic319c822015-11-06 16:44:28 -08002229
Komal Seelambd7c51d2016-02-24 10:27:30 +05302230 if (WLAN_IS_EPPING_ENABLED(mode)) {
Sanjay Devnanic319c822015-11-06 16:44:28 -08002231 tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
2232 sz_tgt_svc_map_to_use =
2233 sizeof(target_service_to_ce_map_wlan_epping);
2234 } else {
2235 tgt_svc_map_to_use = target_service_to_ce_map_wlan;
2236 sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_wlan);
2237 }
2238
2239 *dl_is_polled = 0; /* polling for received messages not supported */
2240
2241 for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
2242
2243 memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
2244 if (element.service_id == svc_id) {
2245
2246 if (element.pipedir == PIPEDIR_OUT)
2247 *ul_pipe = element.pipenum;
2248
2249 else if (element.pipedir == PIPEDIR_IN)
2250 *dl_pipe = element.pipenum;
2251 }
2252 }
2253
2254 *ul_is_polled =
2255 (host_ce_config[*ul_pipe].flags & CE_ATTR_DISABLE_INTR) != 0;
2256
2257 return status;
2258}
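
/*
 * Usage sketch (illustrative only): resolving the UL/DL pipes for a
 * service, as hif_get_default_pipe() does for HTC_CTRL_RSVD_SVC.
 * WMI_CONTROL_SVC is assumed to come from the HTC service definitions;
 * the example_* name is hypothetical.
 */
static void example_query_wmi_pipes(struct hif_opaque_softc *hif_hdl)
{
	uint8_t ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;

	if (hif_map_service_to_pipe(hif_hdl, WMI_CONTROL_SVC,
				    &ul_pipe, &dl_pipe,
				    &ul_is_polled, &dl_is_polled) !=
	    QDF_STATUS_SUCCESS)
		HIF_ERROR("%s: service to pipe mapping failed", __func__);
}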
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002259
2260#ifdef SHADOW_REG_DEBUG
Komal Seelam644263d2016-02-22 20:45:49 +05302261inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002262 uint32_t CE_ctrl_addr)
2263{
2264 uint32_t read_from_hw, srri_from_ddr = 0;
2265
2266 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
2267
2268 srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
2269
2270 if (read_from_hw != srri_from_ddr) {
2271 HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x\n",
2272 srri_from_ddr, read_from_hw,
2273 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302274 QDF_ASSERT(0);
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002275 }
2276 return srri_from_ddr;
2277}
2278
2279
Komal Seelam644263d2016-02-22 20:45:49 +05302280inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002281 uint32_t CE_ctrl_addr)
2282{
2283 uint32_t read_from_hw, drri_from_ddr = 0;
2284
2285 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
2286
2287 drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
2288
2289 if (read_from_hw != drri_from_ddr) {
2290 HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x\n",
2291 drri_from_ddr, read_from_hw,
2292 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302293 QDF_ASSERT(0);
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002294 }
2295 return drri_from_ddr;
2296}
2297
2298#endif
2299
Houston Hoffman3d0cda82015-12-03 13:25:05 -08002300#ifdef ADRASTEA_RRI_ON_DDR
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002301/**
2302 * hif_get_src_ring_read_index(): Called to get the SRRI
2303 *
Komal Seelam644263d2016-02-22 20:45:49 +05302304 * @scn: hif_softc pointer
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002305 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2306 *
2307 * This function returns the SRRI to the caller. For CEs that
2308 * dont have interrupts enabled, we look at the DDR based SRRI
 2309 * don't have interrupts enabled, we look at the DDR based SRRI
2310 * Return: SRRI
2311 */
Komal Seelam644263d2016-02-22 20:45:49 +05302312inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002313 uint32_t CE_ctrl_addr)
2314{
2315 struct CE_attr attr;
2316
2317 attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2318 if (attr.flags & CE_ATTR_DISABLE_INTR)
2319 return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2320 else
2321 return A_TARGET_READ(scn,
2322 (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
2323}
2324
2325/**
2326 * hif_get_dst_ring_read_index(): Called to get the DRRI
2327 *
Komal Seelam644263d2016-02-22 20:45:49 +05302328 * @scn: hif_softc pointer
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002329 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2330 *
2331 * This function returns the DRRI to the caller. For CEs that
2332 * dont have interrupts enabled, we look at the DDR based DRRI
 2333 * don't have interrupts enabled, we look at the DDR based DRRI
2334 * Return: DRRI
2335 */
Komal Seelam644263d2016-02-22 20:45:49 +05302336inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002337 uint32_t CE_ctrl_addr)
2338{
2339 struct CE_attr attr;
2340
2341 attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2342
2343 if (attr.flags & CE_ATTR_DISABLE_INTR)
2344 return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2345 else
2346 return A_TARGET_READ(scn,
2347 (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
2348}
2349
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002350/**
2351 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2352 *
Komal Seelam644263d2016-02-22 20:45:49 +05302353 * @scn: hif_softc pointer
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002354 *
 2355 * This function allocates non-cached memory on DDR and sends
 2356 * the physical address of this memory to the CE hardware. The
 2357 * hardware updates the RRI at this location.
2358 *
2359 * Return: None
2360 */
Komal Seelam644263d2016-02-22 20:45:49 +05302361static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002362{
2363 unsigned int i;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302364 qdf_dma_addr_t paddr_rri_on_ddr;
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002365 	uint32_t high_paddr, low_paddr;

 2366 	scn->vaddr_rri_on_ddr =
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302367 		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
 2368 		scn->qdf_dev->dev, (CE_COUNT*sizeof(uint32_t)),
 2369 		&paddr_rri_on_ddr);
	if (scn->vaddr_rri_on_ddr == NULL) {
		HIF_ERROR("%s: RRI on DDR allocation failed", __func__);
		return;
	}
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002370
2371 low_paddr = BITS0_TO_31(paddr_rri_on_ddr);
2372 high_paddr = BITS32_TO_35(paddr_rri_on_ddr);
2373
2374 HIF_ERROR("%s using srri and drri from DDR\n", __func__);
2375
2376 WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
2377 WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
2378
2379 for (i = 0; i < CE_COUNT; i++)
2380 CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
2381
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302382 qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT*sizeof(uint32_t));
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002383
2384 return;
2385}
2386#else
2387
2388/**
2389 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2390 *
Komal Seelam644263d2016-02-22 20:45:49 +05302391 * @scn: hif_softc pointer
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002392 *
2393 * This is a dummy implementation for platforms that don't
2394 * support this functionality.
2395 *
2396 * Return: None
2397 */
Komal Seelam644263d2016-02-22 20:45:49 +05302398static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
Sanjay Devnanib925d7e2015-11-12 14:43:58 -08002399{
2400 return;
2401}
2402#endif
Govind Singh2443fb32016-01-13 17:44:48 +05302403
2404/**
2405 * hif_dump_ce_registers() - dump ce registers
Komal Seelam5584a7c2016-02-24 19:22:48 +05302406 * @scn: hif_opaque_softc pointer.
Govind Singh2443fb32016-01-13 17:44:48 +05302407 *
2408 * Output the copy engine registers
2409 *
2410 * Return: 0 for success or error code
2411 */
Komal Seelam644263d2016-02-22 20:45:49 +05302412int hif_dump_ce_registers(struct hif_softc *scn)
Govind Singh2443fb32016-01-13 17:44:48 +05302413{
Komal Seelam5584a7c2016-02-24 19:22:48 +05302414 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
Govind Singh2443fb32016-01-13 17:44:48 +05302415 uint32_t ce_reg_address = CE0_BASE_ADDRESS;
2416 uint32_t ce_reg_values[CE_COUNT_MAX][CE_USEFUL_SIZE >> 2];
2417 uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
2418 uint16_t i;
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302419 QDF_STATUS status;
Govind Singh2443fb32016-01-13 17:44:48 +05302420
2421 for (i = 0; i < CE_COUNT_MAX; i++, ce_reg_address += CE_OFFSET) {
Komal Seelam644263d2016-02-22 20:45:49 +05302422 status = hif_diag_read_mem(hif_hdl, ce_reg_address,
Govind Singh2443fb32016-01-13 17:44:48 +05302423 (uint8_t *) &ce_reg_values[i][0],
2424 ce_reg_word_size * sizeof(uint32_t));
2425
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302426 if (status != QDF_STATUS_SUCCESS) {
Govind Singh2443fb32016-01-13 17:44:48 +05302427 HIF_ERROR("Dumping CE register failed!");
2428 return -EACCES;
2429 }
2430 HIF_ERROR("CE%d Registers:", i);
Chouhan, Anuragfc06aa92016-03-03 19:05:05 +05302431 qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
Govind Singh2443fb32016-01-13 17:44:48 +05302432 (uint8_t *) &ce_reg_values[i][0],
2433 ce_reg_word_size * sizeof(uint32_t));
2434 }
Govind Singh2443fb32016-01-13 17:44:48 +05302435 return 0;
2436}
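
/*
 * Usage sketch (illustrative only): invoking the diagnostics above from a
 * debug or error-recovery path; the example_* name is hypothetical.
 */
static void example_debug_dump(struct hif_softc *scn)
{
	hif_dump_pipe_debug_count(scn);
	if (hif_dump_ce_registers(scn))
		HIF_ERROR("%s: CE register dump failed", __func__);
}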