/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */
#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include <bmi_msg.h>
#include "hif_io32.h"
#include <hif.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#ifdef CONFIG_CNSS
#include <net/cnss.h>
#endif
#include "epping_main.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#include "platform_icnss.h"
#include "qwlan_version.h"
#include <cds_api.h>

#define CE_POLL_TIMEOUT 10      /* ms */

/* Forward references */
static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix EV118783: poll to check whether a BMI response has arrived,
 * rather than only waiting for the interrupt, which may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC  1000
66
Yuanyuan Liua7a282f2016-04-15 12:55:04 -070067#ifdef CONFIG_BYPASS_QMI
68#define BYPASS_QMI 1
69#else
70#define BYPASS_QMI 0
71#endif
72
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080073
Komal Seelam644263d2016-02-22 20:45:49 +053074static int hif_post_recv_buffers(struct hif_softc *scn);
75static void hif_config_rri_on_ddr(struct hif_softc *scn);
Prakash Dhavalid5c9f1c2015-11-08 19:04:44 -080076
static void ce_poll_timeout(void *arg)
{
        struct CE_state *CE_state = (struct CE_state *)arg;
        if (CE_state->timer_inited) {
                ce_per_engine_service(CE_state->scn, CE_state->id);
                qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
        }
}

static unsigned int roundup_pwr2(unsigned int n)
{
        int i;
        unsigned int test_pwr2;

        if (!(n & (n - 1)))
                return n; /* already a power of 2 */

        test_pwr2 = 4;
        for (i = 0; i < 29; i++) {
                if (test_pwr2 > n)
                        return test_pwr2;
                test_pwr2 = test_pwr2 << 1;
        }

        QDF_ASSERT(0); /* n too large */
        return 0;
}

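/*
 * Behavior of roundup_pwr2(), for illustration (derived from the code
 * above): inputs that are already powers of 2, e.g. 1, 32, 512, are
 * returned unchanged; roundup_pwr2(5) walks the loop 4 -> 8 and
 * returns 8; roundup_pwr2(3) returns 4, the smallest value the loop
 * can produce for an input that is not a power of 2.
 */
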
#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40

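/*
 * Each entry below pairs a CE id with the offset of the shadow
 * register that mirrors its write index: CEs used with a source
 * (host -> target) ring take ADRASTEA_SRC_WR_INDEX_OFFSET, CEs used
 * with a destination (target -> host) ring take
 * ADRASTEA_DST_WR_INDEX_OFFSET. CE 7 (diag) appears in both lists
 * since it is used in both directions.
 */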
static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
        { 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
        { 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
        { 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
        { 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
        { 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
        { 1, ADRASTEA_DST_WR_INDEX_OFFSET},
        { 2, ADRASTEA_DST_WR_INDEX_OFFSET},
        { 7, ADRASTEA_DST_WR_INDEX_OFFSET},
        { 8, ADRASTEA_DST_WR_INDEX_OFFSET},
#ifdef QCA_WIFI_3_0_ADRASTEA
        { 9, ADRASTEA_DST_WR_INDEX_OFFSET},
        { 10, ADRASTEA_DST_WR_INDEX_OFFSET},
#endif
};

static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
        { 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
        { 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
        { 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
        { 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
        { 1, ADRASTEA_DST_WR_INDEX_OFFSET},
        { 2, ADRASTEA_DST_WR_INDEX_OFFSET},
        { 5, ADRASTEA_DST_WR_INDEX_OFFSET},
        { 7, ADRASTEA_DST_WR_INDEX_OFFSET},
        { 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};

/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
 * mapping of HTC services to HIF pipes.
 */
/*
 * This authoritative table defines Copy Engine configuration and the mapping
 * of services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
   ============================================================================
   Purpose    | Service / Endpoint  | CE   | Dire | Xfer     | Xfer
              |                     |      | ctio | Size     | Frequency
              |                     |      | n    |          |
   ============================================================================
   tx         | HTT_DATA (downlink) | CE 0 | h->t | medium - | very frequent
   descriptor |                     |      |      | O(100B)  | and regular
   download   |                     |      |      |          |
   ----------------------------------------------------------------------------
   rx         | HTT_DATA (uplink)   | CE 1 | t->h | small -  | frequent and
   indication |                     |      |      | O(10B)   | regular
   upload     |                     |      |      |          |
   ----------------------------------------------------------------------------
   MSDU       | DATA_BK (uplink)    | CE 2 | t->h | large -  | rare
   upload     |                     |      |      | O(1000B) | (frequent
   e.g. noise |                     |      |      |          | during IP1.0
   packets    |                     |      |      |          | testing)
   ----------------------------------------------------------------------------
   MSDU       | DATA_BK (downlink)  | CE 3 | h->t | large -  | very rare
   download   |                     |      |      | O(1000B) | (frequent
   e.g.       |                     |      |      |          | during IP1.0
   misdirecte |                     |      |      |          | testing)
   d EAPOL    |                     |      |      |          |
   packets    |                     |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | DATA_BE, DATA_VI    | CE 2 | t->h |          | never(?)
              | DATA_VO (uplink)    |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | DATA_BE, DATA_VI    | CE 3 | h->t |          | never(?)
              | DATA_VO (downlink)  |      |      |          |
   ----------------------------------------------------------------------------
   WMI events | WMI_CONTROL (uplink)| CE 4 | t->h | medium - | infrequent
              |                     |      |      | O(100B)  |
   ----------------------------------------------------------------------------
   WMI        | WMI_CONTROL         | CE 5 | h->t | medium - | infrequent
   messages   | (downlink)          |      |      | O(100B)  |
              |                     |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | HTC_CTRL_RSVD,      | CE 1 | t->h |          | never(?)
              | HTC_RAW_STREAMS     |      |      |          |
              | (uplink)            |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | HTC_CTRL_RSVD,      | CE 0 | h->t |          | never(?)
              | HTC_RAW_STREAMS     |      |      |          |
              | (downlink)          |      |      |          |
   ----------------------------------------------------------------------------
   diag       | none (raw CE)       | CE 7 | t<>h | 4        | Diag Window
              |                     |      |      |          | infrequent
   ============================================================================
 */

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
        {
                WMI_DATA_VO_SVC,
                PIPEDIR_OUT,    /* out = UL = host -> target */
                3,
        },
        {
                WMI_DATA_VO_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                2,
        },
        {
                WMI_DATA_BK_SVC,
                PIPEDIR_OUT,    /* out = UL = host -> target */
                3,
        },
        {
                WMI_DATA_BK_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                2,
        },
        {
                WMI_DATA_BE_SVC,
                PIPEDIR_OUT,    /* out = UL = host -> target */
                3,
        },
        {
                WMI_DATA_BE_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                2,
        },
        {
                WMI_DATA_VI_SVC,
                PIPEDIR_OUT,    /* out = UL = host -> target */
                3,
        },
        {
                WMI_DATA_VI_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                2,
        },
        {
                WMI_CONTROL_SVC,
                PIPEDIR_OUT,    /* out = UL = host -> target */
                3,
        },
        {
                WMI_CONTROL_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                2,
        },
        {
                HTC_CTRL_RSVD_SVC,
                PIPEDIR_OUT,    /* out = UL = host -> target */
                0,              /* could be moved to 3 (share with WMI) */
        },
        {
                HTC_CTRL_RSVD_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                2,
        },
        {
                HTC_RAW_STREAMS_SVC, /* not currently used */
                PIPEDIR_OUT,    /* out = UL = host -> target */
                0,
        },
        {
                HTC_RAW_STREAMS_SVC, /* not currently used */
                PIPEDIR_IN,     /* in = DL = target -> host */
                2,
        },
        {
                HTT_DATA_MSG_SVC,
                PIPEDIR_OUT,    /* out = UL = host -> target */
                4,
        },
        {
                HTT_DATA_MSG_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                1,
        },
        {
                WDI_IPA_TX_SVC,
                PIPEDIR_OUT,    /* out = UL = host -> target */
                5,
        },
#if defined(QCA_WIFI_3_0_ADRASTEA)
        {
                HTT_DATA2_MSG_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                9,
        },
        {
                HTT_DATA3_MSG_SVC,
                PIPEDIR_IN,     /* in = DL = target -> host */
                10,
        },
#endif
        /* (Additions here) */

        {                       /* Must be last */
                0,
                0,
                0,
        },
};

static struct service_to_pipe *target_service_to_ce_map =
        target_service_to_ce_map_wlan;
static int target_service_to_ce_map_sz = sizeof(target_service_to_ce_map_wlan);

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
        {WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
        {WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
        {WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
        {WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
        {WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
        {WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
        {WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
        {WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
        {WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
        {WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
        {HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
        {HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
        {HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
        {HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
        {HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
        {HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
        {0, 0, 0,},             /* Must be last */
};

/**
 * ce_mark_datapath() - marks a CE that serves one of the HTT DATA services
 * @ce_state: pointer to the state context of the CE
 *
 * Description:
 *   Sets the htt_rx_data or htt_tx_data attribute of the state
 *   structure if the CE serves one of the HTT DATA services.
 *
 * Return:
 *  true if the CE serves an HTT DATA service (attribute set)
 *  false otherwise
 */
bool ce_mark_datapath(struct CE_state *ce_state)
{
        struct service_to_pipe *svc_map;
        size_t map_sz;
        int i;
        bool rc = false;

        if (ce_state != NULL) {
                if (WLAN_IS_EPPING_ENABLED(hif_get_conparam(ce_state->scn))) {
                        svc_map = target_service_to_ce_map_wlan_epping;
                        map_sz = sizeof(target_service_to_ce_map_wlan_epping) /
                                sizeof(struct service_to_pipe);
                } else {
                        svc_map = target_service_to_ce_map_wlan;
                        map_sz = sizeof(target_service_to_ce_map_wlan) /
                                sizeof(struct service_to_pipe);
                }
                for (i = 0; i < map_sz; i++) {
                        if ((svc_map[i].pipenum == ce_state->id) &&
                            ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
                             (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
                             (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
                                /* HTT CEs are unidirectional */
                                if (svc_map[i].pipedir == PIPEDIR_IN)
                                        ce_state->htt_rx_data = true;
                                else
                                        ce_state->htt_tx_data = true;
                                rc = true;
                        }
                }
        }
        return rc;
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 *
 * This should be called during the initialization sequence before
 * interrupts are enabled, so we don't have to worry about thread safety.
 */
struct CE_handle *ce_init(struct hif_softc *scn,
                          unsigned int CE_id, struct CE_attr *attr)
{
        struct CE_state *CE_state;
        uint32_t ctrl_addr;
        unsigned int nentries;
        qdf_dma_addr_t base_addr;
        bool malloc_CE_state = false;
        bool malloc_src_ring = false;

        QDF_ASSERT(CE_id < scn->ce_count);
        ctrl_addr = CE_BASE_ADDRESS(CE_id);
        CE_state = scn->ce_id_to_state[CE_id];

        if (!CE_state) {
                CE_state =
                    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
                if (!CE_state) {
                        HIF_ERROR("%s: CE_state has no mem", __func__);
                        return NULL;
                }
                malloc_CE_state = true;
                qdf_mem_zero(CE_state, sizeof(*CE_state));
                scn->ce_id_to_state[CE_id] = CE_state;
                qdf_spinlock_create(&CE_state->ce_index_lock);

                CE_state->id = CE_id;
                CE_state->ctrl_addr = ctrl_addr;
                CE_state->state = CE_RUNNING;
                CE_state->attr_flags = attr->flags;
        }
        CE_state->scn = scn;

        qdf_atomic_init(&CE_state->rx_pending);
        if (attr == NULL) {
                /* Already initialized; caller wants the handle */
                return (struct CE_handle *)CE_state;
        }

#ifdef ADRASTEA_SHADOW_REGISTERS
        HIF_ERROR("%s: Using Shadow Registers instead of CE Registers\n",
                  __func__);
#endif

        if (CE_state->src_sz_max)
                QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
        else
                CE_state->src_sz_max = attr->src_sz_max;

        ce_init_ce_desc_event_log(CE_id,
                        attr->src_nentries + attr->dest_nentries);

        /* source ring setup */
        nentries = attr->src_nentries;
        if (nentries) {
                struct CE_ring_state *src_ring;
                unsigned CE_nbytes;
                char *ptr;
                uint64_t dma_addr;
                nentries = roundup_pwr2(nentries);
                if (CE_state->src_ring) {
                        QDF_ASSERT(CE_state->src_ring->nentries == nentries);
                } else {
                        CE_nbytes = sizeof(struct CE_ring_state)
                                    + (nentries * sizeof(void *));
                        ptr = qdf_mem_malloc(CE_nbytes);
                        if (!ptr) {
                                /* cannot allocate src ring. If the
                                 * CE_state is allocated locally free
                                 * CE_state and return error.
                                 */
                                HIF_ERROR("%s: src ring has no mem", __func__);
                                if (malloc_CE_state) {
                                        /* allocated CE_state locally */
                                        scn->ce_id_to_state[CE_id] = NULL;
                                        qdf_mem_free(CE_state);
                                        malloc_CE_state = false;
                                }
                                return NULL;
                        } else {
                                /* we can allocate src ring.
                                 * Mark that the src ring is
                                 * allocated locally
                                 */
                                malloc_src_ring = true;
                        }
                        qdf_mem_zero(ptr, CE_nbytes);

                        src_ring = CE_state->src_ring =
                                (struct CE_ring_state *)ptr;
                        ptr += sizeof(struct CE_ring_state);
                        src_ring->nentries = nentries;
                        src_ring->nentries_mask = nentries - 1;
                        if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
                                goto error_target_access;
                        src_ring->hw_index =
                                CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
                        src_ring->sw_index = src_ring->hw_index;
                        src_ring->write_index =
                                CE_SRC_RING_WRITE_IDX_GET(scn, ctrl_addr);
                        if (Q_TARGET_ACCESS_END(scn) < 0)
                                goto error_target_access;

                        src_ring->low_water_mark_nentries = 0;
                        src_ring->high_water_mark_nentries = nentries;
                        src_ring->per_transfer_context = (void **)ptr;

                        /* Legacy platforms that do not support cache
                         * coherent DMA are unsupported
                         */
                        src_ring->base_addr_owner_space_unaligned =
                                qdf_mem_alloc_consistent(scn->qdf_dev,
                                                scn->qdf_dev->dev,
                                                (nentries *
                                                sizeof(struct CE_src_desc) +
                                                CE_DESC_RING_ALIGN),
                                                &base_addr);
                        if (src_ring->base_addr_owner_space_unaligned
                                        == NULL) {
                                HIF_ERROR("%s: src ring has no DMA mem",
                                          __func__);
                                goto error_no_dma_mem;
                        }
                        src_ring->base_addr_CE_space_unaligned = base_addr;

                        if (src_ring->
                            base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN
                                                            - 1)) {
                                src_ring->base_addr_CE_space =
                                        (src_ring->base_addr_CE_space_unaligned
                                        + CE_DESC_RING_ALIGN -
                                         1) & ~(CE_DESC_RING_ALIGN - 1);

                                src_ring->base_addr_owner_space =
                                        (void
                                         *)(((size_t) src_ring->
                                             base_addr_owner_space_unaligned +
                                             CE_DESC_RING_ALIGN -
                                             1) & ~(CE_DESC_RING_ALIGN - 1));
                        } else {
                                src_ring->base_addr_CE_space =
                                        src_ring->base_addr_CE_space_unaligned;
                                src_ring->base_addr_owner_space =
                                        src_ring->
                                        base_addr_owner_space_unaligned;
                        }
                        /*
                         * Also allocate a shadow src ring in
                         * regular mem to use for faster access.
                         */
                        src_ring->shadow_base_unaligned =
                                qdf_mem_malloc(nentries *
                                               sizeof(struct CE_src_desc) +
                                               CE_DESC_RING_ALIGN);
                        if (src_ring->shadow_base_unaligned == NULL) {
                                HIF_ERROR("%s: src ring no shadow_base mem",
                                          __func__);
                                goto error_no_dma_mem;
                        }
                        src_ring->shadow_base = (struct CE_src_desc *)
                                (((size_t) src_ring->shadow_base_unaligned +
                                CE_DESC_RING_ALIGN - 1) &
                                 ~(CE_DESC_RING_ALIGN - 1));

                        if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
                                goto error_target_access;
                        dma_addr = src_ring->base_addr_CE_space;
                        CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
                                 (uint32_t)(dma_addr & 0xFFFFFFFF));
#ifdef WLAN_ENABLE_QCA6180
                        {
                                uint32_t tmp;
                                tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(
                                   scn, ctrl_addr);
                                tmp &= ~0x1F;
                                dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
                                CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
                                         ctrl_addr, (uint32_t)dma_addr);
                        }
#endif
                        CE_SRC_RING_SZ_SET(scn, ctrl_addr, nentries);
                        CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
#ifdef BIG_ENDIAN_HOST
                        /* Enable source ring byte swap for big endian host */
                        CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
                        CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
                        CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
                        if (Q_TARGET_ACCESS_END(scn) < 0)
                                goto error_target_access;
                }
        }

        /* destination ring setup */
        nentries = attr->dest_nentries;
        if (nentries) {
                struct CE_ring_state *dest_ring;
                unsigned CE_nbytes;
                char *ptr;
                uint64_t dma_addr;

                nentries = roundup_pwr2(nentries);
                if (CE_state->dest_ring) {
                        QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
                } else {
                        CE_nbytes = sizeof(struct CE_ring_state)
                                    + (nentries * sizeof(void *));
                        ptr = qdf_mem_malloc(CE_nbytes);
                        if (!ptr) {
                                /* cannot allocate dst ring. If the CE_state
                                 * or src ring is allocated locally free
                                 * CE_state and src ring and return error.
                                 */
                                HIF_ERROR("%s: dest ring has no mem",
                                          __func__);
                                if (malloc_src_ring) {
                                        qdf_mem_free(CE_state->src_ring);
                                        CE_state->src_ring = NULL;
                                        malloc_src_ring = false;
                                }
                                if (malloc_CE_state) {
                                        /* allocated CE_state locally */
                                        scn->ce_id_to_state[CE_id] = NULL;
                                        qdf_mem_free(CE_state);
                                        malloc_CE_state = false;
                                }
                                return NULL;
                        }
                        qdf_mem_zero(ptr, CE_nbytes);

                        dest_ring = CE_state->dest_ring =
                                (struct CE_ring_state *)ptr;
                        ptr += sizeof(struct CE_ring_state);
                        dest_ring->nentries = nentries;
                        dest_ring->nentries_mask = nentries - 1;
                        if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
                                goto error_target_access;
                        dest_ring->sw_index =
                                CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
                        dest_ring->write_index =
                                CE_DEST_RING_WRITE_IDX_GET(scn, ctrl_addr);
                        if (Q_TARGET_ACCESS_END(scn) < 0)
                                goto error_target_access;

                        dest_ring->low_water_mark_nentries = 0;
                        dest_ring->high_water_mark_nentries = nentries;
                        dest_ring->per_transfer_context = (void **)ptr;

                        /* Legacy platforms that do not support cache
                         * coherent DMA are unsupported */
                        dest_ring->base_addr_owner_space_unaligned =
                                qdf_mem_alloc_consistent(scn->qdf_dev,
                                                scn->qdf_dev->dev,
                                                (nentries *
                                                sizeof(struct CE_dest_desc) +
                                                CE_DESC_RING_ALIGN),
                                                &base_addr);
                        if (dest_ring->base_addr_owner_space_unaligned
                                == NULL) {
                                HIF_ERROR("%s: dest ring has no DMA mem",
                                          __func__);
                                goto error_no_dma_mem;
                        }
                        dest_ring->base_addr_CE_space_unaligned = base_addr;

                        /* Correctly initialize memory to 0 to
                         * prevent garbage data crashing the system
                         * when downloading firmware
                         */
                        qdf_mem_zero(dest_ring->base_addr_owner_space_unaligned,
                                     nentries * sizeof(struct CE_dest_desc) +
                                     CE_DESC_RING_ALIGN);

                        if (dest_ring->
                            base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN -
                                                            1)) {

                                dest_ring->base_addr_CE_space =
                                        (dest_ring->
                                         base_addr_CE_space_unaligned +
                                         CE_DESC_RING_ALIGN -
                                         1) & ~(CE_DESC_RING_ALIGN - 1);

                                dest_ring->base_addr_owner_space =
                                        (void
                                         *)(((size_t) dest_ring->
                                             base_addr_owner_space_unaligned +
                                             CE_DESC_RING_ALIGN -
                                             1) & ~(CE_DESC_RING_ALIGN - 1));
                        } else {
                                dest_ring->base_addr_CE_space =
                                        dest_ring->base_addr_CE_space_unaligned;
                                dest_ring->base_addr_owner_space =
                                        dest_ring->
                                        base_addr_owner_space_unaligned;
                        }

                        if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
                                goto error_target_access;
                        dma_addr = dest_ring->base_addr_CE_space;
                        CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
                                 (uint32_t)(dma_addr & 0xFFFFFFFF));
#ifdef WLAN_ENABLE_QCA6180
                        {
                                uint32_t tmp;
                                tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn,
                                                ctrl_addr);
                                tmp &= ~0x1F;
                                dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
                                CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
                                        ctrl_addr, (uint32_t)dma_addr);
                        }
#endif
                        CE_DEST_RING_SZ_SET(scn, ctrl_addr, nentries);
#ifdef BIG_ENDIAN_HOST
                        /* Enable Dest ring byte swap for big endian host */
                        CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
                        CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
                        CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
                        if (Q_TARGET_ACCESS_END(scn) < 0)
                                goto error_target_access;

                        /* epping */
                        /* poll timer */
                        if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL)) {
                                qdf_timer_init(scn->qdf_dev,
                                                &CE_state->poll_timer,
                                                ce_poll_timeout,
                                                CE_state,
                                                QDF_TIMER_TYPE_SW);
                                CE_state->timer_inited = true;
                                qdf_timer_mod(&CE_state->poll_timer,
                                                CE_POLL_TIMEOUT);
                        }
                }
        }

        /* Enable CE error interrupts */
        if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
                goto error_target_access;
        CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
        if (Q_TARGET_ACCESS_END(scn) < 0)
                goto error_target_access;

        /* update the htt_data attribute */
        ce_mark_datapath(CE_state);

        return (struct CE_handle *)CE_state;

error_target_access:
error_no_dma_mem:
        ce_fini((struct CE_handle *)CE_state);
        return NULL;
}

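/*
 * Typical usage of ce_init()/ce_fini(), as an illustrative sketch only
 * (error handling elided; the attributes come from the per-pipe
 * host_ce_config table referenced elsewhere in this file):
 *
 *      struct CE_attr *attr = &host_ce_config[CE_id];
 *      struct CE_handle *ce_hdl = ce_init(scn, CE_id, attr);
 *
 *      if (ce_hdl == NULL)
 *              bail out: allocation or target access failed
 *      ...
 *      ce_fini(ce_hdl);
 */
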
#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_enable_fastpath() - update that we have enabled fastpath mode
 * @hif_ctx: HIF context
 *
 * For use in data path
 *
 * Return: void
 */
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
{
        struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

        HIF_INFO("Enabling fastpath mode\n");
        scn->fastpath_mode_on = true;
}

/**
 * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
 * @hif_ctx: HIF Context
 *
 * For use in data path to skip HTC
 *
 * Return: bool
 */
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
{
        struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

        return scn->fastpath_mode_on;
}

/**
 * hif_get_ce_handle - API to get CE handle for FastPath mode
 * @hif_ctx: HIF Context
 * @id: CopyEngine Id
 *
 * API to return CE handle for fastpath mode
 *
 * Return: CE handle as an opaque void pointer
 */
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
{
        struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

        return scn->ce_id_to_state[id];
}

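/*
 * How a datapath client might use the fastpath accessors above, as a
 * sketch (the CE id constant CE_HTT_H2T_MSG is an assumed example and
 * is not defined in this file):
 *
 *      hif_enable_fastpath(hif_ctx);
 *      if (hif_is_fastpath_mode_enabled(hif_ctx)) {
 *              struct CE_state *ce_tx =
 *                      hif_get_ce_handle(hif_ctx, CE_HTT_H2T_MSG);
 *              program the fastpath send path against ce_tx
 *      }
 */
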
/**
 * ce_h2t_tx_ce_cleanup() - placeholder function for H2T CE cleanup.
 * No processing is required inside this function.
 * @ce_hdl: Copy engine handle
 * Using an assert, this function makes sure that
 * the TX CE has been processed completely.
 *
 * This is called while dismantling CE structures. No other thread
 * should be using these structures while dismantling is occurring
 * therefore no locking is needed.
 *
 * Return: none
 */
void
ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
        struct CE_state *ce_state = (struct CE_state *)ce_hdl;
        struct CE_ring_state *src_ring = ce_state->src_ring;
        struct hif_softc *sc = ce_state->scn;
        uint32_t sw_index, write_index;

        if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
                HIF_INFO("%s %d Fastpath mode ON, Cleaning up HTT Tx CE\n",
                          __func__, __LINE__);
                sw_index = src_ring->sw_index;
                write_index = src_ring->sw_index;

                /* At this point Tx CE should be clean */
                qdf_assert_always(sw_index == write_index);
        }
}

/**
 * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
 * @ce_hdl: Handle to CE
 *
 * These buffers are never allocated on the fly, but
 * are allocated only once during HIF start and freed
 * only once during HIF stop.
 * NOTE:
 * The assumption here is there is no in-flight DMA in progress
 * currently, so that buffers can be freed up safely.
 *
 * Return: NONE
 */
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
        struct CE_state *ce_state = (struct CE_state *)ce_hdl;
        struct CE_ring_state *dst_ring = ce_state->dest_ring;
        qdf_nbuf_t nbuf;
        int i;

        if (!ce_state->fastpath_handler)
                return;
        /*
         * This applies when fastpath_mode is on for a datapath CE.
         * Unlike other CEs, this CE is kept completely full: it does
         * not leave one blank space to distinguish between an empty
         * queue and a full queue. So free all the entries.
         */
        for (i = 0; i < dst_ring->nentries; i++) {
                nbuf = dst_ring->per_transfer_context[i];

                /*
                 * The reasons for doing this check are:
                 * 1) Protect against calling cleanup before allocating buffers
                 * 2) In a corner case, fastpath_mode_on may be set, but we
                 *    could have a partially filled ring, because of a memory
                 *    allocation failure in the middle of allocating the ring.
                 *    This check accounts for that case; checking the
                 *    fastpath_mode_on flag or started flag would not have
                 *    covered that case. This is not in the performance path,
                 *    so it is OK to do this.
                 */
                if (nbuf)
                        qdf_nbuf_free(nbuf);
        }
}

/**
 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
 * @scn: HIF handle
 *
 * Datapath Rx CEs are a special case, where we reuse all the message buffers.
 * Hence we have to post all the entries in the pipe, even in the beginning,
 * unlike for other CE pipes where one less than dest_nentries are filled in
 * the beginning.
 *
 * Return: None
 */
static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
        int pipe_num;
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

        if (scn->fastpath_mode_on == false)
                return;

        for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
                struct HIF_CE_pipe_info *pipe_info =
                        &hif_state->pipe_info[pipe_num];
                struct CE_state *ce_state =
                        scn->ce_id_to_state[pipe_info->pipe_num];

                if (ce_state->htt_rx_data)
                        atomic_inc(&pipe_info->recv_bufs_needed);
        }
}
#else
static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
}

static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
        return false;
}

static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
{
        return false;
}
#endif /* WLAN_FEATURE_FASTPATH */

void ce_fini(struct CE_handle *copyeng)
{
        struct CE_state *CE_state = (struct CE_state *)copyeng;
        unsigned int CE_id = CE_state->id;
        struct hif_softc *scn = CE_state->scn;

        CE_state->state = CE_UNUSED;
        scn->ce_id_to_state[CE_id] = NULL;
        if (CE_state->src_ring) {
                /* Cleanup the datapath Tx ring */
                ce_h2t_tx_ce_cleanup(copyeng);

                if (CE_state->src_ring->shadow_base_unaligned)
                        qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
                if (CE_state->src_ring->base_addr_owner_space_unaligned)
                        qdf_mem_free_consistent(scn->qdf_dev,
                                                scn->qdf_dev->dev,
                                            (CE_state->src_ring->nentries *
                                             sizeof(struct CE_src_desc) +
                                             CE_DESC_RING_ALIGN),
                                            CE_state->src_ring->
                                            base_addr_owner_space_unaligned,
                                            CE_state->src_ring->
                                            base_addr_CE_space, 0);
                qdf_mem_free(CE_state->src_ring);
        }
        if (CE_state->dest_ring) {
                /* Cleanup the datapath Rx ring */
                ce_t2h_msg_ce_cleanup(copyeng);

                if (CE_state->dest_ring->base_addr_owner_space_unaligned)
                        qdf_mem_free_consistent(scn->qdf_dev,
                                                scn->qdf_dev->dev,
                                            (CE_state->dest_ring->nentries *
                                             sizeof(struct CE_dest_desc) +
                                             CE_DESC_RING_ALIGN),
                                            CE_state->dest_ring->
                                            base_addr_owner_space_unaligned,
                                            CE_state->dest_ring->
                                            base_addr_CE_space, 0);
                qdf_mem_free(CE_state->dest_ring);

                /* epping */
                if (CE_state->timer_inited) {
                        CE_state->timer_inited = false;
                        qdf_timer_free(&CE_state->poll_timer);
                }
        }
        qdf_mem_free(CE_state);
}

void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
{
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

        qdf_mem_zero(&hif_state->msg_callbacks_pending,
                     sizeof(hif_state->msg_callbacks_pending));
        qdf_mem_zero(&hif_state->msg_callbacks_current,
                     sizeof(hif_state->msg_callbacks_current));
}

/* Send the first nbytes bytes of the buffer */
QDF_STATUS
hif_send_head(struct hif_opaque_softc *hif_ctx,
              uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
              qdf_nbuf_t nbuf, unsigned int data_attr)
{
        struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
        struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
        struct CE_handle *ce_hdl = pipe_info->ce_hdl;
        int bytes = nbytes, nfrags = 0;
        struct ce_sendlist sendlist;
        int status, i = 0;
        unsigned int mux_id = 0;

        QDF_ASSERT(nbytes <= qdf_nbuf_len(nbuf));

        transfer_id =
                (mux_id & MUX_ID_MASK) |
                (transfer_id & TRANSACTION_ID_MASK);
        data_attr &= DESC_DATA_FLAG_MASK;
        /*
         * The common case involves sending multiple fragments within a
         * single download (the tx descriptor and the tx frame header).
         * So, optimize for the case of multiple fragments by not even
         * checking whether it's necessary to use a sendlist.
         * The overhead of using a sendlist for a single buffer download
         * is not a big deal, since it happens rarely (for WMI messages).
         */
        ce_sendlist_init(&sendlist);
        do {
                qdf_dma_addr_t frag_paddr;
                int frag_bytes;

                frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
                frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
                /*
                 * Clear the packet offset for all but the first CE desc.
                 */
                if (i++ > 0)
                        data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;

                status = ce_sendlist_buf_add(&sendlist, frag_paddr,
                                             frag_bytes >
                                             bytes ? bytes : frag_bytes,
                                             qdf_nbuf_get_frag_is_wordstream
                                             (nbuf,
                                             nfrags) ? 0 :
                                             CE_SEND_FLAG_SWAP_DISABLE,
                                             data_attr);
                if (status != QDF_STATUS_SUCCESS) {
                        HIF_ERROR("%s: error, frag_num %d larger than limit",
                                __func__, nfrags);
                        return status;
                }
                bytes -= frag_bytes;
                nfrags++;
        } while (bytes > 0);

        /* Make sure we have resources to handle this request */
        qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
        if (pipe_info->num_sends_allowed < nfrags) {
                qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
                ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
                return QDF_STATUS_E_RESOURCES;
        }
        pipe_info->num_sends_allowed -= nfrags;
        qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);

        if (qdf_unlikely(ce_hdl == NULL)) {
                HIF_ERROR("%s: error CE handle is null", __func__);
                return A_ERROR;
        }

        QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
        DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
                                (uint8_t *)(qdf_nbuf_data(nbuf)),
                                sizeof(qdf_nbuf_data(nbuf))));
        status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
        QDF_ASSERT(status == QDF_STATUS_SUCCESS);

        return status;
}

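/*
 * Illustrative call sequence for hif_send_head() (a sketch, not a
 * caller from this file; pipe, transfer_id and len are placeholders):
 *
 *      qdf_nbuf_t nbuf = qdf_nbuf_alloc(scn->qdf_dev, len, 0, 4, false);
 *      map the buffer for DMA and fill in the frame data
 *      if (hif_send_head(hif_ctx, pipe, transfer_id, len, nbuf, 0) !=
 *          QDF_STATUS_SUCCESS)
 *              qdf_nbuf_free(nbuf);  the nbuf was not queued on failure
 */
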
void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
                             int force)
{
        struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

        if (!force) {
                int resources;
                /*
                 * Decide whether to actually poll for completions, or just
                 * wait for a later chance. If there seem to be plenty of
                 * resources left, then just wait, since checking involves
                 * reading a CE register, which is a relatively expensive
                 * operation.
                 */
                resources = hif_get_free_queue_number(hif_ctx, pipe);
                /*
                 * If at least 50% of the total resources are still available,
                 * don't bother checking again yet.
                 */
                if (resources > (host_ce_config[pipe].src_nentries >> 1)) {
                        return;
                }
        }
#ifdef ATH_11AC_TXCOMPACT
        ce_per_engine_servicereap(scn, pipe);
#else
        ce_per_engine_service(scn, pipe);
#endif
}

uint16_t
hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
{
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
        struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
        uint16_t rv;

        qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
        rv = pipe_info->num_sends_allowed;
        qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
        return rv;
}

/* Called by lower (CE) layer when a send to Target completes. */
void
hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
                     void *transfer_context, qdf_dma_addr_t CE_data,
                     unsigned int nbytes, unsigned int transfer_id,
                     unsigned int sw_index, unsigned int hw_index,
                     unsigned int toeplitz_hash_result)
{
        struct HIF_CE_pipe_info *pipe_info =
                (struct HIF_CE_pipe_info *)ce_context;
        struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
        struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
        unsigned int sw_idx = sw_index, hw_idx = hw_index;
        struct hif_msg_callbacks *msg_callbacks =
                &hif_state->msg_callbacks_current;

        do {
                /*
                 * The upper layer callback will be triggered
                 * when the last fragment is completed.
                 */
                if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
                        if (scn->target_status
                            == OL_TRGET_STATUS_RESET)
                                qdf_nbuf_free(transfer_context);
                        else
                                msg_callbacks->txCompletionHandler(
                                        msg_callbacks->Context,
                                        transfer_context, transfer_id,
                                        toeplitz_hash_result);
                }

                qdf_spin_lock(&pipe_info->completion_freeq_lock);
                pipe_info->num_sends_allowed++;
                qdf_spin_unlock(&pipe_info->completion_freeq_lock);
        } while (ce_completed_send_next(copyeng,
                        &ce_context, &transfer_context,
                        &CE_data, &nbytes, &transfer_id,
                        &sw_idx, &hw_idx,
                        &toeplitz_hash_result) == QDF_STATUS_SUCCESS);
}

/**
 * hif_ce_do_recv(): send message from copy engine to upper layers
 * @msg_callbacks: structure containing callback and callback context
 * @netbuf: skb containing message
 * @nbytes: number of bytes in the message
 * @pipe_info: used for the pipe_number info
 *
 * Checks the packet length, sets the length in the netbuf,
 * and calls the upper layer callback.
 *
 * Return: None
 */
static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
                qdf_nbuf_t netbuf, int nbytes,
                struct HIF_CE_pipe_info *pipe_info) {
        if (nbytes <= pipe_info->buf_sz) {
                qdf_nbuf_set_pktlen(netbuf, nbytes);
                msg_callbacks->
                        rxCompletionHandler(msg_callbacks->Context,
                                netbuf, pipe_info->pipe_num);
        } else {
                HIF_ERROR("%s: Invalid Rx msg buf:%p nbytes:%d",
                                __func__, netbuf, nbytes);
                qdf_nbuf_free(netbuf);
        }
}

/* Called by lower (CE) layer when data is received from the Target. */
void
hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
                     void *transfer_context, qdf_dma_addr_t CE_data,
                     unsigned int nbytes, unsigned int transfer_id,
                     unsigned int flags)
{
        struct HIF_CE_pipe_info *pipe_info =
                (struct HIF_CE_pipe_info *)ce_context;
        struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
        struct CE_state *ce_state = (struct CE_state *) copyeng;
        struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
#ifdef HIF_PCI
        struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
#endif
        struct hif_msg_callbacks *msg_callbacks =
                &hif_state->msg_callbacks_current;
        uint32_t count;

        do {
#ifdef HIF_PCI
                hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
#endif
                qdf_nbuf_unmap_single(scn->qdf_dev,
                                      (qdf_nbuf_t) transfer_context,
                                      QDF_DMA_FROM_DEVICE);

                atomic_inc(&pipe_info->recv_bufs_needed);
                hif_post_recv_buffers_for_pipe(pipe_info);
                if (scn->target_status == OL_TRGET_STATUS_RESET)
                        qdf_nbuf_free(transfer_context);
                else
                        hif_ce_do_recv(msg_callbacks, transfer_context,
                                nbytes, pipe_info);

                /* Set up the force_break flag if the number of receives
                 * reaches MAX_NUM_OF_RECEIVES */
                ce_state->receive_count++;
                count = ce_state->receive_count;
                if (qdf_unlikely(hif_max_num_receives_reached(scn, count))) {
                        ce_state->force_break = 1;
                        break;
                }
        } while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
                                        &CE_data, &nbytes, &transfer_id,
                                        &flags) == QDF_STATUS_SUCCESS);

}

/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */

void
hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
              struct hif_msg_callbacks *callbacks)
{
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
        spin_lock_init(&pcie_access_log_lock);
#endif
        /* Save callbacks for later installation */
        qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
                     sizeof(hif_state->msg_callbacks_pending));

}

int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
{
        struct CE_handle *ce_diag = hif_state->ce_diag;
        int pipe_num;
        struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
        struct hif_msg_callbacks *hif_msg_callbacks =
                &hif_state->msg_callbacks_current;

        /* daemonize("hif_compl_thread"); */

        if (scn->ce_count == 0) {
                HIF_ERROR("%s: Invalid ce_count\n", __func__);
                return -EINVAL;
        }

        if (!hif_msg_callbacks ||
            !hif_msg_callbacks->rxCompletionHandler ||
            !hif_msg_callbacks->txCompletionHandler) {
                HIF_ERROR("%s: no completion handler registered", __func__);
                return -EFAULT;
        }

        A_TARGET_ACCESS_LIKELY(scn);
        for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
                struct CE_attr attr;
                struct HIF_CE_pipe_info *pipe_info;

                pipe_info = &hif_state->pipe_info[pipe_num];
                if (pipe_info->ce_hdl == ce_diag) {
                        continue;       /* Handle Diagnostic CE specially */
                }
                attr = host_ce_config[pipe_num];
                if (attr.src_nentries) {
                        /* pipe used to send to target */
                        HIF_INFO_MED("%s: pipe_num:%d pipe_info:0x%p",
                                     __func__, pipe_num, pipe_info);
                        ce_send_cb_register(pipe_info->ce_hdl,
                                            hif_pci_ce_send_done, pipe_info,
                                            attr.flags & CE_ATTR_DISABLE_INTR);
                        pipe_info->num_sends_allowed = attr.src_nentries - 1;
                }
                if (attr.dest_nentries) {
                        /* pipe used to receive from target */
                        ce_recv_cb_register(pipe_info->ce_hdl,
                                            hif_pci_ce_recv_data, pipe_info,
                                            attr.flags & CE_ATTR_DISABLE_INTR);
                }

                if (attr.src_nentries)
                        qdf_spinlock_create(&pipe_info->completion_freeq_lock);
        }

        A_TARGET_ACCESS_UNLIKELY(scn);
        return 0;
}

/*
 * Install pending msg callbacks.
 *
 * TBDXXX: This hack is needed because upper layers install msg callbacks
 * for use with HTC before BMI is done; yet this HIF implementation
 * needs to continue to use BMI msg callbacks. Really, upper layers
 * should not register HTC callbacks until AFTER BMI phase.
 */
static void hif_msg_callbacks_install(struct hif_softc *scn)
{
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

        qdf_mem_copy(&hif_state->msg_callbacks_current,
                     &hif_state->msg_callbacks_pending,
                     sizeof(hif_state->msg_callbacks_pending));
}

void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
                          uint8_t *DLPipe)
{
        int ul_is_polled, dl_is_polled;

        (void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
                ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
}

/**
 * hif_dump_pipe_debug_count() - Log error count
 * @scn: hif_softc pointer.
 *
 * Output the pipe error counts of each pipe to log file
 *
 * Return: N/A
 */
void hif_dump_pipe_debug_count(struct hif_softc *scn)
{
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
        int pipe_num;

        if (hif_state == NULL) {
                HIF_ERROR("%s hif_state is NULL", __func__);
                return;
        }
        for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
                struct HIF_CE_pipe_info *pipe_info;

                pipe_info = &hif_state->pipe_info[pipe_num];

                if (pipe_info->nbuf_alloc_err_count > 0 ||
                    pipe_info->nbuf_dma_err_count > 0 ||
                    pipe_info->nbuf_ce_enqueue_err_count)
                        HIF_ERROR(
                                "%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
                                __func__, pipe_info->pipe_num,
                                atomic_read(&pipe_info->recv_bufs_needed),
                                pipe_info->nbuf_alloc_err_count,
                                pipe_info->nbuf_dma_err_count,
                                pipe_info->nbuf_ce_enqueue_err_count);
        }
}

static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	qdf_size_t buf_sz;
	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
	QDF_STATUS ret;
	uint32_t bufs_posted = 0;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return 0;
	}

	ce_hdl = pipe_info->ce_hdl;

	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
		qdf_dma_addr_t CE_data;	/* CE space buffer address */
		qdf_nbuf_t nbuf;
		int status;

		atomic_dec(&pipe_info->recv_bufs_needed);
		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
		if (!nbuf) {
			qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
			pipe_info->nbuf_alloc_err_count++;
			qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
			HIF_ERROR(
				"%s buf alloc error [%d] needed %d, nbuf_alloc_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_alloc_err_count);
			atomic_inc(&pipe_info->recv_bufs_needed);
			return 1;
		}

		/*
		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
		 * CE_data = dma_map_single(dev, data, buf_sz, );
		 * DMA_FROM_DEVICE);
		 */
		ret = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
					  QDF_DMA_FROM_DEVICE);

		if (unlikely(ret != QDF_STATUS_SUCCESS)) {
			qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
			pipe_info->nbuf_dma_err_count++;
			qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
			HIF_ERROR(
				"%s buf map error [%d] needed %d, nbuf_dma_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_dma_err_count);
			qdf_nbuf_free(nbuf);
			atomic_inc(&pipe_info->recv_bufs_needed);
			return 1;
		}

		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);

		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
						   buf_sz, DMA_FROM_DEVICE);
		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		if (status != EOK) {
			qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
			pipe_info->nbuf_ce_enqueue_err_count++;
			qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
			HIF_ERROR(
				"%s buf enqueue error [%d] needed %d, nbuf_ce_enqueue_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_ce_enqueue_err_count);
			atomic_inc(&pipe_info->recv_bufs_needed);
			qdf_nbuf_free(nbuf);
			return 1;
		}

		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
		bufs_posted++;
	}
	pipe_info->nbuf_alloc_err_count =
		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
	pipe_info->nbuf_dma_err_count =
		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
	pipe_info->nbuf_ce_enqueue_err_count =
		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
		pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;

	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

	return 0;
}

/*
 * Try to post all desired receive buffers for all pipes.
 * Returns 0 if all desired buffers are posted,
 * non-zero if we were unable to completely
 * replenish receive buffers.
 */
static int hif_post_recv_buffers(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num, rv = 0;

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (hif_post_recv_buffers_for_pipe(pipe_info)) {
			rv = 1;
			goto done;
		}
	}

done:
	A_TARGET_ACCESS_UNLIKELY(scn);

	return rv;
}

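/*
 * Illustrative sketch (not part of the driver): because
 * hif_post_recv_buffers() returns non-zero on a partial replenish, a caller
 * in this file could treat that as "retry later" rather than fatal. The
 * HIF_CE_USAGE_EXAMPLE guard and the example_* name below are hypothetical.
 */
#ifdef HIF_CE_USAGE_EXAMPLE
static void example_replenish_recv_buffers(struct hif_softc *scn)
{
	/* non-zero means at least one pipe could not be fully replenished */
	if (hif_post_recv_buffers(scn))
		HIF_ERROR("%s: partial replenish; retry on next completion",
			  __func__);
}
#endif /* HIF_CE_USAGE_EXAMPLE */
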
QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_update_fastpath_recv_bufs_cnt(scn);

	hif_msg_callbacks_install(scn);

	if (hif_completion_thread_startup(hif_state))
		return QDF_STATUS_E_FAILURE;

	/* Post buffers once to start things off. */
	(void)hif_post_recv_buffers(scn);

	hif_state->started = true;

	return QDF_STATUS_SUCCESS;
}

void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct hif_softc *scn;
	struct CE_handle *ce_hdl;
	uint32_t buf_sz;
	struct HIF_CE_state *hif_state;
	qdf_nbuf_t netbuf;
	qdf_dma_addr_t CE_data;
	void *per_CE_context;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started) {
		return;
	}

	scn = HIF_GET_SOFTC(hif_state);
	ce_hdl = pipe_info->ce_hdl;

	if (scn->qdf_dev == NULL) {
		return;
	}
	while (ce_revoke_recv_next
		       (ce_hdl, &per_CE_context, (void **)&netbuf,
			&CE_data) == QDF_STATUS_SUCCESS) {
		qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
				      QDF_DMA_FROM_DEVICE);
		qdf_nbuf_free(netbuf);
	}
}

void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	struct HIF_CE_state *hif_state;
	struct hif_softc *scn;
	qdf_nbuf_t netbuf;
	void *per_CE_context;
	qdf_dma_addr_t CE_data;
	unsigned int nbytes;
	unsigned int id;
	uint32_t buf_sz;
	uint32_t toeplitz_hash_result;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started) {
		return;
	}

	scn = HIF_GET_SOFTC(hif_state);

	ce_hdl = pipe_info->ce_hdl;

	while (ce_cancel_send_next
		       (ce_hdl, &per_CE_context,
			(void **)&netbuf, &CE_data, &nbytes,
			&id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
			/*
			 * Packets enqueued by htt_h2t_ver_req_msg() and
			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
			 * freed in htt_htc_misc_pkt_pool_free() in
			 * wlantl_close(), so do not free them here again;
			 * check the endpoint on which they were queued.
			 */
			if (id == scn->htc_endpoint)
				return;
			/*
			 * Indicate the completion to the higher
			 * layer to free the buffer.
			 */
			hif_state->msg_callbacks_current.
				txCompletionHandler(hif_state->
					msg_callbacks_current.Context,
					netbuf, id, toeplitz_hash_result);
		}
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
{
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		hif_recv_buffer_cleanup_on_pipe(pipe_info);
		hif_send_buffer_cleanup_on_pipe(pipe_info);
	}
}

void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_buffer_cleanup(hif_state);
}

void hif_stop(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	int pipe_num;

	scn->hif_init_done = false;

	/*
	 * At this point, asynchronous threads are stopped,
	 * The Target should not DMA nor interrupt, Host code may
	 * not initiate anything more. So we just need to clean
	 * up Host-side state.
	 */

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	hif_buffer_cleanup(hif_state);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}

	if (hif_state->sleep_timer_init) {
		qdf_timer_stop(&hif_state->sleep_timer);
		qdf_timer_free(&hif_state->sleep_timer);
		hif_state->sleep_timer_init = false;
	}

	hif_state->started = false;
}

/**
 * hif_get_target_ce_config() - get copy engine configuration
 * @target_ce_config_ret: basic copy engine configuration
 * @target_ce_config_sz_ret: size of the basic configuration in bytes
 * @target_service_to_ce_map_ret: service mapping for the copy engines
 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
 * @target_shadow_reg_cfg_ret: shadow register configuration
 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
 *
 * Provides an accessor to these values outside of this file.
 * Currently these are stored in static pointers to const sections, and
 * there are multiple configurations that are selected from at compile time.
 * Runtime selection would need to consider mode, target type and bus type.
 *
 * Return: return by parameter.
 */
void hif_get_target_ce_config(struct CE_pipe_config **target_ce_config_ret,
			      int *target_ce_config_sz_ret,
			      struct service_to_pipe **target_service_to_ce_map_ret,
			      int *target_service_to_ce_map_sz_ret,
			      struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
			      int *shadow_cfg_sz_ret)
{
	*target_ce_config_ret = target_ce_config;
	*target_ce_config_sz_ret = target_ce_config_sz;
	*target_service_to_ce_map_ret = target_service_to_ce_map;
	*target_service_to_ce_map_sz_ret = target_service_to_ce_map_sz;

	if (target_shadow_reg_cfg_ret)
		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;

	if (shadow_cfg_sz_ret)
		*shadow_cfg_sz_ret = shadow_cfg_sz;
}

/**
 * hif_wlan_enable(): call the platform driver to enable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode and CE configuration to
 * platform driver to enable wlan.
 *
 * Return: linux error code
 */
int hif_wlan_enable(struct hif_softc *scn)
{
	struct icnss_wlan_enable_cfg cfg;
	enum icnss_driver_mode mode;
	uint32_t con_mode = hif_get_conparam(scn);

	hif_get_target_ce_config((struct CE_pipe_config **)&cfg.ce_tgt_cfg,
				 &cfg.num_ce_tgt_cfg,
				 (struct service_to_pipe **)&cfg.ce_svc_cfg,
				 &cfg.num_ce_svc_pipe_cfg,
				 (struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
				 &cfg.num_shadow_reg_cfg);

	/* translate from structure size to array size */
	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);

	if (QDF_GLOBAL_FTM_MODE == con_mode)
		mode = ICNSS_FTM;
	else if (WLAN_IS_EPPING_ENABLED(con_mode))
		mode = ICNSS_EPPING;
	else
		mode = ICNSS_MISSION;

	if (BYPASS_QMI)
		return 0;
	else
		return icnss_wlan_enable(&cfg, mode, QWLAN_VERSIONSTR);
}

/**
 * hif_ce_prepare_config() - load the correct static tables.
 * @scn: hif context
 *
 * Epping uses different static attribute tables than mission mode.
 */
void hif_ce_prepare_config(struct hif_softc *scn)
{
	uint32_t mode = hif_get_conparam(scn);

	/* if epping is enabled we need to use the epping configuration. */
	if (WLAN_IS_EPPING_ENABLED(mode)) {
		if (WLAN_IS_EPPING_IRQ(mode))
			host_ce_config = host_ce_config_wlan_epping_irq;
		else
			host_ce_config = host_ce_config_wlan_epping_poll;
		target_ce_config = target_ce_config_wlan_epping;
		target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
		target_service_to_ce_map =
			target_service_to_ce_map_wlan_epping;
		target_service_to_ce_map_sz =
			sizeof(target_service_to_ce_map_wlan_epping);
		target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
		shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
	}
}

/**
 * hif_ce_open() - do ce specific allocations
 * @hif_sc: pointer to hif context
 *
 * return: 0 for success or QDF_STATUS_E_NOMEM
 */
QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	qdf_spinlock_create(&hif_state->keep_awake_lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * hif_ce_close() - do ce specific free
 * @hif_sc: pointer to hif context
 */
void hif_ce_close(struct hif_softc *hif_sc)
{
}

/**
 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
 * @hif_sc: hif context
 *
 * uses state variables to support cleaning up when hif_config_ce fails.
 */
void hif_unconfig_ce(struct hif_softc *hif_sc)
{
	int pipe_num;
	struct HIF_CE_pipe_info *pipe_info;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ce_unregister_irq(hif_state, (1 << pipe_num));
			hif_sc->request_irq_done = false;
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}
	if (hif_sc->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		hif_sc->athdiag_procfs_inited = false;
	}
}

#ifdef CONFIG_BYPASS_QMI
#define FW_SHARED_MEM (2 * 1024 * 1024)

/**
 * hif_post_static_buf_to_target() - post static buffer to WLAN FW
 * @scn: pointer to HIF structure
 *
 * WLAN FW needs 2MB memory from DDR when QMI is disabled.
 *
 * Return: void
 */
static void hif_post_static_buf_to_target(struct hif_softc *scn)
{
	void *target_va;
	phys_addr_t target_pa;

	target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					     FW_SHARED_MEM, &target_pa);
	if (NULL == target_va) {
		HIF_TRACE("Memory allocation failed; could not post target buf");
		return;
	}
	hif_write32_mb(scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
	HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
}
#else
static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
{
	return;
}
#endif

/**
 * hif_config_ce() - configure copy engines
 * @scn: hif context
 *
 * Prepares fw, copy engine hardware and host sw according
 * to the attributes selected by hif_ce_prepare_config.
 *
 * also calls athdiag_procfs_init
 *
 * return: 0 for success nonzero for failure.
 */
int hif_config_ce(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct HIF_CE_pipe_info *pipe_info;
	int pipe_num;
#ifdef ADRASTEA_SHADOW_REGISTERS
	int i;
#endif
	QDF_STATUS rv = QDF_STATUS_SUCCESS;

	scn->notice_send = true;

	hif_post_static_buf_to_target(scn);

	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;

	hif_config_rri_on_ddr(scn);

	/* During CE initialization */
	scn->ce_count = HOST_CE_COUNT;
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr *attr;

		pipe_info = &hif_state->pipe_info[pipe_num];
		pipe_info->pipe_num = pipe_num;
		pipe_info->HIF_CE_state = hif_state;
		attr = &host_ce_config[pipe_num];
		pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
		QDF_ASSERT(pipe_info->ce_hdl != NULL);
		if (pipe_info->ce_hdl == NULL) {
			rv = QDF_STATUS_E_FAILURE;
			A_TARGET_ACCESS_UNLIKELY(scn);
			goto err;
		}

		if (pipe_num == DIAG_CE_ID) {
			/* Reserve the ultimate CE for
			 * Diagnostic Window support */
			hif_state->ce_diag = pipe_info->ce_hdl;
			continue;
		}

		pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
		qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
		if (attr->dest_nentries > 0) {
			atomic_set(&pipe_info->recv_bufs_needed,
				   init_buffer_count(attr->dest_nentries - 1));
		} else {
			atomic_set(&pipe_info->recv_bufs_needed, 0);
		}
		ce_tasklet_init(hif_state, (1 << pipe_num));
		ce_register_irq(hif_state, (1 << pipe_num));
		scn->request_irq_done = true;
	}

	if (athdiag_procfs_init(scn) != 0) {
		A_TARGET_ACCESS_UNLIKELY(scn);
		goto err;
	}
	scn->athdiag_procfs_inited = true;

	HIF_INFO_MED("%s: ce_init done", __func__);

	init_tasklet_workers(hif_hdl);

	HIF_TRACE("%s: X, ret = %d\n", __func__, rv);

#ifdef ADRASTEA_SHADOW_REGISTERS
	HIF_ERROR("Using Shadow Registers instead of CE Registers\n");
	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
		HIF_ERROR("%s Shadow Register%d is mapped to address %x\n",
			  __func__, i,
			  (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
	}
#endif

	return rv != QDF_STATUS_SUCCESS;

err:
	/* Failure, so clean up */
	hif_unconfig_ce(scn);
	HIF_TRACE("%s: X, ret = %d\n", __func__, rv);
	return QDF_STATUS_E_FAILURE;	/* non-zero indicates failure */
}

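/*
 * Illustrative sketch (not part of the driver): the intended bring-up order
 * for the functions above is open, prepare the static tables, enable wlan
 * via the platform driver, configure the copy engines, then start the pipes,
 * with hif_unconfig_ce() as the unwind path inside hif_config_ce(). The
 * guard macro, the example_* name and the error codes are assumptions made
 * for illustration; error handling is abbreviated.
 */
#ifdef HIF_CE_USAGE_EXAMPLE
static int example_ce_bring_up(struct hif_softc *scn)
{
	if (hif_ce_open(scn) != QDF_STATUS_SUCCESS)
		return -ENOMEM;

	hif_ce_prepare_config(scn);

	if (hif_wlan_enable(scn))
		return -EIO;

	if (hif_config_ce(scn))
		return -EIO;	/* hif_config_ce already unwound via err: */

	return hif_start(GET_HIF_OPAQUE_HDL(scn)) == QDF_STATUS_SUCCESS ?
		0 : -EIO;
}
#endif /* HIF_CE_USAGE_EXAMPLE */
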
#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
 * @handler: Callback function
 * @context: handle for callback function
 *
 * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
 */
int hif_ce_fastpath_cb_register(fastpath_msg_handler handler, void *context)
{
	struct hif_softc *scn =
		(struct hif_softc *)cds_get_context(QDF_MODULE_ID_HIF);
	struct CE_state *ce_state;
	int i;

	QDF_ASSERT(scn != NULL);

	if (!scn->fastpath_mode_on) {
		HIF_WARN("Fastpath mode disabled\n");
		return QDF_STATUS_E_FAILURE;
	}

	for (i = 0; i < CE_COUNT_MAX; i++) {
		ce_state = scn->ce_id_to_state[i];
		if (ce_state->htt_rx_data) {
			ce_state->fastpath_handler = handler;
			ce_state->context = context;
		}
	}

	return QDF_STATUS_SUCCESS;
}
#endif

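/*
 * Illustrative sketch (not part of the driver): registering a fastpath
 * handler on the HTT rx data CEs. The handler signature shown here is an
 * assumption about the fastpath_msg_handler typedef, and the guard macro
 * and example_* names are hypothetical; only hif_ce_fastpath_cb_register()
 * is real.
 */
#if defined(WLAN_FEATURE_FASTPATH) && defined(HIF_CE_USAGE_EXAMPLE)
/* assumed signature: (context, completed nbuf array, number of entries) */
static void example_fastpath_handler(void *context, qdf_nbuf_t *cmpl_msdus,
				     uint32_t num_cmpls)
{
	/* hand the completed rx frames straight to the datapath */
}

static void example_register_fastpath(void *htt_pdev)
{
	if (hif_ce_fastpath_cb_register(example_fastpath_handler, htt_pdev))
		HIF_WARN("fastpath registration skipped\n");
}
#endif /* WLAN_FEATURE_FASTPATH && HIF_CE_USAGE_EXAMPLE */
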
#ifdef IPA_OFFLOAD
/**
 * hif_ipa_get_ce_resource() - get uc resource on hif
 * @hif_ctx: bus context
 * @ce_sr_base_paddr: copyengine source ring base physical address
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * IPA micro controller data path offload feature enabled,
 * HIF should release copy engine related resource information to IPA UC
 * IPA UC will access hardware resource with released information
 *
 * Return: None
 */
void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
			     qdf_dma_addr_t *ce_sr_base_paddr,
			     uint32_t *ce_sr_ring_size,
			     qdf_dma_addr_t *ce_reg_paddr)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;

	ce_ipa_get_resource(ce_hdl, ce_sr_base_paddr, ce_sr_ring_size,
			    ce_reg_paddr);
	return;
}
#endif /* IPA_OFFLOAD */


#ifdef ADRASTEA_SHADOW_REGISTERS

/*
   Current shadow register config

   -----------------------------------------------------------
   Shadow Register      |     CE   |    src/dst write index
   -----------------------------------------------------------
	0            |     0    |           src
	1     No Config - Doesn't point to anything
	2     No Config - Doesn't point to anything
	3            |     3    |           src
	4            |     4    |           src
	5            |     5    |           src
	6     No Config - Doesn't point to anything
	7            |     7    |           src
	8     No Config - Doesn't point to anything
	9     No Config - Doesn't point to anything
	10    No Config - Doesn't point to anything
	11    No Config - Doesn't point to anything
   -----------------------------------------------------------
	12    No Config - Doesn't point to anything
	13           |     1    |           dst
	14           |     2    |           dst
	15    No Config - Doesn't point to anything
	16    No Config - Doesn't point to anything
	17    No Config - Doesn't point to anything
	18    No Config - Doesn't point to anything
	19           |     7    |           dst
	20           |     8    |           dst
	21    No Config - Doesn't point to anything
	22    No Config - Doesn't point to anything
	23    No Config - Doesn't point to anything
   -----------------------------------------------------------


   ToDo - Move shadow register config to following in the future
   This helps free up a block of shadow registers towards the end.
   Can be used for other purposes

   -----------------------------------------------------------
   Shadow Register      |     CE   |    src/dst write index
   -----------------------------------------------------------
	0            |     0    |           src
	1            |     3    |           src
	2            |     4    |           src
	3            |     5    |           src
	4            |     7    |           src
   -----------------------------------------------------------
	5            |     1    |           dst
	6            |     2    |           dst
	7            |     7    |           dst
	8            |     8    |           dst
   -----------------------------------------------------------
	9     No Config - Doesn't point to anything
	12    No Config - Doesn't point to anything
	13    No Config - Doesn't point to anything
	14    No Config - Doesn't point to anything
	15    No Config - Doesn't point to anything
	16    No Config - Doesn't point to anything
	17    No Config - Doesn't point to anything
	18    No Config - Doesn't point to anything
	19    No Config - Doesn't point to anything
	20    No Config - Doesn't point to anything
	21    No Config - Doesn't point to anything
	22    No Config - Doesn't point to anything
	23    No Config - Doesn't point to anything
   -----------------------------------------------------------
*/

u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 0:
		addr = SHADOW_VALUE0;
		break;
	case 3:
		addr = SHADOW_VALUE3;
		break;
	case 4:
		addr = SHADOW_VALUE4;
		break;
	case 5:
		addr = SHADOW_VALUE5;
		break;
	case 7:
		addr = SHADOW_VALUE7;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}
	return addr;
}

u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 1:
		addr = SHADOW_VALUE13;
		break;
	case 2:
		addr = SHADOW_VALUE14;
		break;
	case 5:
		addr = SHADOW_VALUE17;
		break;
	case 7:
		addr = SHADOW_VALUE19;
		break;
	case 8:
		addr = SHADOW_VALUE20;
		break;
	case 9:
		addr = SHADOW_VALUE21;
		break;
	case 10:
		addr = SHADOW_VALUE22;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}

	return addr;
}
#endif

#if defined(FEATURE_LRO)
/**
 * ce_lro_flush_cb_register() - register the LRO flush
 * callback
 * @hif_hdl: HIF context
 * @handler: callback function
 * @data: opaque data pointer to be passed back
 *
 * Store the LRO flush callback provided
 *
 * Return: none
 */
void ce_lro_flush_cb_register(struct hif_opaque_softc *hif_hdl,
			      void (handler)(void *), void *data)
{
	int i;
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	QDF_ASSERT(scn != NULL);

	for (i = 0; i < CE_COUNT_MAX; i++) {
		ce_state = scn->ce_id_to_state[i];
		if (ce_state->htt_rx_data) {
			ce_state->lro_flush_cb = handler;
			ce_state->lro_data = data;
		}
	}
}

/**
 * ce_lro_flush_cb_deregister() - deregister the LRO flush
 * callback
 * @hif_hdl: HIF context
 *
 * Remove the LRO flush callback
 *
 * Return: none
 */
void ce_lro_flush_cb_deregister(struct hif_opaque_softc *hif_hdl)
{
	int i;
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	QDF_ASSERT(scn != NULL);

	for (i = 0; i < CE_COUNT_MAX; i++) {
		ce_state = scn->ce_id_to_state[i];
		if (ce_state->htt_rx_data) {
			ce_state->lro_flush_cb = NULL;
			ce_state->lro_data = NULL;
		}
	}
}
#endif

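/*
 * Illustrative sketch (not part of the driver): a client registering and
 * later removing an LRO flush callback. The guard macro and the example_*
 * names are hypothetical; only the two ce_lro_flush_cb_* calls are real.
 */
#if defined(FEATURE_LRO) && defined(HIF_CE_USAGE_EXAMPLE)
static void example_lro_flush(void *data)
{
	/* flush any rx aggregation state tracked in data */
}

static void example_lro_lifetime(struct hif_opaque_softc *hif_hdl,
				 void *lro_info)
{
	ce_lro_flush_cb_register(hif_hdl, example_lro_flush, lro_info);
	/* ... rx datapath runs; the CE tasklet invokes example_lro_flush ... */
	ce_lro_flush_cb_deregister(hif_hdl);
}
#endif /* FEATURE_LRO && HIF_CE_USAGE_EXAMPLE */
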
/**
 * hif_map_service_to_pipe() - returns the ce ids pertaining to
 * this service
 * @hif_hdl: hif_opaque_softc pointer.
 * @svc_id: Service ID for which the mapping is needed.
 * @ul_pipe: address of the container in which ul pipe is returned.
 * @dl_pipe: address of the container in which dl pipe is returned.
 * @ul_is_polled: address of the container in which a bool
 *			indicating if the UL CE for this service
 *			is polled is returned.
 * @dl_is_polled: address of the container in which a bool
 *			indicating if the DL CE for this service
 *			is polled is returned.
 *
 * Return: Indicates whether this operation was successful.
 */
int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
			    uint8_t *ul_pipe, uint8_t *dl_pipe,
			    int *ul_is_polled, int *dl_is_polled)
{
	int status = QDF_STATUS_SUCCESS;
	unsigned int i;
	struct service_to_pipe element;
	struct service_to_pipe *tgt_svc_map_to_use;
	size_t sz_tgt_svc_map_to_use;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	uint32_t mode = hif_get_conparam(scn);

	if (WLAN_IS_EPPING_ENABLED(mode)) {
		tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
		sz_tgt_svc_map_to_use =
			sizeof(target_service_to_ce_map_wlan_epping);
	} else {
		tgt_svc_map_to_use = target_service_to_ce_map_wlan;
		sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_wlan);
	}

	*dl_is_polled = 0;	/* polling for received messages not supported */

	for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
		if (element.service_id == svc_id) {
			if (element.pipedir == PIPEDIR_OUT)
				*ul_pipe = element.pipenum;
			else if (element.pipedir == PIPEDIR_IN)
				*dl_pipe = element.pipenum;
		}
	}

	*ul_is_polled =
		(host_ce_config[*ul_pipe].flags & CE_ATTR_DISABLE_INTR) != 0;

	return status;
}

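/*
 * Illustrative sketch (not part of the driver): querying the UL/DL pipes for
 * an HTC service, in the same way hif_get_default_pipe() does above. The
 * guard macro and the example_* function name are hypothetical.
 */
#ifdef HIF_CE_USAGE_EXAMPLE
static void example_query_service_pipes(struct hif_opaque_softc *hif_hdl)
{
	uint8_t ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;

	if (hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
				    &ul_pipe, &dl_pipe,
				    &ul_is_polled, &dl_is_polled) ==
	    QDF_STATUS_SUCCESS)
		HIF_INFO("ctrl svc: ul pipe %d (polled=%d), dl pipe %d",
			 ul_pipe, ul_is_polled, dl_pipe);
}
#endif /* HIF_CE_USAGE_EXAMPLE */
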
#ifdef SHADOW_REG_DEBUG
inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, srri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);

	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != srri_from_ddr) {
		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x\n",
			  srri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return srri_from_ddr;
}


inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, drri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);

	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != drri_from_ddr) {
		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x\n",
			  drri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return drri_from_ddr;
}

#endif

#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_get_src_ring_read_index(): Called to get the SRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the SRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based SRRI
 *
 * Return: SRRI
 */
inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;

	attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
	if (attr.flags & CE_ATTR_DISABLE_INTR)
		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	else
		return A_TARGET_READ(scn,
				     (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
}

/**
 * hif_get_dst_ring_read_index(): Called to get the DRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the DRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based DRRI
 *
 * Return: DRRI
 */
inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;

	attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];

	if (attr.flags & CE_ATTR_DISABLE_INTR)
		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	else
		return A_TARGET_READ(scn,
				     (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
}

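/*
 * Illustrative sketch (not part of the driver): reading both ring indices
 * for one copy engine, e.g. to log progress on an interrupt-disabled CE.
 * The guard macro and the example_* function name are hypothetical.
 */
#ifdef HIF_CE_USAGE_EXAMPLE
static void example_log_ring_indices(struct hif_softc *scn, int ce_id)
{
	uint32_t ctrl_addr = CE_BASE_ADDRESS(ce_id);

	HIF_INFO("CE%d srri=%u drri=%u", ce_id,
		 hif_get_src_ring_read_index(scn, ctrl_addr),
		 hif_get_dst_ring_read_index(scn, ctrl_addr));
}
#endif /* HIF_CE_USAGE_EXAMPLE */
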
/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This function allocates non-cached memory on DDR and sends
 * the physical address of this memory to the CE hardware. The
 * hardware updates the RRI on this particular location.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
	unsigned int i;
	qdf_dma_addr_t paddr_rri_on_ddr;
	uint32_t high_paddr, low_paddr;

	scn->vaddr_rri_on_ddr =
		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
			scn->qdf_dev->dev, (CE_COUNT*sizeof(uint32_t)),
			&paddr_rri_on_ddr);

	low_paddr = BITS0_TO_31(paddr_rri_on_ddr);
	high_paddr = BITS32_TO_35(paddr_rri_on_ddr);

	HIF_ERROR("%s using srri and drri from DDR\n", __func__);

	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);

	for (i = 0; i < CE_COUNT; i++)
		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));

	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT*sizeof(uint32_t));

	return;
}
#else

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This is a dummy implementation for platforms that don't
 * support this functionality.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
	return;
}
#endif

/**
 * hif_dump_ce_registers() - dump ce registers
 * @scn: hif_softc pointer.
 *
 * Output the copy engine registers
 *
 * Return: 0 for success or error code
 */
int hif_dump_ce_registers(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
	uint32_t ce_reg_values[CE_COUNT_MAX][CE_USEFUL_SIZE >> 2];
	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
	uint16_t i;
	QDF_STATUS status;

	for (i = 0; i < CE_COUNT_MAX; i++, ce_reg_address += CE_OFFSET) {
		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
					   (uint8_t *) &ce_reg_values[i][0],
					   ce_reg_word_size * sizeof(uint32_t));

		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("Dumping CE register failed!");
			return -EACCES;
		}
		HIF_ERROR("CE%d Registers:", i);
		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
				   (uint8_t *) &ce_reg_values[i][0],
				   ce_reg_word_size * sizeof(uint32_t));
	}
	return 0;
}