blob: 181a800cbf1fae6450a3cc9f93fe19fbcbff03a2 [file] [log] [blame]
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +05301/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/string.h>
16#include <linux/types.h>
17#include <linux/spinlock.h>
18#include <linux/mutex.h>
19#include <linux/sched.h>
20#include <linux/slab.h>
Laxminath Kasam605b42f2017-08-01 22:02:15 +053021#include <dsp/q6core.h>
22#include <dsp/audio_cal_utils.h>
Laxminath Kasam38070be2017-08-17 18:21:59 +053023#include <dsp/apr_audio-v2.h>
Laxminath Kasam605b42f2017-08-01 22:02:15 +053024#include <ipc/apr.h>
Laxminath Kasam38070be2017-08-17 18:21:59 +053025#include "adsp_err.h"
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +053026
27#define TIMEOUT_MS 1000
28/*
 29 * AVS bring up in the modem is optimized for the new
30 * Sub System Restart design and 100 milliseconds timeout
31 * is sufficient to make sure the Q6 will be ready.
32 */
33#define Q6_READY_TIMEOUT_MS 100
34
35enum {
36 META_CAL,
37 CUST_TOP_CAL,
38 CORE_MAX_CAL
39};
40
Laxminath Kasam38070be2017-08-17 18:21:59 +053041enum ver_query_status {
42 VER_QUERY_UNATTEMPTED,
43 VER_QUERY_UNSUPPORTED,
44 VER_QUERY_SUPPORTED
45};
46
47struct q6core_avcs_ver_info {
48 enum ver_query_status status;
49 struct avcs_fwk_ver_info ver_info;
50};
51
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +053052struct q6core_str {
53 struct apr_svc *core_handle_q;
54 wait_queue_head_t bus_bw_req_wait;
55 wait_queue_head_t cmd_req_wait;
Laxminath Kasam38070be2017-08-17 18:21:59 +053056 wait_queue_head_t avcs_fwk_ver_req_wait;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +053057 u32 bus_bw_resp_received;
58 enum cmd_flags {
59 FLAG_NONE,
60 FLAG_CMDRSP_LICENSE_RESULT
61 } cmd_resp_received_flag;
Laxminath Kasam38070be2017-08-17 18:21:59 +053062 u32 avcs_fwk_ver_resp_received;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +053063 struct mutex cmd_lock;
Laxminath Kasam38070be2017-08-17 18:21:59 +053064 struct mutex ver_lock;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +053065 union {
66 struct avcs_cmdrsp_get_license_validation_result
67 cmdrsp_license_result;
68 } cmd_resp_payload;
69 u32 param;
70 struct cal_type_data *cal_data[CORE_MAX_CAL];
71 uint32_t mem_map_cal_handle;
72 int32_t adsp_status;
Laxminath Kasam38070be2017-08-17 18:21:59 +053073 struct q6core_avcs_ver_info q6core_avcs_ver_info;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +053074};
75
76static struct q6core_str q6core_lcl;
77
78struct generic_get_data_ {
79 int valid;
80 int size_in_ints;
81 int ints[];
82};
83static struct generic_get_data_ *generic_get_data;
84
Laxminath Kasam38070be2017-08-17 18:21:59 +053085static int parse_fwk_version_info(uint32_t *payload)
86{
87 size_t fwk_ver_size;
88 size_t svc_size;
89 int num_services;
90 int ret = 0;
91
92 pr_debug("%s: Payload info num services %d\n",
93 __func__, payload[4]);
94 /*
95 * payload1[4] is the number of services running on DSP
96 * Based on this info, we copy the payload into core
97 * avcs version info structure.
98 */
99 num_services = payload[4];
100 q6core_lcl.q6core_avcs_ver_info.ver_info.avcs_fwk_version.
101 num_services = num_services;
102 if (num_services > VSS_MAX_AVCS_NUM_SERVICES) {
103 pr_err("%s: num_services: %d greater than max services: %d\n",
104 __func__, num_services, VSS_MAX_AVCS_NUM_SERVICES);
105 ret = -EINVAL;
106 goto done;
107 }
108 fwk_ver_size = sizeof(struct avcs_get_fwk_version);
109 svc_size = num_services * sizeof(struct avs_svc_api_info);
110 /*
111 * Dynamically allocate memory for all
112 * the services based on num_services
113 */
114 q6core_lcl.q6core_avcs_ver_info.ver_info.services = NULL;
115 q6core_lcl.q6core_avcs_ver_info.ver_info.services =
116 kzalloc(svc_size, GFP_ATOMIC);
117 if (q6core_lcl.q6core_avcs_ver_info.ver_info.services == NULL) {
118 ret = -ENOMEM;
119 goto done;
120 }
121 /*
122 * memcpy is done twice because the memory allocated for
123 * q6core_lcl.q6core_avcs_ver_info.ver_info is not
124 * contiguous.
125 */
126 memcpy(&q6core_lcl.q6core_avcs_ver_info.ver_info,
127 (uint8_t *)payload, fwk_ver_size);
128 memcpy(q6core_lcl.q6core_avcs_ver_info.ver_info.services,
129 (uint8_t *)&payload[sizeof(struct avcs_get_fwk_version)/
130 sizeof(uint32_t)], svc_size);
131 ret = 0;
132done:
133 return ret;
134}
135
/*
 * aprv2_core_fn_q - APR callback for the ADSP CORE (AVCS) service.
 * @data: APR message descriptor (opcode, payload, payload_size).
 * @priv: unused registration cookie.
 *
 * Runs in APR callback context.  Decodes responses/events from the DSP,
 * stashes results into the q6core_lcl globals and wakes whichever waiter
 * (bus_bw_req_wait, cmd_req_wait or avcs_fwk_ver_req_wait) is blocked on
 * the corresponding flag.  Always returns 0 except for a NULL @data.
 *
 * NOTE(review): payload1[0]/payload1[1] are dereferenced for several
 * opcodes without validating data->payload_size first; only the
 * APR_BASIC_RSP_RESULT path rejects an empty payload.  Confirm the APR
 * transport guarantees a minimum payload for these opcodes.
 */
static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv)
{
	uint32_t *payload1;
	int ret = 0;

	if (data == NULL) {
		pr_err("%s: data argument is null\n", __func__);
		return -EINVAL;
	}

	pr_debug("%s: core msg: payload len = %u, apr resp opcode = 0x%x\n",
		__func__,
		data->payload_size, data->opcode);

	switch (data->opcode) {

	case APR_BASIC_RSP_RESULT:{

		if (data->payload_size == 0) {
			pr_err("%s: APR_BASIC_RSP_RESULT No Payload ",
					__func__);
			return 0;
		}

		payload1 = data->payload;

		/* payload1[0] = echoed command opcode, payload1[1] = status */
		switch (payload1[0]) {

		case AVCS_CMD_SHARED_MEM_UNMAP_REGIONS:
			pr_debug("%s: Cmd = AVCS_CMD_SHARED_MEM_UNMAP_REGIONS status[0x%x]\n",
				__func__, payload1[1]);
			q6core_lcl.bus_bw_resp_received = 1;
			wake_up(&q6core_lcl.bus_bw_req_wait);
			break;
		case AVCS_CMD_SHARED_MEM_MAP_REGIONS:
			pr_debug("%s: Cmd = AVCS_CMD_SHARED_MEM_MAP_REGIONS status[0x%x]\n",
				__func__, payload1[1]);
			q6core_lcl.bus_bw_resp_received = 1;
			wake_up(&q6core_lcl.bus_bw_req_wait);
			break;
		case AVCS_CMD_REGISTER_TOPOLOGIES:
			pr_debug("%s: Cmd = AVCS_CMD_REGISTER_TOPOLOGIES status[0x%x]\n",
				__func__, payload1[1]);
			/* -ADSP status to match Linux error standard */
			q6core_lcl.adsp_status = -payload1[1];
			q6core_lcl.bus_bw_resp_received = 1;
			wake_up(&q6core_lcl.bus_bw_req_wait);
			break;
		case AVCS_CMD_DEREGISTER_TOPOLOGIES:
			pr_debug("%s: Cmd = AVCS_CMD_DEREGISTER_TOPOLOGIES status[0x%x]\n",
				__func__, payload1[1]);
			q6core_lcl.bus_bw_resp_received = 1;
			wake_up(&q6core_lcl.bus_bw_req_wait);
			break;
		case AVCS_CMD_GET_FWK_VERSION:
			/*
			 * A basic response here means the query command
			 * itself failed (e.g. old firmware); the success
			 * data arrives as AVCS_CMDRSP_GET_FWK_VERSION below.
			 */
			pr_debug("%s: Cmd = AVCS_CMD_GET_FWK_VERSION status[%s]\n",
				__func__, adsp_err_get_err_str(payload1[1]));
			/* ADSP status to match Linux error standard */
			q6core_lcl.adsp_status = -payload1[1];
			if (payload1[1] == ADSP_EUNSUPPORTED)
				q6core_lcl.q6core_avcs_ver_info.status =
					VER_QUERY_UNSUPPORTED;
			q6core_lcl.avcs_fwk_ver_resp_received = 1;
			wake_up(&q6core_lcl.avcs_fwk_ver_req_wait);
			break;
		default:
			pr_err("%s: Invalid cmd rsp[0x%x][0x%x] opcode %d\n",
				__func__,
				payload1[0], payload1[1], data->opcode);
			break;
		}
		break;
	}

	case RESET_EVENTS:{
		pr_debug("%s: Reset event received in Core service\n",
			__func__);
		/* no reset done as the data will not change after SSR*/
		q6core_lcl.core_handle_q = NULL;
		break;
	}
	case AVCS_CMDRSP_SHARED_MEM_MAP_REGIONS:
		payload1 = data->payload;
		pr_debug("%s: AVCS_CMDRSP_SHARED_MEM_MAP_REGIONS handle %d\n",
			__func__, payload1[0]);
		q6core_lcl.mem_map_cal_handle = payload1[0];
		q6core_lcl.bus_bw_resp_received = 1;
		wake_up(&q6core_lcl.bus_bw_req_wait);
		break;
	case AVCS_CMDRSP_ADSP_EVENT_GET_STATE:
		payload1 = data->payload;
		q6core_lcl.param = payload1[0];
		pr_debug("%s: Received ADSP get state response 0x%x\n",
			__func__, q6core_lcl.param);
		/* ensure .param is updated prior to .bus_bw_resp_received */
		wmb();
		q6core_lcl.bus_bw_resp_received = 1;
		wake_up(&q6core_lcl.bus_bw_req_wait);
		break;
	case AVCS_CMDRSP_GET_LICENSE_VALIDATION_RESULT:
		payload1 = data->payload;
		pr_debug("%s: cmd = LICENSE_VALIDATION_RESULT, result = 0x%x\n",
			__func__, payload1[0]);
		q6core_lcl.cmd_resp_payload.cmdrsp_license_result.result
			= payload1[0];
		q6core_lcl.cmd_resp_received_flag = FLAG_CMDRSP_LICENSE_RESULT;
		wake_up(&q6core_lcl.cmd_req_wait);
		break;
	case AVCS_CMDRSP_GET_FWK_VERSION:
		pr_debug("%s: Received AVCS_CMDRSP_GET_FWK_VERSION\n",
			__func__);
		payload1 = data->payload;
		q6core_lcl.q6core_avcs_ver_info.status = VER_QUERY_SUPPORTED;
		q6core_lcl.avcs_fwk_ver_resp_received = 1;
		ret = parse_fwk_version_info(payload1);
		if (ret < 0)
			pr_err("%s: Failed to parse payload:%d\n",
				__func__, ret);
		wake_up(&q6core_lcl.avcs_fwk_ver_req_wait);
		break;
	default:
		/*
		 * Unknown opcode: if a generic reader is armed, hand it the
		 * raw payload and wake the bus_bw waiter.
		 */
		pr_err("%s: Message id from adsp core svc: 0x%x\n",
			__func__, data->opcode);
		if (generic_get_data) {
			generic_get_data->valid = 1;
			generic_get_data->size_in_ints =
				data->payload_size/sizeof(int);
			pr_debug("callback size = %i\n",
				 data->payload_size);
			memcpy(generic_get_data->ints, data->payload,
				data->payload_size);
			q6core_lcl.bus_bw_resp_received = 1;
			wake_up(&q6core_lcl.bus_bw_req_wait);
			break;
		}
		break;
	}

	return 0;
}
276
277void ocm_core_open(void)
278{
279 if (q6core_lcl.core_handle_q == NULL)
280 q6core_lcl.core_handle_q = apr_register("ADSP", "CORE",
281 aprv2_core_fn_q, 0xFFFFFFFF, NULL);
282 pr_debug("%s: Open_q %pK\n", __func__, q6core_lcl.core_handle_q);
283 if (q6core_lcl.core_handle_q == NULL)
284 pr_err("%s: Unable to register CORE\n", __func__);
285}
286
287struct cal_block_data *cal_utils_get_cal_block_by_key(
288 struct cal_type_data *cal_type, uint32_t key)
289{
290 struct list_head *ptr, *next;
291 struct cal_block_data *cal_block = NULL;
292 struct audio_cal_info_metainfo *metainfo;
293
294 list_for_each_safe(ptr, next,
295 &cal_type->cal_blocks) {
296
297 cal_block = list_entry(ptr,
298 struct cal_block_data, list);
299 metainfo = (struct audio_cal_info_metainfo *)
300 cal_block->cal_info;
301 if (metainfo->nKey != key) {
302 pr_debug("%s: metainfo key mismatch!!! found:%x, needed:%x\n",
303 __func__, metainfo->nKey, key);
304 } else {
305 pr_debug("%s: metainfo key match found", __func__);
306 return cal_block;
307 }
308 }
309 return NULL;
310}
311
Laxminath Kasam38070be2017-08-17 18:21:59 +0530312static int q6core_send_get_avcs_fwk_ver_cmd(void)
313{
314 struct apr_hdr avcs_ver_cmd;
315 int ret;
316
317 avcs_ver_cmd.hdr_field =
318 APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE),
319 APR_PKT_VER);
320 avcs_ver_cmd.pkt_size = sizeof(struct apr_hdr);
321 avcs_ver_cmd.src_port = 0;
322 avcs_ver_cmd.dest_port = 0;
323 avcs_ver_cmd.token = 0;
324 avcs_ver_cmd.opcode = AVCS_CMD_GET_FWK_VERSION;
325
326 q6core_lcl.adsp_status = 0;
327 q6core_lcl.avcs_fwk_ver_resp_received = 0;
328
329 ret = apr_send_pkt(q6core_lcl.core_handle_q,
330 (uint32_t *) &avcs_ver_cmd);
331 if (ret < 0) {
332 pr_err("%s: failed to send apr packet, ret=%d\n", __func__,
333 ret);
334 goto done;
335 }
336
337 ret = wait_event_timeout(q6core_lcl.avcs_fwk_ver_req_wait,
338 (q6core_lcl.avcs_fwk_ver_resp_received == 1),
339 msecs_to_jiffies(TIMEOUT_MS));
340 if (!ret) {
341 pr_err("%s: wait_event timeout for AVCS fwk version info\n",
342 __func__);
343 ret = -ETIMEDOUT;
344 goto done;
345 }
346
347 if (q6core_lcl.adsp_status < 0) {
348 /*
349 * adsp_err_get_err_str expects a positive value but we store
350 * the DSP error as negative to match the Linux error standard.
351 * Pass in the negated value so adsp_err_get_err_str returns
352 * the correct string.
353 */
354 pr_err("%s: DSP returned error[%s]\n", __func__,
355 adsp_err_get_err_str(-q6core_lcl.adsp_status));
356 ret = adsp_err_get_lnx_err_code(q6core_lcl.adsp_status);
357 goto done;
358 }
359
360 ret = 0;
361
362done:
363 return ret;
364}
365
366int q6core_get_service_version(uint32_t service_id,
367 struct avcs_fwk_ver_info *ver_info,
368 size_t size)
369{
370 int i;
371 uint32_t num_services;
372 size_t svc_size;
373
374 svc_size = q6core_get_avcs_service_size(service_id);
375 if (svc_size != size) {
376 pr_err("%s: Expected size: %ld, Provided size: %ld",
377 __func__, svc_size, size);
378 return -EINVAL;
379 }
380
381 num_services =
382 q6core_lcl.q6core_avcs_ver_info.ver_info.
383 avcs_fwk_version.num_services;
384
385 if (ver_info == NULL) {
386 pr_err("%s: NULL parameter ver_info\n", __func__);
387 return -EINVAL;
388 }
389
390 memcpy(ver_info, &q6core_lcl.q6core_avcs_ver_info.
391 ver_info.avcs_fwk_version, sizeof(struct avcs_get_fwk_version));
392
393 if (service_id == AVCS_SERVICE_ID_ALL) {
394 memcpy(&ver_info->services[0], &q6core_lcl.
395 q6core_avcs_ver_info.ver_info.services[0],
396 (num_services * sizeof(struct avs_svc_api_info)));
397 } else {
398 for (i = 0; i < num_services; i++) {
399 if (q6core_lcl.q6core_avcs_ver_info.
400 ver_info.services[i].service_id == service_id) {
401 memcpy(&ver_info->services[0],
402 &q6core_lcl.q6core_avcs_ver_info.
403 ver_info.services[i], size);
404 break;
405 }
406 }
407 }
408
409 return 0;
410}
411EXPORT_SYMBOL(q6core_get_service_version);
412
413size_t q6core_get_avcs_service_size(uint32_t service_id)
414{
415 int ret = 0;
416 uint32_t num_services;
417
418 num_services =
419 q6core_lcl.q6core_avcs_ver_info.ver_info.
420 avcs_fwk_version.num_services;
421
422 mutex_lock(&(q6core_lcl.ver_lock));
423 pr_debug("%s: q6core_avcs_ver_info.status(%d)\n", __func__,
424 q6core_lcl.q6core_avcs_ver_info.status);
425
426 switch (q6core_lcl.q6core_avcs_ver_info.status) {
427 case VER_QUERY_SUPPORTED:
428 pr_debug("%s: AVCS FWK version query already attempted\n",
429 __func__);
430 ret = num_services * sizeof(struct avs_svc_api_info);
431 break;
432 case VER_QUERY_UNSUPPORTED:
433 ret = -EOPNOTSUPP;
434 break;
435 case VER_QUERY_UNATTEMPTED:
436 pr_debug("%s: Attempting AVCS FWK version query\n", __func__);
437 if (q6core_is_adsp_ready()) {
438 ret = q6core_send_get_avcs_fwk_ver_cmd();
439 if (ret == 0)
440 ret = num_services *
441 sizeof(struct avs_svc_api_info);
442 } else {
443 pr_err("%s: ADSP is not ready to query version\n",
444 __func__);
445 ret = -ENODEV;
446 }
447 break;
448 default:
449 pr_err("%s: Invalid version query status %d\n", __func__,
450 q6core_lcl.q6core_avcs_ver_info.status);
451 ret = -EINVAL;
452 break;
453 }
454 mutex_unlock(&(q6core_lcl.ver_lock));
455
456 if (service_id != AVCS_SERVICE_ID_ALL)
457 return sizeof(struct avs_svc_api_info);
458
459 return ret;
460}
461EXPORT_SYMBOL(q6core_get_avcs_service_size);
462
/*
 * core_set_license - send a license blob for @module_id to the DSP.
 * @key:       metainfo key identifying which META_CAL block holds the
 *             license data.
 * @module_id: DSP module the license applies to.
 *
 * Looks up the license calibration block by key, wraps it in an
 * AVCS_CMD_SET_LICENSE packet (padded to a 4-byte boundary) and sends it
 * as an APR event (fire-and-forget: no response is waited for).
 *
 * Locking: cmd_lock is taken first, then the META_CAL data lock; the
 * goto labels unwind in exactly the reverse order.
 *
 * Returns 0/positive on success or a negative errno.
 * NOTE(review): apr_send_pkt() may return a positive value on success
 * which is passed through as rc — confirm callers treat rc >= 0 as OK.
 */
int32_t core_set_license(uint32_t key, uint32_t module_id)
{
	struct avcs_cmd_set_license *cmd_setl = NULL;
	struct cal_block_data *cal_block = NULL;
	int rc = 0, packet_size = 0;

	pr_debug("%s: key:0x%x, id:0x%x\n", __func__, key, module_id);

	mutex_lock(&(q6core_lcl.cmd_lock));
	if (q6core_lcl.cal_data[META_CAL] == NULL) {
		pr_err("%s: cal_data not initialized yet!!\n", __func__);
		rc = -EINVAL;
		goto cmd_unlock;
	}

	mutex_lock(&((q6core_lcl.cal_data[META_CAL])->lock));
	cal_block = cal_utils_get_cal_block_by_key(
			q6core_lcl.cal_data[META_CAL], key);
	if (cal_block == NULL ||
		cal_block->cal_data.kvaddr == NULL ||
		cal_block->cal_data.size <= 0) {
		pr_err("%s: Invalid cal block to send", __func__);
		rc = -EINVAL;
		goto cal_data_unlock;
	}

	packet_size = sizeof(struct avcs_cmd_set_license) +
			cal_block->cal_data.size;
	/*round up total packet_size to next 4 byte boundary*/
	packet_size = ((packet_size + 0x3)>>2)<<2;

	cmd_setl = kzalloc(packet_size, GFP_KERNEL);
	if (cmd_setl == NULL) {
		rc = -ENOMEM;
		goto cal_data_unlock;
	}

	ocm_core_open();
	if (q6core_lcl.core_handle_q == NULL) {
		pr_err("%s: apr registration for CORE failed\n", __func__);
		rc = -ENODEV;
		goto fail_cmd;
	}

	cmd_setl->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	cmd_setl->hdr.pkt_size = packet_size;
	cmd_setl->hdr.src_port = 0;
	cmd_setl->hdr.dest_port = 0;
	cmd_setl->hdr.token = 0;
	cmd_setl->hdr.opcode = AVCS_CMD_SET_LICENSE;
	cmd_setl->id = module_id;
	cmd_setl->overwrite = 1;
	cmd_setl->size = cal_block->cal_data.size;
	/* license payload follows the fixed command header */
	memcpy((uint8_t *)cmd_setl + sizeof(struct avcs_cmd_set_license),
		cal_block->cal_data.kvaddr,
		cal_block->cal_data.size);
	pr_info("%s: Set license opcode=0x%x, id =0x%x, size = %d\n",
			__func__, cmd_setl->hdr.opcode,
			cmd_setl->id, cmd_setl->size);
	rc = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)cmd_setl);
	if (rc < 0)
		pr_err("%s: SET_LICENSE failed op[0x%x]rc[%d]\n",
			__func__, cmd_setl->hdr.opcode, rc);

fail_cmd:
	kfree(cmd_setl);
cal_data_unlock:
	mutex_unlock(&((q6core_lcl.cal_data[META_CAL])->lock));
cmd_unlock:
	mutex_unlock(&(q6core_lcl.cmd_lock));

	return rc;
}
537
/*
 * core_get_license_status - ask the DSP whether the license for
 * @module_id validated successfully.
 *
 * Sends AVCS_CMD_GET_LICENSE_VALIDATION_RESULT and waits (up to
 * TIMEOUT_MS) for aprv2_core_fn_q() to post the result into
 * cmd_resp_payload and set cmd_resp_received_flag.
 *
 * cmd_lock is deliberately dropped around the wait so the wait does not
 * serialize against other commands; it is re-taken before touching the
 * shared flag/payload again.
 *
 * Returns the DSP-reported validation result on success, or a negative
 * errno (-ENODEV, -EREMOTE, -ETIME) on failure.
 */
int32_t core_get_license_status(uint32_t module_id)
{
	struct avcs_cmd_get_license_validation_result get_lvr_cmd;
	int ret = 0;

	pr_debug("%s: module_id 0x%x", __func__, module_id);

	mutex_lock(&(q6core_lcl.cmd_lock));
	ocm_core_open();
	if (q6core_lcl.core_handle_q == NULL) {
		pr_err("%s: apr registration for CORE failed\n", __func__);
		ret = -ENODEV;
		goto fail_cmd;
	}

	get_lvr_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	get_lvr_cmd.hdr.pkt_size =
		sizeof(struct avcs_cmd_get_license_validation_result);

	get_lvr_cmd.hdr.src_port = 0;
	get_lvr_cmd.hdr.dest_port = 0;
	get_lvr_cmd.hdr.token = 0;
	get_lvr_cmd.hdr.opcode = AVCS_CMD_GET_LICENSE_VALIDATION_RESULT;
	get_lvr_cmd.id = module_id;


	ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *) &get_lvr_cmd);
	if (ret < 0) {
		pr_err("%s: license_validation request failed, err %d\n",
			__func__, ret);
		ret = -EREMOTE;
		goto fail_cmd;
	}

	/* clear the flag, then wait outside cmd_lock for the callback */
	q6core_lcl.cmd_resp_received_flag &= ~(FLAG_CMDRSP_LICENSE_RESULT);
	mutex_unlock(&(q6core_lcl.cmd_lock));
	ret = wait_event_timeout(q6core_lcl.cmd_req_wait,
			(q6core_lcl.cmd_resp_received_flag ==
				FLAG_CMDRSP_LICENSE_RESULT),
			msecs_to_jiffies(TIMEOUT_MS));
	mutex_lock(&(q6core_lcl.cmd_lock));
	if (!ret) {
		pr_err("%s: wait_event timeout for CMDRSP_LICENSE_RESULT\n",
				__func__);
		ret = -ETIME;
		goto fail_cmd;
	}
	/* consume the flag before reading the payload the callback wrote */
	q6core_lcl.cmd_resp_received_flag &= ~(FLAG_CMDRSP_LICENSE_RESULT);
	ret = q6core_lcl.cmd_resp_payload.cmdrsp_license_result.result;

fail_cmd:
	mutex_unlock(&(q6core_lcl.cmd_lock));
	pr_info("%s: cmdrsp_license_result.result = 0x%x for module 0x%x\n",
		__func__, ret, module_id);
	return ret;
}
595
596uint32_t core_set_dolby_manufacturer_id(int manufacturer_id)
597{
598 struct adsp_dolby_manufacturer_id payload;
599 int rc = 0;
600
601 pr_debug("%s: manufacturer_id :%d\n", __func__, manufacturer_id);
602 mutex_lock(&(q6core_lcl.cmd_lock));
603 ocm_core_open();
604 if (q6core_lcl.core_handle_q) {
605 payload.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT,
606 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
607 payload.hdr.pkt_size =
608 sizeof(struct adsp_dolby_manufacturer_id);
609 payload.hdr.src_port = 0;
610 payload.hdr.dest_port = 0;
611 payload.hdr.token = 0;
612 payload.hdr.opcode = ADSP_CMD_SET_DOLBY_MANUFACTURER_ID;
613 payload.manufacturer_id = manufacturer_id;
614 pr_debug("%s: Send Dolby security opcode=0x%x manufacturer ID = %d\n",
615 __func__,
616 payload.hdr.opcode, payload.manufacturer_id);
617 rc = apr_send_pkt(q6core_lcl.core_handle_q,
618 (uint32_t *)&payload);
619 if (rc < 0)
620 pr_err("%s: SET_DOLBY_MANUFACTURER_ID failed op[0x%x]rc[%d]\n",
621 __func__, payload.hdr.opcode, rc);
622 }
623 mutex_unlock(&(q6core_lcl.cmd_lock));
624 return rc;
625}
626
627/**
628 * q6core_is_adsp_ready - check adsp ready status
629 *
630 * Returns true if adsp is ready otherwise returns false
631 */
632bool q6core_is_adsp_ready(void)
633{
634 int rc = 0;
635 bool ret = false;
636 struct apr_hdr hdr;
637
638 pr_debug("%s: enter\n", __func__);
639 memset(&hdr, 0, sizeof(hdr));
640 hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
641 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
642 hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, 0);
643 hdr.opcode = AVCS_CMD_ADSP_EVENT_GET_STATE;
644
645 mutex_lock(&(q6core_lcl.cmd_lock));
646 ocm_core_open();
647 if (q6core_lcl.core_handle_q) {
648 q6core_lcl.bus_bw_resp_received = 0;
649 rc = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)&hdr);
650 if (rc < 0) {
651 pr_err("%s: Get ADSP state APR packet send event %d\n",
652 __func__, rc);
653 goto bail;
654 }
655
656 rc = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
657 (q6core_lcl.bus_bw_resp_received == 1),
658 msecs_to_jiffies(Q6_READY_TIMEOUT_MS));
659 if (rc > 0 && q6core_lcl.bus_bw_resp_received) {
660 /* ensure to read updated param by callback thread */
661 rmb();
662 ret = !!q6core_lcl.param;
663 }
664 }
665bail:
666 pr_debug("%s: leave, rc %d, adsp ready %d\n", __func__, rc, ret);
667 mutex_unlock(&(q6core_lcl.cmd_lock));
668 return ret;
669}
670EXPORT_SYMBOL(q6core_is_adsp_ready);
671
/*
 * q6core_map_memory_regions - map @bufcnt physical buffers into the DSP's
 * shared-memory space and return the DSP's map handle.
 * @buf_add:    array of physical addresses to map.
 * @mempool_id: unused here (SHMEM8_4K pool is hard-coded below).
 * @bufsz:      per-buffer sizes in bytes.
 * @bufcnt:     number of buffers / entries in @buf_add and @bufsz.
 * @map_handle: out: handle delivered via AVCS_CMDRSP_SHARED_MEM_MAP_REGIONS.
 *
 * Return convention (callers depend on it — do not normalize):
 *   > 0  success (remaining jiffies from wait_event_timeout)
 *   < 0  failure (-ENOMEM, -EINVAL, -ETIME)
 * A value of 0 is never returned.
 *
 * Caller must hold cmd_lock (serializes bus_bw_resp_received /
 * mem_map_cal_handle against other users of the same waitqueue).
 */
static int q6core_map_memory_regions(phys_addr_t *buf_add, uint32_t mempool_id,
			uint32_t *bufsz, uint32_t bufcnt, uint32_t *map_handle)
{
	struct avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
	struct avs_shared_map_region_payload *mregions = NULL;
	void *mmap_region_cmd = NULL;
	void *payload = NULL;
	int ret = 0;
	int i = 0;
	int cmd_size = 0;

	/* one fixed header plus one region descriptor per buffer */
	cmd_size = sizeof(struct avs_cmd_shared_mem_map_regions)
			+ sizeof(struct avs_shared_map_region_payload)
			* bufcnt;

	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
	if (mmap_region_cmd == NULL)
		return -ENOMEM;

	mmap_regions = (struct avs_cmd_shared_mem_map_regions *)mmap_region_cmd;
	mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
						APR_HDR_LEN(APR_HDR_SIZE),
								APR_PKT_VER);
	mmap_regions->hdr.pkt_size = cmd_size;
	mmap_regions->hdr.src_port = 0;
	mmap_regions->hdr.dest_port = 0;
	mmap_regions->hdr.token = 0;
	mmap_regions->hdr.opcode = AVCS_CMD_SHARED_MEM_MAP_REGIONS;
	mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL & 0x00ff;
	mmap_regions->num_regions = bufcnt & 0x00ff;
	mmap_regions->property_flag = 0x00;

	/* region descriptors immediately follow the fixed header */
	payload = ((u8 *) mmap_region_cmd +
				sizeof(struct avs_cmd_shared_mem_map_regions));
	mregions = (struct avs_shared_map_region_payload *)payload;

	for (i = 0; i < bufcnt; i++) {
		mregions->shm_addr_lsw = lower_32_bits(buf_add[i]);
		mregions->shm_addr_msw =
				msm_audio_populate_upper_32_bits(buf_add[i]);
		mregions->mem_size_bytes = bufsz[i];
		++mregions;
	}

	pr_debug("%s: sending memory map, addr %pK, size %d, bufcnt = %d\n",
		__func__, buf_add, bufsz[0], mmap_regions->num_regions);

	*map_handle = 0;
	q6core_lcl.bus_bw_resp_received = 0;
	ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)
		mmap_regions);
	if (ret < 0) {
		pr_err("%s: mmap regions failed %d\n",
			__func__, ret);
		ret = -EINVAL;
		goto done;
	}

	ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
				(q6core_lcl.bus_bw_resp_received == 1),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: timeout. waited for memory map\n", __func__);
		ret = -ETIME;
		goto done;
	}

	/* handle was stored by the APR callback before the wakeup */
	*map_handle = q6core_lcl.mem_map_cal_handle;
done:
	kfree(mmap_region_cmd);
	return ret;
}
744
/*
 * q6core_memory_unmap_regions - release a shared-memory mapping previously
 * created by q6core_map_memory_regions().
 * @mem_map_handle: DSP map handle to release.
 *
 * Return convention (callers depend on it — do not normalize):
 *   > 0  success (remaining jiffies from wait_event_timeout)
 *   < 0  failure (-EINVAL, -ETIME)
 * A value of 0 is never returned.
 *
 * Caller must hold cmd_lock (shares bus_bw_resp_received / the
 * bus_bw_req_wait queue with other commands).
 */
static int q6core_memory_unmap_regions(uint32_t mem_map_handle)
{
	struct avs_cmd_shared_mem_unmap_regions unmap_regions;
	int ret = 0;

	memset(&unmap_regions, 0, sizeof(unmap_regions));
	unmap_regions.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	unmap_regions.hdr.pkt_size = sizeof(unmap_regions);
	unmap_regions.hdr.src_svc = APR_SVC_ADSP_CORE;
	unmap_regions.hdr.src_domain = APR_DOMAIN_APPS;
	unmap_regions.hdr.src_port = 0;
	unmap_regions.hdr.dest_svc = APR_SVC_ADSP_CORE;
	unmap_regions.hdr.dest_domain = APR_DOMAIN_ADSP;
	unmap_regions.hdr.dest_port = 0;
	unmap_regions.hdr.token = 0;
	unmap_regions.hdr.opcode = AVCS_CMD_SHARED_MEM_UNMAP_REGIONS;
	unmap_regions.mem_map_handle = mem_map_handle;

	q6core_lcl.bus_bw_resp_received = 0;

	pr_debug("%s: unmap regions map handle %d\n",
		__func__, mem_map_handle);

	ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)
		&unmap_regions);
	if (ret < 0) {
		pr_err("%s: unmap regions failed %d\n",
			__func__, ret);
		ret = -EINVAL;
		goto done;
	}

	ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
				(q6core_lcl.bus_bw_resp_received == 1),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: timeout. waited for memory_unmap\n",
			__func__);
		ret = -ETIME;
		goto done;
	}
done:
	return ret;
}
790
791static int q6core_dereg_all_custom_topologies(void)
792{
793 int ret = 0;
794 struct avcs_cmd_deregister_topologies dereg_top;
795
796 memset(&dereg_top, 0, sizeof(dereg_top));
797 dereg_top.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
798 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
799 dereg_top.hdr.pkt_size = sizeof(dereg_top);
800 dereg_top.hdr.src_svc = APR_SVC_ADSP_CORE;
801 dereg_top.hdr.src_domain = APR_DOMAIN_APPS;
802 dereg_top.hdr.src_port = 0;
803 dereg_top.hdr.dest_svc = APR_SVC_ADSP_CORE;
804 dereg_top.hdr.dest_domain = APR_DOMAIN_ADSP;
805 dereg_top.hdr.dest_port = 0;
806 dereg_top.hdr.token = 0;
807 dereg_top.hdr.opcode = AVCS_CMD_DEREGISTER_TOPOLOGIES;
808 dereg_top.payload_addr_lsw = 0;
809 dereg_top.payload_addr_msw = 0;
810 dereg_top.mem_map_handle = 0;
811 dereg_top.payload_size = 0;
812 dereg_top.mode = AVCS_MODE_DEREGISTER_ALL_CUSTOM_TOPOLOGIES;
813
814 q6core_lcl.bus_bw_resp_received = 0;
815
816 pr_debug("%s: Deregister topologies mode %d\n",
817 __func__, dereg_top.mode);
818
819 ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *) &dereg_top);
820 if (ret < 0) {
821 pr_err("%s: Deregister topologies failed %d\n",
822 __func__, ret);
823 goto done;
824 }
825
826 ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
827 (q6core_lcl.bus_bw_resp_received == 1),
828 msecs_to_jiffies(TIMEOUT_MS));
829 if (!ret) {
830 pr_err("%s: wait_event timeout for Deregister topologies\n",
831 __func__);
832 goto done;
833 }
834done:
835 return ret;
836}
837
838static int q6core_send_custom_topologies(void)
839{
840 int ret = 0;
841 int ret2 = 0;
842 struct cal_block_data *cal_block = NULL;
843 struct avcs_cmd_register_topologies reg_top;
844
845 if (!q6core_is_adsp_ready()) {
846 pr_err("%s: ADSP is not ready!\n", __func__);
847 return -ENODEV;
848 }
849
850 memset(&reg_top, 0, sizeof(reg_top));
851 mutex_lock(&q6core_lcl.cal_data[CUST_TOP_CAL]->lock);
852 mutex_lock(&q6core_lcl.cmd_lock);
853
854 cal_block = cal_utils_get_only_cal_block(
855 q6core_lcl.cal_data[CUST_TOP_CAL]);
856 if (cal_block == NULL) {
857 pr_debug("%s: cal block is NULL!\n", __func__);
858 goto unlock;
859 }
860 if (cal_block->cal_data.size <= 0) {
861 pr_debug("%s: cal size is %zd not sending\n",
862 __func__, cal_block->cal_data.size);
863 goto unlock;
864 }
865
866 q6core_dereg_all_custom_topologies();
867
868 ret = q6core_map_memory_regions(&cal_block->cal_data.paddr, 0,
869 (uint32_t *)&cal_block->map_data.map_size, 1,
870 &cal_block->map_data.q6map_handle);
871 if (!ret) {
872 pr_err("%s: q6core_map_memory_regions failed\n", __func__);
873 goto unlock;
874 }
875
876 reg_top.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
877 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
878 reg_top.hdr.pkt_size = sizeof(reg_top);
879 reg_top.hdr.src_svc = APR_SVC_ADSP_CORE;
880 reg_top.hdr.src_domain = APR_DOMAIN_APPS;
881 reg_top.hdr.src_port = 0;
882 reg_top.hdr.dest_svc = APR_SVC_ADSP_CORE;
883 reg_top.hdr.dest_domain = APR_DOMAIN_ADSP;
884 reg_top.hdr.dest_port = 0;
885 reg_top.hdr.token = 0;
886 reg_top.hdr.opcode = AVCS_CMD_REGISTER_TOPOLOGIES;
887 reg_top.payload_addr_lsw =
888 lower_32_bits(cal_block->cal_data.paddr);
889 reg_top.payload_addr_msw =
890 msm_audio_populate_upper_32_bits(cal_block->cal_data.paddr);
891 reg_top.mem_map_handle = cal_block->map_data.q6map_handle;
892 reg_top.payload_size = cal_block->cal_data.size;
893
894 q6core_lcl.adsp_status = 0;
895 q6core_lcl.bus_bw_resp_received = 0;
896
897 pr_debug("%s: Register topologies addr %pK, size %zd, map handle %d\n",
898 __func__, &cal_block->cal_data.paddr, cal_block->cal_data.size,
899 cal_block->map_data.q6map_handle);
900
901 ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *) &reg_top);
902 if (ret < 0) {
903 pr_err("%s: Register topologies failed %d\n",
904 __func__, ret);
905 goto unmap;
906 }
907
908 ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
909 (q6core_lcl.bus_bw_resp_received == 1),
910 msecs_to_jiffies(TIMEOUT_MS));
911 if (!ret) {
912 pr_err("%s: wait_event timeout for Register topologies\n",
913 __func__);
914 goto unmap;
915 }
916
917 if (q6core_lcl.adsp_status < 0)
918 ret = q6core_lcl.adsp_status;
919unmap:
920 ret2 = q6core_memory_unmap_regions(cal_block->map_data.q6map_handle);
921 if (!ret2) {
922 pr_err("%s: q6core_memory_unmap_regions failed for map handle %d\n",
923 __func__, cal_block->map_data.q6map_handle);
924 ret = ret2;
925 goto unlock;
926 }
927
928unlock:
929 mutex_unlock(&q6core_lcl.cmd_lock);
930 mutex_unlock(&q6core_lcl.cal_data[CUST_TOP_CAL]->lock);
931
932 return ret;
933}
934
935static int get_cal_type_index(int32_t cal_type)
936{
937 int ret = -EINVAL;
938
939 switch (cal_type) {
940 case AUDIO_CORE_METAINFO_CAL_TYPE:
941 ret = META_CAL;
942 break;
943 case CORE_CUSTOM_TOPOLOGIES_CAL_TYPE:
944 ret = CUST_TOP_CAL;
945 break;
946 default:
947 pr_err("%s: invalid cal type %d!\n", __func__, cal_type);
948 }
949 return ret;
950}
951
952static int q6core_alloc_cal(int32_t cal_type,
953 size_t data_size, void *data)
954{
955 int ret = 0;
956 int cal_index;
957
958 cal_index = get_cal_type_index(cal_type);
959 if (cal_index < 0) {
960 pr_err("%s: could not get cal index %d!\n",
961 __func__, cal_index);
962 ret = -EINVAL;
963 goto done;
964 }
965
966
967 ret = cal_utils_alloc_cal(data_size, data,
968 q6core_lcl.cal_data[cal_index], 0, NULL);
969 if (ret < 0) {
970 pr_err("%s: cal_utils_alloc_block failed, ret = %d, cal type = %d!\n",
971 __func__, ret, cal_type);
972 goto done;
973 }
974done:
975 return ret;
976}
977
978static int q6core_dealloc_cal(int32_t cal_type,
979 size_t data_size, void *data)
980{
981 int ret = 0;
982 int cal_index;
983
984 cal_index = get_cal_type_index(cal_type);
985 if (cal_index < 0) {
986 pr_err("%s: could not get cal index %d!\n",
987 __func__, cal_index);
988 ret = -EINVAL;
989 goto done;
990 }
991
992
993 ret = cal_utils_dealloc_cal(data_size, data,
994 q6core_lcl.cal_data[cal_index]);
995 if (ret < 0) {
996 pr_err("%s: cal_utils_dealloc_block failed, ret = %d, cal type = %d!\n",
997 __func__, ret, cal_type);
998 goto done;
999 }
1000done:
1001 return ret;
1002}
1003
1004static int q6core_set_cal(int32_t cal_type,
1005 size_t data_size, void *data)
1006{
1007 int ret = 0;
1008 int cal_index;
1009
1010 cal_index = get_cal_type_index(cal_type);
1011 if (cal_index < 0) {
1012 pr_err("%s: could not get cal index %d!\n",
1013 __func__, cal_index);
1014 ret = -EINVAL;
1015 goto done;
1016 }
1017
1018
1019 ret = cal_utils_set_cal(data_size, data,
1020 q6core_lcl.cal_data[cal_index], 0, NULL);
1021 if (ret < 0) {
1022 pr_err("%s: cal_utils_set_cal failed, ret = %d, cal type = %d!\n",
1023 __func__, ret, cal_type);
1024 goto done;
1025 }
1026
1027 if (cal_index == CUST_TOP_CAL)
1028 ret = q6core_send_custom_topologies();
1029done:
1030 return ret;
1031}
1032
/* Tear down all calibration types created by q6core_init_cal_data(). */
static void q6core_delete_cal_data(void)
{
	pr_debug("%s:\n", __func__);

	cal_utils_destroy_cal_types(CORE_MAX_CAL, q6core_lcl.cal_data);
}
1039
1040
/*
 * q6core_init_cal_data - register this driver's two calibration types
 * (metainfo/license and custom topologies) with the cal-utils framework,
 * wiring in the alloc/dealloc/set callbacks above.
 *
 * Returns 0 on success; on failure all partially-created types are torn
 * down and the cal_utils error is returned.
 */
static int q6core_init_cal_data(void)
{
	int ret = 0;
	struct cal_type_info cal_type_info[] = {
		{{AUDIO_CORE_METAINFO_CAL_TYPE,
		{q6core_alloc_cal, q6core_dealloc_cal, NULL,
		q6core_set_cal, NULL, NULL} },
		{NULL, NULL, cal_utils_match_buf_num} },

		{{CORE_CUSTOM_TOPOLOGIES_CAL_TYPE,
		{q6core_alloc_cal, q6core_dealloc_cal, NULL,
		q6core_set_cal, NULL, NULL} },
		{NULL, NULL, cal_utils_match_buf_num} }
	};
	pr_debug("%s:\n", __func__);

	ret = cal_utils_create_cal_types(CORE_MAX_CAL,
		q6core_lcl.cal_data, cal_type_info);
	if (ret < 0) {
		pr_err("%s: could not create cal type!\n",
			__func__);
		goto err;
	}

	return ret;
err:
	q6core_delete_cal_data();
	return ret;
}
1070
/*
 * core_init - module init: zero the driver state, set up the waitqueues,
 * locks and calibration data.
 *
 * NOTE(review): the q6core_init_cal_data() return value is ignored and
 * core_init() always reports success — presumably deliberate so the audio
 * stack still loads without calibration support; confirm before changing.
 * The memset is redundant for static storage but kept as-is.
 */
static int __init core_init(void)
{
	memset(&q6core_lcl, 0, sizeof(struct q6core_str));
	init_waitqueue_head(&q6core_lcl.bus_bw_req_wait);
	init_waitqueue_head(&q6core_lcl.cmd_req_wait);
	init_waitqueue_head(&q6core_lcl.avcs_fwk_ver_req_wait);
	q6core_lcl.cmd_resp_received_flag = FLAG_NONE;
	mutex_init(&q6core_lcl.cmd_lock);
	mutex_init(&q6core_lcl.ver_lock);

	q6core_init_cal_data();

	return 0;
}
module_init(core_init);
1086
/* core_exit - module teardown: destroy locks and calibration data. */
static void __exit core_exit(void)
{
	mutex_destroy(&q6core_lcl.cmd_lock);
	mutex_destroy(&q6core_lcl.ver_lock);
	q6core_delete_cal_data();
}
module_exit(core_exit);
1094MODULE_DESCRIPTION("ADSP core driver");
1095MODULE_LICENSE("GPL v2");