/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <sound/asound.h>
#include <asoc/msm-dts-srs-tm-config.h>
#include <dsp/apr_audio-v2.h>
#include <dsp/q6adm-v2.h>
#include <dsp/q6audio-v2.h>
#include <dsp/q6afe-v2.h>
#include <dsp/audio_cal_utils.h>
#include <ipc/apr.h>
#include "adsp_err.h"

#define TIMEOUT_MS 1000

#define RESET_COPP_ID 99
#define INVALID_COPP_ID 0xFF
/* Used for inband payload copy, max size is 4k */
/* 2 is to account for module & param ID in payload */
#define ADM_GET_PARAMETER_LENGTH (4096 - APR_HDR_SIZE - 2 * sizeof(uint32_t))

#define ULL_SUPPORTED_BITS_PER_SAMPLE 16
#define ULL_SUPPORTED_SAMPLE_RATE 48000

#ifndef CONFIG_DOLBY_DAP
#undef DOLBY_ADM_COPP_TOPOLOGY_ID
#define DOLBY_ADM_COPP_TOPOLOGY_ID 0xFFFFFFFE
#endif

#ifndef CONFIG_DOLBY_DS2
#undef DS2_ADM_COPP_TOPOLOGY_ID
#define DS2_ADM_COPP_TOPOLOGY_ID 0xFFFFFFFF
#endif

/* ENUM for adm_status */
enum adm_cal_status {
	ADM_STATUS_CALIBRATION_REQUIRED = 0,
	ADM_STATUS_MAX,
};

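/*
 * Per-COPP bookkeeping for every AFE port. Each array below is indexed by
 * [port_idx][copp_idx]. The "stat" entries follow the convention used
 * throughout this file: typically -1 while a command is in flight, 0 on
 * success and a positive ADSP error code on failure.
 */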
struct adm_copp {
	atomic_t id[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
	atomic_t cnt[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
	atomic_t topology[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
	atomic_t mode[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
	atomic_t stat[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
	atomic_t rate[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
	atomic_t bit_width[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
	atomic_t channels[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
	atomic_t app_type[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
	atomic_t acdb_id[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
	wait_queue_head_t wait[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
	wait_queue_head_t adm_delay_wait[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
	atomic_t adm_delay_stat[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
	uint32_t adm_delay[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
	unsigned long adm_status[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
};

struct source_tracking_data {
	struct ion_client *ion_client;
	struct ion_handle *ion_handle;
	struct param_outband memmap;
	int apr_cmd_status;
};

struct adm_ctl {
	void *apr;

	struct adm_copp copp;

	atomic_t matrix_map_stat;
	wait_queue_head_t matrix_map_wait;

	atomic_t adm_stat;
	wait_queue_head_t adm_wait;

	struct cal_type_data *cal_data[ADM_MAX_CAL_TYPES];

	atomic_t mem_map_handles[ADM_MEM_MAP_INDEX_MAX];
	atomic_t mem_map_index;

	struct param_outband outband_memmap;
	struct source_tracking_data sourceTrackingData;

	int set_custom_topology;
	int ec_ref_rx;
	int num_ec_ref_rx_chans;
	int ec_ref_rx_bit_width;
	int ec_ref_rx_sampling_rate;
};

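/* File-scope ADM state; every API in this file operates on this single instance. */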
static struct adm_ctl this_adm;

struct adm_multi_ch_map {
	bool set_channel_map;
	char channel_mapping[PCM_FORMAT_MAX_NUM_CHANNEL];
};

#define ADM_MCH_MAP_IDX_PLAYBACK 0
#define ADM_MCH_MAP_IDX_REC 1
static struct adm_multi_ch_map multi_ch_maps[2] = {
	{ false,
	  {0, 0, 0, 0, 0, 0, 0, 0}
	},
	{ false,
	  {0, 0, 0, 0, 0, 0, 0, 0}
	}
};

static int adm_get_parameters[MAX_COPPS_PER_PORT * ADM_GET_PARAMETER_LENGTH];
static int adm_module_topo_list[
	MAX_COPPS_PER_PORT * ADM_GET_TOPO_MODULE_LIST_LENGTH];
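/*
 * Both caches above are carved into MAX_COPPS_PER_PORT fixed-size slots.
 * For adm_get_parameters[], element 0 of each COPP's slot holds the number
 * of valid words cached by adm_callback() (or -1 on error) and the data
 * follows; for adm_module_topo_list[], element 0 holds the module count.
 * See adm_callback(), adm_get_params_v2() and adm_get_pp_topo_module_list().
 */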

int adm_validate_and_get_port_index(int port_id)
{
	int index;
	int ret;

	ret = q6audio_validate_port(port_id);
	if (ret < 0) {
		pr_err("%s: port validation failed id 0x%x ret %d\n",
			__func__, port_id, ret);
		return -EINVAL;
	}

	index = afe_get_port_index(port_id);
	if (index < 0 || index >= AFE_MAX_PORTS) {
		pr_err("%s: Invalid port idx %d port_id 0x%x\n",
			__func__, index, port_id);
		return -EINVAL;
	}
	pr_debug("%s: port_idx- %d\n", __func__, index);
	return index;
}

int adm_get_default_copp_idx(int port_id)
{
	int port_idx = adm_validate_and_get_port_index(port_id), idx;

	if (port_idx < 0) {
		pr_err("%s: Invalid port id: 0x%x", __func__, port_id);
		return -EINVAL;
	}
	pr_debug("%s: port_idx:%d\n", __func__, port_idx);
	for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++) {
		if (atomic_read(&this_adm.copp.id[port_idx][idx]) !=
			RESET_COPP_ID)
			return idx;
	}
	return -EINVAL;
}

int adm_get_topology_for_port_from_copp_id(int port_id, int copp_id)
{
	int port_idx = adm_validate_and_get_port_index(port_id), idx;

	if (port_idx < 0) {
		pr_err("%s: Invalid port id: 0x%x", __func__, port_id);
		return 0;
	}
	for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++)
		if (atomic_read(&this_adm.copp.id[port_idx][idx]) == copp_id)
			return atomic_read(&this_adm.copp.topology[port_idx]
					   [idx]);
	pr_err("%s: Invalid copp_id %d port_id 0x%x\n",
		__func__, copp_id, port_id);
	return 0;
}

int adm_get_topology_for_port_copp_idx(int port_id, int copp_idx)
{
	int port_idx = adm_validate_and_get_port_index(port_id);

	if (port_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
		pr_err("%s: Invalid port: 0x%x copp id: 0x%x",
			__func__, port_id, copp_idx);
		return 0;
	}
	return atomic_read(&this_adm.copp.topology[port_idx][copp_idx]);
}

int adm_get_indexes_from_copp_id(int copp_id, int *copp_idx, int *port_idx)
{
	int p_idx, c_idx;

	for (p_idx = 0; p_idx < AFE_MAX_PORTS; p_idx++) {
		for (c_idx = 0; c_idx < MAX_COPPS_PER_PORT; c_idx++) {
			if (atomic_read(&this_adm.copp.id[p_idx][c_idx])
								== copp_id) {
				if (copp_idx != NULL)
					*copp_idx = c_idx;
				if (port_idx != NULL)
					*port_idx = p_idx;
				return 0;
			}
		}
	}
	return -EINVAL;
}

static int adm_get_copp_id(int port_idx, int copp_idx)
{
	pr_debug("%s: port_idx:%d copp_idx:%d\n", __func__, port_idx, copp_idx);

	if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
		pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
		return -EINVAL;
	}
	return atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
}

static int adm_get_idx_if_copp_exists(int port_idx, int topology, int mode,
				       int rate, int bit_width, int app_type)
{
	int idx;

	pr_debug("%s: port_idx-%d, topology-0x%x, mode-%d, rate-%d, bit_width-%d\n",
		 __func__, port_idx, topology, mode, rate, bit_width);

	for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++)
		if ((topology ==
			atomic_read(&this_adm.copp.topology[port_idx][idx])) &&
		    (mode == atomic_read(&this_adm.copp.mode[port_idx][idx])) &&
		    (rate == atomic_read(&this_adm.copp.rate[port_idx][idx])) &&
		    (bit_width ==
			atomic_read(&this_adm.copp.bit_width[port_idx][idx])) &&
		    (app_type ==
			atomic_read(&this_adm.copp.app_type[port_idx][idx])))
			return idx;
	return -EINVAL;
}

static int adm_get_next_available_copp(int port_idx)
{
	int idx;

	pr_debug("%s:\n", __func__);
	for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++) {
		pr_debug("%s: copp_id:0x%x port_idx:%d idx:%d\n", __func__,
			 atomic_read(&this_adm.copp.id[port_idx][idx]),
			 port_idx, idx);
		if (atomic_read(&this_adm.copp.id[port_idx][idx]) ==
			RESET_COPP_ID)
			break;
	}
	return idx;
}

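/*
 * Sends SRS TruMedia parameters to a COPP with ADM_CMD_SET_PP_PARAMS_V5.
 * All technologies are sent inband except SRS_ID_AEQ, which copies the
 * parameters into the previously mapped SRS outband region and points the
 * command at that shared memory instead.
 */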
int srs_trumedia_open(int port_id, int copp_idx, __s32 srs_tech_id,
		      void *srs_params)
{
	struct adm_cmd_set_pp_params_inband_v5 *adm_params = NULL;
	struct adm_cmd_set_pp_params_v5 *adm_params_ = NULL;
	__s32 sz = 0, param_id, module_id = SRS_TRUMEDIA_MODULE_ID, outband = 0;
	int ret = 0, port_idx;

	pr_debug("SRS - %s", __func__);

	port_id = afe_convert_virtual_to_portid(port_id);
	port_idx = adm_validate_and_get_port_index(port_id);
	if (port_idx < 0) {
		pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
		return -EINVAL;
	}
	switch (srs_tech_id) {
	case SRS_ID_GLOBAL: {
		struct srs_trumedia_params_GLOBAL *glb_params = NULL;

		sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
			sizeof(struct srs_trumedia_params_GLOBAL);
		adm_params = kzalloc(sz, GFP_KERNEL);
		if (!adm_params) {
			pr_err("%s, adm params memory alloc failed\n",
				__func__);
			return -ENOMEM;
		}
		adm_params->payload_size =
			sizeof(struct srs_trumedia_params_GLOBAL) +
			sizeof(struct adm_param_data_v5);
		param_id = SRS_TRUMEDIA_PARAMS;
		adm_params->params.param_size =
			sizeof(struct srs_trumedia_params_GLOBAL);
		glb_params = (struct srs_trumedia_params_GLOBAL *)
			((u8 *)adm_params +
			sizeof(struct adm_cmd_set_pp_params_inband_v5));
		memcpy(glb_params, srs_params,
			sizeof(struct srs_trumedia_params_GLOBAL));
		break;
	}
	case SRS_ID_WOWHD: {
		struct srs_trumedia_params_WOWHD *whd_params = NULL;

		sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
			sizeof(struct srs_trumedia_params_WOWHD);
		adm_params = kzalloc(sz, GFP_KERNEL);
		if (!adm_params) {
			pr_err("%s, adm params memory alloc failed\n",
				__func__);
			return -ENOMEM;
		}
		adm_params->payload_size =
			sizeof(struct srs_trumedia_params_WOWHD) +
			sizeof(struct adm_param_data_v5);
		param_id = SRS_TRUMEDIA_PARAMS_WOWHD;
		adm_params->params.param_size =
			sizeof(struct srs_trumedia_params_WOWHD);
		whd_params = (struct srs_trumedia_params_WOWHD *)
			((u8 *)adm_params +
			sizeof(struct adm_cmd_set_pp_params_inband_v5));
		memcpy(whd_params, srs_params,
			sizeof(struct srs_trumedia_params_WOWHD));
		break;
	}
	case SRS_ID_CSHP: {
		struct srs_trumedia_params_CSHP *chp_params = NULL;

		sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
			sizeof(struct srs_trumedia_params_CSHP);
		adm_params = kzalloc(sz, GFP_KERNEL);
		if (!adm_params) {
			pr_err("%s, adm params memory alloc failed\n",
				__func__);
			return -ENOMEM;
		}
		adm_params->payload_size =
			sizeof(struct srs_trumedia_params_CSHP) +
			sizeof(struct adm_param_data_v5);
		param_id = SRS_TRUMEDIA_PARAMS_CSHP;
		adm_params->params.param_size =
			sizeof(struct srs_trumedia_params_CSHP);
		chp_params = (struct srs_trumedia_params_CSHP *)
			((u8 *)adm_params +
			sizeof(struct adm_cmd_set_pp_params_inband_v5));
		memcpy(chp_params, srs_params,
			sizeof(struct srs_trumedia_params_CSHP));
		break;
	}
	case SRS_ID_HPF: {
		struct srs_trumedia_params_HPF *hpf_params = NULL;

		sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
			sizeof(struct srs_trumedia_params_HPF);
		adm_params = kzalloc(sz, GFP_KERNEL);
		if (!adm_params) {
			pr_err("%s, adm params memory alloc failed\n",
				__func__);
			return -ENOMEM;
		}
		adm_params->payload_size =
			sizeof(struct srs_trumedia_params_HPF) +
			sizeof(struct adm_param_data_v5);
		param_id = SRS_TRUMEDIA_PARAMS_HPF;
		adm_params->params.param_size =
			sizeof(struct srs_trumedia_params_HPF);
		hpf_params = (struct srs_trumedia_params_HPF *)
			((u8 *)adm_params +
			sizeof(struct adm_cmd_set_pp_params_inband_v5));
		memcpy(hpf_params, srs_params,
			sizeof(struct srs_trumedia_params_HPF));
		break;
	}
	case SRS_ID_AEQ: {
		int *update_params_ptr = (int *)this_adm.outband_memmap.kvaddr;

		outband = 1;
		adm_params = kzalloc(sizeof(struct adm_cmd_set_pp_params_v5),
				     GFP_KERNEL);
		adm_params_ = (struct adm_cmd_set_pp_params_v5 *)adm_params;
		if (!adm_params_) {
			pr_err("%s, adm params memory alloc failed\n",
				__func__);
			return -ENOMEM;
		}

		sz = sizeof(struct srs_trumedia_params_AEQ);
		if (update_params_ptr == NULL) {
			pr_err("ADM_SRS_TRUMEDIA - %s: null memmap for AEQ params\n",
				__func__);
			ret = -EINVAL;
			goto fail_cmd;
		}
		param_id = SRS_TRUMEDIA_PARAMS_AEQ;
		*update_params_ptr++ = module_id;
		*update_params_ptr++ = param_id;
		*update_params_ptr++ = sz;
		memcpy(update_params_ptr, srs_params, sz);

		adm_params_->payload_size = sz + 12;

		break;
	}
	case SRS_ID_HL: {
		struct srs_trumedia_params_HL *hl_params = NULL;

		sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
			sizeof(struct srs_trumedia_params_HL);
		adm_params = kzalloc(sz, GFP_KERNEL);
		if (!adm_params) {
			pr_err("%s, adm params memory alloc failed\n",
				__func__);
			return -ENOMEM;
		}
		adm_params->payload_size =
			sizeof(struct srs_trumedia_params_HL) +
			sizeof(struct adm_param_data_v5);
		param_id = SRS_TRUMEDIA_PARAMS_HL;
		adm_params->params.param_size =
			sizeof(struct srs_trumedia_params_HL);
		hl_params = (struct srs_trumedia_params_HL *)
			((u8 *)adm_params +
			sizeof(struct adm_cmd_set_pp_params_inband_v5));
		memcpy(hl_params, srs_params,
			sizeof(struct srs_trumedia_params_HL));
		break;
	}
	case SRS_ID_GEQ: {
		struct srs_trumedia_params_GEQ *geq_params = NULL;

		sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
			sizeof(struct srs_trumedia_params_GEQ);
		adm_params = kzalloc(sz, GFP_KERNEL);
		if (!adm_params) {
			pr_err("%s, adm params memory alloc failed\n",
				__func__);
			return -ENOMEM;
		}
		adm_params->payload_size =
			sizeof(struct srs_trumedia_params_GEQ) +
			sizeof(struct adm_param_data_v5);
		param_id = SRS_TRUMEDIA_PARAMS_GEQ;
		adm_params->params.param_size =
			sizeof(struct srs_trumedia_params_GEQ);
		geq_params = (struct srs_trumedia_params_GEQ *)
			((u8 *)adm_params +
			sizeof(struct adm_cmd_set_pp_params_inband_v5));
		memcpy(geq_params, srs_params,
			sizeof(struct srs_trumedia_params_GEQ));
		pr_debug("SRS - %s: GEQ params prepared\n", __func__);
		break;
	}
	default:
		goto fail_cmd;
	}

	adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	adm_params->hdr.src_svc = APR_SVC_ADM;
	adm_params->hdr.src_domain = APR_DOMAIN_APPS;
	adm_params->hdr.src_port = port_id;
	adm_params->hdr.dest_svc = APR_SVC_ADM;
	adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
	adm_params->hdr.dest_port =
			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
	adm_params->hdr.token = port_idx << 16 | copp_idx;
	adm_params->hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
	if (outband && this_adm.outband_memmap.paddr) {
		adm_params->hdr.pkt_size =
			sizeof(struct adm_cmd_set_pp_params_v5);
		adm_params->payload_addr_lsw = lower_32_bits(
			this_adm.outband_memmap.paddr);
		adm_params->payload_addr_msw = msm_audio_populate_upper_32_bits(
			this_adm.outband_memmap.paddr);
		adm_params->mem_map_handle = atomic_read(&this_adm.
			mem_map_handles[ADM_SRS_TRUMEDIA]);
	} else {
		adm_params->hdr.pkt_size = sz;
		adm_params->payload_addr_lsw = 0;
		adm_params->payload_addr_msw = 0;
		adm_params->mem_map_handle = 0;

		adm_params->params.module_id = module_id;
		adm_params->params.param_id = param_id;
		adm_params->params.reserved = 0;
	}

	pr_debug("SRS - %s: Command was sent now check Q6 - port id = %d, size %d, module id %x, param id %x.\n",
		 __func__, adm_params->hdr.dest_port,
		 adm_params->payload_size, module_id, param_id);

	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
	if (ret < 0) {
		pr_err("SRS - %s: ADM enable for port %d failed\n", __func__,
			port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	/* Wait for the callback with copp id */
	ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
			atomic_read(&this_adm.copp.stat
			[port_idx][copp_idx]) >= 0,
			msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: SRS set params timed out port = %d\n",
			__func__, port_id);
		ret = -EINVAL;
		goto fail_cmd;
	} else if (atomic_read(&this_adm.copp.stat
				[port_idx][copp_idx]) > 0) {
		pr_err("%s: DSP returned error[%s]\n",
			__func__, adsp_err_get_err_str(
			atomic_read(&this_adm.copp.stat
			[port_idx][copp_idx])));
		ret = adsp_err_get_lnx_err_code(
				atomic_read(&this_adm.copp.stat
					[port_idx][copp_idx]));
		goto fail_cmd;
	}

fail_cmd:
	kfree(adm_params);
	return ret;
}

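/*
 * Flattens the channel mixer weight table for one input stream
 * (channel_index) into the packed u16 coefficient array expected by the
 * DEFAULT_CHMIXER_PARAM_ID_COEFF payload.
 */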
static int adm_populate_channel_weight(u16 *ptr,
				       struct msm_pcm_channel_mixer *ch_mixer,
				       int channel_index)
{
	u16 i, j, start_index = 0;

	if (channel_index > ch_mixer->output_channel) {
		pr_err("%s: channel index %d is larger than output_channel %d\n",
			__func__, channel_index, ch_mixer->output_channel);
		return -EINVAL;
	}

	for (i = 0; i < ch_mixer->output_channel; i++) {
		pr_debug("%s: weight for output %d:", __func__, i);
		for (j = 0; j < ADM_MAX_CHANNELS; j++)
			pr_debug(" %d",
				ch_mixer->channel_weight[i][j]);
		pr_debug("\n");
	}

	for (i = 0; i < channel_index; ++i)
		start_index += ch_mixer->input_channels[i];

	for (i = 0; i < ch_mixer->output_channel; ++i) {
		for (j = start_index;
			j < start_index +
			ch_mixer->input_channels[channel_index]; j++) {
			*ptr = ch_mixer->channel_weight[i][j];
			pr_debug("%s: ptr[%d][%d] = %d\n",
				__func__, i, j, *ptr);
			ptr++;
		}
	}

	return 0;
}

/*
 * adm_programable_channel_mixer
 *
 * Receives port_id, copp_idx, session_id, session_type, ch_mixer
 * and channel_index to send ADM command to mix COPP data.
 *
 * port_id - Passed value, port_id for which backend is wanted
 * copp_idx - Passed value, copp_idx for which COPP is wanted
 * session_id - Passed value, session_id for which session is needed
 * session_type - Passed value, session_type for RX or TX
 * ch_mixer - Passed value, ch_mixer for which channel mixer config is needed
 * channel_index - Passed value, channel_index for which channel is needed
 */
int adm_programable_channel_mixer(int port_id, int copp_idx, int session_id,
				  int session_type,
				  struct msm_pcm_channel_mixer *ch_mixer,
				  int channel_index)
{
	struct adm_cmd_set_pspd_mtmx_strtr_params_v5 *adm_params = NULL;
	struct adm_param_data_v5 data_v5;
	int ret = 0, port_idx, sz = 0, param_size = 0;
	u16 *adm_pspd_params;
	u16 *ptr;
	int index = 0;

	pr_debug("%s: port_id = %d\n", __func__, port_id);
	port_id = afe_convert_virtual_to_portid(port_id);
	port_idx = adm_validate_and_get_port_index(port_id);
	if (port_idx < 0) {
		pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
		return -EINVAL;
	}
	/*
	 * First 8 bytes are 4 bytes as rule number, 2 bytes as output
	 * channel and 2 bytes as input channel.
	 * 2 * ch_mixer->output_channel means output channel mapping.
	 * 2 * ch_mixer->input_channels[channel_index]) means input
	 * channel mapping.
	 * 2 * ch_mixer->input_channels[channel_index] *
	 * ch_mixer->output_channel) means the channel mixer weighting
	 * coefficients.
	 * param_size needs to be a multiple of 4 bytes.
	 */
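	/*
	 * For example, with 2 input and 2 output channels this is
	 * 2 * (4 + 2 + 2 + 2 * 2) = 24 bytes, already a multiple of 4.
	 */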

	param_size = 2 * (4 + ch_mixer->output_channel +
			ch_mixer->input_channels[channel_index] +
			ch_mixer->input_channels[channel_index] *
			ch_mixer->output_channel);
	param_size = roundup(param_size, 4);

	sz = sizeof(struct adm_cmd_set_pspd_mtmx_strtr_params_v5) +
		sizeof(struct default_chmixer_param_id_coeff) +
		sizeof(struct adm_param_data_v5) + param_size;
	pr_debug("%s: sz = %d\n", __func__, sz);
	adm_params = kzalloc(sz, GFP_KERNEL);
	if (!adm_params)
		return -ENOMEM;

	adm_params->payload_addr_lsw = 0;
	adm_params->payload_addr_msw = 0;
	adm_params->mem_map_handle = 0;
	adm_params->direction = session_type;
	adm_params->sessionid = session_id;
	pr_debug("%s: copp_id = %d, session id %d\n", __func__,
		 atomic_read(&this_adm.copp.id[port_idx][copp_idx]),
		 session_id);
	adm_params->deviceid = atomic_read(
				&this_adm.copp.id[port_idx][copp_idx]);
	adm_params->reserved = 0;

	data_v5.module_id = MTMX_MODULE_ID_DEFAULT_CHMIXER;
	data_v5.param_id = DEFAULT_CHMIXER_PARAM_ID_COEFF;
	data_v5.reserved = 0;
	data_v5.param_size = param_size;
	adm_params->payload_size =
			sizeof(struct default_chmixer_param_id_coeff) +
			sizeof(struct adm_param_data_v5) + data_v5.param_size;
	adm_pspd_params = (u16 *)((u8 *)adm_params +
			sizeof(struct adm_cmd_set_pspd_mtmx_strtr_params_v5));
	memcpy(adm_pspd_params, &data_v5, sizeof(data_v5));

	adm_pspd_params = (u16 *)((u8 *)adm_params +
			sizeof(struct adm_cmd_set_pspd_mtmx_strtr_params_v5)
			+ sizeof(data_v5));

	adm_pspd_params[0] = ch_mixer->rule;
	adm_pspd_params[2] = ch_mixer->output_channel;
	adm_pspd_params[3] = ch_mixer->input_channels[channel_index];
	index = 4;

	if (ch_mixer->output_channel == 1) {
		adm_pspd_params[index] = PCM_CHANNEL_FC;
	} else if (ch_mixer->output_channel == 2) {
		adm_pspd_params[index] = PCM_CHANNEL_FL;
		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
	} else if (ch_mixer->output_channel == 3) {
		adm_pspd_params[index] = PCM_CHANNEL_FL;
		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
		adm_pspd_params[index + 2] = PCM_CHANNEL_FC;
	} else if (ch_mixer->output_channel == 4) {
		adm_pspd_params[index] = PCM_CHANNEL_FL;
		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
		adm_pspd_params[index + 2] = PCM_CHANNEL_LS;
		adm_pspd_params[index + 3] = PCM_CHANNEL_RS;
	} else if (ch_mixer->output_channel == 5) {
		adm_pspd_params[index] = PCM_CHANNEL_FL;
		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
		adm_pspd_params[index + 2] = PCM_CHANNEL_FC;
		adm_pspd_params[index + 3] = PCM_CHANNEL_LS;
		adm_pspd_params[index + 4] = PCM_CHANNEL_RS;
	} else if (ch_mixer->output_channel == 6) {
		adm_pspd_params[index] = PCM_CHANNEL_FL;
		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
		adm_pspd_params[index + 2] = PCM_CHANNEL_LFE;
		adm_pspd_params[index + 3] = PCM_CHANNEL_FC;
		adm_pspd_params[index + 4] = PCM_CHANNEL_LS;
		adm_pspd_params[index + 5] = PCM_CHANNEL_RS;
	} else if (ch_mixer->output_channel == 8) {
		adm_pspd_params[index] = PCM_CHANNEL_FL;
		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
		adm_pspd_params[index + 2] = PCM_CHANNEL_LFE;
		adm_pspd_params[index + 3] = PCM_CHANNEL_FC;
		adm_pspd_params[index + 4] = PCM_CHANNEL_LS;
		adm_pspd_params[index + 5] = PCM_CHANNEL_RS;
		adm_pspd_params[index + 6] = PCM_CHANNEL_LB;
		adm_pspd_params[index + 7] = PCM_CHANNEL_RB;
	}

	index = index + ch_mixer->output_channel;
	if (ch_mixer->input_channels[channel_index] == 1) {
		adm_pspd_params[index] = PCM_CHANNEL_FC;
	} else if (ch_mixer->input_channels[channel_index] == 2) {
		adm_pspd_params[index] = PCM_CHANNEL_FL;
		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
	} else if (ch_mixer->input_channels[channel_index] == 3) {
		adm_pspd_params[index] = PCM_CHANNEL_FL;
		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
		adm_pspd_params[index + 2] = PCM_CHANNEL_FC;
	} else if (ch_mixer->input_channels[channel_index] == 4) {
		adm_pspd_params[index] = PCM_CHANNEL_FL;
		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
		adm_pspd_params[index + 2] = PCM_CHANNEL_LS;
		adm_pspd_params[index + 3] = PCM_CHANNEL_RS;
	} else if (ch_mixer->input_channels[channel_index] == 5) {
		adm_pspd_params[index] = PCM_CHANNEL_FL;
		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
		adm_pspd_params[index + 2] = PCM_CHANNEL_FC;
		adm_pspd_params[index + 3] = PCM_CHANNEL_LS;
		adm_pspd_params[index + 4] = PCM_CHANNEL_RS;
	} else if (ch_mixer->input_channels[channel_index] == 6) {
		adm_pspd_params[index] = PCM_CHANNEL_FL;
		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
		adm_pspd_params[index + 2] = PCM_CHANNEL_LFE;
		adm_pspd_params[index + 3] = PCM_CHANNEL_FC;
		adm_pspd_params[index + 4] = PCM_CHANNEL_LS;
		adm_pspd_params[index + 5] = PCM_CHANNEL_RS;
	} else if (ch_mixer->input_channels[channel_index] == 8) {
		adm_pspd_params[index] = PCM_CHANNEL_FL;
		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
		adm_pspd_params[index + 2] = PCM_CHANNEL_LFE;
		adm_pspd_params[index + 3] = PCM_CHANNEL_FC;
		adm_pspd_params[index + 4] = PCM_CHANNEL_LS;
		adm_pspd_params[index + 5] = PCM_CHANNEL_RS;
		adm_pspd_params[index + 6] = PCM_CHANNEL_LB;
		adm_pspd_params[index + 7] = PCM_CHANNEL_RB;
	}

	index = index + ch_mixer->input_channels[channel_index];
	ret = adm_populate_channel_weight(&adm_pspd_params[index],
					ch_mixer, channel_index);
	if (ret) {
		pr_err("%s: fail to get channel weight with error %d\n",
			__func__, ret);
		goto fail_cmd;
	}

	adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	adm_params->hdr.src_svc = APR_SVC_ADM;
	adm_params->hdr.src_domain = APR_DOMAIN_APPS;
	adm_params->hdr.src_port = port_id;
	adm_params->hdr.dest_svc = APR_SVC_ADM;
	adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
	adm_params->hdr.dest_port =
			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
	adm_params->hdr.token = port_idx << 16 | copp_idx;
	adm_params->hdr.opcode = ADM_CMD_SET_PSPD_MTMX_STRTR_PARAMS_V5;
	adm_params->hdr.pkt_size = sz;
	adm_params->payload_addr_lsw = 0;
	adm_params->payload_addr_msw = 0;
	adm_params->mem_map_handle = 0;
	adm_params->reserved = 0;

	ptr = (u16 *)adm_params;
	for (index = 0; index < (sz / 2); index++)
		pr_debug("%s: adm_params[%d] = 0x%x\n",
			__func__, index, (unsigned int)ptr[index]);

	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
	if (ret < 0) {
		pr_err("%s: Set params failed port %d rc %d\n", __func__,
			port_id, ret);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
			atomic_read(
			&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
			msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: set params timed out port = %d\n",
			__func__, port_id);
		ret = -ETIMEDOUT;
		goto fail_cmd;
	}
	ret = 0;
fail_cmd:
	kfree(adm_params);

	return ret;
}

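/*
 * Applies custom stereo (PSPD matrix/stream-router) parameters for one
 * session on the given COPP; the caller supplies the raw parameter blob.
 */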
int adm_set_stereo_to_custom_stereo(int port_id, int copp_idx,
				    unsigned int session_id, char *params,
				    uint32_t params_length)
{
	struct adm_cmd_set_pspd_mtmx_strtr_params_v5 *adm_params = NULL;
	int sz, rc = 0, port_idx;

	pr_debug("%s:\n", __func__);
	port_id = afe_convert_virtual_to_portid(port_id);
	port_idx = adm_validate_and_get_port_index(port_id);
	if (port_idx < 0) {
		pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
		return -EINVAL;
	}

	sz = sizeof(struct adm_cmd_set_pspd_mtmx_strtr_params_v5) +
		params_length;
	adm_params = kzalloc(sz, GFP_KERNEL);
	if (!adm_params) {
		pr_err("%s, adm params memory alloc failed\n", __func__);
		return -ENOMEM;
	}

	memcpy(((u8 *)adm_params +
		sizeof(struct adm_cmd_set_pspd_mtmx_strtr_params_v5)),
		params, params_length);
	adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
					APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	adm_params->hdr.pkt_size = sz;
	adm_params->hdr.src_svc = APR_SVC_ADM;
	adm_params->hdr.src_domain = APR_DOMAIN_APPS;
	adm_params->hdr.src_port = port_id;
	adm_params->hdr.dest_svc = APR_SVC_ADM;
	adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
	adm_params->hdr.dest_port = 0; /* Ignored */
	adm_params->hdr.token = port_idx << 16 | copp_idx;
	adm_params->hdr.opcode = ADM_CMD_SET_PSPD_MTMX_STRTR_PARAMS_V5;
	adm_params->payload_addr_lsw = 0;
	adm_params->payload_addr_msw = 0;
	adm_params->mem_map_handle = 0;
	adm_params->payload_size = params_length;
	/* direction RX as 0 */
	adm_params->direction = ADM_MATRIX_ID_AUDIO_RX;
	/* session id for this cmd to be applied on */
	adm_params->sessionid = session_id;
	adm_params->deviceid =
			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
	adm_params->reserved = 0;
	pr_debug("%s: deviceid %d, session_id %d, src_port %d, dest_port %d\n",
		 __func__, adm_params->deviceid, adm_params->sessionid,
		 adm_params->hdr.src_port, adm_params->hdr.dest_port);
	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
	rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
	if (rc < 0) {
		pr_err("%s: Set params failed port = 0x%x rc %d\n",
			__func__, port_id, rc);
		rc = -EINVAL;
		goto set_stereo_to_custom_stereo_return;
	}
	/* Wait for the callback */
	rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
			atomic_read(&this_adm.copp.stat
			[port_idx][copp_idx]) >= 0,
			msecs_to_jiffies(TIMEOUT_MS));
	if (!rc) {
		pr_err("%s: Set params timed out port = 0x%x\n", __func__,
			port_id);
		rc = -EINVAL;
		goto set_stereo_to_custom_stereo_return;
	} else if (atomic_read(&this_adm.copp.stat
				[port_idx][copp_idx]) > 0) {
		pr_err("%s: DSP returned error[%s]\n", __func__,
			adsp_err_get_err_str(atomic_read(
			&this_adm.copp.stat
			[port_idx][copp_idx])));
		rc = adsp_err_get_lnx_err_code(
				atomic_read(&this_adm.copp.stat
					[port_idx][copp_idx]));
		goto set_stereo_to_custom_stereo_return;
	}
	rc = 0;
set_stereo_to_custom_stereo_return:
	kfree(adm_params);
	return rc;
}

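/* Sends a caller-built Dolby DAP parameter blob inband via ADM_CMD_SET_PP_PARAMS_V5. */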
int adm_dolby_dap_send_params(int port_id, int copp_idx, char *params,
			      uint32_t params_length)
{
	struct adm_cmd_set_pp_params_v5 *adm_params = NULL;
	int sz, rc = 0;
	int port_idx;

	pr_debug("%s:\n", __func__);
	port_id = afe_convert_virtual_to_portid(port_id);
	port_idx = adm_validate_and_get_port_index(port_id);
	if (port_idx < 0) {
		pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
		return -EINVAL;
	}

	sz = sizeof(struct adm_cmd_set_pp_params_v5) + params_length;
	adm_params = kzalloc(sz, GFP_KERNEL);
	if (!adm_params) {
		pr_err("%s, adm params memory alloc failed", __func__);
		return -ENOMEM;
	}

	memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_set_pp_params_v5)),
		params, params_length);
	adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
					APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	adm_params->hdr.pkt_size = sz;
	adm_params->hdr.src_svc = APR_SVC_ADM;
	adm_params->hdr.src_domain = APR_DOMAIN_APPS;
	adm_params->hdr.src_port = port_id;
	adm_params->hdr.dest_svc = APR_SVC_ADM;
	adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
	adm_params->hdr.dest_port =
			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
	adm_params->hdr.token = port_idx << 16 | copp_idx;
	adm_params->hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
	adm_params->payload_addr_lsw = 0;
	adm_params->payload_addr_msw = 0;
	adm_params->mem_map_handle = 0;
	adm_params->payload_size = params_length;

	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
	rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
	if (rc < 0) {
		pr_err("%s: Set params failed port = 0x%x rc %d\n",
			__func__, port_id, rc);
		rc = -EINVAL;
		goto dolby_dap_send_param_return;
	}
	/* Wait for the callback */
	rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
		atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
		msecs_to_jiffies(TIMEOUT_MS));
	if (!rc) {
		pr_err("%s: Set params timed out port = 0x%x\n",
			__func__, port_id);
		rc = -EINVAL;
		goto dolby_dap_send_param_return;
	} else if (atomic_read(&this_adm.copp.stat
				[port_idx][copp_idx]) > 0) {
		pr_err("%s: DSP returned error[%s]\n",
			__func__, adsp_err_get_err_str(
			atomic_read(&this_adm.copp.stat
			[port_idx][copp_idx])));
		rc = adsp_err_get_lnx_err_code(
				atomic_read(&this_adm.copp.stat
					[port_idx][copp_idx]));
		goto dolby_dap_send_param_return;
	}
	rc = 0;
dolby_dap_send_param_return:
	kfree(adm_params);
	return rc;
}

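/* Generic inband ADM_CMD_SET_PP_PARAMS_V5 sender for a caller-built parameter blob. */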
int adm_send_params_v5(int port_id, int copp_idx, char *params,
		       uint32_t params_length)
{
	struct adm_cmd_set_pp_params_v5 *adm_params = NULL;
	int rc = 0;
	int sz, port_idx;

	pr_debug("%s:\n", __func__);
	port_id = afe_convert_virtual_to_portid(port_id);
	port_idx = adm_validate_and_get_port_index(port_id);
	if (port_idx < 0) {
		pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
		return -EINVAL;
	}

	sz = sizeof(struct adm_cmd_set_pp_params_v5) + params_length;
	adm_params = kzalloc(sz, GFP_KERNEL);
	if (!adm_params) {
		pr_err("%s, adm params memory alloc failed", __func__);
		return -ENOMEM;
	}

	memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_set_pp_params_v5)),
		params, params_length);
	adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
					APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	adm_params->hdr.pkt_size = sz;
	adm_params->hdr.src_svc = APR_SVC_ADM;
	adm_params->hdr.src_domain = APR_DOMAIN_APPS;
	adm_params->hdr.src_port = port_id;
	adm_params->hdr.dest_svc = APR_SVC_ADM;
	adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
	adm_params->hdr.dest_port =
			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
	adm_params->hdr.token = port_idx << 16 | copp_idx;
	adm_params->hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
	adm_params->payload_addr_lsw = 0;
	adm_params->payload_addr_msw = 0;
	adm_params->mem_map_handle = 0;
	adm_params->payload_size = params_length;

	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
	rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
	if (rc < 0) {
		pr_err("%s: Set params failed port = 0x%x rc %d\n",
			__func__, port_id, rc);
		rc = -EINVAL;
		goto send_param_return;
	}
	/* Wait for the callback */
	rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
		atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
		msecs_to_jiffies(TIMEOUT_MS));
	if (!rc) {
		pr_err("%s: Set params timed out port = 0x%x\n",
			__func__, port_id);
		rc = -EINVAL;
		goto send_param_return;
	} else if (atomic_read(&this_adm.copp.stat
				[port_idx][copp_idx]) > 0) {
		pr_err("%s: DSP returned error[%s]\n",
			__func__, adsp_err_get_err_str(
			atomic_read(&this_adm.copp.stat
			[port_idx][copp_idx])));
		rc = adsp_err_get_lnx_err_code(
				atomic_read(&this_adm.copp.stat
					[port_idx][copp_idx]));
		goto send_param_return;
	}
	rc = 0;
send_param_return:
	kfree(adm_params);
	return rc;
}

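/*
 * Issues ADM_CMD_GET_PP_PARAMS_V5 and copies the values cached by
 * adm_callback() out of adm_get_parameters[] into the caller's buffer.
 */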
int adm_get_params_v2(int port_id, int copp_idx, uint32_t module_id,
		      uint32_t param_id, uint32_t params_length,
		      char *params, uint32_t client_id)
{
	struct adm_cmd_get_pp_params_v5 *adm_params = NULL;
	int rc = 0, i = 0;
	int port_idx, idx;
	int *params_data = (int *)params;
	uint64_t sz = 0;

	port_id = afe_convert_virtual_to_portid(port_id);
	port_idx = adm_validate_and_get_port_index(port_id);
	if (port_idx < 0) {
		pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
		return -EINVAL;
	}

	sz = (uint64_t)sizeof(struct adm_cmd_get_pp_params_v5) +
		(uint64_t)params_length;
	/*
	 * Check if the value of "sz" (which is ultimately assigned to
	 * "hdr.pkt_size") crosses U16_MAX.
	 */
	if (sz > U16_MAX) {
		pr_err("%s: Invalid params_length\n", __func__);
		return -EINVAL;
	}
	adm_params = kzalloc(sz, GFP_KERNEL);
	if (!adm_params) {
		pr_err("%s: adm params memory alloc failed", __func__);
		return -ENOMEM;
	}

	memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_get_pp_params_v5)),
		params, params_length);
	adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
					APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	adm_params->hdr.pkt_size = sz;
	adm_params->hdr.src_svc = APR_SVC_ADM;
	adm_params->hdr.src_domain = APR_DOMAIN_APPS;
	adm_params->hdr.src_port = port_id;
	adm_params->hdr.dest_svc = APR_SVC_ADM;
	adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
	adm_params->hdr.dest_port =
			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
	adm_params->hdr.token = port_idx << 16 | client_id << 8 | copp_idx;
	adm_params->hdr.opcode = ADM_CMD_GET_PP_PARAMS_V5;
	adm_params->data_payload_addr_lsw = 0;
	adm_params->data_payload_addr_msw = 0;
	adm_params->mem_map_handle = 0;
	adm_params->module_id = module_id;
	adm_params->param_id = param_id;
	adm_params->param_max_size = params_length;
	adm_params->reserved = 0;

	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
	rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
	if (rc < 0) {
		pr_err("%s: Failed to Get Params on port_id 0x%x %d\n",
			__func__, port_id, rc);
		rc = -EINVAL;
		goto adm_get_param_return;
	}
	/* Wait for the callback with copp id */
	rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
		atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
		msecs_to_jiffies(TIMEOUT_MS));
	if (!rc) {
		pr_err("%s: get params timed out port_id = 0x%x\n", __func__,
			port_id);
		rc = -EINVAL;
		goto adm_get_param_return;
	} else if (atomic_read(&this_adm.copp.stat
				[port_idx][copp_idx]) > 0) {
		pr_err("%s: DSP returned error[%s]\n",
			__func__, adsp_err_get_err_str(
			atomic_read(&this_adm.copp.stat
			[port_idx][copp_idx])));
		rc = adsp_err_get_lnx_err_code(
				atomic_read(&this_adm.copp.stat
					[port_idx][copp_idx]));
		goto adm_get_param_return;
	}
	idx = ADM_GET_PARAMETER_LENGTH * copp_idx;

	if (adm_get_parameters[idx] < 0) {
		pr_err("%s: Size is invalid %d\n", __func__,
			adm_get_parameters[idx]);
		rc = -EINVAL;
		goto adm_get_param_return;
	}
	if ((params_data) &&
	    (ARRAY_SIZE(adm_get_parameters) >
	    idx) &&
	    (ARRAY_SIZE(adm_get_parameters) >=
	    1+adm_get_parameters[idx]+idx) &&
	    (params_length/sizeof(uint32_t) >=
	    adm_get_parameters[idx])) {
		for (i = 0; i < adm_get_parameters[idx]; i++)
			params_data[i] = adm_get_parameters[1+i+idx];

	} else {
		pr_err("%s: Get param data not copied! get_param array size %zd, index %d, params array size %zd, index %d\n",
			__func__, ARRAY_SIZE(adm_get_parameters),
			(1+adm_get_parameters[idx]+idx),
			params_length/sizeof(int),
			adm_get_parameters[idx]);
	}
	rc = 0;
adm_get_param_return:
	kfree(adm_params);

	return rc;
}

int adm_get_params(int port_id, int copp_idx, uint32_t module_id,
		   uint32_t param_id, uint32_t params_length, char *params)
{
	return adm_get_params_v2(port_id, copp_idx, module_id, param_id,
				 params_length, params, 0);
}

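/*
 * Queries the postprocessing topology module list for a COPP and copies the
 * result cached in adm_module_topo_list[] into the caller's buffer.
 */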
int adm_get_pp_topo_module_list(int port_id, int copp_idx, int32_t param_length,
				char *params)
{
	struct adm_cmd_get_pp_topo_module_list_t *adm_pp_module_list = NULL;
	int sz, rc = 0, i = 0;
	int port_idx, idx;
	int32_t *params_data = (int32_t *)params;
	int *topo_list;

	pr_debug("%s : port_id %x", __func__, port_id);
	port_id = afe_convert_virtual_to_portid(port_id);
	port_idx = adm_validate_and_get_port_index(port_id);
	if (port_idx < 0) {
		pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
		return -EINVAL;
	}

	if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
		pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
		return -EINVAL;
	}

	sz = sizeof(struct adm_cmd_get_pp_topo_module_list_t) + param_length;
	adm_pp_module_list = kzalloc(sz, GFP_KERNEL);
	if (!adm_pp_module_list) {
		pr_err("%s, adm params memory alloc failed", __func__);
		return -ENOMEM;
	}

	memcpy(((u8 *)adm_pp_module_list +
		sizeof(struct adm_cmd_get_pp_topo_module_list_t)),
		params, param_length);
	adm_pp_module_list->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
					APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	adm_pp_module_list->hdr.pkt_size = sz;
	adm_pp_module_list->hdr.src_svc = APR_SVC_ADM;
	adm_pp_module_list->hdr.src_domain = APR_DOMAIN_APPS;
	adm_pp_module_list->hdr.src_port = port_id;
	adm_pp_module_list->hdr.dest_svc = APR_SVC_ADM;
	adm_pp_module_list->hdr.dest_domain = APR_DOMAIN_ADSP;
	adm_pp_module_list->hdr.dest_port =
			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
	adm_pp_module_list->hdr.token = port_idx << 16 | copp_idx;
	adm_pp_module_list->hdr.opcode = ADM_CMD_GET_PP_TOPO_MODULE_LIST;
	adm_pp_module_list->param_max_size = param_length;
	/* Payload address and mmap handle set to zero by kzalloc */

	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);

	rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_pp_module_list);
	if (rc < 0) {
		pr_err("%s: Failed to Get Params on port %d\n", __func__,
			port_id);
		rc = -EINVAL;
		goto adm_pp_module_list_l;
	}
	/* Wait for the callback with copp id */
	rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
		atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
		msecs_to_jiffies(TIMEOUT_MS));
	if (!rc) {
		pr_err("%s: get params timed out port = %d\n", __func__,
			port_id);
		rc = -EINVAL;
		goto adm_pp_module_list_l;
	} else if (atomic_read(&this_adm.copp.stat
				[port_idx][copp_idx]) > 0) {
		pr_err("%s: DSP returned error[%s]\n",
			__func__, adsp_err_get_err_str(
			atomic_read(&this_adm.copp.stat
			[port_idx][copp_idx])));
		rc = adsp_err_get_lnx_err_code(
				atomic_read(&this_adm.copp.stat
					[port_idx][copp_idx]));
		goto adm_pp_module_list_l;
	}
	if (params_data) {
		idx = ADM_GET_TOPO_MODULE_LIST_LENGTH * copp_idx;
		topo_list = (int *)(adm_module_topo_list + idx);
		if (param_length <= ADM_GET_TOPO_MODULE_LIST_LENGTH &&
		    idx <
		    (MAX_COPPS_PER_PORT * ADM_GET_TOPO_MODULE_LIST_LENGTH))
			memcpy(params_data, topo_list, param_length);
		else
			pr_debug("%s: i/p size:%d > MAX param size:%d\n",
				 __func__, param_length,
				 (int)ADM_GET_TOPO_MODULE_LIST_LENGTH);
		for (i = 1; i <= params_data[0]; i++)
			pr_debug("module = 0x%x\n", params_data[i]);
	}
	rc = 0;
adm_pp_module_list_l:
	kfree(adm_pp_module_list);
	pr_debug("%s : rc = %d ", __func__, rc);
	return rc;
}

static void adm_callback_debug_print(struct apr_client_data *data)
{
	uint32_t *payload;

	payload = data->payload;

	if (data->payload_size >= 8)
		pr_debug("%s: code = 0x%x PL#0[0x%x], PL#1[0x%x], size = %d\n",
			__func__, data->opcode, payload[0], payload[1],
			data->payload_size);
	else if (data->payload_size >= 4)
		pr_debug("%s: code = 0x%x PL#0[0x%x], size = %d\n",
			__func__, data->opcode, payload[0],
			data->payload_size);
	else
		pr_debug("%s: code = 0x%x, size = %d\n",
			__func__, data->opcode, data->payload_size);
}

int adm_set_multi_ch_map(char *channel_map, int path)
{
	int idx;

	if (path == ADM_PATH_PLAYBACK) {
		idx = ADM_MCH_MAP_IDX_PLAYBACK;
	} else if (path == ADM_PATH_LIVE_REC) {
		idx = ADM_MCH_MAP_IDX_REC;
	} else {
		pr_err("%s: invalid attempt to set path %d\n", __func__, path);
		return -EINVAL;
	}

	memcpy(multi_ch_maps[idx].channel_mapping, channel_map,
		PCM_FORMAT_MAX_NUM_CHANNEL);
	multi_ch_maps[idx].set_channel_map = true;

	return 0;
}

int adm_get_multi_ch_map(char *channel_map, int path)
{
	int idx;

	if (path == ADM_PATH_PLAYBACK) {
		idx = ADM_MCH_MAP_IDX_PLAYBACK;
	} else if (path == ADM_PATH_LIVE_REC) {
		idx = ADM_MCH_MAP_IDX_REC;
	} else {
		pr_err("%s: invalid attempt to get path %d\n", __func__, path);
		return -EINVAL;
	}

	if (multi_ch_maps[idx].set_channel_map) {
		memcpy(channel_map, multi_ch_maps[idx].channel_mapping,
			PCM_FORMAT_MAX_NUM_CHANNEL);
	}

	return 0;
}

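/*
 * APR callback for all ADM commands and responses. hdr.token packs
 * (port_idx << 16 | client_id << 8 | copp_idx), which is unpacked here to
 * locate the COPP slot to update and the wait queue to wake.
 */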
static int32_t adm_callback(struct apr_client_data *data, void *priv)
{
	uint32_t *payload;
	int i, j, port_idx, copp_idx, idx, client_id;

	if (data == NULL) {
		pr_err("%s: data parameter is null\n", __func__);
		return -EINVAL;
	}

	payload = data->payload;

	if (data->opcode == RESET_EVENTS) {
		pr_debug("%s: Reset event is received: %d %d apr[%pK]\n",
			 __func__,
			 data->reset_event, data->reset_proc, this_adm.apr);
		if (this_adm.apr) {
			apr_reset(this_adm.apr);
			for (i = 0; i < AFE_MAX_PORTS; i++) {
				for (j = 0; j < MAX_COPPS_PER_PORT; j++) {
					atomic_set(&this_adm.copp.id[i][j],
						   RESET_COPP_ID);
					atomic_set(&this_adm.copp.cnt[i][j], 0);
					atomic_set(
					   &this_adm.copp.topology[i][j], 0);
					atomic_set(&this_adm.copp.mode[i][j],
						   0);
					atomic_set(&this_adm.copp.stat[i][j],
						   0);
					atomic_set(&this_adm.copp.rate[i][j],
						   0);
					atomic_set(
					   &this_adm.copp.channels[i][j], 0);
					atomic_set(
					   &this_adm.copp.bit_width[i][j], 0);
					atomic_set(
					   &this_adm.copp.app_type[i][j], 0);
					atomic_set(
					   &this_adm.copp.acdb_id[i][j], 0);
					this_adm.copp.adm_status[i][j] =
						ADM_STATUS_CALIBRATION_REQUIRED;
				}
			}
			this_adm.apr = NULL;
			cal_utils_clear_cal_block_q6maps(ADM_MAX_CAL_TYPES,
				this_adm.cal_data);
			mutex_lock(&this_adm.cal_data
				[ADM_CUSTOM_TOP_CAL]->lock);
			this_adm.set_custom_topology = 1;
			mutex_unlock(&this_adm.cal_data[
				ADM_CUSTOM_TOP_CAL]->lock);
			rtac_clear_mapping(ADM_RTAC_CAL);
			/*
			 * Free the ION memory and clear the map handles
			 * for Source Tracking
			 */
			if (this_adm.sourceTrackingData.memmap.paddr != 0) {
				msm_audio_ion_free(
					this_adm.sourceTrackingData.ion_client,
					this_adm.sourceTrackingData.ion_handle);
				this_adm.sourceTrackingData.ion_client = NULL;
				this_adm.sourceTrackingData.ion_handle = NULL;
				this_adm.sourceTrackingData.memmap.size = 0;
				this_adm.sourceTrackingData.memmap.kvaddr =
									NULL;
				this_adm.sourceTrackingData.memmap.paddr = 0;
				this_adm.sourceTrackingData.apr_cmd_status = -1;
				atomic_set(&this_adm.mem_map_handles[
					ADM_MEM_MAP_INDEX_SOURCE_TRACKING], 0);
			}
		}
		return 0;
	}

	adm_callback_debug_print(data);
	if (data->payload_size) {
		copp_idx = (data->token) & 0XFF;
		port_idx = ((data->token) >> 16) & 0xFF;
		client_id = ((data->token) >> 8) & 0xFF;
		if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) {
			pr_err("%s: Invalid port idx %d token %d\n",
				__func__, port_idx, data->token);
			return 0;
		}
		if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
			pr_err("%s: Invalid copp idx %d token %d\n",
				__func__, copp_idx, data->token);
			return 0;
		}
		if (client_id < 0 || client_id >= ADM_CLIENT_ID_MAX) {
			pr_err("%s: Invalid client id %d\n", __func__,
				client_id);
			return 0;
		}
		if (data->opcode == APR_BASIC_RSP_RESULT) {
			pr_debug("%s: APR_BASIC_RSP_RESULT id 0x%x\n",
				__func__, payload[0]);
			if (payload[1] != 0) {
				pr_err("%s: cmd = 0x%x returned error = 0x%x\n",
					__func__, payload[0], payload[1]);
			}
			switch (payload[0]) {
			case ADM_CMD_SET_PP_PARAMS_V5:
				pr_debug("%s: ADM_CMD_SET_PP_PARAMS_V5\n",
					__func__);
				if (client_id == ADM_CLIENT_ID_SOURCE_TRACKING)
					this_adm.sourceTrackingData.
						apr_cmd_status = payload[1];
				else if (rtac_make_adm_callback(payload,
						data->payload_size))
					break;
				/*
				 * if soft volume is called and already
				 * interrupted break out of the sequence here
				 */
			case ADM_CMD_DEVICE_OPEN_V5:
			case ADM_CMD_DEVICE_CLOSE_V5:
			case ADM_CMD_DEVICE_OPEN_V6:
				pr_debug("%s: Basic callback received, wake up.\n",
					__func__);
				atomic_set(&this_adm.copp.stat[port_idx]
						[copp_idx], payload[1]);
				wake_up(
				&this_adm.copp.wait[port_idx][copp_idx]);
				break;
			case ADM_CMD_ADD_TOPOLOGIES:
				pr_debug("%s: callback received, ADM_CMD_ADD_TOPOLOGIES.\n",
					__func__);
				atomic_set(&this_adm.adm_stat, payload[1]);
				wake_up(&this_adm.adm_wait);
				break;
			case ADM_CMD_MATRIX_MAP_ROUTINGS_V5:
			case ADM_CMD_STREAM_DEVICE_MAP_ROUTINGS_V5:
				pr_debug("%s: Basic callback received, wake up.\n",
					__func__);
				atomic_set(&this_adm.matrix_map_stat,
					payload[1]);
				wake_up(&this_adm.matrix_map_wait);
				break;
			case ADM_CMD_SHARED_MEM_UNMAP_REGIONS:
				pr_debug("%s: ADM_CMD_SHARED_MEM_UNMAP_REGIONS\n",
					__func__);
				atomic_set(&this_adm.adm_stat, payload[1]);
				wake_up(&this_adm.adm_wait);
				break;
			case ADM_CMD_SHARED_MEM_MAP_REGIONS:
				pr_debug("%s: ADM_CMD_SHARED_MEM_MAP_REGIONS\n",
					__func__);
				/*
				 * Should only come here if there is an APR
				 * error or malformed APR packet. Otherwise
				 * the response will be returned as
				 * ADM_CMDRSP_SHARED_MEM_MAP_REGIONS.
				 */
				if (payload[1] != 0) {
					pr_err("%s: ADM map error, resuming\n",
						__func__);
					atomic_set(&this_adm.adm_stat,
						payload[1]);
					wake_up(&this_adm.adm_wait);
				}
				break;
			case ADM_CMD_GET_PP_PARAMS_V5:
				pr_debug("%s: ADM_CMD_GET_PP_PARAMS_V5\n",
					__func__);
				/*
				 * Should only come here if there is an APR
				 * error or malformed APR packet. Otherwise
				 * the response will be returned as
				 * ADM_CMDRSP_GET_PP_PARAMS_V5.
				 */
				if (client_id ==
					ADM_CLIENT_ID_SOURCE_TRACKING) {
					this_adm.sourceTrackingData.
						apr_cmd_status = payload[1];
					if (payload[1] != 0)
						pr_err("%s: ADM get param error = %d\n",
							__func__, payload[1]);

					atomic_set(&this_adm.copp.stat
						[port_idx][copp_idx],
						payload[1]);
					wake_up(&this_adm.copp.wait
							[port_idx][copp_idx]);
				} else {
					if (payload[1] != 0) {
						pr_err("%s: ADM get param error = %d, resuming\n",
							__func__, payload[1]);

						rtac_make_adm_callback(payload,
							data->payload_size);
					}
				}
				break;
			case ADM_CMD_SET_PSPD_MTMX_STRTR_PARAMS_V5:
				pr_debug("%s: ADM_CMD_SET_PSPD_MTMX_STRTR_PARAMS_V5\n",
					__func__);
				atomic_set(&this_adm.copp.stat[port_idx]
						[copp_idx], payload[1]);
				wake_up(
				&this_adm.copp.wait[port_idx][copp_idx]);
				break;
			case ADM_CMD_GET_PP_TOPO_MODULE_LIST:
				pr_debug("%s:ADM_CMD_GET_PP_TOPO_MODULE_LIST\n",
					__func__);
				if (payload[1] != 0)
					pr_err("%s: ADM get topo list error = %d,\n",
						__func__, payload[1]);
				break;
			default:
				pr_err("%s: Unknown Cmd: 0x%x\n", __func__,
					payload[0]);
				break;
			}
			return 0;
		}

		switch (data->opcode) {
		case ADM_CMDRSP_DEVICE_OPEN_V5:
		case ADM_CMDRSP_DEVICE_OPEN_V6: {
			struct adm_cmd_rsp_device_open_v5 *open =
			(struct adm_cmd_rsp_device_open_v5 *)data->payload;

			if (open->copp_id == INVALID_COPP_ID) {
				pr_err("%s: invalid coppid rxed %d\n",
					__func__, open->copp_id);
				atomic_set(&this_adm.copp.stat[port_idx]
						[copp_idx], ADSP_EBADPARAM);
				wake_up(
				&this_adm.copp.wait[port_idx][copp_idx]);
				break;
			}
			atomic_set(&this_adm.copp.stat
				[port_idx][copp_idx], payload[0]);
			atomic_set(&this_adm.copp.id[port_idx][copp_idx],
				   open->copp_id);
			pr_debug("%s: coppid rxed=%d\n", __func__,
				 open->copp_id);
			wake_up(&this_adm.copp.wait[port_idx][copp_idx]);
		}
		break;
		case ADM_CMDRSP_GET_PP_PARAMS_V5:
			pr_debug("%s: ADM_CMDRSP_GET_PP_PARAMS_V5\n", __func__);
			if (payload[0] != 0)
				pr_err("%s: ADM_CMDRSP_GET_PP_PARAMS_V5 returned error = 0x%x\n",
					__func__, payload[0]);
			if (client_id == ADM_CLIENT_ID_SOURCE_TRACKING)
				this_adm.sourceTrackingData.apr_cmd_status =
								payload[0];
			else if (rtac_make_adm_callback(payload,
					data->payload_size))
				break;

			idx = ADM_GET_PARAMETER_LENGTH * copp_idx;
			if ((payload[0] == 0) && (data->payload_size >
				(4 * sizeof(*payload))) &&
				(data->payload_size - 4 >=
				payload[3]) &&
				(ARRAY_SIZE(adm_get_parameters) >
				idx) &&
				(ARRAY_SIZE(adm_get_parameters)-idx-1 >=
				payload[3])) {
				adm_get_parameters[idx] = payload[3] /
							sizeof(uint32_t);
				/*
				 * payload[3] is param_size which is
				 * expressed in number of bytes
				 */
				pr_debug("%s: GET_PP PARAM:received parameter length: 0x%x\n",
					__func__, adm_get_parameters[idx]);
				/* storing param size then params */
				for (i = 0; i < payload[3] /
						sizeof(uint32_t); i++)
					adm_get_parameters[idx+1+i] =
							payload[4+i];
			} else if (payload[0] == 0) {
				adm_get_parameters[idx] = -1;
				pr_err("%s: Out of band case, setting size to %d\n",
					__func__, adm_get_parameters[idx]);
			} else {
				adm_get_parameters[idx] = -1;
				pr_err("%s: GET_PP_PARAMS failed, setting size to %d\n",
					__func__, adm_get_parameters[idx]);
			}
			atomic_set(&this_adm.copp.stat
				[port_idx][copp_idx], payload[0]);
			wake_up(&this_adm.copp.wait[port_idx][copp_idx]);
			break;
		case ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST:
			pr_debug("%s: ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST\n",
				 __func__);
			if (payload[0] != 0) {
				pr_err("%s: ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST",
					 __func__);
				pr_err(":err = 0x%x\n", payload[0]);
			} else if (payload[1] >
				   ((ADM_GET_TOPO_MODULE_LIST_LENGTH /
				   sizeof(uint32_t)) - 1)) {
				pr_err("%s: ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST",
					 __func__);
				pr_err(":size = %d\n", payload[1]);
			} else {
				idx = ADM_GET_TOPO_MODULE_LIST_LENGTH *
					copp_idx;
				pr_debug("%s:Num modules payload[1] %d\n",
					 __func__, payload[1]);
				adm_module_topo_list[idx] = payload[1];
				for (i = 1; i <= payload[1]; i++) {
					adm_module_topo_list[idx+i] =
						payload[1+i];
					pr_debug("%s:payload[%d] = %x\n",
						 __func__, (i+1), payload[1+i]);
				}
			}
			atomic_set(&this_adm.copp.stat
				[port_idx][copp_idx], payload[0]);
			wake_up(&this_adm.copp.wait[port_idx][copp_idx]);
			break;
		case ADM_CMDRSP_SHARED_MEM_MAP_REGIONS:
			pr_debug("%s: ADM_CMDRSP_SHARED_MEM_MAP_REGIONS\n",
				 __func__);
			atomic_set(&this_adm.mem_map_handles[
				   atomic_read(&this_adm.mem_map_index)],
				   *payload);
			atomic_set(&this_adm.adm_stat, 0);
			wake_up(&this_adm.adm_wait);
			break;
		default:
			pr_err("%s: Unknown cmd:0x%x\n", __func__,
				data->opcode);
			break;
		}
	}
	return 0;
}

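/*
 * Maps the given physical buffers into the ADSP with
 * ADM_CMD_SHARED_MEM_MAP_REGIONS, registering the ADM APR handle first if
 * needed; the resulting memory-map handle is stored by adm_callback().
 */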
1640static int adm_memory_map_regions(phys_addr_t *buf_add, uint32_t mempool_id,
1641 uint32_t *bufsz, uint32_t bufcnt)
1642{
1643 struct avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
1644 struct avs_shared_map_region_payload *mregions = NULL;
1645 void *mmap_region_cmd = NULL;
1646 void *payload = NULL;
1647 int ret = 0;
1648 int i = 0;
1649 int cmd_size = 0;
1650
1651 pr_debug("%s:\n", __func__);
1652 if (this_adm.apr == NULL) {
1653 this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
1654 0xFFFFFFFF, &this_adm);
1655 if (this_adm.apr == NULL) {
1656 pr_err("%s: Unable to register ADM\n", __func__);
1657 ret = -ENODEV;
1658 return ret;
1659 }
1660 rtac_set_adm_handle(this_adm.apr);
1661 }
1662
1663 cmd_size = sizeof(struct avs_cmd_shared_mem_map_regions)
1664 + sizeof(struct avs_shared_map_region_payload)
1665 * bufcnt;
1666
1667 mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
1668 if (!mmap_region_cmd)
1669 return -ENOMEM;
1670
1671 mmap_regions = (struct avs_cmd_shared_mem_map_regions *)mmap_region_cmd;
1672 mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
1673 APR_HDR_LEN(APR_HDR_SIZE),
1674 APR_PKT_VER);
1675 mmap_regions->hdr.pkt_size = cmd_size;
1676 mmap_regions->hdr.src_port = 0;
1677
1678 mmap_regions->hdr.dest_port = 0;
1679 mmap_regions->hdr.token = 0;
1680 mmap_regions->hdr.opcode = ADM_CMD_SHARED_MEM_MAP_REGIONS;
1681 mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL & 0x00ff;
1682 mmap_regions->num_regions = bufcnt & 0x00ff;
1683 mmap_regions->property_flag = 0x00;
1684
1685 pr_debug("%s: map_regions->num_regions = %d\n", __func__,
1686 mmap_regions->num_regions);
1687 payload = ((u8 *) mmap_region_cmd +
1688 sizeof(struct avs_cmd_shared_mem_map_regions));
1689 mregions = (struct avs_shared_map_region_payload *)payload;
1690
1691 for (i = 0; i < bufcnt; i++) {
1692 mregions->shm_addr_lsw = lower_32_bits(buf_add[i]);
1693 mregions->shm_addr_msw =
1694 msm_audio_populate_upper_32_bits(buf_add[i]);
1695 mregions->mem_size_bytes = bufsz[i];
1696 ++mregions;
1697 }
1698
1699 atomic_set(&this_adm.adm_stat, -1);
1700 ret = apr_send_pkt(this_adm.apr, (uint32_t *) mmap_region_cmd);
1701 if (ret < 0) {
1702 pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
1703 mmap_regions->hdr.opcode, ret);
1704 ret = -EINVAL;
1705 goto fail_cmd;
1706 }
1707
1708 ret = wait_event_timeout(this_adm.adm_wait,
1709 atomic_read(&this_adm.adm_stat) >= 0,
1710 5 * HZ);
1711 if (!ret) {
1712 pr_err("%s: timeout. waited for memory_map\n", __func__);
1713 ret = -EINVAL;
1714 goto fail_cmd;
1715 } else if (atomic_read(&this_adm.adm_stat) > 0) {
1716 pr_err("%s: DSP returned error[%s]\n",
1717 __func__, adsp_err_get_err_str(
1718 atomic_read(&this_adm.adm_stat)));
1719 ret = adsp_err_get_lnx_err_code(
1720 atomic_read(&this_adm.adm_stat));
1721 goto fail_cmd;
1722 }
1723fail_cmd:
1724 kfree(mmap_region_cmd);
1725 return ret;
1726}
1727
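/*
 * adm_memory_unmap_regions - unmap the shared memory region selected by
 * this_adm.mem_map_index. Sends ADM_CMD_SHARED_MEM_UNMAP_REGIONS with the
 * stored map handle and waits for the ADSP acknowledgement.
 */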
1728static int adm_memory_unmap_regions(void)
1729{
1730 struct avs_cmd_shared_mem_unmap_regions unmap_regions;
1731 int ret = 0;
1732
1733 pr_debug("%s:\n", __func__);
1734 if (this_adm.apr == NULL) {
1735 pr_err("%s: APR handle NULL\n", __func__);
1736 return -EINVAL;
1737 }
1738
1739 unmap_regions.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
1740 APR_HDR_LEN(APR_HDR_SIZE),
1741 APR_PKT_VER);
1742 unmap_regions.hdr.pkt_size = sizeof(unmap_regions);
1743 unmap_regions.hdr.src_port = 0;
1744 unmap_regions.hdr.dest_port = 0;
1745 unmap_regions.hdr.token = 0;
1746 unmap_regions.hdr.opcode = ADM_CMD_SHARED_MEM_UNMAP_REGIONS;
1747 unmap_regions.mem_map_handle = atomic_read(&this_adm.
1748 mem_map_handles[atomic_read(&this_adm.mem_map_index)]);
1749 atomic_set(&this_adm.adm_stat, -1);
1750 ret = apr_send_pkt(this_adm.apr, (uint32_t *) &unmap_regions);
1751 if (ret < 0) {
1752		pr_err("%s: unmap_regions op[0x%x] rc[%d]\n", __func__,
1753 unmap_regions.hdr.opcode, ret);
1754 ret = -EINVAL;
1755 goto fail_cmd;
1756 }
1757
1758 ret = wait_event_timeout(this_adm.adm_wait,
1759 atomic_read(&this_adm.adm_stat) >= 0,
1760 5 * HZ);
1761 if (!ret) {
1762 pr_err("%s: timeout. waited for memory_unmap\n",
1763 __func__);
1764 ret = -EINVAL;
1765 goto fail_cmd;
1766 } else if (atomic_read(&this_adm.adm_stat) > 0) {
1767 pr_err("%s: DSP returned error[%s]\n",
1768 __func__, adsp_err_get_err_str(
1769 atomic_read(&this_adm.adm_stat)));
1770 ret = adsp_err_get_lnx_err_code(
1771 atomic_read(&this_adm.adm_stat));
1772 goto fail_cmd;
1773 } else {
1774 pr_debug("%s: Unmap handle 0x%x succeeded\n", __func__,
1775 unmap_regions.mem_map_handle);
1776 }
1777fail_cmd:
1778 return ret;
1779}
1780
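/*
 * remap_cal_data - make sure a calibration block is mapped to the ADSP.
 * Maps the block's ION buffer once and caches the returned handle in
 * cal_block->map_data.q6map_handle so subsequent sends can reuse it.
 */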
1781static int remap_cal_data(struct cal_block_data *cal_block, int cal_index)
1782{
1783 int ret = 0;
1784
1785 if (cal_block->map_data.ion_client == NULL) {
1786 pr_err("%s: No ION allocation for cal index %d!\n",
1787 __func__, cal_index);
1788 ret = -EINVAL;
1789 goto done;
1790 }
1791
1792 if ((cal_block->map_data.map_size > 0) &&
1793 (cal_block->map_data.q6map_handle == 0)) {
1794 atomic_set(&this_adm.mem_map_index, cal_index);
1795 ret = adm_memory_map_regions(&cal_block->cal_data.paddr, 0,
1796 (uint32_t *)&cal_block->map_data.map_size, 1);
1797 if (ret < 0) {
1798 pr_err("%s: ADM mmap did not work! size = %zd ret %d\n",
1799 __func__,
1800 cal_block->map_data.map_size, ret);
1801 pr_debug("%s: ADM mmap did not work! addr = 0x%pK, size = %zd ret %d\n",
1802 __func__,
1803 &cal_block->cal_data.paddr,
1804 cal_block->map_data.map_size, ret);
1805 goto done;
1806 }
1807 cal_block->map_data.q6map_handle = atomic_read(&this_adm.
1808 mem_map_handles[cal_index]);
1809 }
1810done:
1811 return ret;
1812}
1813
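/*
 * send_adm_custom_topology - send the custom topology calibration block to
 * the ADSP with ADM_CMD_ADD_TOPOLOGIES. Runs at most once after new data is
 * loaded (guarded by set_custom_topology) and waits for the DSP response.
 */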
1814static void send_adm_custom_topology(void)
1815{
1816 struct cal_block_data *cal_block = NULL;
1817 struct cmd_set_topologies adm_top;
1818 int cal_index = ADM_CUSTOM_TOP_CAL;
1819 int result;
1820
1821 if (this_adm.cal_data[cal_index] == NULL)
1822 goto done;
1823
1824 mutex_lock(&this_adm.cal_data[cal_index]->lock);
1825 if (!this_adm.set_custom_topology)
1826 goto unlock;
1827 this_adm.set_custom_topology = 0;
1828
1829 cal_block = cal_utils_get_only_cal_block(this_adm.cal_data[cal_index]);
1830 if (cal_block == NULL)
1831 goto unlock;
1832
1833 pr_debug("%s: Sending cal_index %d\n", __func__, cal_index);
1834
1835 result = remap_cal_data(cal_block, cal_index);
1836 if (result) {
1837 pr_err("%s: Remap_cal_data failed for cal %d!\n",
1838 __func__, cal_index);
1839 goto unlock;
1840 }
1841 atomic_set(&this_adm.mem_map_index, cal_index);
1842 atomic_set(&this_adm.mem_map_handles[cal_index],
1843 cal_block->map_data.q6map_handle);
1844
1845 if (cal_block->cal_data.size == 0) {
1846 pr_debug("%s: No ADM cal to send\n", __func__);
1847 goto unlock;
1848 }
1849
1850 adm_top.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
1851 APR_HDR_LEN(20), APR_PKT_VER);
1852 adm_top.hdr.pkt_size = sizeof(adm_top);
1853 adm_top.hdr.src_svc = APR_SVC_ADM;
1854 adm_top.hdr.src_domain = APR_DOMAIN_APPS;
1855 adm_top.hdr.src_port = 0;
1856 adm_top.hdr.dest_svc = APR_SVC_ADM;
1857 adm_top.hdr.dest_domain = APR_DOMAIN_ADSP;
1858 adm_top.hdr.dest_port = 0;
1859 adm_top.hdr.token = 0;
1860 adm_top.hdr.opcode = ADM_CMD_ADD_TOPOLOGIES;
1861 adm_top.payload_addr_lsw = lower_32_bits(cal_block->cal_data.paddr);
1862 adm_top.payload_addr_msw = msm_audio_populate_upper_32_bits(
1863 cal_block->cal_data.paddr);
1864 adm_top.mem_map_handle = cal_block->map_data.q6map_handle;
1865 adm_top.payload_size = cal_block->cal_data.size;
1866
1867 atomic_set(&this_adm.adm_stat, -1);
1868 pr_debug("%s: Sending ADM_CMD_ADD_TOPOLOGIES payload = 0x%pK, size = %d\n",
1869 __func__, &cal_block->cal_data.paddr,
1870 adm_top.payload_size);
1871 result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_top);
1872 if (result < 0) {
1873 pr_err("%s: Set topologies failed payload size = %zd result %d\n",
1874 __func__, cal_block->cal_data.size, result);
1875 goto unlock;
1876 }
1877 /* Wait for the callback */
1878 result = wait_event_timeout(this_adm.adm_wait,
1879 atomic_read(&this_adm.adm_stat) >= 0,
1880 msecs_to_jiffies(TIMEOUT_MS));
1881 if (!result) {
1882 pr_err("%s: Set topologies timed out payload size = %zd\n",
1883 __func__, cal_block->cal_data.size);
1884 goto unlock;
1885 } else if (atomic_read(&this_adm.adm_stat) > 0) {
1886 pr_err("%s: DSP returned error[%s]\n",
1887 __func__, adsp_err_get_err_str(
1888 atomic_read(&this_adm.adm_stat)));
1889 result = adsp_err_get_lnx_err_code(
1890 atomic_read(&this_adm.adm_stat));
1891 goto unlock;
1892 }
1893unlock:
1894 mutex_unlock(&this_adm.cal_data[cal_index]->lock);
1895done:
1896 return;
1897}
1898
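/*
 * send_adm_cal_block - send one mapped calibration block to a COPP using an
 * out-of-band ADM_CMD_SET_PP_PARAMS_V5 and wait for the acknowledgement.
 * Sending is skipped for the DS2 topology in legacy performance mode.
 */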
1899static int send_adm_cal_block(int port_id, int copp_idx,
1900 struct cal_block_data *cal_block, int perf_mode,
1901 int app_type, int acdb_id, int sample_rate)
1902{
1903 s32 result = 0;
1904 struct adm_cmd_set_pp_params_v5 adm_params;
1905 int port_idx;
1906
1907	pr_debug("%s: Port id 0x%x sample_rate %d\n", __func__,
1908 port_id, sample_rate);
1909 port_id = afe_convert_virtual_to_portid(port_id);
1910 port_idx = adm_validate_and_get_port_index(port_id);
1911 if (port_idx < 0) {
1912 pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
1913 return -EINVAL;
1914 }
1915 if (!cal_block) {
1916 pr_debug("%s: No ADM cal to send for port_id = 0x%x!\n",
1917 __func__, port_id);
1918 result = -EINVAL;
1919 goto done;
1920 }
1921 if (cal_block->cal_data.size <= 0) {
1922		pr_debug("%s: No ADM cal to send for port_id = 0x%x!\n",
1923 __func__, port_id);
1924 result = -EINVAL;
1925 goto done;
1926 }
1927
1928 if (perf_mode == LEGACY_PCM_MODE &&
1929 ((atomic_read(&this_adm.copp.topology[port_idx][copp_idx])) ==
1930 DS2_ADM_COPP_TOPOLOGY_ID)) {
1931 pr_err("%s: perf_mode %d, topology 0x%x\n", __func__, perf_mode,
1932 atomic_read(
1933 &this_adm.copp.topology[port_idx][copp_idx]));
1934 goto done;
1935 }
1936
1937 adm_params.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
1938 APR_HDR_LEN(20), APR_PKT_VER);
1939 adm_params.hdr.pkt_size = sizeof(adm_params);
1940 adm_params.hdr.src_svc = APR_SVC_ADM;
1941 adm_params.hdr.src_domain = APR_DOMAIN_APPS;
1942 adm_params.hdr.src_port = port_id;
1943 adm_params.hdr.dest_svc = APR_SVC_ADM;
1944 adm_params.hdr.dest_domain = APR_DOMAIN_ADSP;
1945
1946 adm_params.hdr.token = port_idx << 16 | copp_idx;
1947 adm_params.hdr.dest_port =
1948 atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
1949 adm_params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
1950 adm_params.payload_addr_lsw = lower_32_bits(cal_block->cal_data.paddr);
1951 adm_params.payload_addr_msw = msm_audio_populate_upper_32_bits(
1952 cal_block->cal_data.paddr);
1953 adm_params.mem_map_handle = cal_block->map_data.q6map_handle;
1954 adm_params.payload_size = cal_block->cal_data.size;
1955
1956 atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
1957 pr_debug("%s: Sending SET_PARAMS payload = 0x%pK, size = %d\n",
1958 __func__, &cal_block->cal_data.paddr,
1959 adm_params.payload_size);
1960 result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_params);
1961 if (result < 0) {
1962 pr_err("%s: Set params failed port 0x%x result %d\n",
1963 __func__, port_id, result);
1964 pr_debug("%s: Set params failed port = 0x%x payload = 0x%pK result %d\n",
1965 __func__, port_id, &cal_block->cal_data.paddr, result);
1966 result = -EINVAL;
1967 goto done;
1968 }
1969 /* Wait for the callback */
1970 result = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
1971 atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
1972 msecs_to_jiffies(TIMEOUT_MS));
1973 if (!result) {
1974 pr_err("%s: Set params timed out port = 0x%x\n",
1975 __func__, port_id);
1976 pr_debug("%s: Set params timed out port = 0x%x, payload = 0x%pK\n",
1977 __func__, port_id, &cal_block->cal_data.paddr);
1978 result = -EINVAL;
1979 goto done;
1980 } else if (atomic_read(&this_adm.copp.stat
1981 [port_idx][copp_idx]) > 0) {
1982 pr_err("%s: DSP returned error[%s]\n",
1983 __func__, adsp_err_get_err_str(
1984 atomic_read(&this_adm.copp.stat
1985 [port_idx][copp_idx])));
1986 result = adsp_err_get_lnx_err_code(
1987 atomic_read(&this_adm.copp.stat
1988 [port_idx][copp_idx]));
1989 goto done;
1990 }
1991
1992done:
1993 return result;
1994}
1995
1996static struct cal_block_data *adm_find_cal_by_path(int cal_index, int path)
1997{
1998 struct list_head *ptr, *next;
1999 struct cal_block_data *cal_block = NULL;
2000 struct audio_cal_info_audproc *audproc_cal_info = NULL;
2001 struct audio_cal_info_audvol *audvol_cal_info = NULL;
2002
2003 pr_debug("%s:\n", __func__);
2004
2005 list_for_each_safe(ptr, next,
2006 &this_adm.cal_data[cal_index]->cal_blocks) {
2007
2008 cal_block = list_entry(ptr,
2009 struct cal_block_data, list);
2010
2011 if (cal_index == ADM_AUDPROC_CAL) {
2012 audproc_cal_info = cal_block->cal_info;
2013 if ((audproc_cal_info->path == path) &&
2014 (cal_block->cal_data.size > 0))
2015 return cal_block;
2016 } else if (cal_index == ADM_AUDVOL_CAL) {
2017 audvol_cal_info = cal_block->cal_info;
2018 if ((audvol_cal_info->path == path) &&
2019 (cal_block->cal_data.size > 0))
2020 return cal_block;
2021 }
2022 }
2023 pr_debug("%s: Can't find ADM cal for cal_index %d, path %d\n",
2024 __func__, cal_index, path);
2025 return NULL;
2026}
2027
2028static struct cal_block_data *adm_find_cal_by_app_type(int cal_index, int path,
2029 int app_type)
2030{
2031 struct list_head *ptr, *next;
2032 struct cal_block_data *cal_block = NULL;
2033 struct audio_cal_info_audproc *audproc_cal_info = NULL;
2034 struct audio_cal_info_audvol *audvol_cal_info = NULL;
2035
2036 pr_debug("%s\n", __func__);
2037
2038 list_for_each_safe(ptr, next,
2039 &this_adm.cal_data[cal_index]->cal_blocks) {
2040
2041 cal_block = list_entry(ptr,
2042 struct cal_block_data, list);
2043
2044 if (cal_index == ADM_AUDPROC_CAL) {
2045 audproc_cal_info = cal_block->cal_info;
2046 if ((audproc_cal_info->path == path) &&
2047 (audproc_cal_info->app_type == app_type) &&
2048 (cal_block->cal_data.size > 0))
2049 return cal_block;
2050 } else if (cal_index == ADM_AUDVOL_CAL) {
2051 audvol_cal_info = cal_block->cal_info;
2052 if ((audvol_cal_info->path == path) &&
2053 (audvol_cal_info->app_type == app_type) &&
2054 (cal_block->cal_data.size > 0))
2055 return cal_block;
2056 }
2057 }
2058	pr_debug("%s: Can't find ADM cal for cal_index %d, path %d, app %d, defaulting to search by path\n",
2059 __func__, cal_index, path, app_type);
2060 return adm_find_cal_by_path(cal_index, path);
2061}
2062
2063
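/*
 * adm_find_cal - look up the calibration block that best matches the COPP.
 * Tries a full match on path, app type, ACDB id and sample rate first, then
 * falls back to matching by app type and finally by path only.
 */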
2064static struct cal_block_data *adm_find_cal(int cal_index, int path,
2065 int app_type, int acdb_id,
2066 int sample_rate)
2067{
2068 struct list_head *ptr, *next;
2069 struct cal_block_data *cal_block = NULL;
2070 struct audio_cal_info_audproc *audproc_cal_info = NULL;
2071 struct audio_cal_info_audvol *audvol_cal_info = NULL;
2072
2073 pr_debug("%s:\n", __func__);
2074
2075 list_for_each_safe(ptr, next,
2076 &this_adm.cal_data[cal_index]->cal_blocks) {
2077
2078 cal_block = list_entry(ptr,
2079 struct cal_block_data, list);
2080
2081 if (cal_index == ADM_AUDPROC_CAL) {
2082 audproc_cal_info = cal_block->cal_info;
2083 if ((audproc_cal_info->path == path) &&
2084 (audproc_cal_info->app_type == app_type) &&
2085 (audproc_cal_info->acdb_id == acdb_id) &&
2086 (audproc_cal_info->sample_rate == sample_rate) &&
2087 (cal_block->cal_data.size > 0))
2088 return cal_block;
2089 } else if (cal_index == ADM_AUDVOL_CAL) {
2090 audvol_cal_info = cal_block->cal_info;
2091 if ((audvol_cal_info->path == path) &&
2092 (audvol_cal_info->app_type == app_type) &&
2093 (audvol_cal_info->acdb_id == acdb_id) &&
2094 (cal_block->cal_data.size > 0))
2095 return cal_block;
2096 }
2097 }
2098	pr_debug("%s: Can't find ADM cal for cal_index %d, path %d, app %d, acdb_id %d, sample_rate %d, defaulting to search by app type\n",
2099 __func__, cal_index, path, app_type, acdb_id, sample_rate);
2100 return adm_find_cal_by_app_type(cal_index, path, app_type);
2101}
2102
2103static int adm_remap_and_send_cal_block(int cal_index, int port_id,
2104 int copp_idx, struct cal_block_data *cal_block, int perf_mode,
2105 int app_type, int acdb_id, int sample_rate)
2106{
2107 int ret = 0;
2108
2109 pr_debug("%s: Sending cal_index cal %d\n", __func__, cal_index);
2110 ret = remap_cal_data(cal_block, cal_index);
2111 if (ret) {
2112 pr_err("%s: Remap_cal_data failed for cal %d!\n",
2113 __func__, cal_index);
2114 goto done;
2115 }
2116 ret = send_adm_cal_block(port_id, copp_idx, cal_block, perf_mode,
2117 app_type, acdb_id, sample_rate);
2118 if (ret < 0)
2119 pr_debug("%s: No cal sent for cal_index %d, port_id = 0x%x! ret %d sample_rate %d\n",
2120 __func__, cal_index, port_id, ret, sample_rate);
2121done:
2122 return ret;
2123}
2124
2125static void send_adm_cal_type(int cal_index, int path, int port_id,
2126 int copp_idx, int perf_mode, int app_type,
2127 int acdb_id, int sample_rate)
2128{
2129 struct cal_block_data *cal_block = NULL;
2130 int ret;
2131
2132 pr_debug("%s: cal index %d\n", __func__, cal_index);
2133
2134 if (this_adm.cal_data[cal_index] == NULL) {
2135 pr_debug("%s: cal_index %d not allocated!\n",
2136 __func__, cal_index);
2137 goto done;
2138 }
2139
2140 mutex_lock(&this_adm.cal_data[cal_index]->lock);
2141 cal_block = adm_find_cal(cal_index, path, app_type, acdb_id,
2142 sample_rate);
2143 if (cal_block == NULL)
2144 goto unlock;
2145
2146 ret = adm_remap_and_send_cal_block(cal_index, port_id, copp_idx,
2147 cal_block, perf_mode, app_type, acdb_id, sample_rate);
2148unlock:
2149 mutex_unlock(&this_adm.cal_data[cal_index]->lock);
2150done:
2151 return;
2152}
2153
2154static int get_cal_path(int path)
2155{
2156 if (path == 0x1)
2157 return RX_DEVICE;
2158 else
2159 return TX_DEVICE;
2160}
2161
2162static void send_adm_cal(int port_id, int copp_idx, int path, int perf_mode,
2163 int app_type, int acdb_id, int sample_rate)
2164{
2165 pr_debug("%s: port id 0x%x copp_idx %d\n", __func__, port_id, copp_idx);
2166
2167 send_adm_cal_type(ADM_AUDPROC_CAL, path, port_id, copp_idx, perf_mode,
2168 app_type, acdb_id, sample_rate);
2169 send_adm_cal_type(ADM_AUDVOL_CAL, path, port_id, copp_idx, perf_mode,
2170 app_type, acdb_id, sample_rate);
2171}
2172
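/*
 * adm_connect_afe_port - connect an ASM session directly to an AFE port
 * using ADM_CMD_CONNECT_AFE_PORT_V5, waiting for the COPP response and
 * bumping the COPP reference count on success.
 */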
2173int adm_connect_afe_port(int mode, int session_id, int port_id)
2174{
2175 struct adm_cmd_connect_afe_port_v5 cmd;
2176 int ret = 0;
2177 int port_idx, copp_idx = 0;
2178
2179 pr_debug("%s: port_id: 0x%x session id:%d mode:%d\n", __func__,
2180 port_id, session_id, mode);
2181
2182 port_id = afe_convert_virtual_to_portid(port_id);
2183 port_idx = adm_validate_and_get_port_index(port_id);
2184 if (port_idx < 0) {
2185 pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
2186 return -EINVAL;
2187 }
2188
2189 if (this_adm.apr == NULL) {
2190 this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
2191 0xFFFFFFFF, &this_adm);
2192 if (this_adm.apr == NULL) {
2193 pr_err("%s: Unable to register ADM\n", __func__);
2194 ret = -ENODEV;
2195 return ret;
2196 }
2197 rtac_set_adm_handle(this_adm.apr);
2198 }
2199 pr_debug("%s: Port ID 0x%x, index %d\n", __func__, port_id, port_idx);
2200
2201 cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
2202 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
2203 cmd.hdr.pkt_size = sizeof(cmd);
2204 cmd.hdr.src_svc = APR_SVC_ADM;
2205 cmd.hdr.src_domain = APR_DOMAIN_APPS;
2206 cmd.hdr.src_port = port_id;
2207 cmd.hdr.dest_svc = APR_SVC_ADM;
2208 cmd.hdr.dest_domain = APR_DOMAIN_ADSP;
2209 cmd.hdr.dest_port = 0; /* Ignored */
2210 cmd.hdr.token = port_idx << 16 | copp_idx;
2211 cmd.hdr.opcode = ADM_CMD_CONNECT_AFE_PORT_V5;
2212
2213 cmd.mode = mode;
2214 cmd.session_id = session_id;
2215 cmd.afe_port_id = port_id;
2216
2217 atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
2218 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&cmd);
2219 if (ret < 0) {
2220 pr_err("%s: ADM enable for port_id: 0x%x failed ret %d\n",
2221 __func__, port_id, ret);
2222 ret = -EINVAL;
2223 goto fail_cmd;
2224 }
2225 /* Wait for the callback with copp id */
2226 ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
2227 atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
2228 msecs_to_jiffies(TIMEOUT_MS));
2229 if (!ret) {
2230		pr_err("%s: ADM connect timed out for port_id: 0x%x\n",
2231 __func__, port_id);
2232 ret = -EINVAL;
2233 goto fail_cmd;
2234 } else if (atomic_read(&this_adm.copp.stat
2235 [port_idx][copp_idx]) > 0) {
2236 pr_err("%s: DSP returned error[%s]\n",
2237 __func__, adsp_err_get_err_str(
2238 atomic_read(&this_adm.copp.stat
2239 [port_idx][copp_idx])));
2240 ret = adsp_err_get_lnx_err_code(
2241 atomic_read(&this_adm.copp.stat
2242 [port_idx][copp_idx]));
2243 goto fail_cmd;
2244 }
2245 atomic_inc(&this_adm.copp.cnt[port_idx][copp_idx]);
2246 return 0;
2247
2248fail_cmd:
2249
2250 return ret;
2251}
2252
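/*
 * adm_arrange_mch_map - fill the device channel map for an ADM open.
 * For more than two channels a user-supplied map (if set) is used for the
 * playback or record index; otherwise a default standard layout is applied
 * for 1 to 8 channels.
 */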
2253int adm_arrange_mch_map(struct adm_cmd_device_open_v5 *open, int path,
2254 int channel_mode)
2255{
2256 int rc = 0, idx;
2257
2258 memset(open->dev_channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
2259 switch (path) {
2260 case ADM_PATH_PLAYBACK:
2261 idx = ADM_MCH_MAP_IDX_PLAYBACK;
2262 break;
2263 case ADM_PATH_LIVE_REC:
2264 case ADM_PATH_NONLIVE_REC:
2265 idx = ADM_MCH_MAP_IDX_REC;
2266 break;
2267 default:
2268 goto non_mch_path;
2269	}
2270 if ((open->dev_num_channel > 2) && multi_ch_maps[idx].set_channel_map) {
2271 memcpy(open->dev_channel_mapping,
2272 multi_ch_maps[idx].channel_mapping,
2273 PCM_FORMAT_MAX_NUM_CHANNEL);
2274 } else {
2275 if (channel_mode == 1) {
2276 open->dev_channel_mapping[0] = PCM_CHANNEL_FC;
2277 } else if (channel_mode == 2) {
2278 open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
2279 open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
2280 } else if (channel_mode == 3) {
2281 open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
2282 open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
2283 open->dev_channel_mapping[2] = PCM_CHANNEL_FC;
2284 } else if (channel_mode == 4) {
2285 open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
2286 open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
2287 open->dev_channel_mapping[2] = PCM_CHANNEL_LS;
2288 open->dev_channel_mapping[3] = PCM_CHANNEL_RS;
2289 } else if (channel_mode == 5) {
2290 open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
2291 open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
2292 open->dev_channel_mapping[2] = PCM_CHANNEL_FC;
2293 open->dev_channel_mapping[3] = PCM_CHANNEL_LS;
2294 open->dev_channel_mapping[4] = PCM_CHANNEL_RS;
2295 } else if (channel_mode == 6) {
2296 open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
2297 open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
2298 open->dev_channel_mapping[2] = PCM_CHANNEL_LFE;
2299 open->dev_channel_mapping[3] = PCM_CHANNEL_FC;
2300 open->dev_channel_mapping[4] = PCM_CHANNEL_LS;
2301 open->dev_channel_mapping[5] = PCM_CHANNEL_RS;
2302 } else if (channel_mode == 7) {
2303 open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
2304 open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
2305 open->dev_channel_mapping[2] = PCM_CHANNEL_FC;
2306 open->dev_channel_mapping[3] = PCM_CHANNEL_LFE;
2307 open->dev_channel_mapping[4] = PCM_CHANNEL_LB;
2308 open->dev_channel_mapping[5] = PCM_CHANNEL_RB;
2309 open->dev_channel_mapping[6] = PCM_CHANNEL_CS;
2310 } else if (channel_mode == 8) {
2311 open->dev_channel_mapping[0] = PCM_CHANNEL_FL;
2312 open->dev_channel_mapping[1] = PCM_CHANNEL_FR;
2313 open->dev_channel_mapping[2] = PCM_CHANNEL_LFE;
2314 open->dev_channel_mapping[3] = PCM_CHANNEL_FC;
2315 open->dev_channel_mapping[4] = PCM_CHANNEL_LS;
2316 open->dev_channel_mapping[5] = PCM_CHANNEL_RS;
2317 open->dev_channel_mapping[6] = PCM_CHANNEL_LB;
2318 open->dev_channel_mapping[7] = PCM_CHANNEL_RB;
2319 } else {
2320 pr_err("%s: invalid num_chan %d\n", __func__,
2321 channel_mode);
2322 rc = -EINVAL;
2323 goto inval_ch_mod;
2324 }
2325 }
2326
2327non_mch_path:
2328inval_ch_mod:
2329 return rc;
2330}
2331
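/*
 * adm_arrange_mch_ep2_map - fill the second-endpoint channel map of an
 * ADM_CMD_DEVICE_OPEN_V6 with a default layout for 1-6 or 8 channels.
 */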
2332int adm_arrange_mch_ep2_map(struct adm_cmd_device_open_v6 *open_v6,
2333 int channel_mode)
2334{
2335 int rc = 0;
2336
2337 memset(open_v6->dev_channel_mapping_eid2, 0,
2338 PCM_FORMAT_MAX_NUM_CHANNEL);
2339
2340 if (channel_mode == 1) {
2341 open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FC;
2342 } else if (channel_mode == 2) {
2343 open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FL;
2344 open_v6->dev_channel_mapping_eid2[1] = PCM_CHANNEL_FR;
2345 } else if (channel_mode == 3) {
2346 open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FL;
2347 open_v6->dev_channel_mapping_eid2[1] = PCM_CHANNEL_FR;
2348 open_v6->dev_channel_mapping_eid2[2] = PCM_CHANNEL_FC;
2349 } else if (channel_mode == 4) {
2350 open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FL;
2351 open_v6->dev_channel_mapping_eid2[1] = PCM_CHANNEL_FR;
2352 open_v6->dev_channel_mapping_eid2[2] = PCM_CHANNEL_LS;
2353 open_v6->dev_channel_mapping_eid2[3] = PCM_CHANNEL_RS;
2354 } else if (channel_mode == 5) {
2355 open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FL;
2356 open_v6->dev_channel_mapping_eid2[1] = PCM_CHANNEL_FR;
2357 open_v6->dev_channel_mapping_eid2[2] = PCM_CHANNEL_FC;
2358 open_v6->dev_channel_mapping_eid2[3] = PCM_CHANNEL_LS;
2359 open_v6->dev_channel_mapping_eid2[4] = PCM_CHANNEL_RS;
2360 } else if (channel_mode == 6) {
2361 open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FL;
2362 open_v6->dev_channel_mapping_eid2[1] = PCM_CHANNEL_FR;
2363 open_v6->dev_channel_mapping_eid2[2] = PCM_CHANNEL_LFE;
2364 open_v6->dev_channel_mapping_eid2[3] = PCM_CHANNEL_FC;
2365 open_v6->dev_channel_mapping_eid2[4] = PCM_CHANNEL_LS;
2366 open_v6->dev_channel_mapping_eid2[5] = PCM_CHANNEL_RS;
2367 } else if (channel_mode == 8) {
2368 open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FL;
2369 open_v6->dev_channel_mapping_eid2[1] = PCM_CHANNEL_FR;
2370 open_v6->dev_channel_mapping_eid2[2] = PCM_CHANNEL_LFE;
2371 open_v6->dev_channel_mapping_eid2[3] = PCM_CHANNEL_FC;
2372 open_v6->dev_channel_mapping_eid2[4] = PCM_CHANNEL_LS;
2373 open_v6->dev_channel_mapping_eid2[5] = PCM_CHANNEL_RS;
2374 open_v6->dev_channel_mapping_eid2[6] = PCM_CHANNEL_LB;
2375 open_v6->dev_channel_mapping_eid2[7] = PCM_CHANNEL_RB;
2376 } else {
2377 pr_err("%s: invalid num_chan %d\n", __func__,
2378 channel_mode);
2379 rc = -EINVAL;
2380 }
2381
2382 return rc;
2383}
2384
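/*
 * adm_open - open (or reuse) a COPP on the given port. Selects session
 * flags, topology and rate based on perf_mode, sends ADM_CMD_DEVICE_OPEN_V5
 * (or V6 when a second endpoint carries EC reference channels), waits for
 * the COPP id from the DSP and returns the copp index on success.
 */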
2385int adm_open(int port_id, int path, int rate, int channel_mode, int topology,
2386 int perf_mode, uint16_t bit_width, int app_type, int acdb_id)
2387{
2388 struct adm_cmd_device_open_v5 open;
2389 struct adm_cmd_device_open_v6 open_v6;
2390 int ret = 0;
2391 int port_idx, copp_idx, flags;
2392 int tmp_port = q6audio_get_port_id(port_id);
2393
2394 pr_debug("%s:port %#x path:%d rate:%d mode:%d perf_mode:%d,topo_id %d\n",
2395 __func__, port_id, path, rate, channel_mode, perf_mode,
2396 topology);
2397
2398 port_id = q6audio_convert_virtual_to_portid(port_id);
2399 port_idx = adm_validate_and_get_port_index(port_id);
2400 if (port_idx < 0) {
2401 pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
2402 return -EINVAL;
2403 }
2404
2405 if (this_adm.apr == NULL) {
2406 this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
2407 0xFFFFFFFF, &this_adm);
2408 if (this_adm.apr == NULL) {
2409 pr_err("%s: Unable to register ADM\n", __func__);
2410 return -ENODEV;
2411 }
2412 rtac_set_adm_handle(this_adm.apr);
2413 }
2414
2415 if (perf_mode == ULL_POST_PROCESSING_PCM_MODE) {
2416 flags = ADM_ULL_POST_PROCESSING_DEVICE_SESSION;
2417 if ((topology == DOLBY_ADM_COPP_TOPOLOGY_ID) ||
2418 (topology == DS2_ADM_COPP_TOPOLOGY_ID) ||
2419 (topology == SRS_TRUMEDIA_TOPOLOGY_ID))
2420 topology = DEFAULT_COPP_TOPOLOGY;
2421 } else if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE) {
2422 flags = ADM_ULTRA_LOW_LATENCY_DEVICE_SESSION;
2423 topology = NULL_COPP_TOPOLOGY;
2424 rate = ULL_SUPPORTED_SAMPLE_RATE;
2425 bit_width = ULL_SUPPORTED_BITS_PER_SAMPLE;
2426 } else if (perf_mode == LOW_LATENCY_PCM_MODE) {
2427 flags = ADM_LOW_LATENCY_DEVICE_SESSION;
2428 if ((topology == DOLBY_ADM_COPP_TOPOLOGY_ID) ||
2429 (topology == DS2_ADM_COPP_TOPOLOGY_ID) ||
2430 (topology == SRS_TRUMEDIA_TOPOLOGY_ID))
2431 topology = DEFAULT_COPP_TOPOLOGY;
2432 } else {
2433 if ((path == ADM_PATH_COMPRESSED_RX) ||
2434 (path == ADM_PATH_COMPRESSED_TX))
2435 flags = 0;
2436 else
2437 flags = ADM_LEGACY_DEVICE_SESSION;
2438 }
2439
2440 if ((topology == VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
2441 (topology == VPM_TX_DM_FLUENCE_COPP_TOPOLOGY) ||
2442 (topology == VPM_TX_DM_RFECNS_COPP_TOPOLOGY))
2443 rate = 16000;
2444
2445 copp_idx = adm_get_idx_if_copp_exists(port_idx, topology, perf_mode,
2446 rate, bit_width, app_type);
2447 if (copp_idx < 0) {
2448 copp_idx = adm_get_next_available_copp(port_idx);
2449 if (copp_idx >= MAX_COPPS_PER_PORT) {
2450 pr_err("%s: exceeded copp id %d\n",
2451 __func__, copp_idx);
2452 return -EINVAL;
2453 }
2454 atomic_set(&this_adm.copp.cnt[port_idx][copp_idx], 0);
2455 atomic_set(&this_adm.copp.topology[port_idx][copp_idx],
2456 topology);
2457 atomic_set(&this_adm.copp.mode[port_idx][copp_idx],
2458 perf_mode);
2459 atomic_set(&this_adm.copp.rate[port_idx][copp_idx],
2460 rate);
2461 atomic_set(&this_adm.copp.channels[port_idx][copp_idx],
2462 channel_mode);
2463 atomic_set(&this_adm.copp.bit_width[port_idx][copp_idx],
2464 bit_width);
2465 atomic_set(&this_adm.copp.app_type[port_idx][copp_idx],
2466 app_type);
2467 atomic_set(&this_adm.copp.acdb_id[port_idx][copp_idx],
2468 acdb_id);
2469 set_bit(ADM_STATUS_CALIBRATION_REQUIRED,
2470 (void *)&this_adm.copp.adm_status[port_idx][copp_idx]);
2471 if ((path != ADM_PATH_COMPRESSED_RX) &&
2472 (path != ADM_PATH_COMPRESSED_TX))
2473 send_adm_custom_topology();
2474 }
2475
2476 if (this_adm.copp.adm_delay[port_idx][copp_idx] &&
2477 perf_mode == LEGACY_PCM_MODE) {
2478 atomic_set(&this_adm.copp.adm_delay_stat[port_idx][copp_idx],
2479 1);
2480 this_adm.copp.adm_delay[port_idx][copp_idx] = 0;
2481 wake_up(&this_adm.copp.adm_delay_wait[port_idx][copp_idx]);
2482 }
2483
2484	/* Create a COPP only if one is not already active on this port */
2485 if (atomic_read(&this_adm.copp.cnt[port_idx][copp_idx]) == 0) {
2486 pr_debug("%s: open ADM: port_idx: %d, copp_idx: %d\n", __func__,
2487 port_idx, copp_idx);
2488 if ((topology == SRS_TRUMEDIA_TOPOLOGY_ID) &&
2489 perf_mode == LEGACY_PCM_MODE) {
2490 int res;
2491
2492 atomic_set(&this_adm.mem_map_index, ADM_SRS_TRUMEDIA);
2493 msm_dts_srs_tm_ion_memmap(&this_adm.outband_memmap);
2494 res = adm_memory_map_regions(&this_adm.outband_memmap.paddr, 0,
2495 (uint32_t *)&this_adm.outband_memmap.size, 1);
2496 if (res < 0) {
2497 pr_err("%s: SRS adm_memory_map_regions failed ! addr = 0x%pK, size = %d\n",
2498 __func__, (void *)this_adm.outband_memmap.paddr,
2499 (uint32_t)this_adm.outband_memmap.size);
2500 }
2501 }
2502 open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
2503 APR_HDR_LEN(APR_HDR_SIZE),
2504 APR_PKT_VER);
2505 open.hdr.pkt_size = sizeof(open);
2506 open.hdr.src_svc = APR_SVC_ADM;
2507 open.hdr.src_domain = APR_DOMAIN_APPS;
2508 open.hdr.src_port = tmp_port;
2509 open.hdr.dest_svc = APR_SVC_ADM;
2510 open.hdr.dest_domain = APR_DOMAIN_ADSP;
2511 open.hdr.dest_port = tmp_port;
2512 open.hdr.token = port_idx << 16 | copp_idx;
2513 open.hdr.opcode = ADM_CMD_DEVICE_OPEN_V5;
2514 open.flags = flags;
2515 open.mode_of_operation = path;
2516 open.endpoint_id_1 = tmp_port;
2517 open.endpoint_id_2 = 0xFFFF;
2518
2519 if (this_adm.ec_ref_rx && (path != 1)) {
2520 open.endpoint_id_2 = this_adm.ec_ref_rx;
2521 this_adm.ec_ref_rx = -1;
2522 }
2523
2524 open.topology_id = topology;
2525
2526 open.dev_num_channel = channel_mode & 0x00FF;
2527 open.bit_width = bit_width;
2528 WARN_ON((perf_mode == ULTRA_LOW_LATENCY_PCM_MODE) &&
2529 (rate != ULL_SUPPORTED_SAMPLE_RATE));
2530 open.sample_rate = rate;
2531
2532 ret = adm_arrange_mch_map(&open, path, channel_mode);
2533
2534 if (ret)
2535 return ret;
2536
2537 pr_debug("%s: port_id=0x%x rate=%d topology_id=0x%X\n",
2538 __func__, open.endpoint_id_1, open.sample_rate,
2539 open.topology_id);
2540
2541 atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
2542
2543 if ((this_adm.num_ec_ref_rx_chans != 0) && (path != 1) &&
2544 (open.endpoint_id_2 != 0xFFFF)) {
2545 memset(&open_v6, 0,
2546 sizeof(struct adm_cmd_device_open_v6));
2547 memcpy(&open_v6, &open,
2548 sizeof(struct adm_cmd_device_open_v5));
2549 open_v6.hdr.opcode = ADM_CMD_DEVICE_OPEN_V6;
2550 open_v6.hdr.pkt_size = sizeof(open_v6);
2551 open_v6.dev_num_channel_eid2 =
2552 this_adm.num_ec_ref_rx_chans;
2553 this_adm.num_ec_ref_rx_chans = 0;
2554
2555 if (this_adm.ec_ref_rx_bit_width != 0) {
2556 open_v6.bit_width_eid2 =
2557 this_adm.ec_ref_rx_bit_width;
2558 this_adm.ec_ref_rx_bit_width = 0;
2559 } else {
2560 open_v6.bit_width_eid2 = bit_width;
2561 }
2562
2563 if (this_adm.ec_ref_rx_sampling_rate != 0) {
2564 open_v6.sample_rate_eid2 =
2565 this_adm.ec_ref_rx_sampling_rate;
2566 this_adm.ec_ref_rx_sampling_rate = 0;
2567 } else {
2568 open_v6.sample_rate_eid2 = rate;
2569 }
2570
2571 pr_debug("%s: eid2_channels=%d eid2_bit_width=%d eid2_rate=%d\n",
2572 __func__, open_v6.dev_num_channel_eid2,
2573 open_v6.bit_width_eid2,
2574 open_v6.sample_rate_eid2);
2575
2576 ret = adm_arrange_mch_ep2_map(&open_v6,
2577 open_v6.dev_num_channel_eid2);
2578
2579 if (ret)
2580 return ret;
2581
2582 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open_v6);
2583 } else {
2584 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
2585 }
2586 if (ret < 0) {
2587 pr_err("%s: port_id: 0x%x for[0x%x] failed %d\n",
2588 __func__, tmp_port, port_id, ret);
2589 return -EINVAL;
2590 }
2591 /* Wait for the callback with copp id */
2592 ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
2593 atomic_read(&this_adm.copp.stat
2594 [port_idx][copp_idx]) >= 0,
2595 msecs_to_jiffies(TIMEOUT_MS));
2596 if (!ret) {
2597			pr_err("%s: ADM open timed out for port_id: 0x%x for [0x%x]\n",
2598 __func__, tmp_port, port_id);
2599 return -EINVAL;
2600 } else if (atomic_read(&this_adm.copp.stat
2601 [port_idx][copp_idx]) > 0) {
2602 pr_err("%s: DSP returned error[%s]\n",
2603 __func__, adsp_err_get_err_str(
2604 atomic_read(&this_adm.copp.stat
2605 [port_idx][copp_idx])));
2606 return adsp_err_get_lnx_err_code(
2607 atomic_read(&this_adm.copp.stat
2608 [port_idx][copp_idx]));
2609 }
2610 }
2611 atomic_inc(&this_adm.copp.cnt[port_idx][copp_idx]);
2612 return copp_idx;
2613}
2614
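/*
 * adm_copp_mfc_cfg - configure the Media Format Converter output format on
 * a COPP via an in-band ADM_CMD_SET_PP_PARAMS_V5, using the COPP's channel
 * count and bit width with the requested destination sample rate.
 */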
2615void adm_copp_mfc_cfg(int port_id, int copp_idx, int dst_sample_rate)
2616{
2617 struct audproc_mfc_output_media_fmt mfc_cfg;
2618 struct adm_cmd_device_open_v5 open;
2619 int port_idx;
2620 int sz = 0;
2621 int rc = 0;
2622 int i = 0;
2623
2624 port_id = q6audio_convert_virtual_to_portid(port_id);
2625 port_idx = adm_validate_and_get_port_index(port_id);
2626
2627 if (port_idx < 0) {
2628 pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
2629 goto fail_cmd;
2630 }
2631
2632 if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
2633 pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
2634 goto fail_cmd;
2635 }
2636
2637 sz = sizeof(struct audproc_mfc_output_media_fmt);
2638
2639 mfc_cfg.params.hdr.hdr_field =
2640 APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
2641 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
2642 mfc_cfg.params.hdr.pkt_size = sz;
2643 mfc_cfg.params.hdr.src_svc = APR_SVC_ADM;
2644 mfc_cfg.params.hdr.src_domain = APR_DOMAIN_APPS;
2645 mfc_cfg.params.hdr.src_port = port_id;
2646 mfc_cfg.params.hdr.dest_svc = APR_SVC_ADM;
2647 mfc_cfg.params.hdr.dest_domain = APR_DOMAIN_ADSP;
2648 mfc_cfg.params.hdr.dest_port =
2649 atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
2650 mfc_cfg.params.hdr.token = port_idx << 16 | copp_idx;
2651 mfc_cfg.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
2652 mfc_cfg.params.payload_addr_lsw = 0;
2653 mfc_cfg.params.payload_addr_msw = 0;
2654 mfc_cfg.params.mem_map_handle = 0;
2655 mfc_cfg.params.payload_size = sizeof(mfc_cfg) -
2656 sizeof(mfc_cfg.params);
2657 mfc_cfg.data.module_id = AUDPROC_MODULE_ID_MFC;
2658 mfc_cfg.data.param_id =
2659 AUDPROC_PARAM_ID_MFC_OUTPUT_MEDIA_FORMAT;
2660 mfc_cfg.data.param_size = mfc_cfg.params.payload_size -
2661 sizeof(mfc_cfg.data);
2662 mfc_cfg.data.reserved = 0;
2663 mfc_cfg.sampling_rate = dst_sample_rate;
2664 mfc_cfg.bits_per_sample =
2665 atomic_read(&this_adm.copp.bit_width[port_idx][copp_idx]);
2666 open.dev_num_channel = mfc_cfg.num_channels =
2667 atomic_read(&this_adm.copp.channels[port_idx][copp_idx]);
2668
2669 rc = adm_arrange_mch_map(&open, ADM_PATH_PLAYBACK,
2670 mfc_cfg.num_channels);
2671 if (rc < 0) {
2672		pr_err("%s: unable to get channel map\n", __func__);
2673 goto fail_cmd;
2674 }
2675
2676 for (i = 0; i < mfc_cfg.num_channels; i++)
2677 mfc_cfg.channel_type[i] =
2678 (uint16_t) open.dev_channel_mapping[i];
2679
2680 atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
2681
2682 pr_debug("%s: mfc config: port_idx %d copp_idx %d copp SR %d copp BW %d copp chan %d o/p SR %d\n",
2683 __func__, port_idx, copp_idx,
2684 atomic_read(&this_adm.copp.rate[port_idx][copp_idx]),
2685 mfc_cfg.bits_per_sample, mfc_cfg.num_channels,
2686 mfc_cfg.sampling_rate);
2687
2688 rc = apr_send_pkt(this_adm.apr, (uint32_t *)&mfc_cfg);
2689
2690 if (rc < 0) {
2691		pr_err("%s: mfc_cfg send failed for port_id [0x%x], rc %d\n",
2692 __func__, port_id, rc);
2693 goto fail_cmd;
2694 }
2695 /* Wait for the callback with copp id */
2696 rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
2697 atomic_read(&this_adm.copp.stat
2698 [port_idx][copp_idx]) >= 0,
2699 msecs_to_jiffies(TIMEOUT_MS));
2700 if (!rc) {
2701		pr_err("%s: mfc_cfg Set params timed out for port_id [0x%x]\n",
2702 __func__, port_id);
2703 goto fail_cmd;
2704 } else if (atomic_read(&this_adm.copp.stat
2705 [port_idx][copp_idx]) > 0) {
2706 pr_err("%s: DSP returned error[%s]\n",
2707 __func__, adsp_err_get_err_str(
2708 atomic_read(&this_adm.copp.stat
2709 [port_idx][copp_idx])));
2710 goto fail_cmd;
2711 }
2712 rc = 0;
2713fail_cmd:
2714 return;
2715}
2716
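/*
 * route_set_opcode_matrix_id - pick the routing opcode and matrix id for
 * the given path (and passthrough mode for listen capture).
 */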
2717static void route_set_opcode_matrix_id(
2718 struct adm_cmd_matrix_map_routings_v5 **route_addr,
2719 int path, uint32_t passthr_mode)
2720{
2721 struct adm_cmd_matrix_map_routings_v5 *route = *route_addr;
2722
2723 switch (path) {
2724 case ADM_PATH_PLAYBACK:
2725 route->hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS_V5;
2726 route->matrix_id = ADM_MATRIX_ID_AUDIO_RX;
2727 break;
2728 case ADM_PATH_LIVE_REC:
2729 if (passthr_mode == LISTEN) {
2730 route->hdr.opcode =
2731 ADM_CMD_STREAM_DEVICE_MAP_ROUTINGS_V5;
2732 route->matrix_id = ADM_MATRIX_ID_LISTEN_TX;
2733 break;
2734 }
2735 /* fall through to set matrix id for non-listen case */
2736 case ADM_PATH_NONLIVE_REC:
2737 route->hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS_V5;
2738 route->matrix_id = ADM_MATRIX_ID_AUDIO_TX;
2739 break;
2740 case ADM_PATH_COMPRESSED_RX:
2741 route->hdr.opcode = ADM_CMD_STREAM_DEVICE_MAP_ROUTINGS_V5;
2742 route->matrix_id = ADM_MATRIX_ID_COMPRESSED_AUDIO_RX;
2743 break;
2744 case ADM_PATH_COMPRESSED_TX:
2745 route->hdr.opcode = ADM_CMD_STREAM_DEVICE_MAP_ROUTINGS_V5;
2746 route->matrix_id = ADM_MATRIX_ID_COMPRESSED_AUDIO_TX;
2747 break;
2748 default:
2749 pr_err("%s: Wrong path set[%d]\n", __func__, path);
2750 break;
2751 }
2752 pr_debug("%s: opcode 0x%x, matrix id %d\n",
2753 __func__, route->hdr.opcode, route->matrix_id);
2754}
2755
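/*
 * adm_matrix_map - map an ASM session to its COPPs with the matrix/stream
 * routing command, then, for non-ULL and non-compressed paths, register the
 * COPPs with RTAC and send ADM calibration once per COPP.
 */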
2756int adm_matrix_map(int path, struct route_payload payload_map, int perf_mode,
2757 uint32_t passthr_mode)
2758{
2759 struct adm_cmd_matrix_map_routings_v5 *route;
2760 struct adm_session_map_node_v5 *node;
2761 uint16_t *copps_list;
2762 int cmd_size = 0;
2763 int ret = 0, i = 0;
2764 void *payload = NULL;
2765 void *matrix_map = NULL;
2766 int port_idx, copp_idx;
2767
2768 /* Assumes port_ids have already been validated during adm_open */
2769 cmd_size = (sizeof(struct adm_cmd_matrix_map_routings_v5) +
2770 sizeof(struct adm_session_map_node_v5) +
2771 (sizeof(uint32_t) * payload_map.num_copps));
2772 matrix_map = kzalloc(cmd_size, GFP_KERNEL);
2773 if (matrix_map == NULL) {
2774 pr_err("%s: Mem alloc failed\n", __func__);
2775		ret = -ENOMEM;
2776 return ret;
2777 }
2778 route = (struct adm_cmd_matrix_map_routings_v5 *)matrix_map;
2779
2780 route->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
2781 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
2782 route->hdr.pkt_size = cmd_size;
2783 route->hdr.src_svc = 0;
2784 route->hdr.src_domain = APR_DOMAIN_APPS;
2785	route->hdr.src_port = 0; /* Ignored */
2786	route->hdr.dest_svc = APR_SVC_ADM;
2787	route->hdr.dest_domain = APR_DOMAIN_ADSP;
2788	route->hdr.dest_port = 0; /* Ignored */
2789 route->hdr.token = 0;
2790 route->num_sessions = 1;
2791 route_set_opcode_matrix_id(&route, path, passthr_mode);
2792
2793 payload = ((u8 *)matrix_map +
2794 sizeof(struct adm_cmd_matrix_map_routings_v5));
2795 node = (struct adm_session_map_node_v5 *)payload;
2796
2797 node->session_id = payload_map.session_id;
2798 node->num_copps = payload_map.num_copps;
2799 payload = (u8 *)node + sizeof(struct adm_session_map_node_v5);
2800 copps_list = (uint16_t *)payload;
2801 for (i = 0; i < payload_map.num_copps; i++) {
2802 port_idx =
2803 adm_validate_and_get_port_index(payload_map.port_id[i]);
2804 if (port_idx < 0) {
2805 pr_err("%s: Invalid port_id 0x%x\n", __func__,
2806 payload_map.port_id[i]);
2807 ret = -EINVAL;
2808 goto fail_cmd;
2809 }
2810 copp_idx = payload_map.copp_idx[i];
2811 copps_list[i] = atomic_read(&this_adm.copp.id[port_idx]
2812 [copp_idx]);
2813 }
2814 atomic_set(&this_adm.matrix_map_stat, -1);
2815
2816 ret = apr_send_pkt(this_adm.apr, (uint32_t *)matrix_map);
2817 if (ret < 0) {
2818		pr_err("%s: routing for stream %d failed ret %d\n",
2819 __func__, payload_map.session_id, ret);
2820 ret = -EINVAL;
2821 goto fail_cmd;
2822 }
2823 ret = wait_event_timeout(this_adm.matrix_map_wait,
2824 atomic_read(&this_adm.matrix_map_stat) >= 0,
2825 msecs_to_jiffies(TIMEOUT_MS));
2826 if (!ret) {
2827		pr_err("%s: routing for stream %d timed out\n", __func__,
2828 payload_map.session_id);
2829 ret = -EINVAL;
2830 goto fail_cmd;
2831 } else if (atomic_read(&this_adm.matrix_map_stat) > 0) {
2832 pr_err("%s: DSP returned error[%s]\n", __func__,
2833 adsp_err_get_err_str(atomic_read(
2834 &this_adm.matrix_map_stat)));
2835 ret = adsp_err_get_lnx_err_code(
2836 atomic_read(&this_adm.matrix_map_stat));
2837 goto fail_cmd;
2838 }
2839
2840 if ((perf_mode != ULTRA_LOW_LATENCY_PCM_MODE) &&
2841 (path != ADM_PATH_COMPRESSED_RX)) {
2842 for (i = 0; i < payload_map.num_copps; i++) {
2843 port_idx = afe_get_port_index(payload_map.port_id[i]);
2844 copp_idx = payload_map.copp_idx[i];
2845 if (port_idx < 0 || copp_idx < 0 ||
2846 (copp_idx > MAX_COPPS_PER_PORT - 1)) {
2847 pr_err("%s: Invalid idx port_idx %d copp_idx %d\n",
2848 __func__, port_idx, copp_idx);
2849 continue;
2850 }
2851 rtac_add_adm_device(payload_map.port_id[i],
2852 atomic_read(&this_adm.copp.id
2853 [port_idx][copp_idx]),
2854 get_cal_path(path),
2855 payload_map.session_id,
2856 payload_map.app_type[i],
2857 payload_map.acdb_dev_id[i]);
2858
2859 if (!test_bit(ADM_STATUS_CALIBRATION_REQUIRED,
2860 (void *)&this_adm.copp.adm_status[port_idx]
2861 [copp_idx])) {
2862				pr_debug("%s: adm copp[0x%x][%d] cal already sent\n",
2863 __func__, port_idx, copp_idx);
2864 continue;
2865 }
2866 send_adm_cal(payload_map.port_id[i], copp_idx,
2867 get_cal_path(path), perf_mode,
2868 payload_map.app_type[i],
2869 payload_map.acdb_dev_id[i],
2870 payload_map.sample_rate[i]);
2871 /* ADM COPP calibration is already sent */
2872 clear_bit(ADM_STATUS_CALIBRATION_REQUIRED,
2873 (void *)&this_adm.copp.
2874 adm_status[port_idx][copp_idx]);
2875 pr_debug("%s: copp_id: %d\n", __func__,
2876 atomic_read(&this_adm.copp.id[port_idx]
2877 [copp_idx]));
2878 }
2879 }
2880
2881fail_cmd:
2882 kfree(matrix_map);
2883 return ret;
2884}
2885
2886void adm_ec_ref_rx_id(int port_id)
2887{
2888 this_adm.ec_ref_rx = port_id;
2889 pr_debug("%s: ec_ref_rx:%d\n", __func__, this_adm.ec_ref_rx);
2890}
2891
2892void adm_num_ec_ref_rx_chans(int num_chans)
2893{
2894 this_adm.num_ec_ref_rx_chans = num_chans;
2895 pr_debug("%s: num_ec_ref_rx_chans:%d\n",
2896 __func__, this_adm.num_ec_ref_rx_chans);
2897}
2898
2899void adm_ec_ref_rx_bit_width(int bit_width)
2900{
2901 this_adm.ec_ref_rx_bit_width = bit_width;
2902 pr_debug("%s: ec_ref_rx_bit_width:%d\n",
2903 __func__, this_adm.ec_ref_rx_bit_width);
2904}
2905
2906void adm_ec_ref_rx_sampling_rate(int sampling_rate)
2907{
2908 this_adm.ec_ref_rx_sampling_rate = sampling_rate;
2909 pr_debug("%s: ec_ref_rx_sampling_rate:%d\n",
2910 __func__, this_adm.ec_ref_rx_sampling_rate);
2911}
2912
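/*
 * adm_close - drop a reference on a COPP. When the count reaches zero the
 * SRS and source-tracking memory maps are released if present, the COPP
 * state is reset and ADM_CMD_DEVICE_CLOSE_V5 is sent to the DSP.
 */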
2913int adm_close(int port_id, int perf_mode, int copp_idx)
2914{
2915 struct apr_hdr close;
2916
2917 int ret = 0, port_idx;
2918 int copp_id = RESET_COPP_ID;
2919
2920 pr_debug("%s: port_id=0x%x perf_mode: %d copp_idx: %d\n", __func__,
2921 port_id, perf_mode, copp_idx);
2922
2923 port_id = q6audio_convert_virtual_to_portid(port_id);
2924 port_idx = adm_validate_and_get_port_index(port_id);
2925 if (port_idx < 0) {
2926 pr_err("%s: Invalid port_id 0x%x\n",
2927 __func__, port_id);
2928 return -EINVAL;
2929 }
2930
2931 if ((copp_idx < 0) || (copp_idx >= MAX_COPPS_PER_PORT)) {
2932 pr_err("%s: Invalid copp idx: %d\n", __func__, copp_idx);
2933 return -EINVAL;
2934 }
2935
2936 if (this_adm.copp.adm_delay[port_idx][copp_idx] && perf_mode
2937 == LEGACY_PCM_MODE) {
2938 atomic_set(&this_adm.copp.adm_delay_stat[port_idx][copp_idx],
2939 1);
2940 this_adm.copp.adm_delay[port_idx][copp_idx] = 0;
2941 wake_up(&this_adm.copp.adm_delay_wait[port_idx][copp_idx]);
2942 }
2943
2944 atomic_dec(&this_adm.copp.cnt[port_idx][copp_idx]);
2945 if (!(atomic_read(&this_adm.copp.cnt[port_idx][copp_idx]))) {
2946 copp_id = adm_get_copp_id(port_idx, copp_idx);
2947 pr_debug("%s: Closing ADM port_idx:%d copp_idx:%d copp_id:0x%x\n",
2948 __func__, port_idx, copp_idx, copp_id);
2949 if ((!perf_mode) && (this_adm.outband_memmap.paddr != 0) &&
2950 (atomic_read(&this_adm.copp.topology[port_idx][copp_idx]) ==
2951 SRS_TRUMEDIA_TOPOLOGY_ID)) {
2952 atomic_set(&this_adm.mem_map_index,
2953 ADM_SRS_TRUMEDIA);
2954 ret = adm_memory_unmap_regions();
2955 if (ret < 0) {
2956				pr_err("%s: adm mem unmap err %d",
2957 __func__, ret);
2958 } else {
2959 atomic_set(&this_adm.mem_map_handles
2960 [ADM_SRS_TRUMEDIA], 0);
2961 }
2962 }
2963
2964
2965 if ((afe_get_port_type(port_id) == MSM_AFE_PORT_TYPE_TX) &&
2966 this_adm.sourceTrackingData.memmap.paddr) {
2967 atomic_set(&this_adm.mem_map_index,
2968 ADM_MEM_MAP_INDEX_SOURCE_TRACKING);
2969 ret = adm_memory_unmap_regions();
2970 if (ret < 0) {
2971				pr_err("%s: adm mem unmap err %d",
2972 __func__, ret);
2973 }
2974 msm_audio_ion_free(
2975 this_adm.sourceTrackingData.ion_client,
2976 this_adm.sourceTrackingData.ion_handle);
2977 this_adm.sourceTrackingData.ion_client = NULL;
2978 this_adm.sourceTrackingData.ion_handle = NULL;
2979 this_adm.sourceTrackingData.memmap.size = 0;
2980 this_adm.sourceTrackingData.memmap.kvaddr = NULL;
2981 this_adm.sourceTrackingData.memmap.paddr = 0;
2982 this_adm.sourceTrackingData.apr_cmd_status = -1;
2983 atomic_set(&this_adm.mem_map_handles[
2984 ADM_MEM_MAP_INDEX_SOURCE_TRACKING], 0);
2985 }
2986
2987 close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
2988 APR_HDR_LEN(APR_HDR_SIZE),
2989 APR_PKT_VER);
2990 close.pkt_size = sizeof(close);
2991 close.src_svc = APR_SVC_ADM;
2992 close.src_domain = APR_DOMAIN_APPS;
2993 close.src_port = port_id;
2994 close.dest_svc = APR_SVC_ADM;
2995 close.dest_domain = APR_DOMAIN_ADSP;
2996 close.dest_port = copp_id;
2997 close.token = port_idx << 16 | copp_idx;
2998 close.opcode = ADM_CMD_DEVICE_CLOSE_V5;
2999
3000 atomic_set(&this_adm.copp.id[port_idx][copp_idx],
3001 RESET_COPP_ID);
3002 atomic_set(&this_adm.copp.cnt[port_idx][copp_idx], 0);
3003 atomic_set(&this_adm.copp.topology[port_idx][copp_idx], 0);
3004 atomic_set(&this_adm.copp.mode[port_idx][copp_idx], 0);
3005 atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
3006 atomic_set(&this_adm.copp.rate[port_idx][copp_idx], 0);
3007 atomic_set(&this_adm.copp.channels[port_idx][copp_idx], 0);
3008 atomic_set(&this_adm.copp.bit_width[port_idx][copp_idx], 0);
3009 atomic_set(&this_adm.copp.app_type[port_idx][copp_idx], 0);
3010
3011 clear_bit(ADM_STATUS_CALIBRATION_REQUIRED,
3012 (void *)&this_adm.copp.adm_status[port_idx][copp_idx]);
3013
3014 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&close);
3015 if (ret < 0) {
3016 pr_err("%s: ADM close failed %d\n", __func__, ret);
3017 return -EINVAL;
3018 }
3019
3020 ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
3021 atomic_read(&this_adm.copp.stat
3022 [port_idx][copp_idx]) >= 0,
3023 msecs_to_jiffies(TIMEOUT_MS));
3024 if (!ret) {
3025			pr_err("%s: ADM close timed out for port 0x%x\n",
3026 __func__, port_id);
3027 return -EINVAL;
3028 } else if (atomic_read(&this_adm.copp.stat
3029 [port_idx][copp_idx]) > 0) {
3030 pr_err("%s: DSP returned error[%s]\n",
3031 __func__, adsp_err_get_err_str(
3032 atomic_read(&this_adm.copp.stat
3033 [port_idx][copp_idx])));
3034 return adsp_err_get_lnx_err_code(
3035 atomic_read(&this_adm.copp.stat
3036 [port_idx][copp_idx]));
3037 }
3038 }
3039
3040 if (perf_mode != ULTRA_LOW_LATENCY_PCM_MODE) {
3041 pr_debug("%s: remove adm device from rtac\n", __func__);
3042 rtac_remove_adm_device(port_id, copp_id);
3043 }
3044 return 0;
3045}
3046
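/*
 * send_rtac_audvol_cal - push the RTAC audvol calibration block to every
 * active RTAC ADM device whose ACDB id, app type and path match the block.
 */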
3047int send_rtac_audvol_cal(void)
3048{
3049 int ret = 0;
3050 int ret2 = 0;
3051 int i = 0;
3052 int copp_idx, port_idx, acdb_id, app_id, path;
3053 struct cal_block_data *cal_block = NULL;
3054 struct audio_cal_info_audvol *audvol_cal_info = NULL;
3055 struct rtac_adm rtac_adm_data;
3056
3057 mutex_lock(&this_adm.cal_data[ADM_RTAC_AUDVOL_CAL]->lock);
3058
3059 cal_block = cal_utils_get_only_cal_block(
3060 this_adm.cal_data[ADM_RTAC_AUDVOL_CAL]);
3061 if (cal_block == NULL) {
3062 pr_err("%s: can't find cal block!\n", __func__);
3063 goto unlock;
3064 }
3065
3066 audvol_cal_info = cal_block->cal_info;
3067 if (audvol_cal_info == NULL) {
3068 pr_err("%s: audvol_cal_info is NULL!\n", __func__);
3069 goto unlock;
3070 }
3071
3072 get_rtac_adm_data(&rtac_adm_data);
3073 for (; i < rtac_adm_data.num_of_dev; i++) {
3074
3075 acdb_id = rtac_adm_data.device[i].acdb_dev_id;
3076 if (acdb_id == 0)
3077 acdb_id = audvol_cal_info->acdb_id;
3078
3079 app_id = rtac_adm_data.device[i].app_type;
3080 if (app_id == 0)
3081 app_id = audvol_cal_info->app_type;
3082
3083 path = afe_get_port_type(rtac_adm_data.device[i].afe_port);
3084 if ((acdb_id == audvol_cal_info->acdb_id) &&
3085 (app_id == audvol_cal_info->app_type) &&
3086 (path == audvol_cal_info->path)) {
3087
3088 if (adm_get_indexes_from_copp_id(rtac_adm_data.
3089 device[i].copp, &copp_idx, &port_idx) != 0) {
3090 pr_debug("%s: Copp Id %d is not active\n",
3091 __func__,
3092 rtac_adm_data.device[i].copp);
3093 continue;
3094 }
3095
3096 ret2 = adm_remap_and_send_cal_block(ADM_RTAC_AUDVOL_CAL,
3097 rtac_adm_data.device[i].afe_port,
3098 copp_idx, cal_block,
3099 atomic_read(&this_adm.copp.
3100 mode[port_idx][copp_idx]),
3101 audvol_cal_info->app_type,
3102 audvol_cal_info->acdb_id,
3103 atomic_read(&this_adm.copp.
3104 rate[port_idx][copp_idx]));
3105 if (ret2 < 0) {
3106 pr_debug("%s: remap and send failed for copp Id %d, acdb id %d, app type %d, path %d\n",
3107 __func__, rtac_adm_data.device[i].copp,
3108 audvol_cal_info->acdb_id,
3109 audvol_cal_info->app_type,
3110 audvol_cal_info->path);
3111 ret = ret2;
3112 }
3113 }
3114 }
3115unlock:
3116 mutex_unlock(&this_adm.cal_data[ADM_RTAC_AUDVOL_CAL]->lock);
3117 return ret;
3118}
3119
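/*
 * adm_map_rtac_block - map an RTAC calibration buffer to the ADSP through
 * the ADM_RTAC_APR_CAL slot and store the handle in map_data.map_handle.
 */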
3120int adm_map_rtac_block(struct rtac_cal_block_data *cal_block)
3121{
3122 int result = 0;
3123
3124 pr_debug("%s:\n", __func__);
3125
3126 if (cal_block == NULL) {
3127 pr_err("%s: cal_block is NULL!\n",
3128 __func__);
3129 result = -EINVAL;
3130 goto done;
3131 }
3132
3133 if (cal_block->cal_data.paddr == 0) {
3134 pr_debug("%s: No address to map!\n",
3135 __func__);
3136 result = -EINVAL;
3137 goto done;
3138 }
3139
3140 if (cal_block->map_data.map_size == 0) {
3141 pr_debug("%s: map size is 0!\n",
3142 __func__);
3143 result = -EINVAL;
3144 goto done;
3145 }
3146
3147	/* a valid port ID is needed for the callback; use primary I2S */
3148 atomic_set(&this_adm.mem_map_index, ADM_RTAC_APR_CAL);
3149 result = adm_memory_map_regions(&cal_block->cal_data.paddr, 0,
3150 &cal_block->map_data.map_size, 1);
3151 if (result < 0) {
3152 pr_err("%s: RTAC mmap did not work! size = %d result %d\n",
3153 __func__,
3154 cal_block->map_data.map_size, result);
3155 pr_debug("%s: RTAC mmap did not work! addr = 0x%pK, size = %d\n",
3156 __func__,
3157 &cal_block->cal_data.paddr,
3158 cal_block->map_data.map_size);
3159 goto done;
3160 }
3161
3162 cal_block->map_data.map_handle = atomic_read(
3163 &this_adm.mem_map_handles[ADM_RTAC_APR_CAL]);
3164done:
3165 return result;
3166}
3167
3168int adm_unmap_rtac_block(uint32_t *mem_map_handle)
3169{
3170 int result = 0;
3171
3172 pr_debug("%s:\n", __func__);
3173
3174 if (mem_map_handle == NULL) {
3175 pr_debug("%s: Map handle is NULL, nothing to unmap\n",
3176 __func__);
3177 goto done;
3178 }
3179
3180 if (*mem_map_handle == 0) {
3181 pr_debug("%s: Map handle is 0, nothing to unmap\n",
3182 __func__);
3183 goto done;
3184 }
3185
3186 if (*mem_map_handle != atomic_read(
3187 &this_adm.mem_map_handles[ADM_RTAC_APR_CAL])) {
3188 pr_err("%s: Map handles do not match! Unmapping RTAC, RTAC map 0x%x, ADM map 0x%x\n",
3189 __func__, *mem_map_handle, atomic_read(
3190 &this_adm.mem_map_handles[ADM_RTAC_APR_CAL]));
3191
3192 /* if mismatch use handle passed in to unmap */
3193 atomic_set(&this_adm.mem_map_handles[ADM_RTAC_APR_CAL],
3194 *mem_map_handle);
3195 }
3196
3197 /* valid port ID needed for callback use primary I2S */
3198	/* a valid port ID is needed for the callback; use primary I2S */
3199 result = adm_memory_unmap_regions();
3200 if (result < 0) {
3201 pr_debug("%s: adm_memory_unmap_regions failed, error %d\n",
3202 __func__, result);
3203 } else {
3204 atomic_set(&this_adm.mem_map_handles[ADM_RTAC_APR_CAL], 0);
3205 *mem_map_handle = 0;
3206 }
3207done:
3208 return result;
3209}
3210
3211static int get_cal_type_index(int32_t cal_type)
3212{
3213 int ret = -EINVAL;
3214
3215 switch (cal_type) {
3216 case ADM_AUDPROC_CAL_TYPE:
3217 ret = ADM_AUDPROC_CAL;
3218 break;
3219 case ADM_AUDVOL_CAL_TYPE:
3220 ret = ADM_AUDVOL_CAL;
3221 break;
3222 case ADM_CUST_TOPOLOGY_CAL_TYPE:
3223 ret = ADM_CUSTOM_TOP_CAL;
3224 break;
3225 case ADM_RTAC_INFO_CAL_TYPE:
3226 ret = ADM_RTAC_INFO_CAL;
3227 break;
3228 case ADM_RTAC_APR_CAL_TYPE:
3229 ret = ADM_RTAC_APR_CAL;
3230 break;
3231 case ADM_RTAC_AUDVOL_CAL_TYPE:
3232 ret = ADM_RTAC_AUDVOL_CAL;
3233 break;
3234 default:
3235 pr_err("%s: invalid cal type %d!\n", __func__, cal_type);
3236 }
3237 return ret;
3238}
3239
3240static int adm_alloc_cal(int32_t cal_type, size_t data_size, void *data)
3241{
3242 int ret = 0;
3243 int cal_index;
3244
3245 pr_debug("%s:\n", __func__);
3246
3247 cal_index = get_cal_type_index(cal_type);
3248 if (cal_index < 0) {
3249 pr_err("%s: could not get cal index %d!\n",
3250 __func__, cal_index);
3251 ret = -EINVAL;
3252 goto done;
3253 }
3254
3255 ret = cal_utils_alloc_cal(data_size, data,
3256 this_adm.cal_data[cal_index], 0, NULL);
3257 if (ret < 0) {
3258 pr_err("%s: cal_utils_alloc_block failed, ret = %d, cal type = %d!\n",
3259 __func__, ret, cal_type);
3260 ret = -EINVAL;
3261 goto done;
3262 }
3263done:
3264 return ret;
3265}
3266
3267static int adm_dealloc_cal(int32_t cal_type, size_t data_size, void *data)
3268{
3269 int ret = 0;
3270 int cal_index;
3271
3272 pr_debug("%s:\n", __func__);
3273
3274 cal_index = get_cal_type_index(cal_type);
3275 if (cal_index < 0) {
3276 pr_err("%s: could not get cal index %d!\n",
3277 __func__, cal_index);
3278 ret = -EINVAL;
3279 goto done;
3280 }
3281
3282 ret = cal_utils_dealloc_cal(data_size, data,
3283 this_adm.cal_data[cal_index]);
3284 if (ret < 0) {
3285 pr_err("%s: cal_utils_dealloc_block failed, ret = %d, cal type = %d!\n",
3286 __func__, ret, cal_type);
3287 ret = -EINVAL;
3288 goto done;
3289 }
3290done:
3291 return ret;
3292}
3293
3294static int adm_set_cal(int32_t cal_type, size_t data_size, void *data)
3295{
3296 int ret = 0;
3297 int cal_index;
3298
3299 pr_debug("%s:\n", __func__);
3300
3301 cal_index = get_cal_type_index(cal_type);
3302 if (cal_index < 0) {
3303 pr_err("%s: could not get cal index %d!\n",
3304 __func__, cal_index);
3305 ret = -EINVAL;
3306 goto done;
3307 }
3308
3309 ret = cal_utils_set_cal(data_size, data,
3310 this_adm.cal_data[cal_index], 0, NULL);
3311 if (ret < 0) {
3312 pr_err("%s: cal_utils_set_cal failed, ret = %d, cal type = %d!\n",
3313 __func__, ret, cal_type);
3314 ret = -EINVAL;
3315 goto done;
3316 }
3317
3318 if (cal_index == ADM_CUSTOM_TOP_CAL) {
3319 mutex_lock(&this_adm.cal_data[ADM_CUSTOM_TOP_CAL]->lock);
3320 this_adm.set_custom_topology = 1;
3321 mutex_unlock(&this_adm.cal_data[ADM_CUSTOM_TOP_CAL]->lock);
3322 } else if (cal_index == ADM_RTAC_AUDVOL_CAL) {
3323 send_rtac_audvol_cal();
3324 }
3325done:
3326 return ret;
3327}
3328
3329static int adm_map_cal_data(int32_t cal_type,
3330 struct cal_block_data *cal_block)
3331{
3332 int ret = 0;
3333 int cal_index;
3334
3335 pr_debug("%s:\n", __func__);
3336
3337 cal_index = get_cal_type_index(cal_type);
3338 if (cal_index < 0) {
3339 pr_err("%s: could not get cal index %d!\n",
3340 __func__, cal_index);
3341 ret = -EINVAL;
3342 goto done;
3343 }
3344
3345 atomic_set(&this_adm.mem_map_index, cal_index);
3346 ret = adm_memory_map_regions(&cal_block->cal_data.paddr, 0,
3347 (uint32_t *)&cal_block->map_data.map_size, 1);
3348 if (ret < 0) {
3349		pr_err("%s: map did not work! cal_index %d ret %d\n",
3350 __func__, cal_index, ret);
3351 ret = -ENODEV;
3352 goto done;
3353 }
3354 cal_block->map_data.q6map_handle = atomic_read(&this_adm.
3355 mem_map_handles[cal_index]);
3356done:
3357 return ret;
3358}
3359
3360static int adm_unmap_cal_data(int32_t cal_type,
3361 struct cal_block_data *cal_block)
3362{
3363 int ret = 0;
3364 int cal_index;
3365
3366 pr_debug("%s:\n", __func__);
3367
3368 cal_index = get_cal_type_index(cal_type);
3369 if (cal_index < 0) {
3370 pr_err("%s: could not get cal index %d!\n",
3371 __func__, cal_index);
3372 ret = -EINVAL;
3373 goto done;
3374 }
3375
3376 if (cal_block == NULL) {
3377 pr_err("%s: Cal block is NULL!\n",
3378 __func__);
3379 goto done;
3380 }
3381
3382 if (cal_block->map_data.q6map_handle == 0) {
3383 pr_err("%s: Map handle is NULL, nothing to unmap\n",
3384 __func__);
3385 goto done;
3386 }
3387
3388 atomic_set(&this_adm.mem_map_handles[cal_index],
3389 cal_block->map_data.q6map_handle);
3390 atomic_set(&this_adm.mem_map_index, cal_index);
3391 ret = adm_memory_unmap_regions();
3392 if (ret < 0) {
3393 		pr_err("%s: unmap did not work! cal index %d ret %d\n",
3394 __func__, cal_index, ret);
3395 ret = -ENODEV;
3396 goto done;
3397 }
3398 cal_block->map_data.q6map_handle = 0;
3399done:
3400 return ret;
3401}
3402
3403static void adm_delete_cal_data(void)
3404{
3405 pr_debug("%s:\n", __func__);
3406
3407 cal_utils_destroy_cal_types(ADM_MAX_CAL_TYPES, this_adm.cal_data);
3408}
3409
3410static int adm_init_cal_data(void)
3411{
3412 int ret = 0;
3413 struct cal_type_info cal_type_info[] = {
3414 {{ADM_CUST_TOPOLOGY_CAL_TYPE,
3415 {adm_alloc_cal, adm_dealloc_cal, NULL,
3416 adm_set_cal, NULL, NULL} },
3417 {adm_map_cal_data, adm_unmap_cal_data,
3418 cal_utils_match_buf_num} },
3419
3420 {{ADM_AUDPROC_CAL_TYPE,
3421 {adm_alloc_cal, adm_dealloc_cal, NULL,
3422 adm_set_cal, NULL, NULL} },
3423 {adm_map_cal_data, adm_unmap_cal_data,
3424 cal_utils_match_buf_num} },
3425
3426 {{ADM_AUDVOL_CAL_TYPE,
3427 {adm_alloc_cal, adm_dealloc_cal, NULL,
3428 adm_set_cal, NULL, NULL} },
3429 {adm_map_cal_data, adm_unmap_cal_data,
3430 cal_utils_match_buf_num} },
3431
3432 {{ADM_RTAC_INFO_CAL_TYPE,
3433 {NULL, NULL, NULL, NULL, NULL, NULL} },
3434 {NULL, NULL, cal_utils_match_buf_num} },
3435
3436 {{ADM_RTAC_APR_CAL_TYPE,
3437 {NULL, NULL, NULL, NULL, NULL, NULL} },
3438 {NULL, NULL, cal_utils_match_buf_num} },
3439
3440 {{SRS_TRUMEDIA_CAL_TYPE,
3441 {NULL, NULL, NULL, NULL, NULL, NULL} },
3442 {NULL, NULL, cal_utils_match_buf_num} },
3443
3444 {{ADM_RTAC_AUDVOL_CAL_TYPE,
3445 {adm_alloc_cal, adm_dealloc_cal, NULL,
3446 adm_set_cal, NULL, NULL} },
3447 {adm_map_cal_data, adm_unmap_cal_data,
3448 cal_utils_match_buf_num} },
3449 };
3450 pr_debug("%s:\n", __func__);
3451
3452 ret = cal_utils_create_cal_types(ADM_MAX_CAL_TYPES, this_adm.cal_data,
3453 cal_type_info);
3454 if (ret < 0) {
3455 pr_err("%s: could not create cal type! ret %d\n",
3456 __func__, ret);
3457 ret = -EINVAL;
3458 goto err;
3459 }
3460
3461 return ret;
3462err:
3463 adm_delete_cal_data();
3464 return ret;
3465}
3466
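/*
 * adm_set_volume() - apply a master gain on an open COPP.
 * Builds an in-band ADM_CMD_SET_PP_PARAMS_V5 packet carrying
 * AUDPROC_PARAM_ID_VOL_CTRL_MASTER_GAIN and blocks up to TIMEOUT_MS for the
 * ADSP acknowledgement. Returns 0 on success or a negative error code.
 */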
3467int adm_set_volume(int port_id, int copp_idx, int volume)
3468{
3469 struct audproc_volume_ctrl_master_gain audproc_vol;
3470 int sz = 0;
3471 int rc = 0;
3472 int port_idx;
3473
3474 pr_debug("%s: port_id %d, volume %d\n", __func__, port_id, volume);
3475 port_id = afe_convert_virtual_to_portid(port_id);
3476 port_idx = adm_validate_and_get_port_index(port_id);
3477 if (port_idx < 0) {
3478 pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
3479 rc = -EINVAL;
3480 goto fail_cmd;
3481 }
3482
3483 if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
3484 pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
3485 return -EINVAL;
3486 }
3487
3488 sz = sizeof(struct audproc_volume_ctrl_master_gain);
3489 audproc_vol.params.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
3490 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
3491 audproc_vol.params.hdr.pkt_size = sz;
3492 audproc_vol.params.hdr.src_svc = APR_SVC_ADM;
3493 audproc_vol.params.hdr.src_domain = APR_DOMAIN_APPS;
3494 audproc_vol.params.hdr.src_port = port_id;
3495 audproc_vol.params.hdr.dest_svc = APR_SVC_ADM;
3496 audproc_vol.params.hdr.dest_domain = APR_DOMAIN_ADSP;
3497 audproc_vol.params.hdr.dest_port =
3498 atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
3499 audproc_vol.params.hdr.token = port_idx << 16 | copp_idx;
3500 audproc_vol.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
3501 audproc_vol.params.payload_addr_lsw = 0;
3502 audproc_vol.params.payload_addr_msw = 0;
3503 audproc_vol.params.mem_map_handle = 0;
3504 audproc_vol.params.payload_size = sizeof(audproc_vol) -
3505 sizeof(audproc_vol.params);
3506 audproc_vol.data.module_id = AUDPROC_MODULE_ID_VOL_CTRL;
3507 audproc_vol.data.param_id = AUDPROC_PARAM_ID_VOL_CTRL_MASTER_GAIN;
3508 audproc_vol.data.param_size = audproc_vol.params.payload_size -
3509 sizeof(audproc_vol.data);
3510 audproc_vol.data.reserved = 0;
3511 audproc_vol.master_gain = volume;
3512
3513 atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
3514 rc = apr_send_pkt(this_adm.apr, (uint32_t *)&audproc_vol);
3515 if (rc < 0) {
3516 pr_err("%s: Set params failed port = %#x\n",
3517 __func__, port_id);
3518 rc = -EINVAL;
3519 goto fail_cmd;
3520 }
3521 /* Wait for the callback */
3522 rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
3523 atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
3524 msecs_to_jiffies(TIMEOUT_MS));
3525 if (!rc) {
3526 pr_err("%s: Vol cntrl Set params timed out port = %#x\n",
3527 __func__, port_id);
3528 rc = -EINVAL;
3529 goto fail_cmd;
3530 } else if (atomic_read(&this_adm.copp.stat
3531 [port_idx][copp_idx]) > 0) {
3532 pr_err("%s: DSP returned error[%s]\n",
3533 __func__, adsp_err_get_err_str(
3534 atomic_read(&this_adm.copp.stat
3535 [port_idx][copp_idx])));
3536 rc = adsp_err_get_lnx_err_code(
3537 atomic_read(&this_adm.copp.stat
3538 [port_idx][copp_idx]));
3539 goto fail_cmd;
3540 }
3541 rc = 0;
3542fail_cmd:
3543 return rc;
3544}
3545
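/*
 * adm_set_softvolume() - configure soft-volume ramping (period, step and
 * ramping curve) on a COPP via AUDPROC_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS.
 * Sends an in-band set-params command and waits for the DSP callback.
 */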
3546int adm_set_softvolume(int port_id, int copp_idx,
3547 struct audproc_softvolume_params *softvol_param)
3548{
3549 struct audproc_soft_step_volume_params audproc_softvol;
3550 int sz = 0;
3551 int rc = 0;
3552 int port_idx;
3553
3554 pr_debug("%s: period %d step %d curve %d\n", __func__,
3555 softvol_param->period, softvol_param->step,
3556 softvol_param->rampingcurve);
3557
3558 port_id = afe_convert_virtual_to_portid(port_id);
3559 port_idx = adm_validate_and_get_port_index(port_id);
3560 if (port_idx < 0) {
3561 pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
3562 rc = -EINVAL;
3563 goto fail_cmd;
3564 }
3565
3566 if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
3567 pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
3568 return -EINVAL;
3569 }
3570
3571 sz = sizeof(struct audproc_soft_step_volume_params);
3572
3573 audproc_softvol.params.hdr.hdr_field =
3574 APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
3575 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
3576 audproc_softvol.params.hdr.pkt_size = sz;
3577 audproc_softvol.params.hdr.src_svc = APR_SVC_ADM;
3578 audproc_softvol.params.hdr.src_domain = APR_DOMAIN_APPS;
3579 audproc_softvol.params.hdr.src_port = port_id;
3580 audproc_softvol.params.hdr.dest_svc = APR_SVC_ADM;
3581 audproc_softvol.params.hdr.dest_domain = APR_DOMAIN_ADSP;
3582 audproc_softvol.params.hdr.dest_port =
3583 atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
3584 audproc_softvol.params.hdr.token = port_idx << 16 | copp_idx;
3585 audproc_softvol.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
3586 audproc_softvol.params.payload_addr_lsw = 0;
3587 audproc_softvol.params.payload_addr_msw = 0;
3588 audproc_softvol.params.mem_map_handle = 0;
3589 audproc_softvol.params.payload_size = sizeof(audproc_softvol) -
3590 sizeof(audproc_softvol.params);
3591 audproc_softvol.data.module_id = AUDPROC_MODULE_ID_VOL_CTRL;
3592 audproc_softvol.data.param_id =
3593 AUDPROC_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS;
3594 audproc_softvol.data.param_size = audproc_softvol.params.payload_size -
3595 sizeof(audproc_softvol.data);
3596 audproc_softvol.data.reserved = 0;
3597 audproc_softvol.period = softvol_param->period;
3598 audproc_softvol.step = softvol_param->step;
3599 audproc_softvol.ramping_curve = softvol_param->rampingcurve;
3600
3601 pr_debug("%s: period %d, step %d, curve %d\n", __func__,
3602 audproc_softvol.period, audproc_softvol.step,
3603 audproc_softvol.ramping_curve);
3604
3605 atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
3606 rc = apr_send_pkt(this_adm.apr, (uint32_t *)&audproc_softvol);
3607 if (rc < 0) {
3608 pr_err("%s: Set params failed port = %#x\n",
3609 __func__, port_id);
3610 rc = -EINVAL;
3611 goto fail_cmd;
3612 }
3613 /* Wait for the callback */
3614 rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
3615 atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
3616 msecs_to_jiffies(TIMEOUT_MS));
3617 if (!rc) {
3618 pr_err("%s: Soft volume Set params timed out port = %#x\n",
3619 __func__, port_id);
3620 rc = -EINVAL;
3621 goto fail_cmd;
3622 } else if (atomic_read(&this_adm.copp.stat
3623 [port_idx][copp_idx]) > 0) {
3624 pr_err("%s: DSP returned error[%s]\n",
3625 __func__, adsp_err_get_err_str(
3626 atomic_read(&this_adm.copp.stat
3627 [port_idx][copp_idx])));
3628 rc = adsp_err_get_lnx_err_code(
3629 atomic_read(&this_adm.copp.stat
3630 [port_idx][copp_idx]));
3631 goto fail_cmd;
3632 }
3633 rc = 0;
3634fail_cmd:
3635 return rc;
3636}
3637
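/*
 * adm_set_mic_gain() - set the Tx mic gain on a COPP through the
 * ADM_MODULE_IDX_MIC_GAIN_CTRL module, then wait for the DSP acknowledgement.
 */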
3638int adm_set_mic_gain(int port_id, int copp_idx, int volume)
3639{
3640 struct adm_set_mic_gain_params mic_gain_params;
3641 int rc = 0;
3642 int sz, port_idx;
3643
3644 pr_debug("%s:\n", __func__);
3645 port_id = afe_convert_virtual_to_portid(port_id);
3646 port_idx = adm_validate_and_get_port_index(port_id);
3647 if (port_idx < 0) {
3648 pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
3649 return -EINVAL;
3650 }
3651
3652 sz = sizeof(struct adm_set_mic_gain_params);
3653
3654 mic_gain_params.params.hdr.hdr_field =
3655 APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
3656 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
3657 mic_gain_params.params.hdr.pkt_size = sz;
3658 mic_gain_params.params.hdr.src_svc = APR_SVC_ADM;
3659 mic_gain_params.params.hdr.src_domain = APR_DOMAIN_APPS;
3660 mic_gain_params.params.hdr.src_port = port_id;
3661 mic_gain_params.params.hdr.dest_svc = APR_SVC_ADM;
3662 mic_gain_params.params.hdr.dest_domain = APR_DOMAIN_ADSP;
3663 mic_gain_params.params.hdr.dest_port =
3664 atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
3665 mic_gain_params.params.hdr.token = port_idx << 16 | copp_idx;
3666 mic_gain_params.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
3667 mic_gain_params.params.payload_addr_lsw = 0;
3668 mic_gain_params.params.payload_addr_msw = 0;
3669 mic_gain_params.params.mem_map_handle = 0;
3670 mic_gain_params.params.payload_size =
3671 sizeof(struct adm_param_data_v5) +
3672 sizeof(struct admx_mic_gain);
3673 mic_gain_params.data.module_id = ADM_MODULE_IDX_MIC_GAIN_CTRL;
3674 mic_gain_params.data.param_id = ADM_PARAM_IDX_MIC_GAIN;
3675 mic_gain_params.data.param_size =
3676 sizeof(struct admx_mic_gain);
3677 mic_gain_params.data.reserved = 0;
3678 mic_gain_params.mic_gain_data.tx_mic_gain = volume;
3679 mic_gain_params.mic_gain_data.reserved = 0;
3680 pr_debug("%s: Mic Gain set to %d at port_id 0x%x\n",
3681 __func__, volume, port_id);
3682
3683 atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
3684 rc = apr_send_pkt(this_adm.apr, (uint32_t *)&mic_gain_params);
3685 if (rc < 0) {
3686 pr_err("%s: Set params failed port = %#x\n",
3687 __func__, port_id);
3688 rc = -EINVAL;
3689 goto fail_cmd;
3690 }
3691 /* Wait for the callback */
3692 rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
3693 atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
3694 msecs_to_jiffies(TIMEOUT_MS));
3695 if (!rc) {
3696 pr_err("%s: Mic Gain Set params timed out port = %#x\n",
3697 __func__, port_id);
3698 rc = -EINVAL;
3699 goto fail_cmd;
3700 } else if (atomic_read(&this_adm.copp.stat
3701 [port_idx][copp_idx]) > 0) {
3702 pr_err("%s: DSP returned error[%s]\n",
3703 __func__, adsp_err_get_err_str(
3704 atomic_read(&this_adm.copp.stat
3705 [port_idx][copp_idx])));
3706 rc = adsp_err_get_lnx_err_code(
3707 atomic_read(&this_adm.copp.stat
3708 [port_idx][copp_idx]));
3709 goto fail_cmd;
3710 }
3711 rc = 0;
3712fail_cmd:
3713 return rc;
3714}
3715
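/*
 * adm_send_set_multichannel_ec_primary_mic_ch() - tell the voice Tx SECNS
 * module (AUDPROC_MODULE_ID_VOICE_TX_SECNS) which channel carries the
 * primary mic for multichannel echo cancellation.
 */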
3716int adm_send_set_multichannel_ec_primary_mic_ch(int port_id, int copp_idx,
3717 int primary_mic_ch)
3718{
3719 struct adm_set_sec_primary_ch_params sec_primary_ch_params;
3720 int rc = 0;
3721 int sz, port_idx;
3722
3723 pr_debug("%s port_id 0x%x, copp_idx 0x%x, primary_mic_ch %d\n",
3724 __func__, port_id, copp_idx, primary_mic_ch);
3725 port_id = afe_convert_virtual_to_portid(port_id);
3726 port_idx = adm_validate_and_get_port_index(port_id);
3727 if (port_idx < 0) {
3728 pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
3729 return -EINVAL;
3730 }
3731
3732 if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
3733 pr_err("%s: Invalid copp_idx 0x%x\n", __func__, copp_idx);
3734 return -EINVAL;
3735 }
3736
3737 sz = sizeof(struct adm_set_sec_primary_ch_params);
3738
3739 sec_primary_ch_params.params.hdr.hdr_field =
3740 APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
3741 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
3742 sec_primary_ch_params.params.hdr.pkt_size = sz;
3743 sec_primary_ch_params.params.hdr.src_svc = APR_SVC_ADM;
3744 sec_primary_ch_params.params.hdr.src_domain = APR_DOMAIN_APPS;
3745 sec_primary_ch_params.params.hdr.src_port = port_id;
3746 sec_primary_ch_params.params.hdr.dest_svc = APR_SVC_ADM;
3747 sec_primary_ch_params.params.hdr.dest_domain = APR_DOMAIN_ADSP;
3748 sec_primary_ch_params.params.hdr.dest_port =
3749 atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
3750 sec_primary_ch_params.params.hdr.token = port_idx << 16 | copp_idx;
3751 sec_primary_ch_params.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
3752 sec_primary_ch_params.params.payload_addr_lsw = 0;
3753 sec_primary_ch_params.params.payload_addr_msw = 0;
3754 sec_primary_ch_params.params.mem_map_handle = 0;
3755 sec_primary_ch_params.params.payload_size =
3756 sizeof(struct adm_param_data_v5) +
3757 sizeof(struct admx_sec_primary_mic_ch);
3758 sec_primary_ch_params.data.module_id =
3759 AUDPROC_MODULE_ID_VOICE_TX_SECNS;
3760 sec_primary_ch_params.data.param_id =
3761 AUDPROC_PARAM_IDX_SEC_PRIMARY_MIC_CH;
3762 sec_primary_ch_params.data.param_size =
3763 sizeof(struct admx_sec_primary_mic_ch);
3764 sec_primary_ch_params.data.reserved = 0;
3765 sec_primary_ch_params.sec_primary_mic_ch_data.version = 0;
3766 sec_primary_ch_params.sec_primary_mic_ch_data.reserved = 0;
3767 sec_primary_ch_params.sec_primary_mic_ch_data.sec_primary_mic_ch =
3768 primary_mic_ch;
3769 sec_primary_ch_params.sec_primary_mic_ch_data.reserved1 = 0;
3770
3771 atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
3772 rc = apr_send_pkt(this_adm.apr, (uint32_t *)&sec_primary_ch_params);
3773 if (rc < 0) {
3774 pr_err("%s: Set params failed port = %#x\n",
3775 __func__, port_id);
3776 rc = -EINVAL;
3777 goto fail_cmd;
3778 }
3779 /* Wait for the callback */
3780 rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
3781 atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
3782 msecs_to_jiffies(TIMEOUT_MS));
3783 if (!rc) {
3784 pr_err("%s: Mic Set params timed out port = %#x\n",
3785 __func__, port_id);
3786 rc = -EINVAL;
3787 goto fail_cmd;
3788 } else if (atomic_read(&this_adm.copp.stat
3789 [port_idx][copp_idx]) > 0) {
3790 pr_err("%s: DSP returned error[%s]\n",
3791 __func__, adsp_err_get_err_str(
3792 atomic_read(&this_adm.copp.stat
3793 [port_idx][copp_idx])));
3794 rc = adsp_err_get_lnx_err_code(
3795 atomic_read(&this_adm.copp.stat
3796 [port_idx][copp_idx]));
3797 goto fail_cmd;
3798 }
3799 rc = 0;
3800fail_cmd:
3801 return rc;
3802}
3803
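/*
 * adm_param_enable() - enable or disable an ADM topology module by sending
 * AUDPROC_PARAM_ID_ENABLE for the given module_id to the COPP.
 */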
3804int adm_param_enable(int port_id, int copp_idx, int module_id, int enable)
3805{
3806 struct audproc_enable_param_t adm_mod_enable;
3807 int sz = 0;
3808 int rc = 0;
3809 int port_idx;
3810
3811 pr_debug("%s port_id %d, module_id 0x%x, enable %d\n",
3812 __func__, port_id, module_id, enable);
3813 port_id = afe_convert_virtual_to_portid(port_id);
3814 port_idx = adm_validate_and_get_port_index(port_id);
3815 if (port_idx < 0) {
3816 pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
3817 rc = -EINVAL;
3818 goto fail_cmd;
3819 }
3820
3821 if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
3822 pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
3823 return -EINVAL;
3824 }
3825
3826 sz = sizeof(struct audproc_enable_param_t);
3827
3828 adm_mod_enable.pp_params.hdr.hdr_field =
3829 APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
3830 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
3831 adm_mod_enable.pp_params.hdr.pkt_size = sz;
3832 adm_mod_enable.pp_params.hdr.src_svc = APR_SVC_ADM;
3833 adm_mod_enable.pp_params.hdr.src_domain = APR_DOMAIN_APPS;
3834 adm_mod_enable.pp_params.hdr.src_port = port_id;
3835 adm_mod_enable.pp_params.hdr.dest_svc = APR_SVC_ADM;
3836 adm_mod_enable.pp_params.hdr.dest_domain = APR_DOMAIN_ADSP;
3837 adm_mod_enable.pp_params.hdr.dest_port =
3838 atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
3839 adm_mod_enable.pp_params.hdr.token = port_idx << 16 | copp_idx;
3840 adm_mod_enable.pp_params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
3841 adm_mod_enable.pp_params.payload_addr_lsw = 0;
3842 adm_mod_enable.pp_params.payload_addr_msw = 0;
3843 adm_mod_enable.pp_params.mem_map_handle = 0;
3844 adm_mod_enable.pp_params.payload_size = sizeof(adm_mod_enable) -
3845 sizeof(adm_mod_enable.pp_params) +
3846 sizeof(adm_mod_enable.pp_params.params);
3847 adm_mod_enable.pp_params.params.module_id = module_id;
3848 adm_mod_enable.pp_params.params.param_id = AUDPROC_PARAM_ID_ENABLE;
3849 adm_mod_enable.pp_params.params.param_size =
3850 adm_mod_enable.pp_params.payload_size -
3851 sizeof(adm_mod_enable.pp_params.params);
3852 adm_mod_enable.pp_params.params.reserved = 0;
3853 adm_mod_enable.enable = enable;
3854
3855 atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
3856
3857 rc = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_mod_enable);
3858 if (rc < 0) {
3859 pr_err("%s: Set params failed port = %#x\n",
3860 __func__, port_id);
3861 rc = -EINVAL;
3862 goto fail_cmd;
3863 }
3864 /* Wait for the callback */
3865 rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
3866 atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
3867 msecs_to_jiffies(TIMEOUT_MS));
3868 if (!rc) {
3869 pr_err("%s: module %x enable %d timed out on port = %#x\n",
3870 __func__, module_id, enable, port_id);
3871 rc = -EINVAL;
3872 goto fail_cmd;
3873 } else if (atomic_read(&this_adm.copp.stat
3874 [port_idx][copp_idx]) > 0) {
3875 pr_err("%s: DSP returned error[%s]\n",
3876 __func__, adsp_err_get_err_str(
3877 atomic_read(&this_adm.copp.stat
3878 [port_idx][copp_idx])));
3879 rc = adsp_err_get_lnx_err_code(
3880 atomic_read(&this_adm.copp.stat
3881 [port_idx][copp_idx]));
3882 goto fail_cmd;
3883 }
3884 rc = 0;
3885fail_cmd:
3886 return rc;
3887
3888}
3889
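/*
 * adm_send_calibration() - push a caller-supplied calibration blob to a COPP
 * as an in-band ADM_CMD_SET_PP_PARAMS_V5 payload. Only the Rx device
 * calibration path is accepted; the allocated packet is freed before return.
 */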
3890int adm_send_calibration(int port_id, int copp_idx, int path, int perf_mode,
3891 int cal_type, char *params, int size)
3892{
3894 struct adm_cmd_set_pp_params_v5 *adm_params = NULL;
3895 int sz, rc = 0;
3896 int port_idx;
3897
3898 pr_debug("%s:port_id %d, path %d, perf_mode %d, cal_type %d, size %d\n",
3899 __func__, port_id, path, perf_mode, cal_type, size);
3900
3901 port_id = afe_convert_virtual_to_portid(port_id);
3902 port_idx = adm_validate_and_get_port_index(port_id);
3903 if (port_idx < 0) {
3904 pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
3905 rc = -EINVAL;
3906 goto end;
3907 }
3908
3909 if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
3910 pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
3911 return -EINVAL;
3912 }
3913
3914 /* Maps audio_dev_ctrl path definition to ACDB definition */
3915 if (get_cal_path(path) != RX_DEVICE) {
3916 		pr_err("%s: invalid acdb_path %d\n", __func__, path);
3917 rc = -EINVAL;
3918 goto end;
3919 }
3920
3921 sz = sizeof(struct adm_cmd_set_pp_params_v5) + size;
3922 adm_params = kzalloc(sz, GFP_KERNEL);
3923 if (!adm_params) {
3924 		pr_err("%s: adm params memory alloc failed\n", __func__);
3925 rc = -ENOMEM;
3926 goto end;
3927 }
3928
3929 memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_set_pp_params_v5)),
3930 params, size);
3931
3932 adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
3933 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
3934 adm_params->hdr.pkt_size = sz;
3935 adm_params->hdr.src_svc = APR_SVC_ADM;
3936 adm_params->hdr.src_domain = APR_DOMAIN_APPS;
3937 adm_params->hdr.src_port = port_id;
3938 adm_params->hdr.dest_svc = APR_SVC_ADM;
3939 adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
3940 adm_params->hdr.dest_port =
3941 atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
3942 adm_params->hdr.token = port_idx << 16 | copp_idx;
3943 adm_params->hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
3944 /* payload address and mmap handle initialized to zero by kzalloc */
3945 adm_params->payload_size = size;
3946
3947 atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
3948 rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
3949 if (rc < 0) {
3950 pr_err("%s: Set params failed port = %#x\n",
3951 __func__, port_id);
3952 rc = -EINVAL;
3953 goto end;
3954 }
3955 /* Wait for the callback */
3956 rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
3957 atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
3958 msecs_to_jiffies(TIMEOUT_MS));
3959 if (!rc) {
3960 pr_err("%s: Set params timed out port = %#x\n",
3961 __func__, port_id);
3962 rc = -EINVAL;
3963 goto end;
3964 } else if (atomic_read(&this_adm.copp.stat
3965 [port_idx][copp_idx]) > 0) {
3966 pr_err("%s: DSP returned error[%s]\n",
3967 __func__, adsp_err_get_err_str(
3968 atomic_read(&this_adm.copp.stat
3969 [port_idx][copp_idx])));
3970 rc = adsp_err_get_lnx_err_code(
3971 atomic_read(&this_adm.copp.stat
3972 [port_idx][copp_idx]));
3973 goto end;
3974 }
3975 rc = 0;
3976
3977end:
3978 kfree(adm_params);
3979 return rc;
3980}
3981
3982/*
3983 * adm_update_wait_parameters must be called with routing driver locks.
3984 * adm_reset_wait_parameters must be called with routing driver locks.
3985 * set and reset parmeters are separated to make sure it is always called
3986 * under routing driver lock.
3987 * adm_wait_timeout is to block until timeout or interrupted. Timeout is
3988 * not a an error.
3989 */
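/*
 * Illustrative usage (a sketch only, not code copied from the routing
 * driver): one context arms the delay and then blocks, while another context
 * clears it under the routing lock, waking the waiter:
 *
 *	adm_set_wait_parameters(port_id, copp_idx);	// under routing lock
 *	adm_wait_timeout(port_id, copp_idx, wait_ms);	// returns -EINTR if woken
 *
 *	adm_reset_wait_parameters(port_id, copp_idx);	// under routing lock
 */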
3990int adm_set_wait_parameters(int port_id, int copp_idx)
3991{
3993 int ret = 0, port_idx;
3994
3995 pr_debug("%s: port_id 0x%x, copp_idx %d\n", __func__, port_id,
3996 copp_idx);
3997 port_id = afe_convert_virtual_to_portid(port_id);
3998 port_idx = adm_validate_and_get_port_index(port_id);
3999 if (port_idx < 0) {
4000 pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
4001 ret = -EINVAL;
4002 goto end;
4003 }
4004
4005 if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
4006 pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
4007 return -EINVAL;
4008 }
4009
4010 this_adm.copp.adm_delay[port_idx][copp_idx] = 1;
4011 atomic_set(&this_adm.copp.adm_delay_stat[port_idx][copp_idx], 0);
4012
4013end:
4014 return ret;
4015
4016}
4017
4018int adm_reset_wait_parameters(int port_id, int copp_idx)
4019{
4020 int ret = 0, port_idx;
4021
4022 pr_debug("%s: port_id 0x%x copp_idx %d\n", __func__, port_id,
4023 copp_idx);
4024 port_id = afe_convert_virtual_to_portid(port_id);
4025 port_idx = adm_validate_and_get_port_index(port_id);
4026 if (port_idx < 0) {
4027 pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
4028 ret = -EINVAL;
4029 goto end;
4030 }
4031
4032 if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
4033 pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
4034 return -EINVAL;
4035 }
4036
4037 atomic_set(&this_adm.copp.adm_delay_stat[port_idx][copp_idx], 1);
4038 this_adm.copp.adm_delay[port_idx][copp_idx] = 0;
4039
4040end:
4041 return ret;
4042}
4043
4044int adm_wait_timeout(int port_id, int copp_idx, int wait_time)
4045{
4046 int ret = 0, port_idx;
4047
4048 pr_debug("%s: port_id 0x%x, copp_idx %d, wait_time %d\n", __func__,
4049 port_id, copp_idx, wait_time);
4050 port_id = afe_convert_virtual_to_portid(port_id);
4051 port_idx = adm_validate_and_get_port_index(port_id);
4052 if (port_idx < 0) {
4053 pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
4054 ret = -EINVAL;
4055 goto end;
4056 }
4057
4058 if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
4059 pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
4060 return -EINVAL;
4061 }
4062
4063 ret = wait_event_timeout(
4064 this_adm.copp.adm_delay_wait[port_idx][copp_idx],
4065 atomic_read(&this_adm.copp.adm_delay_stat[port_idx][copp_idx]),
4066 msecs_to_jiffies(wait_time));
4067 pr_debug("%s: return %d\n", __func__, ret);
4068 if (ret != 0)
4069 ret = -EINTR;
4070end:
4071 pr_debug("%s: return %d--\n", __func__, ret);
4072 return ret;
4073}
4074
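/*
 * adm_store_cal_data() - copy the cached ADM audproc or audvol calibration
 * block that matches this COPP's app type, ACDB id and sample rate (Rx
 * device path only) into the caller's buffer; *size is updated with the
 * copied length.
 */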
4075int adm_store_cal_data(int port_id, int copp_idx, int path, int perf_mode,
4076 int cal_index, char *params, int *size)
4077{
4078 int rc = 0;
4079 struct cal_block_data *cal_block = NULL;
4080 int app_type, acdb_id, port_idx, sample_rate;
4081
4082 if (this_adm.cal_data[cal_index] == NULL) {
4083 pr_debug("%s: cal_index %d not allocated!\n",
4084 __func__, cal_index);
4085 goto end;
4086 }
4087
4088 if (get_cal_path(path) != RX_DEVICE) {
4089 pr_debug("%s: Invalid path to store calibration %d\n",
4090 __func__, path);
4091 rc = -EINVAL;
4092 goto end;
4093 }
4094
4095 port_id = afe_convert_virtual_to_portid(port_id);
4096 port_idx = adm_validate_and_get_port_index(port_id);
4097 if (port_idx < 0) {
4098 pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
4099 rc = -EINVAL;
4100 goto end;
4101 }
4102
4103 if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
4104 pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
4105 return -EINVAL;
4106 }
4107
4108 acdb_id = atomic_read(&this_adm.copp.acdb_id[port_idx][copp_idx]);
4109 app_type = atomic_read(&this_adm.copp.app_type[port_idx][copp_idx]);
4110 sample_rate = atomic_read(&this_adm.copp.rate[port_idx][copp_idx]);
4111
4112 mutex_lock(&this_adm.cal_data[cal_index]->lock);
4113 cal_block = adm_find_cal(cal_index, get_cal_path(path), app_type,
4114 acdb_id, sample_rate);
4115 if (cal_block == NULL)
4116 goto unlock;
4117
4118 if (cal_block->cal_data.size <= 0) {
4119 pr_debug("%s: No ADM cal send for port_id = 0x%x!\n",
4120 __func__, port_id);
4121 rc = -EINVAL;
4122 goto unlock;
4123 }
4124
4125 if (cal_index == ADM_AUDPROC_CAL) {
4126 if (cal_block->cal_data.size > AUD_PROC_BLOCK_SIZE) {
4127 pr_err("%s:audproc:invalid size exp/actual[%zd, %d]\n",
4128 __func__, cal_block->cal_data.size, *size);
4129 rc = -ENOMEM;
4130 goto unlock;
4131 }
4132 } else if (cal_index == ADM_AUDVOL_CAL) {
4133 if (cal_block->cal_data.size > AUD_VOL_BLOCK_SIZE) {
4134 pr_err("%s:aud_vol:invalid size exp/actual[%zd, %d]\n",
4135 __func__, cal_block->cal_data.size, *size);
4136 rc = -ENOMEM;
4137 goto unlock;
4138 }
4139 } else {
4140 		pr_debug("%s: Not valid calibration for Dolby topology\n",
4141 __func__);
4142 rc = -EINVAL;
4143 goto unlock;
4144 }
4145 memcpy(params, cal_block->cal_data.kvaddr, cal_block->cal_data.size);
4146 *size = cal_block->cal_data.size;
4147
4148 pr_debug("%s:port_id %d, copp_idx %d, path %d",
4149 __func__, port_id, copp_idx, path);
4150 	pr_debug("perf_mode %d, cal_index %d, size %d\n",
4151 perf_mode, cal_index, *size);
4152
4153unlock:
4154 mutex_unlock(&this_adm.cal_data[cal_index]->lock);
4155end:
4156 return rc;
4157}
4158
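/*
 * adm_send_compressed_device_mute() - mute or unmute a compressed playback
 * device via AUDPROC_PARAM_ID_COMPRESSED_MUTE on the given COPP.
 */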
4159int adm_send_compressed_device_mute(int port_id, int copp_idx, bool mute_on)
4160{
4161 struct adm_set_compressed_device_mute mute_params;
4162 int ret = 0;
4163 int port_idx;
4164
4165 pr_debug("%s port_id: 0x%x, copp_idx %d, mute_on: %d\n",
4166 __func__, port_id, copp_idx, mute_on);
4167 port_id = afe_convert_virtual_to_portid(port_id);
4168 port_idx = adm_validate_and_get_port_index(port_id);
4169 if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) {
4170 pr_err("%s: Invalid port_id %#x copp_idx %d\n",
4171 __func__, port_id, copp_idx);
4172 ret = -EINVAL;
4173 goto end;
4174 }
4175
4176 mute_params.command.hdr.hdr_field =
4177 APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
4178 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
4179 mute_params.command.hdr.pkt_size =
4180 sizeof(struct adm_set_compressed_device_mute);
4181 mute_params.command.hdr.src_svc = APR_SVC_ADM;
4182 mute_params.command.hdr.src_domain = APR_DOMAIN_APPS;
4183 mute_params.command.hdr.src_port = port_id;
4184 mute_params.command.hdr.dest_svc = APR_SVC_ADM;
4185 mute_params.command.hdr.dest_domain = APR_DOMAIN_ADSP;
4186 mute_params.command.hdr.dest_port =
4187 atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
4188 mute_params.command.hdr.token = port_idx << 16 | copp_idx;
4189 mute_params.command.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
4190 mute_params.command.payload_addr_lsw = 0;
4191 mute_params.command.payload_addr_msw = 0;
4192 mute_params.command.mem_map_handle = 0;
4193 mute_params.command.payload_size = sizeof(mute_params) -
4194 sizeof(mute_params.command);
4195 mute_params.params.module_id = AUDPROC_MODULE_ID_COMPRESSED_MUTE;
4196 mute_params.params.param_id = AUDPROC_PARAM_ID_COMPRESSED_MUTE;
4197 mute_params.params.param_size = mute_params.command.payload_size -
4198 sizeof(mute_params.params);
4199 mute_params.params.reserved = 0;
4200 mute_params.mute_on = mute_on;
4201
4202 atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
4203 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&mute_params);
4204 if (ret < 0) {
4205 pr_err("%s: device mute for port %d copp %d failed, ret %d\n",
4206 __func__, port_id, copp_idx, ret);
4207 ret = -EINVAL;
4208 goto end;
4209 }
4210
4211 /* Wait for the callback */
4212 ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
4213 atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
4214 msecs_to_jiffies(TIMEOUT_MS));
4215 if (!ret) {
4216 pr_err("%s: send device mute for port %d copp %d failed\n",
4217 __func__, port_id, copp_idx);
4218 ret = -EINVAL;
4219 goto end;
4220 } else if (atomic_read(&this_adm.copp.stat
4221 [port_idx][copp_idx]) > 0) {
4222 pr_err("%s: DSP returned error[%s]\n",
4223 __func__, adsp_err_get_err_str(
4224 atomic_read(&this_adm.copp.stat
4225 [port_idx][copp_idx])));
4226 ret = adsp_err_get_lnx_err_code(
4227 atomic_read(&this_adm.copp.stat
4228 [port_idx][copp_idx]));
4229 goto end;
4230 }
4231 ret = 0;
4232end:
4233 return ret;
4234}
4235
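/*
 * adm_send_compressed_device_latency() - set the compressed device latency
 * (AUDPROC_PARAM_ID_COMPRESSED_LATENCY) for the given COPP.
 */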
4236int adm_send_compressed_device_latency(int port_id, int copp_idx, int latency)
4237{
4238 struct adm_set_compressed_device_latency latency_params;
4239 int port_idx;
4240 int ret = 0;
4241
4242 pr_debug("%s port_id: 0x%x, copp_idx %d latency: %d\n", __func__,
4243 port_id, copp_idx, latency);
4244 port_id = afe_convert_virtual_to_portid(port_id);
4245 port_idx = adm_validate_and_get_port_index(port_id);
4246 if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) {
4247 pr_err("%s: Invalid port_id %#x copp_idx %d\n",
4248 __func__, port_id, copp_idx);
4249 ret = -EINVAL;
4250 goto end;
4251 }
4252
4253 latency_params.command.hdr.hdr_field =
4254 APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
4255 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
4256 latency_params.command.hdr.pkt_size =
4257 sizeof(struct adm_set_compressed_device_latency);
4258 latency_params.command.hdr.src_svc = APR_SVC_ADM;
4259 latency_params.command.hdr.src_domain = APR_DOMAIN_APPS;
4260 latency_params.command.hdr.src_port = port_id;
4261 latency_params.command.hdr.dest_svc = APR_SVC_ADM;
4262 latency_params.command.hdr.dest_domain = APR_DOMAIN_ADSP;
4263 latency_params.command.hdr.dest_port =
4264 atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
4265 latency_params.command.hdr.token = port_idx << 16 | copp_idx;
4266 latency_params.command.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
4267 latency_params.command.payload_addr_lsw = 0;
4268 latency_params.command.payload_addr_msw = 0;
4269 latency_params.command.mem_map_handle = 0;
4270 latency_params.command.payload_size = sizeof(latency_params) -
4271 sizeof(latency_params.command);
4272 latency_params.params.module_id = AUDPROC_MODULE_ID_COMPRESSED_LATENCY;
4273 latency_params.params.param_id = AUDPROC_PARAM_ID_COMPRESSED_LATENCY;
4274 latency_params.params.param_size = latency_params.command.payload_size -
4275 sizeof(latency_params.params);
4276 latency_params.params.reserved = 0;
4277 latency_params.latency = latency;
4278
4279 atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
4280 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&latency_params);
4281 if (ret < 0) {
4282 pr_err("%s: send device latency err %d for port %d copp %d\n",
4283 			__func__, ret, port_id, copp_idx);
4284 ret = -EINVAL;
4285 goto end;
4286 }
4287
4288 /* Wait for the callback */
4289 ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
4290 atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
4291 msecs_to_jiffies(TIMEOUT_MS));
4292 if (!ret) {
4293 pr_err("%s: send device latency for port %d failed\n", __func__,
4294 port_id);
4295 ret = -EINVAL;
4296 goto end;
4297 } else if (atomic_read(&this_adm.copp.stat
4298 [port_idx][copp_idx]) > 0) {
4299 pr_err("%s: DSP returned error[%s]\n",
4300 __func__, adsp_err_get_err_str(
4301 atomic_read(&this_adm.copp.stat
4302 [port_idx][copp_idx])));
4303 ret = adsp_err_get_lnx_err_code(
4304 atomic_read(&this_adm.copp.stat
4305 [port_idx][copp_idx]));
4306 goto end;
4307 }
4308 ret = 0;
4309end:
4310 return ret;
4311}
4312
4313/**
4314 * adm_swap_speaker_channels
4315 *
4316  * Receives port_id, copp_idx, sample rate and spk_swap, and sends an
4317  * MFC command to swap the speaker channels.
4318  * Returns zero on success and a negative error code on failure.
4319 *
4320 * port_id - Passed value, port_id for which channels swap is wanted
4321 * copp_idx - Passed value, copp_idx for which channels swap is wanted
4322 * sample_rate - Passed value, sample rate used by app type config
4323  * spk_swap - Passed value, whether the left/right channels should be swapped
4324 */
4325int adm_swap_speaker_channels(int port_id, int copp_idx,
4326 int sample_rate, bool spk_swap)
4327{
4328 struct audproc_mfc_output_media_fmt mfc_cfg;
4329 uint16_t num_channels;
4330 int port_idx;
4331 int ret = 0;
4332
4333 pr_debug("%s: Enter, port_id %d, copp_idx %d\n",
4334 __func__, port_id, copp_idx);
4335 port_id = q6audio_convert_virtual_to_portid(port_id);
4336 port_idx = adm_validate_and_get_port_index(port_id);
4337 if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) {
4338 pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
4339 ret = -EINVAL;
4340 goto done;
4341 }
4342
4343 if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
4344 pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
4345 ret = -EINVAL;
4346 goto done;
4347 }
4348
4349 num_channels = atomic_read(
4350 &this_adm.copp.channels[port_idx][copp_idx]);
4351 if (num_channels != 2) {
4352 pr_debug("%s: Invalid number of channels: %d\n",
4353 __func__, num_channels);
4354 ret = -EINVAL;
4355 goto done;
4356 }
4357
4358 memset(&mfc_cfg, 0, sizeof(mfc_cfg));
4359 mfc_cfg.params.hdr.hdr_field =
4360 APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
4361 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
4362 mfc_cfg.params.hdr.pkt_size =
4363 sizeof(mfc_cfg);
4364 mfc_cfg.params.hdr.src_svc = APR_SVC_ADM;
4365 mfc_cfg.params.hdr.src_domain = APR_DOMAIN_APPS;
4366 mfc_cfg.params.hdr.src_port = port_id;
4367 mfc_cfg.params.hdr.dest_svc = APR_SVC_ADM;
4368 mfc_cfg.params.hdr.dest_domain = APR_DOMAIN_ADSP;
4369 mfc_cfg.params.hdr.dest_port =
4370 atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
4371 mfc_cfg.params.hdr.token = port_idx << 16 | copp_idx;
4372 mfc_cfg.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
4373 mfc_cfg.params.payload_addr_lsw = 0;
4374 mfc_cfg.params.payload_addr_msw = 0;
4375 mfc_cfg.params.mem_map_handle = 0;
4376 mfc_cfg.params.payload_size = sizeof(mfc_cfg) -
4377 sizeof(mfc_cfg.params);
4378 mfc_cfg.data.module_id = AUDPROC_MODULE_ID_MFC;
4379 mfc_cfg.data.param_id = AUDPROC_PARAM_ID_MFC_OUTPUT_MEDIA_FORMAT;
4380 mfc_cfg.data.param_size = mfc_cfg.params.payload_size -
4381 sizeof(mfc_cfg.data);
4382 mfc_cfg.data.reserved = 0;
4383 mfc_cfg.sampling_rate = sample_rate;
4384 mfc_cfg.bits_per_sample =
4385 atomic_read(&this_adm.copp.bit_width[port_idx][copp_idx]);
4386 mfc_cfg.num_channels = num_channels;
4387
4388 /* Currently applying speaker swap for only 2 channel use case */
4389 if (spk_swap) {
4390 mfc_cfg.channel_type[0] =
4391 (uint16_t) PCM_CHANNEL_FR;
4392 mfc_cfg.channel_type[1] =
4393 (uint16_t) PCM_CHANNEL_FL;
4394 } else {
4395 mfc_cfg.channel_type[0] =
4396 (uint16_t) PCM_CHANNEL_FL;
4397 mfc_cfg.channel_type[1] =
4398 (uint16_t) PCM_CHANNEL_FR;
4399 }
4400
4401 atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
4402 pr_debug("%s: mfc config: port_idx %d copp_idx %d copp SR %d copp BW %d copp chan %d\n",
4403 __func__, port_idx, copp_idx, mfc_cfg.sampling_rate,
4404 mfc_cfg.bits_per_sample, mfc_cfg.num_channels);
4405
4406 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&mfc_cfg);
4407 if (ret < 0) {
4408 		pr_err("%s: send mfc config failed for port_id 0x%x, ret %d\n",
4409 __func__, port_id, ret);
4410 goto done;
4411 }
4412 /* Wait for the callback with copp id */
4413 ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
4414 atomic_read(&this_adm.copp.stat
4415 [port_idx][copp_idx]) >= 0,
4416 msecs_to_jiffies(TIMEOUT_MS));
4417 if (!ret) {
4418 		pr_err("%s: mfc_cfg Set params timed out for port_id 0x%x\n",
4419 __func__, port_id);
4420 ret = -ETIMEDOUT;
4421 goto done;
4422 }
4423
4424 if (atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) > 0) {
4425 pr_err("%s: DSP returned error[%s]\n",
4426 __func__, adsp_err_get_err_str(
4427 atomic_read(&this_adm.copp.stat
4428 [port_idx][copp_idx])));
4429 ret = adsp_err_get_lnx_err_code(
4430 atomic_read(&this_adm.copp.stat
4431 [port_idx][copp_idx]));
4432 goto done;
4433 }
4434
4435 	pr_debug("%s: mfc_cfg Set params returned success\n", __func__);
4436 ret = 0;
4437
4438done:
4439 return ret;
4440}
4441EXPORT_SYMBOL(adm_swap_speaker_channels);
4442
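/*
 * adm_set_sound_focus() - program Fluence sound-focus sectors (start angles,
 * enables and gain step) on the voice Tx module. The payload is sent
 * in-band; the APR command result is checked through
 * sourceTrackingData.apr_cmd_status.
 */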
4443int adm_set_sound_focus(int port_id, int copp_idx,
4444 struct sound_focus_param soundFocusData)
4445{
4446 struct adm_set_fluence_soundfocus_param soundfocus_params;
4447 int sz = 0;
4448 int ret = 0;
4449 int port_idx;
4450 int i;
4451
4452 pr_debug("%s: Enter, port_id %d, copp_idx %d\n",
4453 __func__, port_id, copp_idx);
4454
4455 port_id = afe_convert_virtual_to_portid(port_id);
4456 port_idx = adm_validate_and_get_port_index(port_id);
4457 if (port_idx < 0) {
4458 pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
4459
4460 ret = -EINVAL;
4461 goto done;
4462 }
4463
4464 if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
4465 pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
4466
4467 ret = -EINVAL;
4468 goto done;
4469 }
4470
4471 sz = sizeof(struct adm_set_fluence_soundfocus_param);
4472 soundfocus_params.params.hdr.hdr_field =
4473 APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE),
4474 APR_PKT_VER);
4475 soundfocus_params.params.hdr.pkt_size = sz;
4476 soundfocus_params.params.hdr.src_svc = APR_SVC_ADM;
4477 soundfocus_params.params.hdr.src_domain = APR_DOMAIN_APPS;
4478 soundfocus_params.params.hdr.src_port = port_id;
4479 soundfocus_params.params.hdr.dest_svc = APR_SVC_ADM;
4480 soundfocus_params.params.hdr.dest_domain = APR_DOMAIN_ADSP;
4481 soundfocus_params.params.hdr.dest_port =
4482 atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
4483 soundfocus_params.params.hdr.token = port_idx << 16 |
4484 ADM_CLIENT_ID_SOURCE_TRACKING << 8 | copp_idx;
4485 soundfocus_params.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
4486 soundfocus_params.params.payload_addr_lsw = 0;
4487 soundfocus_params.params.payload_addr_msw = 0;
4488 soundfocus_params.params.mem_map_handle = 0;
4489 soundfocus_params.params.payload_size = sizeof(soundfocus_params) -
4490 sizeof(soundfocus_params.params);
4491 soundfocus_params.data.module_id = VOICEPROC_MODULE_ID_GENERIC_TX;
4492 soundfocus_params.data.param_id = VOICEPROC_PARAM_ID_FLUENCE_SOUNDFOCUS;
4493 soundfocus_params.data.param_size =
4494 soundfocus_params.params.payload_size -
4495 sizeof(soundfocus_params.data);
4496 soundfocus_params.data.reserved = 0;
4497
4498 memset(&(soundfocus_params.soundfocus_data), 0xFF,
4499 sizeof(struct adm_param_fluence_soundfocus_t));
4500 for (i = 0; i < MAX_SECTORS; i++) {
4501 soundfocus_params.soundfocus_data.start_angles[i] =
4502 soundFocusData.start_angle[i];
4503 soundfocus_params.soundfocus_data.enables[i] =
4504 soundFocusData.enable[i];
4505 pr_debug("%s: start_angle[%d] = %d\n",
4506 __func__, i, soundFocusData.start_angle[i]);
4507 pr_debug("%s: enable[%d] = %d\n",
4508 __func__, i, soundFocusData.enable[i]);
4509 }
4510 soundfocus_params.soundfocus_data.gain_step =
4511 soundFocusData.gain_step;
4512 pr_debug("%s: gain_step = %d\n", __func__, soundFocusData.gain_step);
4513
4514 soundfocus_params.soundfocus_data.reserved = 0;
4515
4516 atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
4517 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&soundfocus_params);
4518 if (ret < 0) {
4519 pr_err("%s: Set params failed\n", __func__);
4520
4521 ret = -EINVAL;
4522 goto done;
4523 }
4524 /* Wait for the callback */
4525 ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
4526 atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
4527 msecs_to_jiffies(TIMEOUT_MS));
4528 if (!ret) {
4529 pr_err("%s: Set params timed out\n", __func__);
4530
4531 ret = -EINVAL;
4532 goto done;
4533 }
4534
4535 if (this_adm.sourceTrackingData.apr_cmd_status != 0) {
4536 pr_err("%s - set params returned error [%s]\n",
4537 __func__, adsp_err_get_err_str(
4538 this_adm.sourceTrackingData.apr_cmd_status));
4539
4540 ret = adsp_err_get_lnx_err_code(
4541 this_adm.sourceTrackingData.apr_cmd_status);
4542 goto done;
4543 }
4544
4545 ret = 0;
4546
4547done:
4548 pr_debug("%s: Exit, ret=%d\n", __func__, ret);
4549
4550 return ret;
4551}
4552
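/*
 * adm_get_sound_focus() - read back the current Fluence sound-focus
 * configuration through adm_get_params_v2() and unpack it into the caller's
 * sound_focus_param structure.
 */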
4553int adm_get_sound_focus(int port_id, int copp_idx,
4554 struct sound_focus_param *soundFocusData)
4555{
4556 int ret = 0, i;
4557 char *params_value;
4558 uint32_t param_payload_len = sizeof(struct adm_param_data_v5) +
4559 sizeof(struct adm_param_fluence_soundfocus_t);
4560 struct adm_param_fluence_soundfocus_t *soundfocus_params;
4561
4562 pr_debug("%s: Enter, port_id %d, copp_idx %d\n",
4563 __func__, port_id, copp_idx);
4564
4565 params_value = kzalloc(param_payload_len, GFP_KERNEL);
4566 if (!params_value) {
4567 ret = -ENOMEM;
4568 goto done;
4569 }
4570 ret = adm_get_params_v2(port_id, copp_idx,
4571 VOICEPROC_MODULE_ID_GENERIC_TX,
4572 VOICEPROC_PARAM_ID_FLUENCE_SOUNDFOCUS,
4573 param_payload_len,
4574 params_value,
4575 ADM_CLIENT_ID_SOURCE_TRACKING);
4576 if (ret) {
4577 pr_err("%s: get parameters failed ret:%d\n", __func__, ret);
4578
4579 kfree(params_value);
4580 ret = -EINVAL;
4581 goto done;
4582 }
4583
4584 if (this_adm.sourceTrackingData.apr_cmd_status != 0) {
4585 pr_err("%s - get params returned error [%s]\n",
4586 __func__, adsp_err_get_err_str(
4587 this_adm.sourceTrackingData.apr_cmd_status));
4588
4589 kfree(params_value);
4590 ret = adsp_err_get_lnx_err_code(
4591 this_adm.sourceTrackingData.apr_cmd_status);
4592 goto done;
4593 }
4594
4595 soundfocus_params = (struct adm_param_fluence_soundfocus_t *)
4596 params_value;
4597 for (i = 0; i < MAX_SECTORS; i++) {
4598 soundFocusData->start_angle[i] =
4599 soundfocus_params->start_angles[i];
4600 soundFocusData->enable[i] = soundfocus_params->enables[i];
4601 pr_debug("%s: start_angle[%d] = %d\n",
4602 __func__, i, soundFocusData->start_angle[i]);
4603 pr_debug("%s: enable[%d] = %d\n",
4604 __func__, i, soundFocusData->enable[i]);
4605 }
4606 soundFocusData->gain_step = soundfocus_params->gain_step;
4607 pr_debug("%s: gain_step = %d\n", __func__, soundFocusData->gain_step);
4608
4609 kfree(params_value);
4610
4611done:
4612 pr_debug("%s: Exit, ret = %d\n", __func__, ret);
4613
4614 return ret;
4615}
4616
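/*
 * adm_source_tracking_alloc_map_memory() - allocate the ION buffer used for
 * out-of-band source-tracking reads and map it with the ADSP. On mapping
 * failure the buffer is freed and the cached state is reset.
 */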
4617static int adm_source_tracking_alloc_map_memory(void)
4618{
4619 int ret;
4620
4621 pr_debug("%s: Enter\n", __func__);
4622
4623 ret = msm_audio_ion_alloc("SOURCE_TRACKING",
4624 &this_adm.sourceTrackingData.ion_client,
4625 &this_adm.sourceTrackingData.ion_handle,
4626 AUD_PROC_BLOCK_SIZE,
4627 &this_adm.sourceTrackingData.memmap.paddr,
4628 &this_adm.sourceTrackingData.memmap.size,
4629 &this_adm.sourceTrackingData.memmap.kvaddr);
4630 if (ret) {
4631 pr_err("%s: failed to allocate memory\n", __func__);
4632
4633 ret = -EINVAL;
4634 goto done;
4635 }
4636
4637 atomic_set(&this_adm.mem_map_index, ADM_MEM_MAP_INDEX_SOURCE_TRACKING);
4638 ret = adm_memory_map_regions(&this_adm.sourceTrackingData.memmap.paddr,
4639 0,
4640 (uint32_t *)&this_adm.sourceTrackingData.memmap.size,
4641 1);
4642 if (ret < 0) {
4643 pr_err("%s: failed to map memory, paddr = 0x%pK, size = %d\n",
4644 __func__,
4645 (void *)this_adm.sourceTrackingData.memmap.paddr,
4646 (uint32_t)this_adm.sourceTrackingData.memmap.size);
4647
4648 msm_audio_ion_free(this_adm.sourceTrackingData.ion_client,
4649 this_adm.sourceTrackingData.ion_handle);
4650 this_adm.sourceTrackingData.ion_client = NULL;
4651 this_adm.sourceTrackingData.ion_handle = NULL;
4652 this_adm.sourceTrackingData.memmap.size = 0;
4653 this_adm.sourceTrackingData.memmap.kvaddr = NULL;
4654 this_adm.sourceTrackingData.memmap.paddr = 0;
4655 this_adm.sourceTrackingData.apr_cmd_status = -1;
4656 atomic_set(&this_adm.mem_map_handles
4657 [ADM_MEM_MAP_INDEX_SOURCE_TRACKING], 0);
4658
4659 ret = -EINVAL;
4660 goto done;
4661 }
4662 ret = 0;
4663 pr_debug("%s: paddr = 0x%pK, size = %d, mem_map_handle = 0x%x\n",
4664 __func__, (void *)this_adm.sourceTrackingData.memmap.paddr,
4665 (uint32_t)this_adm.sourceTrackingData.memmap.size,
4666 atomic_read(&this_adm.mem_map_handles
4667 [ADM_MEM_MAP_INDEX_SOURCE_TRACKING]));
4668
4669done:
4670 pr_debug("%s: Exit, ret = %d\n", __func__, ret);
4671
4672 return ret;
4673}
4674
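/*
 * adm_get_source_tracking() - fetch Fluence source-tracking data (VAD,
 * speech/noise direction of arrival, polar activity) from the DSP using the
 * out-of-band buffer set up by adm_source_tracking_alloc_map_memory().
 */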
4675int adm_get_source_tracking(int port_id, int copp_idx,
4676 struct source_tracking_param *sourceTrackingData)
4677{
4678 struct adm_cmd_get_pp_params_v5 admp;
4679 int p_idx, ret = 0, i;
4680 struct adm_param_fluence_sourcetracking_t *source_tracking_params;
4681
4682 pr_debug("%s: Enter, port_id %d, copp_idx %d\n",
4683 __func__, port_id, copp_idx);
4684
4685 if (!this_adm.sourceTrackingData.memmap.paddr) {
4686 /* Allocate and map shared memory for out of band usage */
4687 ret = adm_source_tracking_alloc_map_memory();
4688 if (ret != 0) {
4689 ret = -EINVAL;
4690 goto done;
4691 }
4692 }
4693
4694 port_id = afe_convert_virtual_to_portid(port_id);
4695 p_idx = adm_validate_and_get_port_index(port_id);
4696 if (p_idx < 0) {
4697 pr_err("%s - invalid port index %i, port id %i, copp idx %i\n",
4698 __func__, p_idx, port_id, copp_idx);
4699
4700 ret = -EINVAL;
4701 goto done;
4702 }
4703
4704 admp.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
4705 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
4706 admp.hdr.pkt_size = sizeof(admp);
4707 admp.hdr.src_svc = APR_SVC_ADM;
4708 admp.hdr.src_domain = APR_DOMAIN_APPS;
4709 admp.hdr.src_port = port_id;
4710 admp.hdr.dest_svc = APR_SVC_ADM;
4711 admp.hdr.dest_domain = APR_DOMAIN_ADSP;
4712 admp.hdr.dest_port = atomic_read(&this_adm.copp.id[p_idx][copp_idx]);
4713 admp.hdr.token = p_idx << 16 | ADM_CLIENT_ID_SOURCE_TRACKING << 8 |
4714 copp_idx;
4715 admp.hdr.opcode = ADM_CMD_GET_PP_PARAMS_V5;
4716 admp.data_payload_addr_lsw =
4717 lower_32_bits(this_adm.sourceTrackingData.memmap.paddr);
4718 admp.data_payload_addr_msw =
4719 msm_audio_populate_upper_32_bits(
4720 this_adm.sourceTrackingData.memmap.paddr);
4721 admp.mem_map_handle = atomic_read(&this_adm.mem_map_handles[
4722 ADM_MEM_MAP_INDEX_SOURCE_TRACKING]);
4723 admp.module_id = VOICEPROC_MODULE_ID_GENERIC_TX;
4724 admp.param_id = VOICEPROC_PARAM_ID_FLUENCE_SOURCETRACKING;
4725 admp.param_max_size = sizeof(struct adm_param_fluence_sourcetracking_t)
4726 + sizeof(struct adm_param_data_v5);
4727 admp.reserved = 0;
4728
4729 atomic_set(&this_adm.copp.stat[p_idx][copp_idx], -1);
4730
4731 ret = apr_send_pkt(this_adm.apr, (uint32_t *)&admp);
4732 if (ret < 0) {
4733 pr_err("%s - failed to get Source Tracking Params\n",
4734 __func__);
4735
4736 ret = -EINVAL;
4737 goto done;
4738 }
4739 ret = wait_event_timeout(this_adm.copp.wait[p_idx][copp_idx],
4740 atomic_read(&this_adm.copp.stat[p_idx][copp_idx]) >= 0,
4741 msecs_to_jiffies(TIMEOUT_MS));
4742 if (!ret) {
4743 pr_err("%s - get params timed out\n", __func__);
4744
4745 ret = -EINVAL;
4746 goto done;
4747 } else if (atomic_read(&this_adm.copp.stat
4748 [p_idx][copp_idx]) > 0) {
4749 pr_err("%s: DSP returned error[%s]\n",
4750 __func__, adsp_err_get_err_str(
4751 atomic_read(&this_adm.copp.stat
4752 [p_idx][copp_idx])));
4753 ret = adsp_err_get_lnx_err_code(
4754 atomic_read(&this_adm.copp.stat
4755 [p_idx][copp_idx]));
4756 goto done;
4757 }
4758
4759 if (this_adm.sourceTrackingData.apr_cmd_status != 0) {
4760 pr_err("%s - get params returned error [%s]\n",
4761 __func__, adsp_err_get_err_str(
4762 this_adm.sourceTrackingData.apr_cmd_status));
4763
4764 ret = adsp_err_get_lnx_err_code(
4765 this_adm.sourceTrackingData.apr_cmd_status);
4766 goto done;
4767 }
4768
4769 source_tracking_params = (struct adm_param_fluence_sourcetracking_t *)
4770 (this_adm.sourceTrackingData.memmap.kvaddr +
4771 sizeof(struct adm_param_data_v5));
4772 for (i = 0; i < MAX_SECTORS; i++) {
4773 sourceTrackingData->vad[i] = source_tracking_params->vad[i];
4774 pr_debug("%s: vad[%d] = %d\n",
4775 __func__, i, sourceTrackingData->vad[i]);
4776 }
4777 sourceTrackingData->doa_speech = source_tracking_params->doa_speech;
4778 pr_debug("%s: doa_speech = %d\n",
4779 __func__, sourceTrackingData->doa_speech);
4780
4781 for (i = 0; i < MAX_NOISE_SOURCE_INDICATORS; i++) {
4782 sourceTrackingData->doa_noise[i] =
4783 source_tracking_params->doa_noise[i];
4784 pr_debug("%s: doa_noise[%d] = %d\n",
4785 __func__, i, sourceTrackingData->doa_noise[i]);
4786 }
4787 for (i = 0; i < MAX_POLAR_ACTIVITY_INDICATORS; i++) {
4788 sourceTrackingData->polar_activity[i] =
4789 source_tracking_params->polar_activity[i];
4790 pr_debug("%s: polar_activity[%d] = %d\n",
4791 __func__, i, sourceTrackingData->polar_activity[i]);
4792 }
4793
4794 ret = 0;
4795
4796done:
4797 pr_debug("%s: Exit, ret=%d\n", __func__, ret);
4798
4799 return ret;
4800}
4801
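/*
 * adm_init() - module init: reset the per-port/per-COPP bookkeeping,
 * initialise the wait queues, register the calibration types and clear the
 * source-tracking shared-memory state.
 */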
4802static int __init adm_init(void)
4803{
4804 int i = 0, j;
4805
4806 this_adm.apr = NULL;
4807 this_adm.ec_ref_rx = -1;
4808 this_adm.num_ec_ref_rx_chans = 0;
4809 this_adm.ec_ref_rx_bit_width = 0;
4810 this_adm.ec_ref_rx_sampling_rate = 0;
4811 atomic_set(&this_adm.matrix_map_stat, 0);
4812 init_waitqueue_head(&this_adm.matrix_map_wait);
4813 atomic_set(&this_adm.adm_stat, 0);
4814 init_waitqueue_head(&this_adm.adm_wait);
4815
4816 for (i = 0; i < AFE_MAX_PORTS; i++) {
4817 for (j = 0; j < MAX_COPPS_PER_PORT; j++) {
4818 atomic_set(&this_adm.copp.id[i][j], RESET_COPP_ID);
4819 atomic_set(&this_adm.copp.cnt[i][j], 0);
4820 atomic_set(&this_adm.copp.topology[i][j], 0);
4821 atomic_set(&this_adm.copp.mode[i][j], 0);
4822 atomic_set(&this_adm.copp.stat[i][j], 0);
4823 atomic_set(&this_adm.copp.rate[i][j], 0);
4824 atomic_set(&this_adm.copp.channels[i][j], 0);
4825 atomic_set(&this_adm.copp.bit_width[i][j], 0);
4826 atomic_set(&this_adm.copp.app_type[i][j], 0);
4827 atomic_set(&this_adm.copp.acdb_id[i][j], 0);
4828 init_waitqueue_head(&this_adm.copp.wait[i][j]);
4829 atomic_set(&this_adm.copp.adm_delay_stat[i][j], 0);
4830 init_waitqueue_head(
4831 &this_adm.copp.adm_delay_wait[i][j]);
4833 this_adm.copp.adm_delay[i][j] = 0;
4834 this_adm.copp.adm_status[i][j] =
4835 ADM_STATUS_CALIBRATION_REQUIRED;
4836 }
4837 }
4838
4839 if (adm_init_cal_data())
4840 pr_err("%s: could not init cal data!\n", __func__);
4841
4842 this_adm.sourceTrackingData.ion_client = NULL;
4843 this_adm.sourceTrackingData.ion_handle = NULL;
4844 this_adm.sourceTrackingData.memmap.size = 0;
4845 this_adm.sourceTrackingData.memmap.kvaddr = NULL;
4846 this_adm.sourceTrackingData.memmap.paddr = 0;
4847 this_adm.sourceTrackingData.apr_cmd_status = -1;
4848 atomic_set(&this_adm.mem_map_handles[ADM_MEM_MAP_INDEX_SOURCE_TRACKING],
4849 0);
4850
4851 return 0;
4852}
4853
4854static void __exit adm_exit(void)
4855{
4856 adm_delete_cal_data();
4857}
4858
4859device_initcall(adm_init);
4860module_exit(adm_exit);