// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
 */

#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/elf.h>
#include <linux/wait.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/pm_qos.h>
#include <linux/dma-mapping.h>
#include <sound/soc.h>
#include <sound/info.h>
#include <sound/lsm_params.h>
#include <soc/qcom/pm.h>
#include <dsp/audio_cal_utils.h>
#include <asoc/core.h>
#include "cpe_core.h"
#include "cpe_err.h"
#include "cpe_cmi.h"
#include "wcd_cpe_core.h"
#include "wcd_cpe_services.h"
#include "wcd_cmi_api.h"
#include <asoc/wcd9xxx-irq.h>

#define CMI_CMD_TIMEOUT (10 * HZ)
#define WCD_CPE_LSM_MAX_SESSIONS 2
#define WCD_CPE_AFE_MAX_PORTS 4
#define AFE_SVC_EXPLICIT_PORT_START 1
#define WCD_CPE_EC_PP_BUF_SIZE 480 /* 5 msec buffer */

#define ELF_FLAG_EXECUTE (1 << 0)
#define ELF_FLAG_WRITE (1 << 1)
#define ELF_FLAG_READ (1 << 2)

#define ELF_FLAG_RW (ELF_FLAG_READ | ELF_FLAG_WRITE)
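/*
 * Note: these values mirror the standard ELF program header flag bits
 * (PF_X, PF_W and PF_R), which is why they can be tested directly
 * against phdr->p_flags when parsing the image below.
 */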

#define WCD_CPE_GRAB_LOCK(lock, name) \
{ \
	pr_debug("%s: %s lock acquire\n", \
		 __func__, name); \
	mutex_lock(lock); \
}

#define WCD_CPE_REL_LOCK(lock, name) \
{ \
	pr_debug("%s: %s lock release\n", \
		 __func__, name); \
	mutex_unlock(lock); \
}

#define WCD_CPE_STATE_MAX_LEN 32
#define CPE_OFFLINE_WAIT_TIMEOUT (2 * HZ)
#define CPE_READY_WAIT_TIMEOUT (3 * HZ)
#define WCD_CPE_SYSFS_DIR_MAX_LENGTH 32

#define CPE_ERR_IRQ_CB(core) \
	(core->cpe_cdc_cb->cpe_err_irq_control)

/*
 * AFE output buffer size is always
 * (sample_rate * bytes per sample) / (2 * 1000),
 * i.e. half a millisecond worth of samples.
 */
#define AFE_OUT_BUF_SIZE(bit_width, sample_rate) \
	(((sample_rate) * (bit_width / BITS_PER_BYTE)) / (2 * 1000))
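/*
 * Worked example (illustrative): 16-bit samples at 16000 Hz give
 * (16000 * 2) / 2000 = 16 bytes per AFE output buffer.
 */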

enum afe_port_state {
	AFE_PORT_STATE_DEINIT = 0,
	AFE_PORT_STATE_INIT,
	AFE_PORT_STATE_CONFIG,
	AFE_PORT_STATE_STARTED,
	AFE_PORT_STATE_SUSPENDED,
};

struct wcd_cmi_afe_port_data {
	u8 port_id;
	struct mutex afe_lock;
	struct completion afe_cmd_complete;
	enum afe_port_state port_state;
	u8 cmd_result;
	u32 mem_handle;
};

struct cpe_lsm_ids {
	u32 module_id;
	u32 param_id;
};

static struct wcd_cpe_core *core_d;
static struct cpe_lsm_session
	*lsm_sessions[WCD_CPE_LSM_MAX_SESSIONS + 1];
struct wcd_cpe_core * (*wcd_get_cpe_core)(struct snd_soc_component *component);
static struct wcd_cmi_afe_port_data afe_ports[WCD_CPE_AFE_MAX_PORTS + 1];
static void wcd_cpe_svc_event_cb(const struct cpe_svc_notification *param);
static int wcd_cpe_setup_irqs(struct wcd_cpe_core *core);
static void wcd_cpe_cleanup_irqs(struct wcd_cpe_core *core);
static ssize_t cpe_ftm_test_trigger(struct file *file,
				    const char __user *user_buf,
				    size_t count, loff_t *ppos);
static u32 ramdump_enable;
static u32 cpe_ftm_test_status;
static const struct file_operations cpe_ftm_test_trigger_fops = {
	.open = simple_open,
	.write = cpe_ftm_test_trigger,
};

static int wcd_cpe_afe_svc_cmd_mode(void *core_handle,
				    u8 mode);
struct wcd_cpe_attribute {
	struct attribute attr;
	ssize_t (*show)(struct wcd_cpe_core *core, char *buf);
	ssize_t (*store)(struct wcd_cpe_core *core, const char *buf,
			 ssize_t count);
};

#define WCD_CPE_ATTR(_name, _mode, _show, _store) \
static struct wcd_cpe_attribute cpe_attr_##_name = { \
	.attr = {.name = __stringify(_name), .mode = _mode}, \
	.show = _show, \
	.store = _store, \
}

#define to_wcd_cpe_attr(a) \
	container_of((a), struct wcd_cpe_attribute, attr)

#define kobj_to_cpe_core(kobj) \
	container_of((kobj), struct wcd_cpe_core, cpe_kobj)

/* wcd_cpe_lsm_session_active: check if any session is active
 * return true if any session is active.
 */
static bool wcd_cpe_lsm_session_active(void)
{
	int index = 1;
	bool lsm_active = false;

	/* session starts from index 1 */
	for (; index <= WCD_CPE_LSM_MAX_SESSIONS; index++) {
		if (lsm_sessions[index] != NULL) {
			lsm_active = true;
			break;
		} else {
			lsm_active = false;
		}
	}
	return lsm_active;
}

static int wcd_cpe_get_sfr_dump(struct wcd_cpe_core *core)
{
	struct cpe_svc_mem_segment dump_seg;
	int rc;
	u8 *sfr_dump;

	sfr_dump = kzalloc(core->sfr_buf_size, GFP_KERNEL);
	if (!sfr_dump)
		goto done;

	dump_seg.type = CPE_SVC_DATA_MEM;
	dump_seg.cpe_addr = core->sfr_buf_addr;
	dump_seg.size = core->sfr_buf_size;
	dump_seg.data = sfr_dump;
	dev_dbg(core->dev,
		"%s: reading SFR from CPE, size = %zu\n",
		__func__, core->sfr_buf_size);

	rc = cpe_svc_ramdump(core->cpe_handle, &dump_seg);
	if (rc < 0) {
		dev_err(core->dev,
			"%s: Failed to read cpe sfr_dump, err = %d\n",
			__func__, rc);
		goto free_sfr_dump;
	}

	dev_info(core->dev,
		 "%s: cpe_sfr = %s\n", __func__, sfr_dump);

free_sfr_dump:
	kfree(sfr_dump);
done:
	/* Even if SFR dump failed, do not return error */
	return 0;
}

static int wcd_cpe_collect_ramdump(struct wcd_cpe_core *core)
{
	struct cpe_svc_mem_segment dump_seg;
	int rc;

	if (!core->cpe_ramdump_dev || !core->cpe_dump_v_addr ||
	    core->hw_info.dram_size == 0) {
		dev_err(core->dev,
			"%s: Ramdump devices not set up, size = %zu\n",
			__func__, core->hw_info.dram_size);
		return -EINVAL;
	}

	dump_seg.type = CPE_SVC_DATA_MEM;
	dump_seg.cpe_addr = core->hw_info.dram_offset;
	dump_seg.size = core->hw_info.dram_size;
	dump_seg.data = core->cpe_dump_v_addr;

	dev_dbg(core->dev,
		"%s: Reading ramdump from CPE\n",
		__func__);

	rc = cpe_svc_ramdump(core->cpe_handle, &dump_seg);
	if (rc < 0) {
		dev_err(core->dev,
			"%s: Failed to read CPE ramdump, err = %d\n",
			__func__, rc);
		return rc;
	}

	dev_dbg(core->dev,
		"%s: completed reading ramdump from CPE\n",
		__func__);

	core->cpe_ramdump_seg.address = (unsigned long) core->cpe_dump_addr;
	core->cpe_ramdump_seg.size = core->hw_info.dram_size;
	core->cpe_ramdump_seg.v_address = core->cpe_dump_v_addr;

	rc = do_ramdump(core->cpe_ramdump_dev,
			&core->cpe_ramdump_seg, 1);
	if (rc)
		dev_err(core->dev,
			"%s: fail to dump cpe ram to device, err = %d\n",
			__func__, rc);
	return rc;
}

/* wcd_cpe_is_valid_elf_hdr: check if the ELF header is valid
 * @core: handle to wcd_cpe_core
 * @fw_size: size of firmware from request_firmware
 * @ehdr: the elf header to be checked
 * return true if all checks pass, false if any elf check fails
 */
static bool wcd_cpe_is_valid_elf_hdr(struct wcd_cpe_core *core, size_t fw_size,
				     const struct elf32_hdr *ehdr)
{
	if (fw_size < sizeof(*ehdr)) {
		dev_err(core->dev, "%s: Firmware too small\n", __func__);
		goto elf_check_fail;
	}

	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) {
		dev_err(core->dev, "%s: Not an ELF file\n", __func__);
		goto elf_check_fail;
	}

	if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN) {
		dev_err(core->dev, "%s: Not an executable image\n", __func__);
		goto elf_check_fail;
	}

	if (ehdr->e_phnum == 0) {
		dev_err(core->dev, "%s: no segments to load\n", __func__);
		goto elf_check_fail;
	}

	if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
	    sizeof(struct elf32_hdr) > fw_size) {
		dev_err(core->dev, "%s: MDT file too small\n", __func__);
		goto elf_check_fail;
	}

	return true;

elf_check_fail:
	return false;
}
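
/*
 * Note: the final check above guarantees that all e_phnum program
 * headers fit within the downloaded .mdt file, so the parser in
 * wcd_cpe_load_fw() can walk the program header table safely.
 */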

/*
 * wcd_cpe_load_each_segment: download segment to CPE
 * @core: handle to struct wcd_cpe_core
 * @file_idx: index of split firmware image file name
 * @phdr: program header from metadata
 */
static int wcd_cpe_load_each_segment(struct wcd_cpe_core *core,
				     int file_idx,
				     const struct elf32_phdr *phdr)
{
	const struct firmware *split_fw;
	char split_fname[32];
	int ret = 0;
	struct cpe_svc_mem_segment *segment;

	if (!core || !phdr) {
		pr_err("%s: Invalid params\n", __func__);
		return -EINVAL;
	}

	/* file size can be 0 for bss segments */
	if (phdr->p_filesz == 0 || phdr->p_memsz == 0)
		return 0;

	segment = kzalloc(sizeof(struct cpe_svc_mem_segment), GFP_KERNEL);
	if (!segment)
		return -ENOMEM;

	snprintf(split_fname, sizeof(split_fname), "%s.b%02d",
		 core->fname, file_idx);
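	/*
	 * Illustrative example: with a (hypothetical) core->fname of "cpe"
	 * and file_idx 2, this requests the split image "cpe.b02".
	 */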

	ret = request_firmware(&split_fw, split_fname, core->dev);
	if (ret) {
		dev_err(core->dev, "firmware %s not found\n",
			split_fname);
		ret = -EIO;
		goto fw_req_fail;
	}

	if (phdr->p_flags & ELF_FLAG_EXECUTE)
		segment->type = CPE_SVC_INSTRUCTION_MEM;
	else if (phdr->p_flags & ELF_FLAG_RW)
		segment->type = CPE_SVC_DATA_MEM;
	else {
		dev_err(core->dev, "%s invalid flags 0x%x\n",
			__func__, phdr->p_flags);
		ret = -EINVAL;
		goto done;
	}

	if (phdr->p_filesz != split_fw->size) {
		dev_err(core->dev,
			"%s: %s size mismatch, phdr_size: 0x%x fw_size: 0x%zx",
			__func__, split_fname, phdr->p_filesz, split_fw->size);
		ret = -EINVAL;
		goto done;
	}

	segment->cpe_addr = phdr->p_paddr;
	segment->size = phdr->p_filesz;
	segment->data = (u8 *) split_fw->data;

	dev_dbg(core->dev,
		"%s: cpe segment type %s read from firmware\n", __func__,
		(segment->type == CPE_SVC_INSTRUCTION_MEM) ?
			"INSTRUCTION" : "DATA");

	ret = cpe_svc_download_segment(core->cpe_handle, segment);
	if (ret) {
		dev_err(core->dev,
			"%s: Failed to download %s, error = %d\n",
			__func__, split_fname, ret);
		goto done;
	}

done:
	release_firmware(split_fw);

fw_req_fail:
	kfree(segment);
	return ret;
}

/*
 * wcd_cpe_enable_cpe_clks: enable the clocks for CPE
 * @core: handle to wcd_cpe_core
 * @enable: flag indicating whether to enable/disable cpe clocks
 */
static int wcd_cpe_enable_cpe_clks(struct wcd_cpe_core *core, bool enable)
{
	int ret, ret1;

	if (!core || !core->cpe_cdc_cb ||
	    !core->cpe_cdc_cb->cdc_clk_en ||
	    !core->cpe_cdc_cb->cpe_clk_en) {
		pr_err("%s: invalid handle\n",
		       __func__);
		return -EINVAL;
	}

	ret = core->cpe_cdc_cb->cdc_clk_en(core->component, enable);
	if (ret) {
		dev_err(core->dev, "%s: Failed to enable RCO\n",
			__func__);
		return ret;
	}

	if (!enable && core->cpe_clk_ref > 0)
		core->cpe_clk_ref--;

	/*
	 * The CPE clk is enabled only by the first user
	 * and disabled only when the last user votes it off.
	 */
	if (core->cpe_clk_ref == 0) {
		ret = core->cpe_cdc_cb->cpe_clk_en(core->component, enable);
		if (ret) {
			dev_err(core->dev,
				"%s: cpe_clk_en() failed, err = %d\n",
				__func__, ret);
			goto cpe_clk_fail;
		}
	}

	if (enable)
		core->cpe_clk_ref++;

	return 0;

cpe_clk_fail:
	/* Release the codec clk if CPE clk enable failed */
	if (enable) {
		ret1 = core->cpe_cdc_cb->cdc_clk_en(core->component, !enable);
		if (ret1)
			dev_err(core->dev,
				"%s: Fail to release codec clk, err = %d\n",
				__func__, ret1);
	}

	return ret;
}

/*
 * wcd_cpe_bus_vote_max_bw: Function to vote for max bandwidth on codec bus
 * @core: handle to core for cpe
 * @vote: flag to indicate enable/disable of vote
 *
 * This function will try to use the codec provided callback to
 * vote/unvote for the max bandwidth of the bus that is used by
 * the codec for register reads/writes.
 */
static int wcd_cpe_bus_vote_max_bw(struct wcd_cpe_core *core,
				   bool vote)
{
	if (!core || !core->cpe_cdc_cb) {
		pr_err("%s: Invalid handle to %s\n",
		       __func__,
		       (!core) ? "core" : "codec callbacks");
		return -EINVAL;
	}

	if (core->cpe_cdc_cb->bus_vote_bw) {
		dev_dbg(core->dev, "%s: %s cdc bus max bandwidth\n",
			__func__, vote ? "Vote" : "Unvote");
		core->cpe_cdc_cb->bus_vote_bw(core->component, vote);
	}

	return 0;
}

/*
 * wcd_cpe_load_fw: Function to load the fw image
 * @core: cpe core pointer
 * @load_type: indicates whether to load to data section
 *	       or the instruction section
 *
 * Parse the mdt file to look for program headers, load each
 * split file corresponding to the program headers.
 */
static int wcd_cpe_load_fw(struct wcd_cpe_core *core,
			   unsigned int load_type)
{
	int ret, phdr_idx;
	struct snd_soc_component *component = NULL;
	struct wcd9xxx *wcd9xxx = NULL;
	const struct elf32_hdr *ehdr;
	const struct elf32_phdr *phdr;
	const struct firmware *fw;
	const u8 *elf_ptr;
	char mdt_name[64];
	bool img_dload_fail = false;
	bool load_segment;

	if (!core || !core->cpe_handle) {
		pr_err("%s: Error CPE core %pK\n", __func__,
		       core);
		return -EINVAL;
	}
	component = core->component;
	wcd9xxx = dev_get_drvdata(component->dev->parent);
	snprintf(mdt_name, sizeof(mdt_name), "%s.mdt", core->fname);
	ret = request_firmware(&fw, mdt_name, core->dev);
	if (ret < 0) {
		dev_err(core->dev, "firmware %s not found\n", mdt_name);
		return ret;
	}

	ehdr = (struct elf32_hdr *) fw->data;
	if (!wcd_cpe_is_valid_elf_hdr(core, fw->size, ehdr)) {
		dev_err(core->dev, "%s: fw mdt %s is invalid\n",
			__func__, mdt_name);
		ret = -EINVAL;
		goto done;
	}

	elf_ptr = fw->data + sizeof(*ehdr);

	if (load_type == ELF_FLAG_EXECUTE) {
		/* Reset CPE first */
		ret = cpe_svc_reset(core->cpe_handle);
		if (ret < 0) {
			dev_err(core->dev,
				"%s: Failed to reset CPE with error %d\n",
				__func__, ret);
			goto done;
		}
	}

	dev_dbg(core->dev, "%s: start image dload, name = %s, load_type = 0x%x\n",
		__func__, core->fname, load_type);

	wcd_cpe_bus_vote_max_bw(core, true);

	/* parse every program header and request corresponding firmware */
	for (phdr_idx = 0; phdr_idx < ehdr->e_phnum; phdr_idx++) {
		phdr = (struct elf32_phdr *)elf_ptr;
		load_segment = false;

		dev_dbg(core->dev,
			"index = %d, vaddr = 0x%x, paddr = 0x%x, filesz = 0x%x, memsz = 0x%x, flags = 0x%x\n",
			phdr_idx, phdr->p_vaddr, phdr->p_paddr,
			phdr->p_filesz, phdr->p_memsz, phdr->p_flags);

		switch (load_type) {
		case ELF_FLAG_EXECUTE:
			if (phdr->p_flags & load_type)
				load_segment = true;
			break;
		case ELF_FLAG_RW:
			if (!(phdr->p_flags & ELF_FLAG_EXECUTE) &&
			    (phdr->p_flags & load_type))
				load_segment = true;
			break;
		default:
			pr_err("%s: Invalid load_type 0x%x\n",
			       __func__, load_type);
			ret = -EINVAL;
			goto rel_bus_vote;
		}

		if (load_segment) {
			ret = wcd_cpe_load_each_segment(core,
							phdr_idx, phdr);
			if (ret < 0) {
				dev_err(core->dev,
					"Failed to load segment %d, aborting img dload\n",
					phdr_idx);
				img_dload_fail = true;
				goto rel_bus_vote;
			}
		} else {
			dev_dbg(core->dev,
				"%s: skipped segment with index %d\n",
				__func__, phdr_idx);
		}

		elf_ptr = elf_ptr + sizeof(*phdr);
	}
	if (load_type == ELF_FLAG_EXECUTE)
		core->ssr_type = WCD_CPE_IMEM_DOWNLOADED;

rel_bus_vote:
	wcd_cpe_bus_vote_max_bw(core, false);

done:
	release_firmware(fw);
	return ret;
}

/*
 * wcd_cpe_change_online_state - mark cpe online/offline state
 * @core: core session to mark
 * @online: non-zero to mark online, zero to mark offline
 *
 */
static void wcd_cpe_change_online_state(struct wcd_cpe_core *core,
					int online)
{
	struct wcd_cpe_ssr_entry *ssr_entry = NULL;
	unsigned long ret;

	if (!core) {
		pr_err("%s: Invalid core handle\n",
		       __func__);
		return;
	}

	ssr_entry = &core->ssr_entry;
	WCD_CPE_GRAB_LOCK(&core->ssr_lock, "SSR");
	ssr_entry->offline = !online;

	/* Make sure write to offline state is completed. */
	wmb();
	ret = xchg(&ssr_entry->offline_change, 1);
	wake_up_interruptible(&ssr_entry->offline_poll_wait);
	WCD_CPE_REL_LOCK(&core->ssr_lock, "SSR");
	pr_debug("%s: change state 0x%x offline_change 0x%x\n"
		 " core->offline 0x%x, ret = %ld\n",
		 __func__, online,
		 ssr_entry->offline_change,
		 core->ssr_entry.offline, ret);
}
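
/*
 * The xchg() + wake_up_interruptible() pair above cooperates with
 * wcd_cpe_state_poll(), which consumes offline_change to report a
 * state transition to user space pollers of the procfs node.
 */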

/*
 * wcd_cpe_load_fw_image: work function to load the fw image
 * @work: work that is scheduled to perform the image loading
 *
 * Parse the mdt file to look for program headers, load each
 * split file corresponding to the program headers.
 */
static void wcd_cpe_load_fw_image(struct work_struct *work)
{
	struct wcd_cpe_core *core;
	int ret = 0;

	core = container_of(work, struct wcd_cpe_core, load_fw_work);
	ret = wcd_cpe_load_fw(core, ELF_FLAG_EXECUTE);
	if (!ret)
		wcd_cpe_change_online_state(core, 1);
	else
		pr_err("%s: failed to load instruction section, err = %d\n",
		       __func__, ret);
}

/*
 * wcd_cpe_get_core_handle: get the handle to wcd_cpe_core
 * @component: codec from which this handle is to be obtained
 * Codec driver should provide a callback function to obtain
 * handle to wcd_cpe_core during initialization of wcd_cpe_core
 */
void *wcd_cpe_get_core_handle(
	struct snd_soc_component *component)
{
	struct wcd_cpe_core *core = NULL;

	if (!component) {
		pr_err("%s: Invalid codec handle\n",
		       __func__);
		goto done;
	}

	if (!wcd_get_cpe_core) {
		dev_err(component->dev,
			"%s: codec callback not available\n",
			__func__);
		goto done;
	}

	core = wcd_get_cpe_core(component);

	if (!core)
		dev_err(component->dev,
			"%s: handle to core not available\n",
			__func__);
done:
	return core;
}
EXPORT_SYMBOL(wcd_cpe_get_core_handle);

/*
 * svass_engine_irq: threaded interrupt handler for svass engine irq
 * @irq: interrupt number
 * @data: data pointer passed during irq registration
 */
static irqreturn_t svass_engine_irq(int irq, void *data)
{
	struct wcd_cpe_core *core = data;
	int ret = 0;

	if (!core) {
		pr_err("%s: Invalid data for interrupt handler\n",
		       __func__);
		goto done;
	}

	ret = cpe_svc_process_irq(core->cpe_handle, CPE_IRQ_OUTBOX_IRQ);
	if (ret < 0)
		dev_err(core->dev,
			"%s: Error processing irq from cpe services\n",
			__func__);
done:
	return IRQ_HANDLED;
}

/*
 * wcd_cpe_state_read - update read status in procfs
 * @entry: snd_info_entry
 * @buf: buffer where the read status is updated.
 *
 */
static ssize_t wcd_cpe_state_read(struct snd_info_entry *entry,
				  void *file_private_data, struct file *file,
				  char __user *buf, size_t count, loff_t pos)
{
	int len = 0;
	char buffer[WCD_CPE_STATE_MAX_LEN];
	struct wcd_cpe_core *core = NULL;
	struct wcd_cpe_ssr_entry *ssr_entry = NULL;

	core = (struct wcd_cpe_core *) entry->private_data;
	if (!core) {
		pr_err("%s: CPE core NULL\n", __func__);
		return -EINVAL;
	}
	ssr_entry = &core->ssr_entry;

	/* Make sure read from ssr_entry is completed. */
	rmb();
	dev_dbg(core->dev,
		"%s: Offline 0x%x\n", __func__,
		ssr_entry->offline);

	WCD_CPE_GRAB_LOCK(&core->ssr_lock, "SSR");
	len = snprintf(buffer, sizeof(buffer), "%s\n",
		       ssr_entry->offline ? "OFFLINE" : "ONLINE");
	WCD_CPE_REL_LOCK(&core->ssr_lock, "SSR");

	return simple_read_from_buffer(buf, count, &pos, buffer, len);
}
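
/*
 * Usage sketch (assuming sound card index 0 and cpe id 0, so the node
 * is created as /proc/asound/card0/cpe0_state by wcd_cpe_init()):
 *
 *   cat /proc/asound/card0/cpe0_state
 *
 * prints either "ONLINE" or "OFFLINE".
 */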

/*
 * wcd_cpe_state_poll - polls for a change in state
 * @entry: snd_info_entry
 * @wait: poll_table on which the wait queue is registered
 *
 */
static unsigned int wcd_cpe_state_poll(struct snd_info_entry *entry,
				       void *private_data, struct file *file,
				       poll_table *wait)
{
	struct wcd_cpe_core *core = NULL;
	struct wcd_cpe_ssr_entry *ssr_entry = NULL;
	int ret = 0;

	core = (struct wcd_cpe_core *) entry->private_data;
	if (!core) {
		pr_err("%s: CPE core NULL\n", __func__);
		return POLLERR;
	}

	ssr_entry = &core->ssr_entry;

	dev_dbg(core->dev, "%s: CPE Poll wait\n",
		__func__);
	poll_wait(file, &ssr_entry->offline_poll_wait, wait);
	dev_dbg(core->dev, "%s: Wake-up Poll wait\n",
		__func__);
	WCD_CPE_GRAB_LOCK(&core->ssr_lock, "SSR");

	if (xchg(&ssr_entry->offline_change, 0))
		ret = POLLIN | POLLPRI | POLLRDNORM;

	WCD_CPE_REL_LOCK(&core->ssr_lock, "SSR");

	dev_dbg(core->dev, "%s: ret (%d) from poll_wait\n",
		__func__, ret);
	return ret;
}

/*
 * wcd_cpe_is_online_state - return true if CPE is in online state
 * @core_handle: handle to the core to query
 */
static bool wcd_cpe_is_online_state(void *core_handle)
{
	struct wcd_cpe_core *core = core_handle;

	if (core_handle) {
		return !core->ssr_entry.offline;
	} else {
		pr_err("%s: Core handle NULL\n", __func__);
		/* treat CPE as offline if the core handle is NULL */
		return false;
	}
}

static struct snd_info_entry_ops wcd_cpe_state_proc_ops = {
	.read = wcd_cpe_state_read,
	.poll = wcd_cpe_state_poll,
};

static int wcd_cpe_check_new_image(struct wcd_cpe_core *core)
{
	int rc = 0;
	char temp_img_name[WCD_CPE_IMAGE_FNAME_MAX];

	if (!strcmp(core->fname, core->dyn_fname) &&
	    core->ssr_type != WCD_CPE_INITIALIZED) {
		dev_dbg(core->dev,
			"%s: Firmware unchanged, fname = %s, ssr_type 0x%x\n",
			__func__, core->fname, core->ssr_type);
		goto done;
	}

	/*
	 * Different firmware name requested,
	 * Re-load the instruction section
	 */
	strlcpy(temp_img_name, core->fname,
		WCD_CPE_IMAGE_FNAME_MAX);
	strlcpy(core->fname, core->dyn_fname,
		WCD_CPE_IMAGE_FNAME_MAX);

	rc = wcd_cpe_load_fw(core, ELF_FLAG_EXECUTE);
	if (rc) {
		dev_err(core->dev,
			"%s: Failed to dload new image %s, err = %d\n",
			__func__, core->fname, rc);
		/* If new image download failed, revert back to old image */
		strlcpy(core->fname, temp_img_name,
			WCD_CPE_IMAGE_FNAME_MAX);
		rc = wcd_cpe_load_fw(core, ELF_FLAG_EXECUTE);
		if (rc)
			dev_err(core->dev,
				"%s: Failed to re-dload image %s, err = %d\n",
				__func__, core->fname, rc);
	} else {
		dev_info(core->dev, "%s: fw changed to %s\n",
			 __func__, core->fname);
	}
done:
	return rc;
}

static int wcd_cpe_enable(struct wcd_cpe_core *core,
			  bool enable)
{
	int ret = 0;

	if (enable) {
		/* Reset CPE first */
		ret = cpe_svc_reset(core->cpe_handle);
		if (ret < 0) {
			dev_err(core->dev,
				"%s: CPE Reset failed, error = %d\n",
				__func__, ret);
			goto done;
		}

		ret = wcd_cpe_setup_irqs(core);
		if (ret) {
			dev_err(core->dev,
				"%s: CPE IRQs setup failed, error = %d\n",
				__func__, ret);
			goto done;
		}
		ret = wcd_cpe_check_new_image(core);
		if (ret)
			goto fail_boot;

		/* Dload data section */
		ret = wcd_cpe_load_fw(core, ELF_FLAG_RW);
		if (ret) {
			dev_err(core->dev,
				"%s: Failed to dload data section, err = %d\n",
				__func__, ret);
			goto fail_boot;
		}

		ret = wcd_cpe_enable_cpe_clks(core, true);
		if (ret < 0) {
			dev_err(core->dev,
				"%s: CPE clk enable failed, err = %d\n",
				__func__, ret);
			goto fail_boot;
		}

		ret = cpe_svc_boot(core->cpe_handle,
				   core->cpe_debug_mode);
		if (ret < 0) {
			dev_err(core->dev,
				"%s: Failed to boot CPE\n",
				__func__);
			goto fail_boot;
		}

		/* wait for CPE to be online */
		dev_dbg(core->dev,
			"%s: waiting for CPE bootup\n",
			__func__);

		wait_for_completion(&core->online_compl);

		dev_dbg(core->dev,
			"%s: CPE bootup done\n",
			__func__);

		core->ssr_type = WCD_CPE_ENABLED;
	} else {
		if (core->ssr_type == WCD_CPE_BUS_DOWN_EVENT ||
		    core->ssr_type == WCD_CPE_SSR_EVENT) {
			/*
			 * If this disable vote is when
			 * SSR is in progress, do not disable CPE here,
			 * instead SSR handler will control CPE.
			 */
			wcd_cpe_enable_cpe_clks(core, false);
			wcd_cpe_cleanup_irqs(core);
			goto done;
		}

		ret = cpe_svc_shutdown(core->cpe_handle);
		if (ret < 0) {
			dev_err(core->dev,
				"%s: CPE shutdown failed, error %d\n",
				__func__, ret);
			goto done;
		}

		wcd_cpe_enable_cpe_clks(core, false);
		wcd_cpe_cleanup_irqs(core);
		core->ssr_type = WCD_CPE_IMEM_DOWNLOADED;
	}

	return ret;

fail_boot:
	wcd_cpe_cleanup_irqs(core);

done:
	return ret;
}

/*
 * wcd_cpe_boot_ssr: Load the images to CPE after ssr and bootup cpe
 * @core: handle to the core
 */
static int wcd_cpe_boot_ssr(struct wcd_cpe_core *core)
{
	int rc = 0;

	if (!core || !core->cpe_handle) {
		pr_err("%s: Invalid handle\n", __func__);
		rc = -EINVAL;
		goto fail;
	}
	/* Load the instruction section and mark CPE as online */
	rc = wcd_cpe_load_fw(core, ELF_FLAG_EXECUTE);
	if (rc) {
		dev_err(core->dev,
			"%s: Failed to load instruction, err = %d\n",
			__func__, rc);
		goto fail;
	} else {
		wcd_cpe_change_online_state(core, 1);
	}

fail:
	return rc;
}

/*
 * wcd_cpe_clr_ready_status:
 *	Clear the value from the ready status for CPE
 * @core: handle to the core
 * @value: flag/bitmask that is to be cleared
 *
 * This function should not be invoked with ssr_lock acquired
 */
static void wcd_cpe_clr_ready_status(struct wcd_cpe_core *core,
				     u8 value)
{
	WCD_CPE_GRAB_LOCK(&core->ssr_lock, "SSR");
	core->ready_status &= ~(value);
	dev_dbg(core->dev,
		"%s: ready_status = 0x%x\n",
		__func__, core->ready_status);
	WCD_CPE_REL_LOCK(&core->ssr_lock, "SSR");
}

/*
 * wcd_cpe_set_and_complete:
 *	Set the ready status with the provided value and
 *	flag the completion object if ready status moves
 *	to ready to download
 * @core: handle to the core
 * @value: flag/bitmask that is to be set
 */
static void wcd_cpe_set_and_complete(struct wcd_cpe_core *core,
				     u8 value)
{
	WCD_CPE_GRAB_LOCK(&core->ssr_lock, "SSR");
	core->ready_status |= value;
	if ((core->ready_status & WCD_CPE_READY_TO_DLOAD) ==
	    WCD_CPE_READY_TO_DLOAD) {
		dev_dbg(core->dev,
			"%s: marking ready, status = 0x%x\n",
			__func__, core->ready_status);
		complete(&core->ready_compl);
	}
	WCD_CPE_REL_LOCK(&core->ssr_lock, "SSR");
}
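
/*
 * Note on the ready handshake (as used by the SSR path below):
 * WCD_CPE_BUS_READY is set on a bus-up event, and WCD_CPE_BLK_READY
 * when CMI clients have deregistered (or there are no CPE users).
 * The SSR worker proceeds with re-download only once ready_status
 * covers WCD_CPE_READY_TO_DLOAD, which combines both conditions.
 */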

/*
 * wcd_cpe_ssr_work: work function to handle CPE SSR
 * @work: work that is scheduled to perform CPE shutdown
 *	and restart
 */
static void wcd_cpe_ssr_work(struct work_struct *work)
{
	int rc = 0;
	u32 irq = 0;
	struct wcd_cpe_core *core = NULL;
	u8 status = 0;

	core = container_of(work, struct wcd_cpe_core, ssr_work);
	if (!core) {
		pr_err("%s: Core handle NULL\n", __func__);
		return;
	}

	/* Vote for pm_qos to keep the CPU out of deep idle during SSR */
	pm_qos_add_request(&core->pm_qos_req,
			   PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);
	pm_qos_update_request(&core->pm_qos_req,
			      msm_cpuidle_get_deep_idle_latency());

	dev_dbg(core->dev,
		"%s: CPE SSR with event %d\n",
		__func__, core->ssr_type);

	if (core->ssr_type == WCD_CPE_SSR_EVENT) {
		if (CPE_ERR_IRQ_CB(core))
			core->cpe_cdc_cb->cpe_err_irq_control(
					core->component,
					CPE_ERR_IRQ_STATUS,
					&status);
		if (status & core->irq_info.cpe_fatal_irqs)
			irq = CPE_IRQ_WDOG_BITE;
	} else {
		/* If bus is down, cdc reg cannot be read */
		irq = CPE_IRQ_WDOG_BITE;
	}

	if (core->cpe_users > 0) {
		rc = cpe_svc_process_irq(core->cpe_handle, irq);
		if (rc < 0)
			/*
			 * Even if process_irq fails,
			 * wait for cpe to move to offline state
			 */
			dev_err(core->dev,
				"%s: irq processing failed, error = %d\n",
				__func__, rc);

		rc = wait_for_completion_timeout(&core->offline_compl,
						 CPE_OFFLINE_WAIT_TIMEOUT);
		if (!rc) {
			dev_err(core->dev,
				"%s: wait for cpe offline timed out\n",
				__func__);
			goto err_ret;
		}
		if (core->ssr_type != WCD_CPE_BUS_DOWN_EVENT) {
			wcd_cpe_get_sfr_dump(core);

			/*
			 * Ramdump has to be explicitly enabled
			 * through debugfs and cannot be collected
			 * when bus is down.
			 */
			if (ramdump_enable)
				wcd_cpe_collect_ramdump(core);
		}
	} else {
		pr_err("%s: no cpe users, mark as offline\n", __func__);
		wcd_cpe_change_online_state(core, 0);
		wcd_cpe_set_and_complete(core,
					 WCD_CPE_BLK_READY);
	}

	rc = wait_for_completion_timeout(&core->ready_compl,
					 CPE_READY_WAIT_TIMEOUT);
	if (!rc) {
		dev_err(core->dev,
			"%s: ready to online timed out, status = %u\n",
			__func__, core->ready_status);
		goto err_ret;
	}

	rc = wcd_cpe_boot_ssr(core);

	/*
	 * Once images are downloaded make sure all
	 * error interrupts are cleared
	 */
	if (CPE_ERR_IRQ_CB(core))
		core->cpe_cdc_cb->cpe_err_irq_control(core->component,
					CPE_ERR_IRQ_CLEAR, NULL);

err_ret:
	/* remove after default pm qos */
	pm_qos_update_request(&core->pm_qos_req,
			      PM_QOS_DEFAULT_VALUE);
	pm_qos_remove_request(&core->pm_qos_req);
}

/*
 * wcd_cpe_ssr_event: handle SSR events for CPE.
 * @core_handle: handle to the cpe core
 * @event: the SSR event to handle (bus down/up or CPE SSR)
 */
int wcd_cpe_ssr_event(void *core_handle,
		      enum wcd_cpe_ssr_state_event event)
{
	struct wcd_cpe_core *core = core_handle;

	if (!core) {
		pr_err("%s: Invalid handle to core\n",
		       __func__);
		return -EINVAL;
	}

	/*
	 * If CPE is not even enabled, the SSR event for
	 * CPE needs to be ignored
	 */
	if (core->ssr_type == WCD_CPE_INITIALIZED) {
		dev_info(core->dev,
			 "%s: CPE initialized but not enabled, skip CPE ssr\n",
			 __func__);
		return 0;
	}

	dev_dbg(core->dev,
		"%s: Schedule ssr work, event = %d\n",
		__func__, core->ssr_type);

	switch (event) {
	case WCD_CPE_BUS_DOWN_EVENT:
		/*
		 * If bus down, then CPE block is also
		 * treated to be down
		 */
		wcd_cpe_clr_ready_status(core, WCD_CPE_READY_TO_DLOAD);
		core->ssr_type = event;
		schedule_work(&core->ssr_work);
		break;

	case WCD_CPE_SSR_EVENT:
		wcd_cpe_clr_ready_status(core, WCD_CPE_BLK_READY);
		core->ssr_type = event;
		schedule_work(&core->ssr_work);
		break;

	case WCD_CPE_BUS_UP_EVENT:
		wcd_cpe_set_and_complete(core, WCD_CPE_BUS_READY);
		/*
		 * In case of bus up event ssr_type will be changed
		 * to WCD_CPE_ACTIVE once CPE is online
		 */
		break;

	default:
		dev_err(core->dev,
			"%s: unhandled SSR event %d\n",
			__func__, event);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(wcd_cpe_ssr_event);

/*
 * svass_exception_irq: threaded irq handler for sva error interrupts
 * @irq: interrupt number
 * @data: data pointer passed during irq registration
 *
 * Once an error interrupt is received, it is not cleared, since
 * clearing this interrupt will raise spurious interrupts unless
 * CPE is reset.
 */
static irqreturn_t svass_exception_irq(int irq, void *data)
{
	struct wcd_cpe_core *core = data;
	u8 status = 0;

	if (!core || !CPE_ERR_IRQ_CB(core)) {
		pr_err("%s: Invalid %s\n",
		       __func__,
		       (!core) ? "core" : "cdc control");
		return IRQ_HANDLED;
	}

	core->cpe_cdc_cb->cpe_err_irq_control(core->component,
			CPE_ERR_IRQ_STATUS, &status);

	while (status != 0) {
		if (status & core->irq_info.cpe_fatal_irqs) {
			dev_err(core->dev,
				"%s: CPE SSR event, err_status = 0x%02x\n",
				__func__, status);
			wcd_cpe_ssr_event(core, WCD_CPE_SSR_EVENT);
			/*
			 * If fatal interrupt is received,
			 * trigger SSR and stop processing
			 * further interrupts
			 */
			break;
		}
		/*
		 * Mask the interrupt that was raised to
		 * avoid spurious interrupts
		 */
		core->cpe_cdc_cb->cpe_err_irq_control(core->component,
				CPE_ERR_IRQ_MASK, &status);

		/* Clear only the interrupt that was raised */
		core->cpe_cdc_cb->cpe_err_irq_control(core->component,
				CPE_ERR_IRQ_CLEAR, &status);
		dev_err(core->dev,
			"%s: err_interrupt status = 0x%x\n",
			__func__, status);

		/* Read status for pending interrupts */
		core->cpe_cdc_cb->cpe_err_irq_control(core->component,
				CPE_ERR_IRQ_STATUS, &status);
	}

	return IRQ_HANDLED;
}

/*
 * wcd_cpe_cmi_afe_cb: callback called on response to afe commands
 * @param: parameter containing the response code, etc
 *
 * Process the response to the command sent to CPE and wake up the
 * command send wait.
 */
static void wcd_cpe_cmi_afe_cb(const struct cmi_api_notification *param)
{
	struct cmi_hdr *hdr;
	struct wcd_cmi_afe_port_data *afe_port_d;
	u8 port_id;

	if (!param) {
		pr_err("%s: param is null\n", __func__);
		return;
	}

	if (param->event != CMI_API_MSG) {
		pr_err("%s: unhandled event 0x%x\n",
		       __func__, param->event);
		return;
	}

	pr_debug("%s: param->result = %d\n",
		 __func__, param->result);

	hdr = (struct cmi_hdr *) param->message;

	/*
	 * for AFE cmd response, port id is
	 * stored at session id field of header
	 */
	port_id = CMI_HDR_GET_SESSION_ID(hdr);
	if (port_id > WCD_CPE_AFE_MAX_PORTS) {
		pr_err("%s: invalid port_id %d\n",
		       __func__, port_id);
		return;
	}

	afe_port_d = &(afe_ports[port_id]);

	if (hdr->opcode == CPE_CMI_BASIC_RSP_OPCODE) {

		u8 *payload = ((u8 *)param->message) + (sizeof(struct cmi_hdr));
		u8 result = payload[0];

		afe_port_d->cmd_result = result;
		complete(&afe_port_d->afe_cmd_complete);

	} else if (hdr->opcode == CPE_AFE_PORT_CMDRSP_SHARED_MEM_ALLOC) {

		struct cpe_cmdrsp_shmem_alloc *cmdrsp_shmem_alloc =
			(struct cpe_cmdrsp_shmem_alloc *) param->message;

		if (cmdrsp_shmem_alloc->addr == 0) {
			pr_err("%s: Failed AFE shared mem alloc\n", __func__);
			afe_port_d->cmd_result = CMI_SHMEM_ALLOC_FAILED;
		} else {
			pr_debug("%s AFE shared mem addr = 0x%x\n",
				 __func__, cmdrsp_shmem_alloc->addr);
			afe_port_d->mem_handle = cmdrsp_shmem_alloc->addr;
			afe_port_d->cmd_result = 0;
		}
		complete(&afe_port_d->afe_cmd_complete);
	}
}

/*
 * wcd_cpe_initialize_afe_port_data: Initialize all AFE ports
 *
 * Initialize the data for all the afe ports. Assign the
 * afe port state to INIT state.
 */
static void wcd_cpe_initialize_afe_port_data(void)
{
	struct wcd_cmi_afe_port_data *afe_port_d;
	int i;

	for (i = 0; i <= WCD_CPE_AFE_MAX_PORTS; i++) {
		afe_port_d = &afe_ports[i];
		afe_port_d->port_id = i;
		init_completion(&afe_port_d->afe_cmd_complete);
		afe_port_d->port_state = AFE_PORT_STATE_INIT;
		mutex_init(&afe_port_d->afe_lock);
	}
}

/*
 * wcd_cpe_deinitialize_afe_port_data: De-initialize all AFE ports
 *
 * De-initialize the data for all the afe ports. Assign the
 * afe port state to DEINIT state.
 */
static void wcd_cpe_deinitialize_afe_port_data(void)
{
	struct wcd_cmi_afe_port_data *afe_port_d;
	int i;

	for (i = 0; i <= WCD_CPE_AFE_MAX_PORTS; i++) {
		afe_port_d = &afe_ports[i];
		afe_port_d->port_state = AFE_PORT_STATE_DEINIT;
		mutex_destroy(&afe_port_d->afe_lock);
	}
}

/*
 * wcd_cpe_svc_event_cb: callback from cpe services, indicating
 * CPE is online or offline.
 * @param: parameter / payload for event to be notified
 */
static void wcd_cpe_svc_event_cb(const struct cpe_svc_notification *param)
{
	struct snd_soc_component *component;
	struct wcd_cpe_core *core;
	struct cpe_svc_boot_event *boot_data;
	bool active_sessions;

	if (!param) {
		pr_err("%s: Invalid event\n", __func__);
		return;
	}

	component = param->private_data;
	if (!component) {
		pr_err("%s: Invalid handle to codec\n",
		       __func__);
		return;
	}

	core = wcd_cpe_get_core_handle(component);
	if (!core) {
		pr_err("%s: Invalid handle to core\n",
		       __func__);
		return;
	}

	dev_dbg(core->dev,
		"%s: event = 0x%x, ssr_type = 0x%x\n",
		__func__, param->event, core->ssr_type);

	switch (param->event) {
	case CPE_SVC_BOOT:
		boot_data = (struct cpe_svc_boot_event *)
				param->payload;
		core->sfr_buf_addr = boot_data->debug_address;
		core->sfr_buf_size = boot_data->debug_buffer_size;
		dev_dbg(core->dev,
			"%s: CPE booted, sfr_addr = %d, sfr_size = %zu\n",
			__func__, core->sfr_buf_addr,
			core->sfr_buf_size);
		break;
	case CPE_SVC_ONLINE:
		core->ssr_type = WCD_CPE_ACTIVE;
		dev_dbg(core->dev, "%s CPE is now online\n",
			__func__);
		complete(&core->online_compl);
		break;
	case CPE_SVC_OFFLINE:
		/*
		 * offline can happen during normal shutdown,
		 * but we are interested in offline only during
		 * SSR.
		 */
		if (core->ssr_type != WCD_CPE_SSR_EVENT &&
		    core->ssr_type != WCD_CPE_BUS_DOWN_EVENT)
			break;

		active_sessions = wcd_cpe_lsm_session_active();
		wcd_cpe_change_online_state(core, 0);
		complete(&core->offline_compl);
		dev_err(core->dev, "%s: CPE is now offline\n",
			__func__);
		break;
	case CPE_SVC_CMI_CLIENTS_DEREG:
		/*
		 * Only when either CPE SSR is in progress,
		 * or the bus is down, we need to mark the CPE
		 * as ready. In all other cases, this event is
		 * ignored
		 */
		if (core->ssr_type == WCD_CPE_SSR_EVENT ||
		    core->ssr_type == WCD_CPE_BUS_DOWN_EVENT)
			wcd_cpe_set_and_complete(core,
						 WCD_CPE_BLK_READY);
		break;
	default:
		dev_err(core->dev,
			"%s: unhandled notification\n",
			__func__);
		break;
	}
}

/*
 * wcd_cpe_cleanup_irqs: free the irq resources required by cpe
 * @core: handle to the cpe core
 *
 * This API will free the IRQs for CPE but does not mask the
 * CPE interrupts. If masking is needed, it has to be done
 * explicitly by the caller.
 */
static void wcd_cpe_cleanup_irqs(struct wcd_cpe_core *core)
{
	struct snd_soc_component *component = core->component;
	struct wcd9xxx *wcd9xxx = dev_get_drvdata(component->dev->parent);
	struct wcd9xxx_core_resource *core_res = &wcd9xxx->core_res;

	wcd9xxx_free_irq(core_res,
			 core->irq_info.cpe_engine_irq,
			 core);
	wcd9xxx_free_irq(core_res,
			 core->irq_info.cpe_err_irq,
			 core);
}

/*
 * wcd_cpe_setup_irqs: setup the irqs for CPE
 * @core: handle to wcd_cpe_core
 * All interrupts needed for CPE are acquired. If any
 * request_irq fails, then all irqs are freed
 */
static int wcd_cpe_setup_irqs(struct wcd_cpe_core *core)
{
	int ret;
	struct snd_soc_component *component = core->component;
	struct wcd9xxx *wcd9xxx = dev_get_drvdata(component->dev->parent);
	struct wcd9xxx_core_resource *core_res = &wcd9xxx->core_res;

	ret = wcd9xxx_request_irq(core_res,
				  core->irq_info.cpe_engine_irq,
				  svass_engine_irq, "SVASS_Engine", core);
	if (ret) {
		dev_err(core->dev,
			"%s: Failed to request svass engine irq\n",
			__func__);
		goto fail_engine_irq;
	}

	/* Make sure all error interrupts are cleared */
	if (CPE_ERR_IRQ_CB(core))
		core->cpe_cdc_cb->cpe_err_irq_control(
					core->component,
					CPE_ERR_IRQ_CLEAR,
					NULL);

	/* Enable required error interrupts */
	if (CPE_ERR_IRQ_CB(core))
		core->cpe_cdc_cb->cpe_err_irq_control(
					core->component,
					CPE_ERR_IRQ_UNMASK,
					NULL);

	ret = wcd9xxx_request_irq(core_res,
				  core->irq_info.cpe_err_irq,
				  svass_exception_irq, "SVASS_Exception", core);
	if (ret) {
		dev_err(core->dev,
			"%s: Failed to request svass err irq\n",
			__func__);
		goto fail_exception_irq;
	}

	return 0;

fail_exception_irq:
	wcd9xxx_free_irq(core_res,
			 core->irq_info.cpe_engine_irq, core);

fail_engine_irq:
	return ret;
}

static int wcd_cpe_get_cal_index(int32_t cal_type)
{
	int cal_index = -EINVAL;

	if (cal_type == ULP_AFE_CAL_TYPE)
		cal_index = WCD_CPE_LSM_CAL_AFE;
	else if (cal_type == ULP_LSM_CAL_TYPE)
		cal_index = WCD_CPE_LSM_CAL_LSM;
	else if (cal_type == ULP_LSM_TOPOLOGY_ID_CAL_TYPE)
		cal_index = WCD_CPE_LSM_CAL_TOPOLOGY_ID;
	else
		pr_err("%s: invalid cal_type %d\n",
		       __func__, cal_type);

	return cal_index;
}

static int wcd_cpe_alloc_cal(int32_t cal_type, size_t data_size, void *data)
{
	int ret = 0;
	int cal_index;

	cal_index = wcd_cpe_get_cal_index(cal_type);
	if (cal_index < 0) {
		pr_err("%s: invalid caltype %d\n",
		       __func__, cal_type);
		return -EINVAL;
	}

	ret = cal_utils_alloc_cal(data_size, data,
				  core_d->cal_data[cal_index],
				  0, NULL);
	if (ret < 0)
		pr_err("%s: cal_utils_alloc_block failed, ret = %d, cal type = %d!\n",
		       __func__, ret, cal_type);
	return ret;
}

static int wcd_cpe_dealloc_cal(int32_t cal_type, size_t data_size,
			       void *data)
{
	int ret = 0;
	int cal_index;

	cal_index = wcd_cpe_get_cal_index(cal_type);
	if (cal_index < 0) {
		pr_err("%s: invalid caltype %d\n",
		       __func__, cal_type);
		return -EINVAL;
	}

	ret = cal_utils_dealloc_cal(data_size, data,
				    core_d->cal_data[cal_index]);
	if (ret < 0)
		pr_err("%s: cal_utils_dealloc_block failed, ret = %d, cal type = %d!\n",
		       __func__, ret, cal_type);
	return ret;
}

static int wcd_cpe_set_cal(int32_t cal_type, size_t data_size, void *data)
{
	int ret = 0;
	int cal_index;

	cal_index = wcd_cpe_get_cal_index(cal_type);
	if (cal_index < 0) {
		pr_err("%s: invalid caltype %d\n",
		       __func__, cal_type);
		return -EINVAL;
	}

	ret = cal_utils_set_cal(data_size, data,
				core_d->cal_data[cal_index],
				0, NULL);
	if (ret < 0)
		pr_err("%s: cal_utils_set_cal failed, ret = %d, cal type = %d!\n",
		       __func__, ret, cal_type);
	return ret;
}

static int wcd_cpe_cal_init(struct wcd_cpe_core *core)
{
	int ret = 0;

	struct cal_type_info cal_type_info[] = {
		{{ULP_AFE_CAL_TYPE,
		  {wcd_cpe_alloc_cal, wcd_cpe_dealloc_cal, NULL,
		   wcd_cpe_set_cal, NULL, NULL} },
		 {NULL, NULL, cal_utils_match_buf_num} },

		{{ULP_LSM_CAL_TYPE,
		  {wcd_cpe_alloc_cal, wcd_cpe_dealloc_cal, NULL,
		   wcd_cpe_set_cal, NULL, NULL} },
		 {NULL, NULL, cal_utils_match_buf_num} },

		{{ULP_LSM_TOPOLOGY_ID_CAL_TYPE,
		  {wcd_cpe_alloc_cal, wcd_cpe_dealloc_cal, NULL,
		   wcd_cpe_set_cal, NULL, NULL} },
		 {NULL, NULL, cal_utils_match_buf_num} },
	};

	ret = cal_utils_create_cal_types(WCD_CPE_LSM_CAL_MAX,
					 core->cal_data,
					 cal_type_info);
	if (ret < 0)
		pr_err("%s: could not create cal type!\n",
		       __func__);
	return ret;
}

/*
 * wcd_cpe_vote: vote/unvote for CPE usage; the first vote
 * enables the CPE and the last un-vote disables it.
 * @core: handle to cpe core structure
 * @enable: true to vote for enable, false to un-vote
 */
static int wcd_cpe_vote(struct wcd_cpe_core *core,
			bool enable)
{
	int ret = 0;

	if (!core) {
		pr_err("%s: Invalid handle to core\n",
		       __func__);
		ret = -EINVAL;
		goto done;
	}

	dev_dbg(core->dev,
		"%s: enter, enable = %s, cpe_users = %u\n",
		__func__, (enable ? "true" : "false"),
		core->cpe_users);

	if (enable) {
		core->cpe_users++;
		if (core->cpe_users == 1) {
			ret = wcd_cpe_enable(core, enable);
			if (ret) {
				dev_err(core->dev,
					"%s: CPE enable failed, err = %d\n",
					__func__, ret);
				goto done;
			}
		} else {
			dev_dbg(core->dev,
				"%s: cpe already enabled, users = %u\n",
				__func__, core->cpe_users);
			goto done;
		}
	} else {
		core->cpe_users--;
		if (core->cpe_users == 0) {
			ret = wcd_cpe_enable(core, enable);
			if (ret) {
				dev_err(core->dev,
					"%s: CPE disable failed, err = %d\n",
					__func__, ret);
				goto done;
			}
		} else {
			dev_dbg(core->dev,
				"%s: %u valid users on cpe\n",
				__func__, core->cpe_users);
			goto done;
		}
	}

	dev_dbg(core->dev,
		"%s: leave, enable = %s, cpe_users = %u\n",
		__func__, (enable ? "true" : "false"),
		core->cpe_users);

done:
	return ret;
}

static int wcd_cpe_debugfs_init(struct wcd_cpe_core *core)
{
	int rc = 0;

	struct dentry *dir = debugfs_create_dir("wcd_cpe", NULL);

	if (IS_ERR_OR_NULL(dir)) {
		dir = NULL;
		rc = -ENODEV;
		goto err_create_dir;
	}

	if (!debugfs_create_u32("ramdump_enable", 0644,
				dir, &ramdump_enable)) {
		dev_err(core->dev, "%s: Failed to create debugfs node %s\n",
			__func__, "ramdump_enable");
		rc = -ENODEV;
		goto err_create_entry;
	}

	if (!debugfs_create_file("cpe_ftm_test_trigger", 0200,
				 dir, core, &cpe_ftm_test_trigger_fops)) {
		dev_err(core->dev, "%s: Failed to create debugfs node %s\n",
			__func__, "cpe_ftm_test_trigger");
		rc = -ENODEV;
		goto err_create_entry;
	}

	if (!debugfs_create_u32("cpe_ftm_test_status", 0444,
				dir, &cpe_ftm_test_status)) {
		dev_err(core->dev, "%s: Failed to create debugfs node %s\n",
			__func__, "cpe_ftm_test_status");
		rc = -ENODEV;
		goto err_create_entry;
	}

	return 0;

err_create_entry:
	debugfs_remove(dir);

err_create_dir:
	return rc;
}

static ssize_t fw_name_show(struct wcd_cpe_core *core, char *buf)
{
	return snprintf(buf, WCD_CPE_IMAGE_FNAME_MAX, "%s",
			core->dyn_fname);
}

static ssize_t fw_name_store(struct wcd_cpe_core *core,
			     const char *buf, ssize_t count)
{
	int copy_count = count;
	const char *pos;

	pos = memchr(buf, '\n', count);
	if (pos)
		copy_count = pos - buf;

	if (copy_count > (WCD_CPE_IMAGE_FNAME_MAX - 1)) {
		dev_err(core->dev,
			"%s: Invalid length %d, max allowed %d\n",
			__func__, copy_count, WCD_CPE_IMAGE_FNAME_MAX - 1);
		return -EINVAL;
	}

	strlcpy(core->dyn_fname, buf, copy_count + 1);

	return count;
}
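
/*
 * Usage sketch: the attribute is exposed under the kobject created in
 * wcd_cpe_sysfs_init() (parented to kernel_kobj). Assuming id 0 and a
 * hypothetical image name "cpe_new":
 *
 *   echo cpe_new > /sys/kernel/wcd_cpe0/fw_name
 *
 * The new name takes effect on the next image download via
 * wcd_cpe_check_new_image().
 */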

WCD_CPE_ATTR(fw_name, 0660, fw_name_show, fw_name_store);

static ssize_t wcd_cpe_sysfs_show(struct kobject *kobj,
				  struct attribute *attr, char *buf)
{
	struct wcd_cpe_attribute *cpe_attr = to_wcd_cpe_attr(attr);
	struct wcd_cpe_core *core = kobj_to_cpe_core(kobj);
	ssize_t ret = -EINVAL;

	if (core && cpe_attr->show)
		ret = cpe_attr->show(core, buf);

	return ret;
}

static ssize_t wcd_cpe_sysfs_store(struct kobject *kobj,
				   struct attribute *attr, const char *buf,
				   size_t count)
{
	struct wcd_cpe_attribute *cpe_attr = to_wcd_cpe_attr(attr);
	struct wcd_cpe_core *core = kobj_to_cpe_core(kobj);
	ssize_t ret = -EINVAL;

	if (core && cpe_attr->store)
		ret = cpe_attr->store(core, buf, count);

	return ret;
}

static const struct sysfs_ops wcd_cpe_sysfs_ops = {
	.show = wcd_cpe_sysfs_show,
	.store = wcd_cpe_sysfs_store,
};

static struct kobj_type wcd_cpe_ktype = {
	.sysfs_ops = &wcd_cpe_sysfs_ops,
};

static int wcd_cpe_sysfs_init(struct wcd_cpe_core *core, int id)
{
	char sysfs_dir_name[WCD_CPE_SYSFS_DIR_MAX_LENGTH];
	int rc = 0;

	snprintf(sysfs_dir_name, WCD_CPE_SYSFS_DIR_MAX_LENGTH,
		 "%s%d", "wcd_cpe", id);

	rc = kobject_init_and_add(&core->cpe_kobj, &wcd_cpe_ktype,
				  kernel_kobj,
				  sysfs_dir_name);
	if (unlikely(rc)) {
		dev_err(core->dev,
			"%s: Failed to add kobject %s, err = %d\n",
			__func__, sysfs_dir_name, rc);
		goto done;
	}

	rc = sysfs_create_file(&core->cpe_kobj, &cpe_attr_fw_name.attr);
	if (rc) {
		dev_err(core->dev,
			"%s: Failed to add fw_name sysfs entry to %s\n",
			__func__, sysfs_dir_name);
		goto fail_create_file;
	}

	return 0;

fail_create_file:
	kobject_put(&core->cpe_kobj);
done:
	return rc;
}

static ssize_t cpe_ftm_test_trigger(struct file *file,
				    const char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct wcd_cpe_core *core = file->private_data;
	int ret = 0;

	/* Enable the clks for cpe */
	ret = wcd_cpe_enable_cpe_clks(core, true);
	if (ret < 0) {
		dev_err(core->dev,
			"%s: CPE clk enable failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	/* Get the CPE_STATUS */
	ret = cpe_svc_ftm_test(core->cpe_handle, &cpe_ftm_test_status);
	if (ret < 0) {
		dev_err(core->dev,
			"%s: CPE FTM test failed, err = %d\n",
			__func__, ret);
		if (ret == CPE_SVC_BUSY) {
			cpe_ftm_test_status = 1;
			ret = 0;
		}
	}

	/* Disable the clks for cpe */
	ret = wcd_cpe_enable_cpe_clks(core, false);
	if (ret < 0) {
		dev_err(core->dev,
			"%s: CPE clk disable failed, err = %d\n",
			__func__, ret);
	}

done:
	if (ret < 0)
		return ret;
	else
		return count;
}
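
/*
 * Usage sketch (assuming debugfs is mounted at the usual
 * /sys/kernel/debug; the nodes are created by wcd_cpe_debugfs_init()):
 *
 *   echo 1 > /sys/kernel/debug/wcd_cpe/cpe_ftm_test_trigger
 *   cat /sys/kernel/debug/wcd_cpe/cpe_ftm_test_status
 */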

static int wcd_cpe_validate_params(
	struct snd_soc_component *component,
	struct wcd_cpe_params *params)
{
	if (!component) {
		pr_err("%s: Invalid codec\n", __func__);
		return -EINVAL;
	}

	if (!params) {
		dev_err(component->dev,
			"%s: No params supplied for codec %s\n",
			__func__, component->name);
		return -EINVAL;
	}

	if (!params->component || !params->get_cpe_core ||
	    !params->cdc_cb) {
		dev_err(component->dev,
			"%s: Invalid params for codec %s\n",
			__func__, component->name);
		return -EINVAL;
	}

	return 0;
}

/*
 * wcd_cpe_init: Initialize CPE related structures
 * @img_fname: filename for firmware image
 * @component: handle to codec requesting for image download
 * @params: parameter structure passed from caller
 *
 * This API will initialize the cpe core but will not
 * download the image or boot the cpe core.
 */
struct wcd_cpe_core *wcd_cpe_init(const char *img_fname,
				  struct snd_soc_component *component,
				  struct wcd_cpe_params *params)
{
	struct wcd_cpe_core *core;
	int ret = 0;
	struct snd_card *card = NULL;
	struct snd_info_entry *entry = NULL;
	char proc_name[WCD_CPE_STATE_MAX_LEN];
	const char *cpe_name = "cpe";
	const char *state_name = "_state";
	const struct cpe_svc_hw_cfg *hw_info;
	int id = 0;

	if (wcd_cpe_validate_params(component, params))
		return NULL;

	core = kzalloc(sizeof(struct wcd_cpe_core), GFP_KERNEL);
	if (!core)
		return NULL;

	snprintf(core->fname, sizeof(core->fname), "%s", img_fname);
	strlcpy(core->dyn_fname, core->fname, WCD_CPE_IMAGE_FNAME_MAX);

	wcd_get_cpe_core = params->get_cpe_core;

	core->component = params->component;
	core->dev = params->component->dev;
	core->cpe_debug_mode = params->dbg_mode;

	core->cdc_info.major_version = params->cdc_major_ver;
	core->cdc_info.minor_version = params->cdc_minor_ver;
	core->cdc_info.id = params->cdc_id;

	core->cpe_cdc_cb = params->cdc_cb;

	memcpy(&core->irq_info, &params->cdc_irq_info,
	       sizeof(core->irq_info));

	INIT_WORK(&core->load_fw_work, wcd_cpe_load_fw_image);
	INIT_WORK(&core->ssr_work, wcd_cpe_ssr_work);
	init_completion(&core->offline_compl);
	init_completion(&core->ready_compl);
	init_completion(&core->online_compl);
	init_waitqueue_head(&core->ssr_entry.offline_poll_wait);
	mutex_init(&core->ssr_lock);
	mutex_init(&core->session_lock);
	core->cpe_users = 0;
	core->cpe_clk_ref = 0;

	/*
	 * By default, during probe, it is assumed that
	 * both CPE hardware block and underlying bus to codec
	 * are ready
	 */
	core->ready_status = WCD_CPE_READY_TO_DLOAD;

	core->cpe_handle = cpe_svc_initialize(NULL, &core->cdc_info,
					      params->cpe_svc_params);
	if (!core->cpe_handle) {
		dev_err(core->dev,
			"%s: failed to initialize cpe services\n",
			__func__);
		goto fail_cpe_initialize;
	}

	core->cpe_reg_handle = cpe_svc_register(core->cpe_handle,
					wcd_cpe_svc_event_cb,
					CPE_SVC_ONLINE | CPE_SVC_OFFLINE |
					CPE_SVC_BOOT |
					CPE_SVC_CMI_CLIENTS_DEREG,
					"codec cpe handler");
	if (!core->cpe_reg_handle) {
		dev_err(core->dev,
			"%s: failed to register cpe service\n",
			__func__);
		goto fail_cpe_register;
	}

	card = component->card->snd_card;
	snprintf(proc_name, (sizeof("cpe") + sizeof("_state") +
		 sizeof(id) - 2), "%s%d%s", cpe_name, id, state_name);
	entry = snd_info_create_card_entry(card, proc_name,
					   card->proc_root);
	if (entry) {
		core->ssr_entry.entry = entry;
		core->ssr_entry.offline = 1;
		entry->size = WCD_CPE_STATE_MAX_LEN;
		entry->content = SNDRV_INFO_CONTENT_DATA;
		entry->c.ops = &wcd_cpe_state_proc_ops;
		entry->private_data = core;
		ret = snd_info_register(entry);
		if (ret < 0) {
			dev_err(core->dev,
				"%s: snd_info_register failed (%d)\n",
				__func__, ret);
			snd_info_free_entry(entry);
			entry = NULL;
		}
	} else {
		dev_err(core->dev,
			"%s: Failed to create CPE SSR status entry\n",
			__func__);
		/*
		 * Even if SSR entry creation fails, continue
		 * with image download
		 */
	}

	core_d = core;
	ret = wcd_cpe_cal_init(core);
	if (ret < 0) {
		dev_err(core->dev,
			"%s: CPE calibration init failed, err = %d\n",
			__func__, ret);
		goto fail_cpe_reset;
	}

	wcd_cpe_debugfs_init(core);

	wcd_cpe_sysfs_init(core, id);

	hw_info = cpe_svc_get_hw_cfg(core->cpe_handle);
	if (!hw_info) {
		dev_err(core->dev,
			"%s: hw info not available\n",
			__func__);
		goto schedule_dload_work;
	} else {
		core->hw_info.dram_offset = hw_info->DRAM_offset;
		core->hw_info.dram_size = hw_info->DRAM_size;
		core->hw_info.iram_offset = hw_info->IRAM_offset;
		core->hw_info.iram_size = hw_info->IRAM_size;
	}

	/* Setup the ramdump device and buffer */
	core->cpe_ramdump_dev = create_ramdump_device("cpe",
						      core->dev);
	if (!core->cpe_ramdump_dev) {
		dev_err(core->dev,
			"%s: Failed to create ramdump device\n",
			__func__);
		goto schedule_dload_work;
	}

	arch_setup_dma_ops(core->dev, 0, 0, NULL, 0);
	core->cpe_dump_v_addr = dma_alloc_coherent(core->dev,
						   core->hw_info.dram_size,
						   &core->cpe_dump_addr,
						   GFP_KERNEL);
	if (!core->cpe_dump_v_addr) {
		dev_err(core->dev,
			"%s: Failed to alloc memory for cpe dump, size = %zd\n",
			__func__, core->hw_info.dram_size);
		goto schedule_dload_work;
	} else {
		memset(core->cpe_dump_v_addr, 0, core->hw_info.dram_size);
	}

schedule_dload_work:
	core->ssr_type = WCD_CPE_INITIALIZED;
	schedule_work(&core->load_fw_work);
	return core;

fail_cpe_reset:
	cpe_svc_deregister(core->cpe_handle, core->cpe_reg_handle);

fail_cpe_register:
	cpe_svc_deinitialize(core->cpe_handle);

fail_cpe_initialize:
	kfree(core);
2065 return NULL;
2066}
2067EXPORT_SYMBOL(wcd_cpe_init);
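
/*
 * Illustrative probe-time usage sketch only, not code from this
 * driver: a codec driver would typically fill struct wcd_cpe_params
 * and call wcd_cpe_init() once the codec is up. All names on the
 * right-hand side below are placeholders.
 *
 *	struct wcd_cpe_params cpe_params;
 *
 *	memset(&cpe_params, 0, sizeof(cpe_params));
 *	cpe_params.component = component;
 *	cpe_params.get_cpe_core = my_codec_get_cpe_core;
 *	cpe_params.cdc_cb = &my_codec_cpe_callbacks;
 *	cpe_params.dbg_mode = false;
 *
 *	core = wcd_cpe_init("cpe.bin", component, &cpe_params);
 *	if (!core)
 *		dev_err(component->dev, "%s: CPE init failed\n", __func__);
 */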

/*
 * wcd_cpe_cmi_lsm_callback: callback invoked by cpe services
 *                           to notify command responses for the
 *                           lsm service
 * @param: param containing the response code and status
 *
 * This callback is registered with cpe services while registering
 * the LSM service
 */
static void wcd_cpe_cmi_lsm_callback(const struct cmi_api_notification *param)
{
	struct cmi_hdr *hdr;
	struct cpe_lsm_session *lsm_session;
	u8 session_id;

	if (!param) {
		pr_err("%s: param is null\n", __func__);
		return;
	}

	if (param->event != CMI_API_MSG) {
		pr_err("%s: unhandled event 0x%x\n", __func__, param->event);
		return;
	}

	hdr = (struct cmi_hdr *) param->message;
	session_id = CMI_HDR_GET_SESSION_ID(hdr);

	if (session_id > WCD_CPE_LSM_MAX_SESSIONS) {
		pr_err("%s: invalid lsm session id = %d\n",
		       __func__, session_id);
		return;
	}

	lsm_session = lsm_sessions[session_id];
	if (!lsm_session) {
		pr_err("%s: session %d is not allocated\n",
		       __func__, session_id);
		return;
	}

	if (hdr->opcode == CPE_CMI_BASIC_RSP_OPCODE) {
		u8 *payload = ((u8 *)param->message) + (sizeof(struct cmi_hdr));
		u8 result = payload[0];

		lsm_session->cmd_err_code = result;
		complete(&lsm_session->cmd_comp);

	} else if (hdr->opcode == CPE_LSM_SESSION_CMDRSP_SHARED_MEM_ALLOC) {
		struct cpe_cmdrsp_shmem_alloc *cmdrsp_shmem_alloc =
			(struct cpe_cmdrsp_shmem_alloc *) param->message;

		if (cmdrsp_shmem_alloc->addr == 0) {
			pr_err("%s: Failed LSM shared mem alloc\n", __func__);
			lsm_session->cmd_err_code = CMI_SHMEM_ALLOC_FAILED;
		} else {
			pr_debug("%s: LSM shared mem addr = 0x%x\n",
				 __func__, cmdrsp_shmem_alloc->addr);
			lsm_session->lsm_mem_handle = cmdrsp_shmem_alloc->addr;
			lsm_session->cmd_err_code = 0;
		}

		complete(&lsm_session->cmd_comp);

	} else if (hdr->opcode == CPE_LSM_SESSION_EVENT_DETECTION_STATUS_V2) {
		struct cpe_lsm_event_detect_v2 *event_detect_v2 =
			(struct cpe_lsm_event_detect_v2 *) param->message;

		if (!lsm_session->priv_d) {
			pr_err("%s: private data is not present\n",
			       __func__);
			return;
		}

		pr_debug("%s: event payload, status = %u, size = %u\n",
			 __func__, event_detect_v2->detection_status,
			 event_detect_v2->size);

		if (lsm_session->event_cb)
			lsm_session->event_cb(
				lsm_session->priv_d,
				event_detect_v2->detection_status,
				event_detect_v2->size,
				event_detect_v2->payload);
	}
}

/*
 * wcd_cpe_cmi_send_lsm_msg: send a message to lsm service
 * @core: handle to cpe core
 * @session: session on which to send the message
 * @message: actual message containing header and payload
 *
 * Sends a message to the LSM service for the specified session and
 * waits for the response. Should be called after acquiring the
 * session-specific mutex.
 */
static int wcd_cpe_cmi_send_lsm_msg(
	struct wcd_cpe_core *core,
	struct cpe_lsm_session *session,
	void *message)
{
	int ret = 0;
	struct cmi_hdr *hdr = message;

	pr_debug("%s: sending message with opcode 0x%x\n",
		 __func__, hdr->opcode);

	if (unlikely(!wcd_cpe_is_online_state(core))) {
		dev_err(core->dev,
			"%s: MSG not sent, CPE offline\n",
			__func__);
		goto done;
	}

	if (CMI_HDR_GET_OBM_FLAG(hdr))
		wcd_cpe_bus_vote_max_bw(core, true);

	reinit_completion(&session->cmd_comp);
	ret = cmi_send_msg(message);
	if (ret) {
		pr_err("%s: msg opcode (0x%x) send failed (%d)\n",
		       __func__, hdr->opcode, ret);
		goto rel_bus_vote;
	}

	ret = wait_for_completion_timeout(&session->cmd_comp,
					  CMI_CMD_TIMEOUT);
	if (ret > 0) {
		pr_debug("%s: command 0x%x, received response 0x%x\n",
			 __func__, hdr->opcode, session->cmd_err_code);
		if (session->cmd_err_code == CMI_SHMEM_ALLOC_FAILED)
			session->cmd_err_code = CPE_ENOMEMORY;
		if (session->cmd_err_code > 0)
			pr_err("%s: CPE returned error[%s]\n",
			       __func__, cpe_err_get_err_str(
			       session->cmd_err_code));
		ret = cpe_err_get_lnx_err_code(session->cmd_err_code);
		goto rel_bus_vote;
	} else {
		pr_err("%s: command (0x%x) send timed out\n",
		       __func__, hdr->opcode);
		ret = -ETIMEDOUT;
		goto rel_bus_vote;
	}

rel_bus_vote:
	if (CMI_HDR_GET_OBM_FLAG(hdr))
		wcd_cpe_bus_vote_max_bw(core, false);

done:
	return ret;
}

/*
 * fill_cmi_header: fill the cmi header with specified values
 *
 * @hdr: header to be updated with values
 * @session_id: session id of the header,
 *              in case of AFE service it is port_id
 * @service_id: afe/lsm, etc
 * @version: update the version field in header
 * @payload_size: size of the payload following after header
 * @opcode: opcode of the message
 * @obm_flag: indicates if this header is for obm message
 *
 */
static int fill_cmi_header(struct cmi_hdr *hdr,
			   u8 session_id, u8 service_id,
			   bool version, u8 payload_size,
			   u16 opcode, bool obm_flag)
{
	/* sanitize the data */
	if (!IS_VALID_SESSION_ID(session_id) ||
	    !IS_VALID_SERVICE_ID(service_id) ||
	    !IS_VALID_PLD_SIZE(payload_size)) {
		pr_err("Invalid header creation request\n");
		return -EINVAL;
	}

	CMI_HDR_SET_SESSION(hdr, session_id);
	CMI_HDR_SET_SERVICE(hdr, service_id);
	if (version)
		CMI_HDR_SET_VERSION(hdr, 1);
	else
		CMI_HDR_SET_VERSION(hdr, 0);

	CMI_HDR_SET_PAYLOAD_SIZE(hdr, payload_size);

	hdr->opcode = opcode;

	if (obm_flag)
		CMI_HDR_SET_OBM(hdr, CMI_OBM_FLAG_OUT_BAND);
	else
		CMI_HDR_SET_OBM(hdr, CMI_OBM_FLAG_IN_BAND);

	return 0;
}

/*
 * fill_lsm_cmd_header_v0_inband:
 *	Given the header, fill the header with information
 *	for lsm service, version 0 and inband message
 * @hdr: the cmi header to be filled.
 * @session_id: ID for the lsm session
 * @payload_size: size for cmi message payload
 * @opcode: opcode for cmi message
 */
static int fill_lsm_cmd_header_v0_inband(struct cmi_hdr *hdr,
		u8 session_id, u8 payload_size, u16 opcode)
{
	return fill_cmi_header(hdr, session_id,
			       CMI_CPE_LSM_SERVICE_ID, false,
			       payload_size, opcode, false);
}
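
/*
 * Minimal illustrative sketch (not called anywhere in this driver):
 * the typical pattern for a payload-less, version-0, in-band LSM
 * command is to fill the header and hand it to
 * wcd_cpe_cmi_send_lsm_msg() under the session lock. The STOP opcode
 * is used here purely as an example.
 */
static int __maybe_unused wcd_cpe_example_send_simple_cmd(
	struct wcd_cpe_core *core,
	struct cpe_lsm_session *session)
{
	struct cmi_hdr hdr;
	int ret;

	memset(&hdr, 0, sizeof(hdr));
	if (fill_lsm_cmd_header_v0_inband(&hdr, session->id, 0,
					  CPE_LSM_SESSION_CMD_STOP))
		return -EINVAL;

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &hdr);
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");

	return ret;
}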

/*
 * wcd_cpe_is_valid_lsm_session:
 *	Check session parameters to identify validity for the session
 * @core: handle to cpe core
 * @session: handle to the lsm session
 * @func: invoking function to be printed in error logs
 */
static int wcd_cpe_is_valid_lsm_session(struct wcd_cpe_core *core,
		struct cpe_lsm_session *session,
		const char *func)
{
	if (unlikely(IS_ERR_OR_NULL(core))) {
		pr_err("%s: invalid handle to core\n",
		       func);
		return -EINVAL;
	}

	if (unlikely(IS_ERR_OR_NULL(session))) {
		dev_err(core->dev, "%s: invalid session\n",
			func);
		return -EINVAL;
	}

	if (session->id > WCD_CPE_LSM_MAX_SESSIONS) {
		dev_err(core->dev, "%s: invalid session id (%u)\n",
			func, session->id);
		return -EINVAL;
	}

	dev_dbg(core->dev, "%s: session_id = %u\n",
		func, session->id);
	return 0;
}

static int wcd_cpe_cmd_lsm_open_tx_v2(
	struct wcd_cpe_core *core,
	struct cpe_lsm_session *session)
{
	struct cpe_lsm_cmd_open_tx_v2 cmd_open_tx_v2;
	struct cal_block_data *top_cal = NULL;
	struct audio_cal_info_lsm_top *lsm_top;
	int ret = 0;

	ret = wcd_cpe_is_valid_lsm_session(core, session,
					   __func__);
	if (ret)
		return ret;

	if (core->cal_data[WCD_CPE_LSM_CAL_TOPOLOGY_ID] == NULL) {
		dev_err(core->dev,
			"%s: LSM_TOPOLOGY cal not allocated!\n",
			__func__);
		return -EINVAL;
	}

	mutex_lock(&core->cal_data[WCD_CPE_LSM_CAL_TOPOLOGY_ID]->lock);
	top_cal = cal_utils_get_only_cal_block(
			core->cal_data[WCD_CPE_LSM_CAL_TOPOLOGY_ID]);
	if (!top_cal) {
		dev_err(core->dev,
			"%s: Failed to get LSM TOPOLOGY cal block\n",
			__func__);
		ret = -EINVAL;
		goto unlock_cal_mutex;
	}

	lsm_top = (struct audio_cal_info_lsm_top *)
			top_cal->cal_info;

	if (!lsm_top) {
		dev_err(core->dev,
			"%s: cal_info for LSM_TOPOLOGY not found\n",
			__func__);
		ret = -EINVAL;
		goto unlock_cal_mutex;
	}

	dev_dbg(core->dev,
		"%s: topology_id = 0x%x, acdb_id = 0x%x, app_type = 0x%x\n",
		__func__, lsm_top->topology, lsm_top->acdb_id,
		lsm_top->app_type);

	if (lsm_top->topology == 0) {
		dev_err(core->dev,
			"%s: topology id not sent for app_type 0x%x\n",
			__func__, lsm_top->app_type);
		ret = -EINVAL;
		goto unlock_cal_mutex;
	}

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");

	memset(&cmd_open_tx_v2, 0, sizeof(struct cpe_lsm_cmd_open_tx_v2));
	if (fill_lsm_cmd_header_v0_inband(&cmd_open_tx_v2.hdr,
			session->id, OPEN_V2_CMD_PAYLOAD_SIZE,
			CPE_LSM_SESSION_CMD_OPEN_TX_V2)) {
		ret = -EINVAL;
		goto end_ret;
	}

	cmd_open_tx_v2.topology_id = lsm_top->topology;
	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_open_tx_v2);
	if (ret)
		dev_err(core->dev,
			"%s: failed to send open_tx_v2 cmd, err = %d\n",
			__func__, ret);
	else
		session->is_topology_used = true;
end_ret:
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");

unlock_cal_mutex:
	mutex_unlock(&core->cal_data[WCD_CPE_LSM_CAL_TOPOLOGY_ID]->lock);
	return ret;
}

/*
 * wcd_cpe_cmd_lsm_open_tx: compose and send lsm open command
 * @core_handle: handle to cpe core
 * @session: session for which the command needs to be sent
 * @app_id: application id part of the command
 * @sample_rate: sample rate for this session
 */
static int wcd_cpe_cmd_lsm_open_tx(void *core_handle,
		struct cpe_lsm_session *session,
		u16 app_id, u16 sample_rate)
{
	struct cpe_lsm_cmd_open_tx cmd_open_tx;
	struct wcd_cpe_core *core = core_handle;
	int ret = 0;

	ret = wcd_cpe_is_valid_lsm_session(core, session,
					   __func__);
	if (ret)
		return ret;

	/* Try to open with topology first */
	ret = wcd_cpe_cmd_lsm_open_tx_v2(core, session);
	if (!ret)
		goto done;

	dev_dbg(core->dev, "%s: Try open_tx without topology\n",
		__func__);

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");

	memset(&cmd_open_tx, 0, sizeof(struct cpe_lsm_cmd_open_tx));
	if (fill_lsm_cmd_header_v0_inband(&cmd_open_tx.hdr,
			session->id, OPEN_CMD_PAYLOAD_SIZE,
			CPE_LSM_SESSION_CMD_OPEN_TX)) {
		ret = -EINVAL;
		goto end_ret;
	}

	cmd_open_tx.app_id = app_id;
	cmd_open_tx.sampling_rate = sample_rate;

	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_open_tx);
	if (ret)
		dev_err(core->dev,
			"%s: failed to send open_tx cmd, err = %d\n",
			__func__, ret);
end_ret:
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
done:
	return ret;
}

/*
 * wcd_cpe_cmd_lsm_close_tx: compose and send lsm close command
 * @core_handle: handle to cpe core
 * @session: session for which the command needs to be sent
 */
static int wcd_cpe_cmd_lsm_close_tx(void *core_handle,
		struct cpe_lsm_session *session)
{
	struct cmi_hdr cmd_close_tx;
	struct wcd_cpe_core *core = core_handle;
	int ret = 0;

	ret = wcd_cpe_is_valid_lsm_session(core, session,
					   __func__);
	if (ret)
		return ret;

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");

	memset(&cmd_close_tx, 0, sizeof(cmd_close_tx));
	if (fill_lsm_cmd_header_v0_inband(&cmd_close_tx, session->id,
			0, CPE_LSM_SESSION_CMD_CLOSE_TX)) {
		ret = -EINVAL;
		goto end_ret;
	}

	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_close_tx);
	if (ret)
		dev_err(core->dev,
			"%s: lsm close_tx cmd failed, err = %d\n",
			__func__, ret);
	else
		session->is_topology_used = false;
end_ret:
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
	return ret;
}

/*
 * wcd_cpe_cmd_lsm_shmem_alloc: compose and send lsm shared
 *                              memory allocation command
 * @core_handle: handle to cpe core
 * @session: session for which the command needs to be sent
 * @size: size of memory to be allocated
 */
static int wcd_cpe_cmd_lsm_shmem_alloc(void *core_handle,
		struct cpe_lsm_session *session,
		u32 size)
{
	struct cpe_cmd_shmem_alloc cmd_shmem_alloc;
	struct wcd_cpe_core *core = core_handle;
	int ret = 0;

	ret = wcd_cpe_is_valid_lsm_session(core, session,
					   __func__);
	if (ret)
		return ret;

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");

	memset(&cmd_shmem_alloc, 0, sizeof(cmd_shmem_alloc));
	if (fill_lsm_cmd_header_v0_inband(&cmd_shmem_alloc.hdr, session->id,
			SHMEM_ALLOC_CMD_PLD_SIZE,
			CPE_LSM_SESSION_CMD_SHARED_MEM_ALLOC)) {
		ret = -EINVAL;
		goto end_ret;
	}

	cmd_shmem_alloc.size = size;
	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_shmem_alloc);
	if (ret)
		dev_err(core->dev,
			"%s: lsm_shmem_alloc cmd send fail, %d\n",
			__func__, ret);
end_ret:
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
	return ret;
}

/*
 * wcd_cpe_cmd_lsm_shmem_dealloc: deallocate the shared memory
 *                                for the specified session
 * @core_handle: handle to cpe core
 * @session: session for which memory needs to be deallocated.
 */
static int wcd_cpe_cmd_lsm_shmem_dealloc(void *core_handle,
		struct cpe_lsm_session *session)
{
	struct cpe_cmd_shmem_dealloc cmd_dealloc;
	struct wcd_cpe_core *core = core_handle;
	int ret = 0;

	ret = wcd_cpe_is_valid_lsm_session(core, session,
					   __func__);
	if (ret)
		return ret;

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");

	memset(&cmd_dealloc, 0, sizeof(cmd_dealloc));
	if (fill_lsm_cmd_header_v0_inband(&cmd_dealloc.hdr, session->id,
			SHMEM_DEALLOC_CMD_PLD_SIZE,
			CPE_LSM_SESSION_CMD_SHARED_MEM_DEALLOC)) {
		ret = -EINVAL;
		goto end_ret;
	}

	cmd_dealloc.addr = session->lsm_mem_handle;
	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_dealloc);
	if (ret) {
		dev_err(core->dev,
			"%s: lsm_shmem_dealloc cmd failed, rc %d\n",
			__func__, ret);
		goto end_ret;
	}

	memset(&session->lsm_mem_handle, 0,
	       sizeof(session->lsm_mem_handle));

end_ret:
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
	return ret;
}

/*
 * wcd_cpe_send_lsm_cal: send the calibration for lsm service
 *                       from acdb to the cpe
 * @core: handle to cpe core
 * @session: session for which the calibration needs to be set.
 */
static int wcd_cpe_send_lsm_cal(
	struct wcd_cpe_core *core,
	struct cpe_lsm_session *session)
{
	u8 *msg_pld;
	struct cmi_hdr *hdr;
	struct cal_block_data *lsm_cal = NULL;
	void *inb_msg;
	int rc = 0;

	if (core->cal_data[WCD_CPE_LSM_CAL_LSM] == NULL) {
		pr_err("%s: LSM cal not allocated!\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&core->cal_data[WCD_CPE_LSM_CAL_LSM]->lock);
	lsm_cal = cal_utils_get_only_cal_block(
			core->cal_data[WCD_CPE_LSM_CAL_LSM]);
	if (!lsm_cal) {
		pr_err("%s: failed to get lsm cal block\n", __func__);
		rc = -EINVAL;
		goto unlock_cal_mutex;
	}

	if (lsm_cal->cal_data.size == 0) {
		dev_dbg(core->dev, "%s: No LSM cal to send\n",
			__func__);
		rc = 0;
		goto unlock_cal_mutex;
	}

	inb_msg = kzalloc(sizeof(struct cmi_hdr) + lsm_cal->cal_data.size,
			  GFP_KERNEL);
	if (!inb_msg) {
		rc = -ENOMEM;
		goto unlock_cal_mutex;
	}

	hdr = (struct cmi_hdr *) inb_msg;

	rc = fill_lsm_cmd_header_v0_inband(hdr, session->id,
					   lsm_cal->cal_data.size,
					   CPE_LSM_SESSION_CMD_SET_PARAMS);
	if (rc) {
		pr_err("%s: invalid params for header, err = %d\n",
		       __func__, rc);
		goto free_msg;
	}

	msg_pld = ((u8 *) inb_msg) + sizeof(struct cmi_hdr);
	memcpy(msg_pld, lsm_cal->cal_data.kvaddr,
	       lsm_cal->cal_data.size);

	rc = wcd_cpe_cmi_send_lsm_msg(core, session, inb_msg);
	if (rc)
		pr_err("%s: acdb lsm_params send failed, err = %d\n",
		       __func__, rc);

free_msg:
	kfree(inb_msg);

unlock_cal_mutex:
	mutex_unlock(&core->cal_data[WCD_CPE_LSM_CAL_LSM]->lock);
	return rc;
}

static void wcd_cpe_set_param_data(struct cpe_param_data *param_d,
		struct cpe_lsm_ids *ids, u32 p_size,
		u32 set_param_cmd)
{
	param_d->module_id = ids->module_id;
	param_d->param_id = ids->param_id;

	switch (set_param_cmd) {
	case CPE_LSM_SESSION_CMD_SET_PARAMS_V2:
		param_d->p_size.param_size = p_size;
		break;
	case CPE_LSM_SESSION_CMD_SET_PARAMS:
	default:
		param_d->p_size.sr.param_size =
			(u16) p_size;
		param_d->p_size.sr.reserved = 0;
		break;
	}
}
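
/*
 * Illustrative sketch (not called anywhere in this driver) of how the
 * same parameter header is sized for the two set-params opcodes: the
 * V2 opcode carries a full 32-bit param_size, while the legacy opcode
 * packs a 16-bit size plus a reserved half-word. The ids and the
 * 64-byte size below are arbitrary example values.
 */
static void __maybe_unused wcd_cpe_example_param_data(void)
{
	struct cpe_param_data pd_v2, pd_v0;
	struct cpe_lsm_ids ids = {
		.module_id = CPE_LSM_MODULE_ID_VOICE_WAKEUP,
		.param_id = CPE_LSM_PARAM_ID_OPERATION_MODE,
	};

	/* V2: param_size occupies the whole 32-bit field */
	wcd_cpe_set_param_data(&pd_v2, &ids, 64,
			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);

	/* legacy: 16-bit size, 16-bit reserved */
	wcd_cpe_set_param_data(&pd_v0, &ids, 64,
			       CPE_LSM_SESSION_CMD_SET_PARAMS);
}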

static int wcd_cpe_send_param_epd_thres(struct wcd_cpe_core *core,
		struct cpe_lsm_session *session,
		void *data, struct cpe_lsm_ids *ids)
{
	struct snd_lsm_ep_det_thres *ep_det_data;
	struct cpe_lsm_param_epd_thres epd_cmd;
	struct cmi_hdr *msg_hdr = &epd_cmd.hdr;
	struct cpe_param_data *param_d =
		&epd_cmd.param;
	int rc;

	memset(&epd_cmd, 0, sizeof(epd_cmd));
	ep_det_data = (struct snd_lsm_ep_det_thres *) data;
	if (fill_lsm_cmd_header_v0_inband(msg_hdr,
				session->id,
				CPE_CMD_EPD_THRES_PLD_SIZE,
				CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
		rc = -EINVAL;
		goto err_ret;
	}

	wcd_cpe_set_param_data(param_d, ids,
			       CPE_EPD_THRES_PARAM_SIZE,
			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);

	epd_cmd.minor_version = 1;
	epd_cmd.epd_begin = ep_det_data->epd_begin;
	epd_cmd.epd_end = ep_det_data->epd_end;

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
	rc = wcd_cpe_cmi_send_lsm_msg(core, session, &epd_cmd);
	if (unlikely(rc))
		dev_err(core->dev,
			"%s: set_param(EPD Threshold) failed, rc %d\n",
			__func__, rc);
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
err_ret:
	return rc;
}

static int wcd_cpe_send_param_opmode(struct wcd_cpe_core *core,
		struct cpe_lsm_session *session,
		void *data, struct cpe_lsm_ids *ids)
{
	struct snd_lsm_detect_mode *opmode_d;
	struct cpe_lsm_param_opmode opmode_cmd;
	struct cmi_hdr *msg_hdr = &opmode_cmd.hdr;
	struct cpe_param_data *param_d =
		&opmode_cmd.param;
	int rc;

	memset(&opmode_cmd, 0, sizeof(opmode_cmd));
	opmode_d = (struct snd_lsm_detect_mode *) data;
	if (fill_lsm_cmd_header_v0_inband(msg_hdr,
				session->id,
				CPE_CMD_OPMODE_PLD_SIZE,
				CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
		rc = -EINVAL;
		goto err_ret;
	}

	wcd_cpe_set_param_data(param_d, ids,
			       CPE_OPMODE_PARAM_SIZE,
			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);

	opmode_cmd.minor_version = 1;
	if (opmode_d->mode == LSM_MODE_KEYWORD_ONLY_DETECTION)
		opmode_cmd.mode = 1;
	else
		opmode_cmd.mode = 3;

	if (opmode_d->detect_failure)
		opmode_cmd.mode |= 0x04;

	opmode_cmd.reserved = 0;

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
	rc = wcd_cpe_cmi_send_lsm_msg(core, session, &opmode_cmd);
	if (unlikely(rc))
		dev_err(core->dev,
			"%s: set_param(operation_mode) failed, rc %d\n",
			__func__, rc);
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
err_ret:
	return rc;
}

static int wcd_cpe_send_param_gain(struct wcd_cpe_core *core,
		struct cpe_lsm_session *session,
		void *data, struct cpe_lsm_ids *ids)
{
	struct snd_lsm_gain *gain_d;
	struct cpe_lsm_param_gain gain_cmd;
	struct cmi_hdr *msg_hdr = &gain_cmd.hdr;
	struct cpe_param_data *param_d =
		&gain_cmd.param;
	int rc;

	memset(&gain_cmd, 0, sizeof(gain_cmd));
	gain_d = (struct snd_lsm_gain *) data;
	if (fill_lsm_cmd_header_v0_inband(msg_hdr,
				session->id,
				CPE_CMD_GAIN_PLD_SIZE,
				CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
		rc = -EINVAL;
		goto err_ret;
	}

	wcd_cpe_set_param_data(param_d, ids,
			       CPE_GAIN_PARAM_SIZE,
			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);

	gain_cmd.minor_version = 1;
	gain_cmd.gain = gain_d->gain;
	gain_cmd.reserved = 0;

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
	rc = wcd_cpe_cmi_send_lsm_msg(core, session, &gain_cmd);
	if (unlikely(rc))
		dev_err(core->dev,
			"%s: set_param(lsm_gain) failed, rc %d\n",
			__func__, rc);
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
err_ret:
	return rc;
}

static int wcd_cpe_send_param_connectport(struct wcd_cpe_core *core,
		struct cpe_lsm_session *session,
		void *data, struct cpe_lsm_ids *ids, u16 port_id)
{
	struct cpe_lsm_param_connectport con_port_cmd;
	struct cmi_hdr *msg_hdr = &con_port_cmd.hdr;
	struct cpe_param_data *param_d =
		&con_port_cmd.param;
	int rc;

	memset(&con_port_cmd, 0, sizeof(con_port_cmd));
	if (fill_lsm_cmd_header_v0_inband(msg_hdr,
				session->id,
				CPE_CMD_CONNECTPORT_PLD_SIZE,
				CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
		rc = -EINVAL;
		goto err_ret;
	}

	wcd_cpe_set_param_data(param_d, ids,
			       CPE_CONNECTPORT_PARAM_SIZE,
			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);

	con_port_cmd.minor_version = 1;
	con_port_cmd.afe_port_id = port_id;
	con_port_cmd.reserved = 0;

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
	rc = wcd_cpe_cmi_send_lsm_msg(core, session, &con_port_cmd);
	if (unlikely(rc))
		dev_err(core->dev,
			"%s: set_param(connect_port) failed, rc %d\n",
			__func__, rc);
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
err_ret:
	return rc;
}

static int wcd_cpe_send_param_conf_levels(
	struct wcd_cpe_core *core,
	struct cpe_lsm_session *session,
	struct cpe_lsm_ids *ids)
{
	struct cpe_lsm_conf_level conf_level_data;
	struct cmi_hdr *hdr = &(conf_level_data.hdr);
	struct cpe_param_data *param_d = &(conf_level_data.param);
	u8 pld_size = 0;
	u8 pad_bytes = 0;
	void *message;
	int ret = 0;

	memset(&conf_level_data, 0, sizeof(conf_level_data));

	pld_size = (sizeof(struct cpe_lsm_conf_level) - sizeof(struct cmi_hdr));
	pld_size += session->num_confidence_levels;
	pad_bytes = ((4 - (pld_size % 4)) % 4);
	pld_size += pad_bytes;

	fill_cmi_header(hdr, session->id, CMI_CPE_LSM_SERVICE_ID,
			false, pld_size,
			CPE_LSM_SESSION_CMD_SET_PARAMS_V2, false);

	wcd_cpe_set_param_data(param_d, ids,
			       pld_size - sizeof(struct cpe_param_data),
			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);

	conf_level_data.num_active_models = session->num_confidence_levels;

	message = kzalloc(sizeof(struct cpe_lsm_conf_level) +
			  conf_level_data.num_active_models + pad_bytes,
			  GFP_KERNEL);
	if (!message) {
		pr_err("%s: no memory for conf_level\n", __func__);
		return -ENOMEM;
	}

	memcpy(message, &conf_level_data,
	       sizeof(struct cpe_lsm_conf_level));
	memcpy(((u8 *) message) + sizeof(struct cpe_lsm_conf_level),
	       session->conf_levels, conf_level_data.num_active_models);

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
	ret = wcd_cpe_cmi_send_lsm_msg(core, session, message);
	if (ret)
		pr_err("%s: lsm_set_conf_levels failed, err = %d\n",
		       __func__, ret);
	kfree(message);
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
	return ret;
}
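
/*
 * Worked example of the padding math in
 * wcd_cpe_send_param_conf_levels(), using purely hypothetical sizes:
 * if the fixed payload portion (struct cpe_lsm_conf_level minus its
 * cmi_hdr) were 13 bytes and the session carried 2 confidence levels,
 * then pld_size = 13 + 2 = 15 and pad_bytes = (4 - (15 % 4)) % 4 = 1,
 * so 16 bytes of payload go out and the CMI payload length stays
 * 4-byte aligned.
 */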

static int wcd_cpe_send_param_snd_model(struct wcd_cpe_core *core,
	struct cpe_lsm_session *session, struct cpe_lsm_ids *ids)
{
	int ret = 0;
	struct cmi_obm_msg obm_msg;
	struct cpe_param_data *param_d;

	memset(&obm_msg, 0, sizeof(obm_msg));

	ret = fill_cmi_header(&obm_msg.hdr, session->id,
			      CMI_CPE_LSM_SERVICE_ID, 0, 20,
			      CPE_LSM_SESSION_CMD_SET_PARAMS_V2, true);
	if (ret) {
		dev_err(core->dev,
			"%s: Invalid parameters, rc = %d\n",
			__func__, ret);
		goto err_ret;
	}

	obm_msg.pld.version = 0;
	obm_msg.pld.size = session->snd_model_size;
	obm_msg.pld.data_ptr.kvaddr = session->snd_model_data;
	obm_msg.pld.mem_handle = session->lsm_mem_handle;

	param_d = (struct cpe_param_data *) session->snd_model_data;
	wcd_cpe_set_param_data(param_d, ids,
			       (session->snd_model_size - sizeof(*param_d)),
			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &obm_msg);
	if (ret)
		dev_err(core->dev,
			"%s: snd_model_register failed, %d\n",
			__func__, ret);
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");

err_ret:
	return ret;
}

static int wcd_cpe_send_param_dereg_model(
	struct wcd_cpe_core *core,
	struct cpe_lsm_session *session,
	struct cpe_lsm_ids *ids)
{
	struct cmi_hdr *hdr;
	struct cpe_param_data *param_d;
	u8 *message;
	u32 pld_size;
	int rc = 0;

	pld_size = sizeof(*hdr) + sizeof(*param_d);

	message = kzalloc(pld_size, GFP_KERNEL);
	if (!message)
		return -ENOMEM;

	hdr = (struct cmi_hdr *) message;
	param_d = (struct cpe_param_data *)
			(((u8 *) message) + sizeof(*hdr));

	if (fill_lsm_cmd_header_v0_inband(hdr,
				session->id,
				sizeof(*param_d),
				CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
		rc = -EINVAL;
		goto err_ret;
	}
	wcd_cpe_set_param_data(param_d, ids, 0,
			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);
	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
	rc = wcd_cpe_cmi_send_lsm_msg(core, session, message);
	if (rc)
		dev_err(core->dev,
			"%s: snd_model_deregister failed, %d\n",
			__func__, rc);
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
err_ret:
	kfree(message);
	return rc;
}

static int wcd_cpe_send_custom_param(
	struct wcd_cpe_core *core,
	struct cpe_lsm_session *session,
	void *data, u32 msg_size)
{
	u8 *msg;
	struct cmi_hdr *hdr;
	u8 *msg_pld;
	int rc;

	if (msg_size > CMI_INBAND_MESSAGE_SIZE) {
		dev_err(core->dev,
			"%s: out of band custom params not supported\n",
			__func__);
		return -EINVAL;
	}

	msg = kzalloc(sizeof(*hdr) + msg_size, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = (struct cmi_hdr *) msg;
	msg_pld = msg + sizeof(struct cmi_hdr);

	if (fill_lsm_cmd_header_v0_inband(hdr,
				session->id,
				msg_size,
				CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
		rc = -EINVAL;
		goto err_ret;
	}

	memcpy(msg_pld, data, msg_size);
	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
	rc = wcd_cpe_cmi_send_lsm_msg(core, session, msg);
	if (rc)
		dev_err(core->dev,
			"%s: custom params send failed, err = %d\n",
			__func__, rc);
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
err_ret:
	kfree(msg);
	return rc;
}

static int wcd_cpe_set_one_param(void *core_handle,
	struct cpe_lsm_session *session, struct lsm_params_info *p_info,
	void *data, uint32_t param_type)
{
	struct wcd_cpe_core *core = core_handle;
	int rc = 0;
	struct cpe_lsm_ids ids;

	memset(&ids, 0, sizeof(ids));
	ids.module_id = p_info->module_id;
	ids.param_id = p_info->param_id;

	switch (param_type) {
	case LSM_ENDPOINT_DETECT_THRESHOLD:
		rc = wcd_cpe_send_param_epd_thres(core, session,
						  data, &ids);
		break;
	case LSM_OPERATION_MODE:
		rc = wcd_cpe_send_param_opmode(core, session, data, &ids);
		break;
	case LSM_GAIN:
		rc = wcd_cpe_send_param_gain(core, session, data, &ids);
		break;
	case LSM_MIN_CONFIDENCE_LEVELS:
		rc = wcd_cpe_send_param_conf_levels(core, session, &ids);
		break;
	case LSM_REG_SND_MODEL:
		rc = wcd_cpe_send_param_snd_model(core, session, &ids);
		break;
	case LSM_DEREG_SND_MODEL:
		rc = wcd_cpe_send_param_dereg_model(core, session, &ids);
		break;
	case LSM_CUSTOM_PARAMS:
		rc = wcd_cpe_send_custom_param(core, session,
					       data, p_info->param_size);
		break;
	default:
		pr_err("%s: wrong param_type 0x%x\n",
		       __func__, param_type);
	}

	if (rc)
		dev_err(core->dev,
			"%s: send_param(%d) failed, err %d\n",
			__func__, param_type, rc);
	return rc;
}
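
/*
 * Illustrative usage sketch only: a client holding the ops table
 * filled by wcd_cpe_get_lsm_ops() could set a single parameter, for
 * example the gain, as below. The values assigned to p_info are
 * placeholders for whatever module/param ids the client's platform
 * uses.
 *
 *	struct snd_lsm_gain gain = { .gain = 10 };
 *	struct lsm_params_info p_info = { 0 };
 *
 *	p_info.param_size = sizeof(gain);
 *	rc = lsm_ops->lsm_set_one_param(core, session, &p_info,
 *					&gain, LSM_GAIN);
 */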

/*
 * wcd_cpe_lsm_set_params: set the parameters for lsm service
 * @core: handle to cpe core
 * @session: session for which the parameters are to be set
 * @detect_mode: mode for detection
 * @detect_failure: flag indicating failure detection enabled/disabled
 *
 */
static int wcd_cpe_lsm_set_params(struct wcd_cpe_core *core,
		struct cpe_lsm_session *session,
		enum lsm_detection_mode detect_mode, bool detect_failure)
{
	struct cpe_lsm_ids ids;
	struct snd_lsm_detect_mode det_mode;

	int ret = 0;

	/* Send lsm calibration */
	ret = wcd_cpe_send_lsm_cal(core, session);
	if (ret) {
		pr_err("%s: failed to send acdb cal, err = %d\n",
		       __func__, ret);
		goto err_ret;
	}

	/* Send operation mode */
	ids.module_id = CPE_LSM_MODULE_ID_VOICE_WAKEUP;
	ids.param_id = CPE_LSM_PARAM_ID_OPERATION_MODE;
	det_mode.mode = detect_mode;
	det_mode.detect_failure = detect_failure;
	ret = wcd_cpe_send_param_opmode(core, session,
					&det_mode, &ids);
	if (ret)
		dev_err(core->dev,
			"%s: Failed to set opmode, err=%d\n",
			__func__, ret);

err_ret:
	return ret;
}

static int wcd_cpe_lsm_set_data(void *core_handle,
		struct cpe_lsm_session *session,
		enum lsm_detection_mode detect_mode,
		bool detect_failure)
{
	struct wcd_cpe_core *core = core_handle;
	struct cpe_lsm_ids ids;
	int ret = 0;

	if (session->num_confidence_levels > 0) {
		ret = wcd_cpe_lsm_set_params(core, session, detect_mode,
					     detect_failure);
		if (ret) {
			dev_err(core->dev,
				"%s: lsm set params failed, rc = %d\n",
				__func__, ret);
			goto err_ret;
		}

		ids.module_id = CPE_LSM_MODULE_ID_VOICE_WAKEUP;
		ids.param_id = CPE_LSM_PARAM_ID_MIN_CONFIDENCE_LEVELS;
		ret = wcd_cpe_send_param_conf_levels(core, session, &ids);
		if (ret) {
			dev_err(core->dev,
				"%s: lsm confidence levels failed, rc = %d\n",
				__func__, ret);
			goto err_ret;
		}
	} else {
		dev_dbg(core->dev,
			"%s: no conf levels to set\n",
			__func__);
	}

err_ret:
	return ret;
}

/*
 * wcd_cpe_lsm_reg_snd_model: register the sound model for listen
 * @session: session for which to register the sound model
 * @detect_mode: detection mode, user dependent/independent
 * @detect_failure: flag to indicate if failure detection is enabled
 *
 * The memory required for sound model should be pre-allocated on CPE
 * before this function is invoked.
 */
static int wcd_cpe_lsm_reg_snd_model(void *core_handle,
		struct cpe_lsm_session *session,
		enum lsm_detection_mode detect_mode,
		bool detect_failure)
{
	int ret = 0;
	struct cmi_obm_msg obm_msg;
	struct wcd_cpe_core *core = core_handle;

	ret = wcd_cpe_is_valid_lsm_session(core, session,
					   __func__);
	if (ret)
		return ret;

	ret = wcd_cpe_lsm_set_data(core_handle, session,
				   detect_mode, detect_failure);
	if (ret) {
		dev_err(core->dev,
			"%s: failed to set lsm data, err = %d\n",
			__func__, ret);
		return ret;
	}

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");

	/* Zero the message so no uninitialized stack data is sent */
	memset(&obm_msg, 0, sizeof(obm_msg));

	ret = fill_cmi_header(&obm_msg.hdr, session->id,
			      CMI_CPE_LSM_SERVICE_ID, 0, 20,
			      CPE_LSM_SESSION_CMD_REGISTER_SOUND_MODEL, true);
	if (ret) {
		dev_err(core->dev,
			"%s: Invalid parameters, rc = %d\n",
			__func__, ret);
		goto err_ret;
	}

	obm_msg.pld.version = 0;
	obm_msg.pld.size = session->snd_model_size;
	obm_msg.pld.data_ptr.kvaddr = session->snd_model_data;
	obm_msg.pld.mem_handle = session->lsm_mem_handle;

	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &obm_msg);
	if (ret)
		dev_err(core->dev,
			"%s: snd_model_register failed, %d\n",
			__func__, ret);
err_ret:
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
	return ret;
}

/*
 * wcd_cpe_lsm_dereg_snd_model: deregister the sound model for listen
 * @core_handle: handle to cpe core
 * @session: session for which to deregister the sound model
 *
 */
static int wcd_cpe_lsm_dereg_snd_model(void *core_handle,
		struct cpe_lsm_session *session)
{
	struct cmi_hdr cmd_dereg_snd_model;
	struct wcd_cpe_core *core = core_handle;
	int ret = 0;

	ret = wcd_cpe_is_valid_lsm_session(core, session,
					   __func__);
	if (ret)
		return ret;

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");

	memset(&cmd_dereg_snd_model, 0, sizeof(cmd_dereg_snd_model));
	if (fill_lsm_cmd_header_v0_inband(&cmd_dereg_snd_model, session->id,
			0, CPE_LSM_SESSION_CMD_DEREGISTER_SOUND_MODEL)) {
		ret = -EINVAL;
		goto end_ret;
	}

	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_dereg_snd_model);
	if (ret)
		dev_err(core->dev,
			"%s: failed to send dereg_snd_model cmd\n",
			__func__);
end_ret:
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
	return ret;
}

/*
 * wcd_cpe_lsm_get_afe_out_port_id: get afe output port id
 * @core_handle: handle to the CPE core
 * @session: session whose AFE output port id is to be fetched
 */
static int wcd_cpe_lsm_get_afe_out_port_id(void *core_handle,
		struct cpe_lsm_session *session)
{
	struct wcd_cpe_core *core = core_handle;
	struct snd_soc_component *component;
	int rc = 0;

	if (!core || !core->component) {
		pr_err("%s: Invalid handle to %s\n",
		       __func__,
		       (!core) ? "core" : "codec");
		rc = -EINVAL;
		goto done;
	}

	if (!session) {
		dev_err(core->dev, "%s: Invalid session\n",
			__func__);
		rc = -EINVAL;
		goto done;
	}

	if (!core->cpe_cdc_cb ||
	    !core->cpe_cdc_cb->get_afe_out_port_id) {
		session->afe_out_port_id = WCD_CPE_AFE_OUT_PORT_2;
		dev_dbg(core->dev,
			"%s: callback not defined, default port_id = %d\n",
			__func__, session->afe_out_port_id);
		goto done;
	}

	component = core->component;
	rc = core->cpe_cdc_cb->get_afe_out_port_id(component,
					&session->afe_out_port_id);
	if (rc) {
		dev_err(core->dev,
			"%s: failed to get port id, err = %d\n",
			__func__, rc);
		goto done;
	}
	dev_dbg(core->dev, "%s: port_id: %d\n", __func__,
		session->afe_out_port_id);

done:
	return rc;
}

/*
 * wcd_cpe_cmd_lsm_start: send the start command to lsm
 * @core_handle: handle to the CPE core
 * @session: session for which start command to be sent
 *
 */
static int wcd_cpe_cmd_lsm_start(void *core_handle,
		struct cpe_lsm_session *session)
{
	struct cmi_hdr cmd_lsm_start;
	struct wcd_cpe_core *core = core_handle;
	int ret = 0;

	ret = wcd_cpe_is_valid_lsm_session(core, session,
					   __func__);
	if (ret)
		return ret;

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");

	memset(&cmd_lsm_start, 0, sizeof(struct cmi_hdr));
	if (fill_lsm_cmd_header_v0_inband(&cmd_lsm_start, session->id, 0,
					  CPE_LSM_SESSION_CMD_START)) {
		ret = -EINVAL;
		goto end_ret;
	}

	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_lsm_start);
	if (ret)
		dev_err(core->dev, "failed to send lsm_start cmd\n");
end_ret:
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
	return ret;
}

/*
 * wcd_cpe_cmd_lsm_stop: send the stop command for LSM service
 * @core_handle: handle to the cpe core
 * @session: session for which stop command to be sent
 *
 */
static int wcd_cpe_cmd_lsm_stop(void *core_handle,
		struct cpe_lsm_session *session)
{
	struct cmi_hdr cmd_lsm_stop;
	struct wcd_cpe_core *core = core_handle;
	int ret = 0;

	ret = wcd_cpe_is_valid_lsm_session(core, session,
					   __func__);
	if (ret)
		return ret;

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");

	memset(&cmd_lsm_stop, 0, sizeof(struct cmi_hdr));
	if (fill_lsm_cmd_header_v0_inband(&cmd_lsm_stop, session->id, 0,
					  CPE_LSM_SESSION_CMD_STOP)) {
		ret = -EINVAL;
		goto end_ret;
	}

	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_lsm_stop);
	if (ret)
		dev_err(core->dev,
			"%s: failed to send lsm_stop cmd\n",
			__func__);
end_ret:
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
	return ret;
}

/*
 * wcd_cpe_alloc_lsm_session: allocate a lsm session
 * @core_handle: handle to wcd_cpe_core
 * @client_data: client private data, passed back in the event callback
 * @event_cb: callback invoked on detection events for this session
 */
static struct cpe_lsm_session *wcd_cpe_alloc_lsm_session(
	void *core_handle, void *client_data,
	void (*event_cb)(void *, u8, u8, u8 *))
{
	struct cpe_lsm_session *session;
	int i, session_id = -1;
	struct wcd_cpe_core *core = core_handle;
	bool afe_register_service = false;
	int ret = 0;

	/*
	 * Even if multiple listen sessions can be
	 * allocated, the AFE service registration
	 * should be done only once as CPE can only
	 * have one instance of AFE service.
	 *
	 * If this is the first session to be allocated,
	 * only then register the afe service.
	 */
	WCD_CPE_GRAB_LOCK(&core->session_lock, "session_lock");
	if (!wcd_cpe_lsm_session_active())
		afe_register_service = true;

	for (i = 1; i <= WCD_CPE_LSM_MAX_SESSIONS; i++) {
		if (!lsm_sessions[i]) {
			session_id = i;
			break;
		}
	}

	if (session_id < 0) {
		dev_err(core->dev,
			"%s: max allowed sessions already allocated\n",
			__func__);
		WCD_CPE_REL_LOCK(&core->session_lock, "session_lock");
		return NULL;
	}

	ret = wcd_cpe_vote(core, true);
	if (ret) {
		dev_err(core->dev,
			"%s: Failed to enable cpe, err = %d\n",
			__func__, ret);
		WCD_CPE_REL_LOCK(&core->session_lock, "session_lock");
		return NULL;
	}

	session = kzalloc(sizeof(struct cpe_lsm_session), GFP_KERNEL);
	if (!session)
		goto err_session_alloc;

	session->id = session_id;
	session->event_cb = event_cb;
	session->cmi_reg_handle = cmi_register(wcd_cpe_cmi_lsm_callback,
					       CMI_CPE_LSM_SERVICE_ID);
	if (!session->cmi_reg_handle) {
		dev_err(core->dev,
			"%s: Failed to register LSM service with CMI\n",
			__func__);
		goto err_ret;
	}
	session->priv_d = client_data;
	mutex_init(&session->lsm_lock);
	if (afe_register_service) {
		/* Register for AFE Service */
		core->cmi_afe_handle = cmi_register(wcd_cpe_cmi_afe_cb,
						    CMI_CPE_AFE_SERVICE_ID);
		wcd_cpe_initialize_afe_port_data();
		if (!core->cmi_afe_handle) {
			dev_err(core->dev,
				"%s: Failed to register AFE service with CMI\n",
				__func__);
			goto err_afe_svc_reg;
		}

		/* Once AFE service is registered, send the mode command */
		ret = wcd_cpe_afe_svc_cmd_mode(core,
					       AFE_SVC_EXPLICIT_PORT_START);
		if (ret)
			goto err_afe_mode_cmd;
	}

	session->lsm_mem_handle = 0;
	init_completion(&session->cmd_comp);

	lsm_sessions[session_id] = session;

	WCD_CPE_REL_LOCK(&core->session_lock, "session_lock");
	return session;

err_afe_mode_cmd:
	cmi_deregister(core->cmi_afe_handle);

err_afe_svc_reg:
	cmi_deregister(session->cmi_reg_handle);
	mutex_destroy(&session->lsm_lock);

err_ret:
	kfree(session);

err_session_alloc:
	wcd_cpe_vote(core, false);
	WCD_CPE_REL_LOCK(&core->session_lock, "session_lock");
	return NULL;
}
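
/*
 * Illustrative client sketch only (all names are placeholders): a
 * listen client allocates a session by supplying a detection event
 * callback that matches the event_cb signature used above.
 *
 *	static void my_lsm_event_cb(void *priv, u8 detect_status,
 *				    u8 size, u8 *payload)
 *	{
 *		handle the detection event for this client
 *	}
 *
 *	session = lsm_ops->lsm_alloc_session(core, my_client_data,
 *					     my_lsm_event_cb);
 *
 * A NULL return here means no free session slot, a CMI registration
 * failure, or that the CPE could not be enabled.
 */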

/*
 * wcd_cpe_lsm_config_lab_latency: send lab latency value
 * @core: handle to wcd_cpe_core
 * @session: lsm session
 * @latency: the value of latency for lab setup in msec
 */
static int wcd_cpe_lsm_config_lab_latency(
		struct wcd_cpe_core *core,
		struct cpe_lsm_session *session,
		u32 latency)
{
	int ret = 0, pld_size = CPE_PARAM_LSM_LAB_LATENCY_SIZE;
	struct cpe_lsm_lab_latency_config cpe_lab_latency;
	struct cpe_lsm_lab_config *lab_lat = &cpe_lab_latency.latency_cfg;
	struct cpe_param_data *param_d = &lab_lat->param;
	struct cpe_lsm_ids ids;

	if (fill_lsm_cmd_header_v0_inband(&cpe_lab_latency.hdr, session->id,
			(u8) pld_size, CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
		pr_err("%s: Failed to create header\n", __func__);
		return -EINVAL;
	}
	if (latency == 0x00 || latency > WCD_CPE_LAB_MAX_LATENCY) {
		pr_err("%s: Invalid latency %u\n",
		       __func__, latency);
		return -EINVAL;
	}

	lab_lat->latency = latency;
	lab_lat->minor_ver = 1;
	ids.module_id = CPE_LSM_MODULE_ID_LAB;
	ids.param_id = CPE_LSM_PARAM_ID_LAB_CONFIG;
	wcd_cpe_set_param_data(param_d, &ids,
			       PARAM_SIZE_LSM_LATENCY_SIZE,
			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);

	pr_debug("%s: Module 0x%x Param 0x%x size %zu pld_size 0x%x\n",
		 __func__, lab_lat->param.module_id,
		 lab_lat->param.param_id, PARAM_SIZE_LSM_LATENCY_SIZE,
		 pld_size);

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cpe_lab_latency);
	if (ret != 0)
		pr_err("%s: lsm_set_params failed, error = %d\n",
		       __func__, ret);
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
	return ret;
}

/*
 * wcd_cpe_lsm_lab_control: enable/disable lab
 * @core: handle to wcd_cpe_core
 * @session: lsm session
 * @enable: Indicates whether to enable / disable lab
 */
static int wcd_cpe_lsm_lab_control(
		void *core_handle,
		struct cpe_lsm_session *session,
		bool enable)
{
	struct wcd_cpe_core *core = core_handle;
	int ret = 0, pld_size = CPE_PARAM_SIZE_LSM_LAB_CONTROL;
	struct cpe_lsm_control_lab cpe_lab_enable;
	struct cpe_lsm_lab_enable *lab_enable = &cpe_lab_enable.lab_enable;
	struct cpe_param_data *param_d = &lab_enable->param;
	struct cpe_lsm_ids ids;

	pr_debug("%s: enter payload_size = %d Enable %d\n",
		 __func__, pld_size, enable);

	memset(&cpe_lab_enable, 0, sizeof(cpe_lab_enable));

	if (fill_lsm_cmd_header_v0_inband(&cpe_lab_enable.hdr, session->id,
			(u8) pld_size, CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
		return -EINVAL;
	}
	if (enable)
		lab_enable->enable = 1;
	else
		lab_enable->enable = 0;

	ids.module_id = CPE_LSM_MODULE_ID_LAB;
	ids.param_id = CPE_LSM_PARAM_ID_LAB_ENABLE;
	wcd_cpe_set_param_data(param_d, &ids,
			       PARAM_SIZE_LSM_CONTROL_SIZE,
			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);

	pr_debug("%s: Module 0x%x, Param 0x%x size %zu pld_size 0x%x\n",
		 __func__, lab_enable->param.module_id,
		 lab_enable->param.param_id, PARAM_SIZE_LSM_CONTROL_SIZE,
		 pld_size);

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cpe_lab_enable);
	if (ret != 0) {
		pr_err("%s: lsm_set_params failed, error = %d\n",
		       __func__, ret);
		WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
		goto done;
	}
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");

	if (lab_enable->enable)
		ret = wcd_cpe_lsm_config_lab_latency(core, session,
						     WCD_CPE_LAB_MAX_LATENCY);
done:
	return ret;
}

/*
 * wcd_cpe_lsm_eob: stop lab
 * @core: handle to wcd_cpe_core
 * @session: lsm session on which lab is active
 */
static int wcd_cpe_lsm_eob(
		struct wcd_cpe_core *core,
		struct cpe_lsm_session *session)
{
	int ret = 0;
	struct cmi_hdr lab_eob;

	if (fill_lsm_cmd_header_v0_inband(&lab_eob, session->id,
			0, CPE_LSM_SESSION_CMD_EOB)) {
		return -EINVAL;
	}

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &lab_eob);
	if (ret != 0)
		pr_err("%s: eob cmd failed\n", __func__);
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");

	return ret;
}

/*
 * wcd_cpe_dealloc_lsm_session: deallocate lsm session
 * @core_handle: handle to wcd_cpe_core
 * @session: lsm session to be deallocated
 */
static int wcd_cpe_dealloc_lsm_session(void *core_handle,
		struct cpe_lsm_session *session)
{
	struct wcd_cpe_core *core = core_handle;
	int ret = 0;

	WCD_CPE_GRAB_LOCK(&core->session_lock, "session_lock");
	if (!session) {
		dev_err(core->dev,
			"%s: Invalid lsm session\n", __func__);
		WCD_CPE_REL_LOCK(&core->session_lock, "session_lock");
		return -EINVAL;
	}

	dev_dbg(core->dev, "%s: session %d being deallocated\n",
		__func__, session->id);
	if (session->id > WCD_CPE_LSM_MAX_SESSIONS) {
		dev_err(core->dev,
			"%s: Wrong session id %d max allowed = %d\n",
			__func__, session->id,
			WCD_CPE_LSM_MAX_SESSIONS);
		WCD_CPE_REL_LOCK(&core->session_lock, "session_lock");
		return -EINVAL;
	}

	cmi_deregister(session->cmi_reg_handle);
	mutex_destroy(&session->lsm_lock);
	lsm_sessions[session->id] = NULL;
	kfree(session);

	if (!wcd_cpe_lsm_session_active()) {
		cmi_deregister(core->cmi_afe_handle);
		core->cmi_afe_handle = NULL;
		wcd_cpe_deinitialize_afe_port_data();
	}

	ret = wcd_cpe_vote(core, false);
	if (ret)
		dev_dbg(core->dev,
			"%s: Failed to un-vote cpe, err = %d\n",
			__func__, ret);

	WCD_CPE_REL_LOCK(&core->session_lock, "session_lock");
	return ret;
}

static int wcd_cpe_lab_ch_setup(void *core_handle,
		struct cpe_lsm_session *session,
		enum wcd_cpe_event event)
{
	struct wcd_cpe_core *core = core_handle;
	struct snd_soc_component *component;
	int rc = 0;
	u8 cpe_intr_bits;

	if (!core || !core->component) {
		pr_err("%s: Invalid handle to %s\n",
		       __func__,
		       (!core) ? "core" : "codec");
		rc = -EINVAL;
		goto done;
	}

	if (!core->cpe_cdc_cb ||
	    !core->cpe_cdc_cb->cdc_ext_clk ||
	    !core->cpe_cdc_cb->lab_cdc_ch_ctl) {
		dev_err(core->dev,
			"%s: Invalid codec callbacks\n",
			__func__);
		rc = -EINVAL;
		goto done;
	}

	component = core->component;
	dev_dbg(core->dev,
		"%s: event = 0x%x\n",
		__func__, event);

	switch (event) {
	case WCD_CPE_PRE_ENABLE:
		rc = core->cpe_cdc_cb->cdc_ext_clk(component, true, false);
		if (rc) {
			dev_err(core->dev,
				"%s: failed to enable cdc clk, err = %d\n",
				__func__, rc);
			goto done;
		}

		rc = core->cpe_cdc_cb->lab_cdc_ch_ctl(component,
						      true);
		if (rc) {
			dev_err(core->dev,
				"%s: failed to enable cdc port, err = %d\n",
				__func__, rc);
			rc = core->cpe_cdc_cb->cdc_ext_clk(
					component, false, false);
			goto done;
		}

		break;

	case WCD_CPE_POST_ENABLE:
		rc = cpe_svc_toggle_lab(core->cpe_handle, true);
		if (rc)
			dev_err(core->dev,
				"%s: Failed to enable lab\n", __func__);
		break;

	case WCD_CPE_PRE_DISABLE:
		/*
		 * Mask the non-fatal interrupts in CPE as they will
		 * be generated during lab teardown and may flood.
		 */
		cpe_intr_bits = ~(core->irq_info.cpe_fatal_irqs & 0xFF);
		if (CPE_ERR_IRQ_CB(core))
			core->cpe_cdc_cb->cpe_err_irq_control(
					core->component,
					CPE_ERR_IRQ_MASK,
					&cpe_intr_bits);

		rc = core->cpe_cdc_cb->lab_cdc_ch_ctl(component,
						      false);
		if (rc)
			dev_err(core->dev,
				"%s: failed to disable cdc port, err = %d\n",
				__func__, rc);
		break;

	case WCD_CPE_POST_DISABLE:
		rc = wcd_cpe_lsm_eob(core, session);
		if (rc)
			dev_err(core->dev,
				"%s: eob send failed, err = %d\n",
				__func__, rc);

		/* Continue teardown even if eob failed */
		rc = cpe_svc_toggle_lab(core->cpe_handle, false);
		if (rc)
			dev_err(core->dev,
				"%s: Failed to disable lab\n", __func__);

		/* Continue with disabling even if toggle lab fails */
		rc = core->cpe_cdc_cb->cdc_ext_clk(component, false, false);
		if (rc)
			dev_err(core->dev,
				"%s: failed to disable cdc clk, err = %d\n",
				__func__, rc);

		/* Unmask non-fatal CPE interrupts */
		cpe_intr_bits = ~(core->irq_info.cpe_fatal_irqs & 0xFF);
		if (CPE_ERR_IRQ_CB(core))
			core->cpe_cdc_cb->cpe_err_irq_control(
					core->component,
					CPE_ERR_IRQ_UNMASK,
					&cpe_intr_bits);
		break;

	default:
		dev_err(core->dev,
			"%s: Invalid event 0x%x\n",
			__func__, event);
		rc = -EINVAL;
		break;
	}

done:
	return rc;
}

static int wcd_cpe_lsm_set_fmt_cfg(void *core_handle,
		struct cpe_lsm_session *session)
{
	int ret;
	struct cpe_lsm_output_format_cfg out_fmt_cfg;
	struct wcd_cpe_core *core = core_handle;

	ret = wcd_cpe_is_valid_lsm_session(core, session, __func__);
	if (ret)
		goto done;

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");

	memset(&out_fmt_cfg, 0, sizeof(out_fmt_cfg));
	if (fill_lsm_cmd_header_v0_inband(&out_fmt_cfg.hdr,
			session->id, OUT_FMT_CFG_CMD_PAYLOAD_SIZE,
			CPE_LSM_SESSION_CMD_TX_BUFF_OUTPUT_CONFIG)) {
		ret = -EINVAL;
		goto err_ret;
	}

	out_fmt_cfg.format = session->out_fmt_cfg.format;
	out_fmt_cfg.packing = session->out_fmt_cfg.pack_mode;
	out_fmt_cfg.data_path_events = session->out_fmt_cfg.data_path_events;

	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &out_fmt_cfg);
	if (ret)
		dev_err(core->dev,
			"%s: lsm_set_output_format_cfg failed, err = %d\n",
			__func__, ret);

err_ret:
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
done:
	return ret;
}

static void wcd_cpe_snd_model_offset(void *core_handle,
		struct cpe_lsm_session *session, size_t *offset)
{
	*offset = sizeof(struct cpe_param_data);
}
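
/*
 * The offset returned above documents the out-of-band sound model
 * buffer layout: the driver reserves room for a struct cpe_param_data
 * at the start of the buffer (filled later by wcd_cpe_set_param_data()
 * in the sound-model register path), and the client copies the raw
 * model bytes immediately after it:
 *
 *	base                          base + sizeof(struct cpe_param_data)
 *	|                             |
 *	[ struct cpe_param_data ..... ][ raw sound model data ..... ]
 */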

static int wcd_cpe_lsm_set_media_fmt_params(void *core_handle,
		struct cpe_lsm_session *session,
		struct lsm_hw_params *param)
{
	struct cpe_lsm_media_fmt_param media_fmt;
	struct cmi_hdr *msg_hdr = &media_fmt.hdr;
	struct wcd_cpe_core *core = core_handle;
	struct cpe_param_data *param_d = &media_fmt.param;
	struct cpe_lsm_ids ids;
	int ret;

	memset(&media_fmt, 0, sizeof(media_fmt));
	if (fill_lsm_cmd_header_v0_inband(msg_hdr,
				session->id,
				CPE_MEDIA_FMT_PLD_SIZE,
				CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
		ret = -EINVAL;
		goto done;
	}

	memset(&ids, 0, sizeof(ids));
	ids.module_id = CPE_LSM_MODULE_FRAMEWORK;
	ids.param_id = CPE_LSM_PARAM_ID_MEDIA_FMT;

	wcd_cpe_set_param_data(param_d, &ids, CPE_MEDIA_FMT_PARAM_SIZE,
			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);

	media_fmt.minor_version = 1;
	media_fmt.sample_rate = param->sample_rate;
	media_fmt.num_channels = param->num_chs;
	media_fmt.bit_width = param->bit_width;

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &media_fmt);
	if (ret)
		dev_err(core->dev,
			"%s: Set_param(media_format) failed, err=%d\n",
			__func__, ret);
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
done:
	return ret;
}
3874
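/*
 * wcd_cpe_lsm_set_port: connect a listen session to an AFE port
 * @core_handle: handle to the cpe core
 * @session: session that is to be connected
 * @data: pointer to a u32 holding the AFE port id
 */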
static int wcd_cpe_lsm_set_port(void *core_handle,
				struct cpe_lsm_session *session, void *data)
{
	u32 port_id;
	int ret;
	struct cpe_lsm_ids ids;
	struct wcd_cpe_core *core = core_handle;

	ret = wcd_cpe_is_valid_lsm_session(core, session, __func__);
	if (ret)
		goto done;

	if (!data) {
		dev_err(core->dev, "%s: data is NULL\n", __func__);
		ret = -EINVAL;
		goto done;
	}
	port_id = *(u32 *)data;
	dev_dbg(core->dev, "%s: port_id: %d\n", __func__, port_id);

	memset(&ids, 0, sizeof(ids));
	ids.module_id = LSM_MODULE_ID_FRAMEWORK;
	ids.param_id = LSM_PARAM_ID_CONNECT_TO_PORT;

	ret = wcd_cpe_send_param_connectport(core, session, NULL,
					     &ids, port_id);
	if (ret)
		dev_err(core->dev,
			"%s: send_param_connectport failed, err %d\n",
			__func__, ret);
done:
	return ret;
}

/*
 * wcd_cpe_get_lsm_ops: fill in the lsm driver callbacks
 * @lsm_ops: structure to be populated with the lsm callbacks
 */
int wcd_cpe_get_lsm_ops(struct wcd_cpe_lsm_ops *lsm_ops)
{
	lsm_ops->lsm_alloc_session = wcd_cpe_alloc_lsm_session;
	lsm_ops->lsm_dealloc_session = wcd_cpe_dealloc_lsm_session;
	lsm_ops->lsm_open_tx = wcd_cpe_cmd_lsm_open_tx;
	lsm_ops->lsm_close_tx = wcd_cpe_cmd_lsm_close_tx;
	lsm_ops->lsm_shmem_alloc = wcd_cpe_cmd_lsm_shmem_alloc;
	lsm_ops->lsm_shmem_dealloc = wcd_cpe_cmd_lsm_shmem_dealloc;
	lsm_ops->lsm_register_snd_model = wcd_cpe_lsm_reg_snd_model;
	lsm_ops->lsm_deregister_snd_model = wcd_cpe_lsm_dereg_snd_model;
	lsm_ops->lsm_get_afe_out_port_id = wcd_cpe_lsm_get_afe_out_port_id;
	lsm_ops->lsm_start = wcd_cpe_cmd_lsm_start;
	lsm_ops->lsm_stop = wcd_cpe_cmd_lsm_stop;
	lsm_ops->lsm_lab_control = wcd_cpe_lsm_lab_control;
	lsm_ops->lab_ch_setup = wcd_cpe_lab_ch_setup;
	lsm_ops->lsm_set_data = wcd_cpe_lsm_set_data;
	lsm_ops->lsm_set_fmt_cfg = wcd_cpe_lsm_set_fmt_cfg;
	lsm_ops->lsm_set_one_param = wcd_cpe_set_one_param;
	lsm_ops->lsm_get_snd_model_offset = wcd_cpe_snd_model_offset;
	lsm_ops->lsm_set_media_fmt_params = wcd_cpe_lsm_set_media_fmt_params;
	lsm_ops->lsm_set_port = wcd_cpe_lsm_set_port;

	return 0;
}
EXPORT_SYMBOL(wcd_cpe_get_lsm_ops);

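/*
 * A minimal usage sketch for a codec driver (hypothetical caller,
 * assuming a valid core_handle and session; the exact callback
 * signatures are defined by struct wcd_cpe_lsm_ops):
 *
 *	struct wcd_cpe_lsm_ops lsm_ops;
 *
 *	memset(&lsm_ops, 0, sizeof(lsm_ops));
 *	if (wcd_cpe_get_lsm_ops(&lsm_ops))
 *		return -EINVAL;
 *	All further listen control then goes through the filled
 *	callbacks, e.g. lsm_ops.lsm_start(core_handle, session).
 */

/*
 * fill_afe_cmd_header: populate a CMI header for an AFE command
 * @hdr: header to be populated
 * @port_id: AFE port id, used as the CMI session id
 * @opcode: opcode of the command
 * @pld_size: size of the payload that follows the header
 * @obm_flag: true for out-of-band, false for in-band messages
 */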
static int fill_afe_cmd_header(struct cmi_hdr *hdr, u8 port_id,
			       u16 opcode, u8 pld_size,
			       bool obm_flag)
{
	CMI_HDR_SET_SESSION(hdr, port_id);
	CMI_HDR_SET_SERVICE(hdr, CMI_CPE_AFE_SERVICE_ID);

	CMI_HDR_SET_PAYLOAD_SIZE(hdr, pld_size);

	hdr->opcode = opcode;

	if (obm_flag)
		CMI_HDR_SET_OBM(hdr, CMI_OBM_FLAG_OUT_BAND);
	else
		CMI_HDR_SET_OBM(hdr, CMI_OBM_FLAG_IN_BAND);

	return 0;
}

/*
 * wcd_cpe_cmi_send_afe_msg: send a message to the AFE service
 * @core: wcd cpe core handle
 * @port_d: private data of the afe port to which
 *	    this message is to be sent
 * @message: actual message with header and payload
 *
 * The port-specific lock must be held before this
 * function is invoked.
 */
static int wcd_cpe_cmi_send_afe_msg(
	struct wcd_cpe_core *core,
	struct wcd_cmi_afe_port_data *port_d,
	void *message)
{
	int ret = 0;
	struct cmi_hdr *hdr = message;

	pr_debug("%s: sending message with opcode 0x%x\n",
		 __func__, hdr->opcode);

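	/*
	 * If CPE is already offline, log it but return success so
	 * that the caller's shutdown/teardown path can proceed.
	 */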
	if (unlikely(!wcd_cpe_is_online_state(core))) {
		dev_err(core->dev, "%s: CPE offline\n", __func__);
		return 0;
	}

	if (CMI_HDR_GET_OBM_FLAG(hdr))
		wcd_cpe_bus_vote_max_bw(core, true);

	ret = cmi_send_msg(message);
	if (ret) {
		pr_err("%s: cmd 0x%x send failed, err = %d\n",
		       __func__, hdr->opcode, ret);
		goto rel_bus_vote;
	}

	ret = wait_for_completion_timeout(&port_d->afe_cmd_complete,
					  CMI_CMD_TIMEOUT);
	if (ret > 0) {
		pr_debug("%s: command 0x%x, received response 0x%x\n",
			 __func__, hdr->opcode, port_d->cmd_result);
		if (port_d->cmd_result == CMI_SHMEM_ALLOC_FAILED)
			port_d->cmd_result = CPE_ENOMEMORY;
		if (port_d->cmd_result > 0)
			pr_err("%s: CPE returned error[%s]\n",
			       __func__, cpe_err_get_err_str(
			       port_d->cmd_result));
		ret = cpe_err_get_lnx_err_code(port_d->cmd_result);
		goto rel_bus_vote;
	} else {
		pr_err("%s: command 0x%x send timed out\n",
		       __func__, hdr->opcode);
		ret = -ETIMEDOUT;
		goto rel_bus_vote;
	}

rel_bus_vote:
	reinit_completion(&port_d->afe_cmd_complete);

	if (CMI_HDR_GET_OBM_FLAG(hdr))
		wcd_cpe_bus_vote_max_bw(core, false);

	return ret;
}

/*
 * wcd_cpe_afe_shmem_alloc: allocate CPE memory for the afe service
 * @core: handle to cpe core
 * @port_d: private data of the port for which memory
 *	    is to be allocated on CPE
 * @size: size of the memory to be allocated
 */
static int wcd_cpe_afe_shmem_alloc(
	struct wcd_cpe_core *core,
	struct wcd_cmi_afe_port_data *port_d,
	u32 size)
{
	struct cpe_cmd_shmem_alloc cmd_shmem_alloc;
	int ret = 0;

	pr_debug("%s: enter: size = %d\n", __func__, size);

	memset(&cmd_shmem_alloc, 0, sizeof(cmd_shmem_alloc));
	if (fill_afe_cmd_header(&cmd_shmem_alloc.hdr, port_d->port_id,
				CPE_AFE_PORT_CMD_SHARED_MEM_ALLOC,
				SHMEM_ALLOC_CMD_PLD_SIZE, false)) {
		ret = -EINVAL;
		goto end_ret;
	}

	cmd_shmem_alloc.size = size;

	ret = wcd_cpe_cmi_send_afe_msg(core, port_d, &cmd_shmem_alloc);
	if (ret) {
		pr_err("%s: afe_shmem_alloc failed, ret = %d\n",
		       __func__, ret);
		goto end_ret;
	}

	pr_debug("%s: completed %s, mem_handle = 0x%x\n",
		 __func__, "CPE_AFE_CMD_SHARED_MEM_ALLOC",
		 port_d->mem_handle);

end_ret:
	return ret;
}

/*
 * wcd_cpe_afe_shmem_dealloc: deallocate the CPE memory for
 *			      the afe service
 * @core: handle to cpe core
 * @port_d: private data of the port whose CPE memory
 *	    is to be deallocated
 *
 * The memory handle to be deallocated is taken from the
 * port private data.
 */
static int wcd_cpe_afe_shmem_dealloc(
	struct wcd_cpe_core *core,
	struct wcd_cmi_afe_port_data *port_d)
{
	struct cpe_cmd_shmem_dealloc cmd_dealloc;
	int ret = 0;

	pr_debug("%s: enter, port_id = %d\n",
		 __func__, port_d->port_id);

	memset(&cmd_dealloc, 0, sizeof(cmd_dealloc));
	if (fill_afe_cmd_header(&cmd_dealloc.hdr, port_d->port_id,
				CPE_AFE_PORT_CMD_SHARED_MEM_DEALLOC,
				SHMEM_DEALLOC_CMD_PLD_SIZE, false)) {
		ret = -EINVAL;
		goto end_ret;
	}

	cmd_dealloc.addr = port_d->mem_handle;
	ret = wcd_cpe_cmi_send_afe_msg(core, port_d, &cmd_dealloc);
	if (ret) {
		pr_err("%s: failed to send shmem_dealloc cmd\n", __func__);
		goto end_ret;
	}
	port_d->mem_handle = 0;

end_ret:
	return ret;
}

/*
 * wcd_cpe_send_afe_cal: send the acdb calibration to an AFE port
 * @core_handle: handle to cpe core
 * @port_d: private data of the port to which the
 *	    calibration is to be applied
 */
static int wcd_cpe_send_afe_cal(void *core_handle,
				struct wcd_cmi_afe_port_data *port_d)
{
	struct cal_block_data *afe_cal = NULL;
	struct wcd_cpe_core *core = core_handle;
	struct cmi_obm_msg obm_msg;
	void *inb_msg = NULL;
	void *msg;
	int rc = 0;
	bool is_obm_msg;

	/* Return early here; the cal mutex is not yet held */
	if (core->cal_data[WCD_CPE_LSM_CAL_AFE] == NULL) {
		pr_err("%s: LSM cal not allocated!\n",
		       __func__);
		return -EINVAL;
	}

	mutex_lock(&core->cal_data[WCD_CPE_LSM_CAL_AFE]->lock);
	afe_cal = cal_utils_get_only_cal_block(
			core->cal_data[WCD_CPE_LSM_CAL_AFE]);
	if (!afe_cal) {
		pr_err("%s: failed to get afe cal block\n",
		       __func__);
		rc = -EINVAL;
		goto rel_cal_mutex;
	}

	if (afe_cal->cal_data.size == 0) {
		dev_dbg(core->dev, "%s: No AFE cal to send\n",
			__func__);
		rc = 0;
		goto rel_cal_mutex;
	}

	is_obm_msg = afe_cal->cal_data.size > CMI_INBAND_MESSAGE_SIZE;

	if (is_obm_msg) {
		struct cmi_hdr *hdr = &(obm_msg.hdr);
		struct cmi_obm *pld = &(obm_msg.pld);

		rc = wcd_cpe_afe_shmem_alloc(core, port_d,
					     afe_cal->cal_data.size);
		if (rc) {
			dev_err(core->dev,
				"%s: AFE shmem alloc fail %d\n",
				__func__, rc);
			goto rel_cal_mutex;
		}

		rc = fill_afe_cmd_header(hdr, port_d->port_id,
					 CPE_AFE_CMD_SET_PARAM,
					 CPE_AFE_PARAM_PAYLOAD_SIZE,
					 true);
		if (rc) {
			dev_err(core->dev,
				"%s: invalid params for header, err = %d\n",
				__func__, rc);
			wcd_cpe_afe_shmem_dealloc(core, port_d);
			goto rel_cal_mutex;
		}

		pld->version = 0;
		pld->size = afe_cal->cal_data.size;
		pld->data_ptr.kvaddr = afe_cal->cal_data.kvaddr;
		pld->mem_handle = port_d->mem_handle;
		msg = &obm_msg;

	} else {
		u8 *msg_pld;
		struct cmi_hdr *hdr;

		inb_msg = kzalloc(sizeof(struct cmi_hdr) +
				  afe_cal->cal_data.size,
				  GFP_KERNEL);
		if (!inb_msg) {
			dev_err(core->dev,
				"%s: no memory for afe cal inband\n",
				__func__);
			rc = -ENOMEM;
			goto rel_cal_mutex;
		}

		hdr = (struct cmi_hdr *) inb_msg;

		rc = fill_afe_cmd_header(hdr, port_d->port_id,
					 CPE_AFE_CMD_SET_PARAM,
					 CPE_AFE_PARAM_PAYLOAD_SIZE,
					 false);
		if (rc) {
			dev_err(core->dev,
				"%s: invalid params for header, err = %d\n",
				__func__, rc);
			kfree(inb_msg);
			inb_msg = NULL;
			goto rel_cal_mutex;
		}

		msg_pld = ((u8 *) inb_msg) + sizeof(struct cmi_hdr);
		memcpy(msg_pld, afe_cal->cal_data.kvaddr,
		       afe_cal->cal_data.size);

		msg = inb_msg;
	}

	rc = wcd_cpe_cmi_send_afe_msg(core, port_d, msg);
	if (rc)
		pr_err("%s: afe cal for listen failed, rc = %d\n",
		       __func__, rc);

	if (is_obm_msg) {
		wcd_cpe_afe_shmem_dealloc(core, port_d);
		port_d->mem_handle = 0;
	} else {
		kfree(inb_msg);
		inb_msg = NULL;
	}

rel_cal_mutex:
	mutex_unlock(&core->cal_data[WCD_CPE_LSM_CAL_AFE]->lock);
	return rc;
}

/*
 * wcd_cpe_is_valid_port: check the validity of an afe port id
 * @core: handle to core to check for validity
 * @afe_cfg: client provided afe configuration
 * @func: name of the invoking function, used only for logging
 */
static int wcd_cpe_is_valid_port(struct wcd_cpe_core *core,
				 struct wcd_cpe_afe_port_cfg *afe_cfg,
				 const char *func)
{
	if (unlikely(IS_ERR_OR_NULL(core))) {
		pr_err("%s: Invalid core handle\n", func);
		return -EINVAL;
	}

	if (afe_cfg->port_id > WCD_CPE_AFE_MAX_PORTS) {
		dev_err(core->dev,
			"%s: invalid afe port (%u)\n",
			func, afe_cfg->port_id);
		return -EINVAL;
	}

	dev_dbg(core->dev,
		"%s: port_id = %u\n",
		func, afe_cfg->port_id);

	return 0;
}

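/*
 * wcd_cpe_afe_svc_cmd_mode: set the lab mode on the AFE service
 * @core_handle: handle to the cpe core
 * @mode: mode value to be applied to all AFE ports on CPE
 */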
static int wcd_cpe_afe_svc_cmd_mode(void *core_handle,
				    u8 mode)
{
	struct cpe_afe_svc_cmd_mode afe_mode;
	struct wcd_cpe_core *core = core_handle;
	struct wcd_cmi_afe_port_data *afe_port_d;
	int ret;

	afe_port_d = &afe_ports[0];
	/*
	 * The AFE SVC mode command is for the service and is not port
	 * specific; use AFE port 0 so the command is applied to all
	 * AFE ports on CPE.
	 */
	afe_port_d->port_id = 0;

	WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");
	memset(&afe_mode, 0, sizeof(afe_mode));
	if (fill_afe_cmd_header(&afe_mode.hdr, afe_port_d->port_id,
				CPE_AFE_SVC_CMD_LAB_MODE,
				CPE_AFE_CMD_MODE_PAYLOAD_SIZE,
				false)) {
		ret = -EINVAL;
		goto err_ret;
	}

	afe_mode.mode = mode;

	ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &afe_mode);
	if (ret)
		dev_err(core->dev,
			"%s: afe_svc_mode cmd failed, err = %d\n",
			__func__, ret);

err_ret:
	WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
	return ret;
}

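/*
 * wcd_cpe_afe_cmd_port_cfg: send the generic configuration
 *			     command to an afe port
 * @core_handle: handle to the cpe core
 * @afe_cfg: afe port configuration carrying the port id,
 *	     bit width, channel count and sample rate
 */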
static int wcd_cpe_afe_cmd_port_cfg(void *core_handle,
				    struct wcd_cpe_afe_port_cfg *afe_cfg)
{
	struct cpe_afe_cmd_port_cfg port_cfg_cmd;
	struct wcd_cpe_core *core = core_handle;
	struct wcd_cmi_afe_port_data *afe_port_d;
	int ret;

	ret = wcd_cpe_is_valid_port(core, afe_cfg, __func__);
	if (ret)
		goto done;

	afe_port_d = &afe_ports[afe_cfg->port_id];
	afe_port_d->port_id = afe_cfg->port_id;

	WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");
	memset(&port_cfg_cmd, 0, sizeof(port_cfg_cmd));
	if (fill_afe_cmd_header(&port_cfg_cmd.hdr,
				afe_cfg->port_id,
				CPE_AFE_PORT_CMD_GENERIC_CONFIG,
				CPE_AFE_CMD_PORT_CFG_PAYLOAD_SIZE,
				false)) {
		ret = -EINVAL;
		goto err_ret;
	}

	port_cfg_cmd.bit_width = afe_cfg->bit_width;
	port_cfg_cmd.num_channels = afe_cfg->num_channels;
	port_cfg_cmd.sample_rate = afe_cfg->sample_rate;

	if (afe_port_d->port_id == CPE_AFE_PORT_3_TX)
		port_cfg_cmd.buffer_size = WCD_CPE_EC_PP_BUF_SIZE;
	else
		port_cfg_cmd.buffer_size = AFE_OUT_BUF_SIZE(afe_cfg->bit_width,
							afe_cfg->sample_rate);

	ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &port_cfg_cmd);
	if (ret)
		dev_err(core->dev,
			"%s: afe_port_config failed, err = %d\n",
			__func__, ret);

err_ret:
	WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
done:
	return ret;
}

/*
 * wcd_cpe_afe_set_params: set the parameters for an afe port
 * @core_handle: handle to the cpe core
 * @afe_cfg: configuration of the port for which the
 *	     parameters are to be set
 * @afe_mad_ctl: whether hardware MAD is to be enabled on the port
 */
static int wcd_cpe_afe_set_params(void *core_handle,
		struct wcd_cpe_afe_port_cfg *afe_cfg, bool afe_mad_ctl)
{
	struct cpe_afe_params afe_params;
	struct cpe_afe_hw_mad_ctrl *hw_mad_ctrl = &afe_params.hw_mad_ctrl;
	struct cpe_afe_port_cfg *port_cfg = &afe_params.port_cfg;
	struct wcd_cpe_core *core = core_handle;
	struct wcd_cmi_afe_port_data *afe_port_d;
	int ret = 0, pld_size = 0;

	ret = wcd_cpe_is_valid_port(core, afe_cfg, __func__);
	if (ret)
		return ret;

	afe_port_d = &afe_ports[afe_cfg->port_id];
	afe_port_d->port_id = afe_cfg->port_id;

	WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");

	ret = wcd_cpe_send_afe_cal(core, afe_port_d);
	if (ret) {
		dev_err(core->dev,
			"%s: afe acdb cal send failed, err = %d\n",
			__func__, ret);
		goto err_ret;
	}

	pld_size = CPE_AFE_PARAM_PAYLOAD_SIZE;
	memset(&afe_params, 0, sizeof(afe_params));

	if (fill_afe_cmd_header(&afe_params.hdr,
				afe_cfg->port_id,
				CPE_AFE_CMD_SET_PARAM,
				(u8) pld_size, false)) {
		ret = -EINVAL;
		goto err_ret;
	}

	hw_mad_ctrl->param.module_id = CPE_AFE_MODULE_HW_MAD;
	hw_mad_ctrl->param.param_id = CPE_AFE_PARAM_ID_HW_MAD_CTL;
	hw_mad_ctrl->param.p_size.sr.param_size = PARAM_SIZE_AFE_HW_MAD_CTRL;
	hw_mad_ctrl->param.p_size.sr.reserved = 0;
	hw_mad_ctrl->minor_version = 1;
	hw_mad_ctrl->mad_type = MAD_TYPE_AUDIO;
	hw_mad_ctrl->mad_enable = afe_mad_ctl;

	port_cfg->param.module_id = CPE_AFE_MODULE_AUDIO_DEV_INTERFACE;
	port_cfg->param.param_id = CPE_AFE_PARAM_ID_GENERIC_PORT_CONFIG;
	port_cfg->param.p_size.sr.param_size = PARAM_SIZE_AFE_PORT_CFG;
	port_cfg->param.p_size.sr.reserved = 0;
	port_cfg->minor_version = 1;
	port_cfg->bit_width = afe_cfg->bit_width;
	port_cfg->num_channels = afe_cfg->num_channels;
	port_cfg->sample_rate = afe_cfg->sample_rate;

	ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &afe_params);
	if (ret)
		dev_err(core->dev,
			"%s: afe_set_params failed, err = %d\n",
			__func__, ret);
err_ret:
	WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
	return ret;
}

/*
 * wcd_cpe_afe_port_start: send the start command to the afe service
 * @core_handle: handle to the cpe core
 * @port_cfg: configuration data for the afe port which needs
 *	      to be started.
 */
static int wcd_cpe_afe_port_start(void *core_handle,
				  struct wcd_cpe_afe_port_cfg *port_cfg)
{
	struct cmi_hdr hdr;
	struct wcd_cpe_core *core = core_handle;
	struct wcd_cmi_afe_port_data *afe_port_d;
	int ret = 0;

	ret = wcd_cpe_is_valid_port(core, port_cfg, __func__);
	if (ret)
		return ret;

	afe_port_d = &afe_ports[port_cfg->port_id];
	afe_port_d->port_id = port_cfg->port_id;

	WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");

	memset(&hdr, 0, sizeof(struct cmi_hdr));
	fill_afe_cmd_header(&hdr, port_cfg->port_id,
			    CPE_AFE_PORT_CMD_START,
			    0, false);
	ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &hdr);
	if (ret)
		dev_err(core->dev,
			"%s: afe_port_start cmd failed, err = %d\n",
			__func__, ret);
	WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
	return ret;
}

/*
 * wcd_cpe_afe_port_stop: send the stop command to the afe service
 * @core_handle: handle to the cpe core
 * @port_cfg: configuration data for the afe port which needs
 *	      to be stopped.
 */
static int wcd_cpe_afe_port_stop(void *core_handle,
				 struct wcd_cpe_afe_port_cfg *port_cfg)
{
	struct cmi_hdr hdr;
	struct wcd_cpe_core *core = core_handle;
	struct wcd_cmi_afe_port_data *afe_port_d;
	int ret = 0;

	ret = wcd_cpe_is_valid_port(core, port_cfg, __func__);
	if (ret)
		return ret;

	afe_port_d = &afe_ports[port_cfg->port_id];
	afe_port_d->port_id = port_cfg->port_id;

	WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");

	memset(&hdr, 0, sizeof(hdr));
	fill_afe_cmd_header(&hdr, port_cfg->port_id,
			    CPE_AFE_PORT_CMD_STOP,
			    0, false);
	ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &hdr);
	if (ret)
		dev_err(core->dev,
			"%s: afe_stop cmd failed, err = %d\n",
			__func__, ret);

	WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
	return ret;
}

/*
 * wcd_cpe_afe_port_suspend: send the suspend command to the afe service
 * @core_handle: handle to the cpe core
 * @port_cfg: configuration data for the afe port which needs
 *	      to be suspended.
 */
static int wcd_cpe_afe_port_suspend(void *core_handle,
				    struct wcd_cpe_afe_port_cfg *port_cfg)
{
	struct cmi_hdr hdr;
	struct wcd_cpe_core *core = core_handle;
	struct wcd_cmi_afe_port_data *afe_port_d;
	int ret = 0;

	ret = wcd_cpe_is_valid_port(core, port_cfg, __func__);
	if (ret)
		return ret;

	afe_port_d = &afe_ports[port_cfg->port_id];
	afe_port_d->port_id = port_cfg->port_id;

	WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");

	memset(&hdr, 0, sizeof(struct cmi_hdr));
	fill_afe_cmd_header(&hdr, port_cfg->port_id,
			    CPE_AFE_PORT_CMD_SUSPEND,
			    0, false);
	ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &hdr);
	if (ret)
		dev_err(core->dev,
			"%s: afe_suspend cmd failed, err = %d\n",
			__func__, ret);
	WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
	return ret;
}

/*
 * wcd_cpe_afe_port_resume: send the resume command to the afe service
 * @core_handle: handle to the cpe core
 * @port_cfg: configuration data for the afe port which needs
 *	      to be resumed.
 */
static int wcd_cpe_afe_port_resume(void *core_handle,
				   struct wcd_cpe_afe_port_cfg *port_cfg)
{
	struct cmi_hdr hdr;
	struct wcd_cpe_core *core = core_handle;
	struct wcd_cmi_afe_port_data *afe_port_d;
	int ret = 0;

	ret = wcd_cpe_is_valid_port(core, port_cfg, __func__);
	if (ret)
		return ret;

	afe_port_d = &afe_ports[port_cfg->port_id];
	afe_port_d->port_id = port_cfg->port_id;

	WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");

	memset(&hdr, 0, sizeof(hdr));
	fill_afe_cmd_header(&hdr, port_cfg->port_id,
			    CPE_AFE_PORT_CMD_RESUME,
			    0, false);
	ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &hdr);
	if (ret)
		dev_err(core->dev,
			"%s: afe_resume cmd failed, err = %d\n",
			__func__, ret);
	WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
	return ret;
}

/*
 * wcd_cpe_get_afe_ops: fill in the afe driver callbacks
 * @afe_ops: structure to be populated with the afe callbacks
 */
int wcd_cpe_get_afe_ops(struct wcd_cpe_afe_ops *afe_ops)
{
	afe_ops->afe_set_params = wcd_cpe_afe_set_params;
	afe_ops->afe_port_start = wcd_cpe_afe_port_start;
	afe_ops->afe_port_stop = wcd_cpe_afe_port_stop;
	afe_ops->afe_port_suspend = wcd_cpe_afe_port_suspend;
	afe_ops->afe_port_resume = wcd_cpe_afe_port_resume;
	afe_ops->afe_port_cmd_cfg = wcd_cpe_afe_cmd_port_cfg;

	return 0;
}
EXPORT_SYMBOL(wcd_cpe_get_afe_ops);
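
/*
 * A minimal usage sketch (hypothetical caller, assuming a valid
 * core_handle and a populated struct wcd_cpe_afe_port_cfg):
 *
 *	struct wcd_cpe_afe_ops afe_ops;
 *
 *	memset(&afe_ops, 0, sizeof(afe_ops));
 *	if (wcd_cpe_get_afe_ops(&afe_ops))
 *		return -EINVAL;
 *	afe_ops.afe_set_params(core_handle, &port_cfg, enable_mad);
 *	afe_ops.afe_port_start(core_handle, &port_cfg);
 */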

MODULE_DESCRIPTION("WCD CPE Core");
MODULE_LICENSE("GPL v2");