// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/elf.h>
#include <linux/wait.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/pm_qos.h>
#include <linux/dma-mapping.h>
#include <sound/soc.h>
#include <sound/info.h>
#include <sound/lsm_params.h>
#include <soc/qcom/pm.h>
#include <dsp/audio_cal_utils.h>
#include "core.h"
#include "cpe_core.h"
#include "cpe_err.h"
#include "cpe_cmi.h"
#include "wcd_cpe_core.h"
#include "wcd_cpe_services.h"
#include "wcd_cmi_api.h"
#include "wcd9xxx-irq.h"

#define CMI_CMD_TIMEOUT (10 * HZ)
#define WCD_CPE_LSM_MAX_SESSIONS 2
#define WCD_CPE_AFE_MAX_PORTS 4
#define AFE_SVC_EXPLICIT_PORT_START 1
#define WCD_CPE_EC_PP_BUF_SIZE 480 /* 5 msec buffer */

#define ELF_FLAG_EXECUTE (1 << 0)
#define ELF_FLAG_WRITE (1 << 1)
#define ELF_FLAG_READ (1 << 2)

#define ELF_FLAG_RW (ELF_FLAG_READ | ELF_FLAG_WRITE)

#define WCD_CPE_GRAB_LOCK(lock, name) \
{ \
	pr_debug("%s: %s lock acquire\n", \
		 __func__, name); \
	mutex_lock(lock); \
}

#define WCD_CPE_REL_LOCK(lock, name) \
{ \
	pr_debug("%s: %s lock release\n", \
		 __func__, name); \
	mutex_unlock(lock); \
}

#define WCD_CPE_STATE_MAX_LEN 11
#define CPE_OFFLINE_WAIT_TIMEOUT (2 * HZ)
#define CPE_READY_WAIT_TIMEOUT (3 * HZ)
#define WCD_CPE_SYSFS_DIR_MAX_LENGTH 32

#define CPE_ERR_IRQ_CB(core) \
	(core->cpe_cdc_cb->cpe_err_irq_control)

/*
 * AFE output buffer size is always
 * (sample_rate * bytes per sample) / (2 * 1000),
 * i.e. half a millisecond worth of audio data.
 */
#define AFE_OUT_BUF_SIZE(bit_width, sample_rate) \
	(((sample_rate) * ((bit_width) / BITS_PER_BYTE)) / (2 * 1000))
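
/*
 * Worked example (illustrative only): for 16-bit samples at 16 kHz,
 * AFE_OUT_BUF_SIZE(16, 16000) = (16000 * (16 / 8)) / 2000 = 16 bytes,
 * i.e. the buffer holds 0.5 msec of audio.
 */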

enum afe_port_state {
	AFE_PORT_STATE_DEINIT = 0,
	AFE_PORT_STATE_INIT,
	AFE_PORT_STATE_CONFIG,
	AFE_PORT_STATE_STARTED,
	AFE_PORT_STATE_SUSPENDED,
};

struct wcd_cmi_afe_port_data {
	u8 port_id;
	struct mutex afe_lock;
	struct completion afe_cmd_complete;
	enum afe_port_state port_state;
	u8 cmd_result;
	u32 mem_handle;
};

struct cpe_lsm_ids {
	u32 module_id;
	u32 param_id;
};

static struct wcd_cpe_core *core_d;
static struct cpe_lsm_session
	*lsm_sessions[WCD_CPE_LSM_MAX_SESSIONS + 1];
struct wcd_cpe_core * (*wcd_get_cpe_core)(struct snd_soc_component *component);
static struct wcd_cmi_afe_port_data afe_ports[WCD_CPE_AFE_MAX_PORTS + 1];
static void wcd_cpe_svc_event_cb(const struct cpe_svc_notification *param);
static int wcd_cpe_setup_irqs(struct wcd_cpe_core *core);
static void wcd_cpe_cleanup_irqs(struct wcd_cpe_core *core);
static ssize_t cpe_ftm_test_trigger(struct file *file,
				    const char __user *user_buf,
				    size_t count, loff_t *ppos);
static u32 ramdump_enable;
static u32 cpe_ftm_test_status;
static const struct file_operations cpe_ftm_test_trigger_fops = {
	.open = simple_open,
	.write = cpe_ftm_test_trigger,
};

static int wcd_cpe_afe_svc_cmd_mode(void *core_handle,
				    u8 mode);
struct wcd_cpe_attribute {
	struct attribute attr;
	ssize_t (*show)(struct wcd_cpe_core *core, char *buf);
	ssize_t (*store)(struct wcd_cpe_core *core, const char *buf,
			 ssize_t count);
};

#define WCD_CPE_ATTR(_name, _mode, _show, _store) \
static struct wcd_cpe_attribute cpe_attr_##_name = { \
	.attr = {.name = __stringify(_name), .mode = _mode}, \
	.show = _show, \
	.store = _store, \
}
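
/*
 * Usage example: WCD_CPE_ATTR(fw_name, 0660, fw_name_show, fw_name_store)
 * (used further below) expands to a static struct wcd_cpe_attribute named
 * cpe_attr_fw_name backing a sysfs node called "fw_name" with mode 0660.
 */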

#define to_wcd_cpe_attr(a) \
	container_of((a), struct wcd_cpe_attribute, attr)

#define kobj_to_cpe_core(kobj) \
	container_of((kobj), struct wcd_cpe_core, cpe_kobj)

/* wcd_cpe_lsm_session_active: check if any session is active
 * return true if any session is active.
 */
static bool wcd_cpe_lsm_session_active(void)
{
	int index = 1;
	bool lsm_active = false;

	/* session starts from index 1 */
	for (; index <= WCD_CPE_LSM_MAX_SESSIONS; index++) {
		if (lsm_sessions[index] != NULL) {
			lsm_active = true;
			break;
		} else {
			lsm_active = false;
		}
	}
	return lsm_active;
}

static int wcd_cpe_get_sfr_dump(struct wcd_cpe_core *core)
{
	struct cpe_svc_mem_segment dump_seg;
	int rc;
	u8 *sfr_dump;

	sfr_dump = kzalloc(core->sfr_buf_size, GFP_KERNEL);
	if (!sfr_dump)
		goto done;

	dump_seg.type = CPE_SVC_DATA_MEM;
	dump_seg.cpe_addr = core->sfr_buf_addr;
	dump_seg.size = core->sfr_buf_size;
	dump_seg.data = sfr_dump;
	dev_dbg(core->dev,
		"%s: reading SFR from CPE, size = %zu\n",
		__func__, core->sfr_buf_size);

	rc = cpe_svc_ramdump(core->cpe_handle, &dump_seg);
	if (rc < 0) {
		dev_err(core->dev,
			"%s: Failed to read cpe sfr_dump, err = %d\n",
			__func__, rc);
		goto free_sfr_dump;
	}

	dev_info(core->dev,
		 "%s: cpe_sfr = %s\n", __func__, sfr_dump);

free_sfr_dump:
	kfree(sfr_dump);
done:
	/* Even if SFR dump failed, do not return error */
	return 0;
}

static int wcd_cpe_collect_ramdump(struct wcd_cpe_core *core)
{
	struct cpe_svc_mem_segment dump_seg;
	int rc;

	if (!core->cpe_ramdump_dev || !core->cpe_dump_v_addr ||
	    core->hw_info.dram_size == 0) {
		dev_err(core->dev,
			"%s: Ramdump devices not set up, size = %zu\n",
			__func__, core->hw_info.dram_size);
		return -EINVAL;
	}

	dump_seg.type = CPE_SVC_DATA_MEM;
	dump_seg.cpe_addr = core->hw_info.dram_offset;
	dump_seg.size = core->hw_info.dram_size;
	dump_seg.data = core->cpe_dump_v_addr;

	dev_dbg(core->dev,
		"%s: Reading ramdump from CPE\n",
		__func__);

	rc = cpe_svc_ramdump(core->cpe_handle, &dump_seg);
	if (rc < 0) {
		dev_err(core->dev,
			"%s: Failed to read CPE ramdump, err = %d\n",
			__func__, rc);
		return rc;
	}

	dev_dbg(core->dev,
		"%s: completed reading ramdump from CPE\n",
		__func__);

	core->cpe_ramdump_seg.address = (unsigned long) core->cpe_dump_addr;
	core->cpe_ramdump_seg.size = core->hw_info.dram_size;
	core->cpe_ramdump_seg.v_address = core->cpe_dump_v_addr;

	rc = do_ramdump(core->cpe_ramdump_dev,
			&core->cpe_ramdump_seg, 1);
	if (rc)
		dev_err(core->dev,
			"%s: fail to dump cpe ram to device, err = %d\n",
			__func__, rc);
	return rc;
}

/* wcd_cpe_is_valid_elf_hdr: check if the ELF header is valid
 * @core: handle to wcd_cpe_core
 * @fw_size: size of firmware from request_firmware
 * @ehdr: the elf header to be checked for
 * return true if all checks pass, false if any elf check fails
 */
static bool wcd_cpe_is_valid_elf_hdr(struct wcd_cpe_core *core, size_t fw_size,
	const struct elf32_hdr *ehdr)
{
	if (fw_size < sizeof(*ehdr)) {
		dev_err(core->dev, "%s: Firmware too small\n", __func__);
		goto elf_check_fail;
	}

	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) {
		dev_err(core->dev, "%s: Not an ELF file\n", __func__);
		goto elf_check_fail;
	}

	if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN) {
		dev_err(core->dev, "%s: Not an executable image\n", __func__);
		goto elf_check_fail;
	}

	if (ehdr->e_phnum == 0) {
		dev_err(core->dev, "%s: no segments to load\n", __func__);
		goto elf_check_fail;
	}

	if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
	    sizeof(struct elf32_hdr) > fw_size) {
		dev_err(core->dev, "%s: Too small MDT file\n", __func__);
		goto elf_check_fail;
	}

	return true;

elf_check_fail:
	return false;
}

/*
 * wcd_cpe_load_each_segment: download segment to CPE
 * @core: handle to struct wcd_cpe_core
 * @file_idx: index of split firmware image file name
 * @phdr: program header from metadata
 */
static int wcd_cpe_load_each_segment(struct wcd_cpe_core *core,
	int file_idx, const struct elf32_phdr *phdr)
{
	const struct firmware *split_fw;
	char split_fname[32];
	int ret = 0;
	struct cpe_svc_mem_segment *segment;

	if (!core || !phdr) {
		pr_err("%s: Invalid params\n", __func__);
		return -EINVAL;
	}

	/* file size can be 0 for bss segments */
	if (phdr->p_filesz == 0 || phdr->p_memsz == 0)
		return 0;

	segment = kzalloc(sizeof(struct cpe_svc_mem_segment), GFP_KERNEL);
	if (!segment)
		return -ENOMEM;

	snprintf(split_fname, sizeof(split_fname), "%s.b%02d",
		 core->fname, file_idx);
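	/*
	 * Illustrative example: with core->fname "cpe" and file_idx 2,
	 * split_fname becomes "cpe.b02"; each loadable program header in
	 * the .mdt metadata maps to one such split image file.
	 */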

	ret = request_firmware(&split_fw, split_fname, core->dev);
	if (ret) {
		dev_err(core->dev, "firmware %s not found\n",
			split_fname);
		ret = -EIO;
		goto fw_req_fail;
	}

	if (phdr->p_flags & ELF_FLAG_EXECUTE)
		segment->type = CPE_SVC_INSTRUCTION_MEM;
	else if (phdr->p_flags & ELF_FLAG_RW)
		segment->type = CPE_SVC_DATA_MEM;
	else {
		dev_err(core->dev, "%s invalid flags 0x%x\n",
			__func__, phdr->p_flags);
		ret = -EINVAL;
		goto done;
	}

	if (phdr->p_filesz != split_fw->size) {
		dev_err(core->dev,
			"%s: %s size mismatch, phdr_size: 0x%x fw_size: 0x%zx",
			__func__, split_fname, phdr->p_filesz, split_fw->size);
		ret = -EINVAL;
		goto done;
	}

	segment->cpe_addr = phdr->p_paddr;
	segment->size = phdr->p_filesz;
	segment->data = (u8 *) split_fw->data;

	dev_dbg(core->dev,
		"%s: cpe segment type %s read from firmware\n", __func__,
		(segment->type == CPE_SVC_INSTRUCTION_MEM) ?
			"INSTRUCTION" : "DATA");

	ret = cpe_svc_download_segment(core->cpe_handle, segment);
	if (ret) {
		dev_err(core->dev,
			"%s: Failed to download %s, error = %d\n",
			__func__, split_fname, ret);
		goto done;
	}

done:
	release_firmware(split_fw);

fw_req_fail:
	kfree(segment);
	return ret;
}

/*
 * wcd_cpe_enable_cpe_clks: enable the clocks for CPE
 * @core: handle to wcd_cpe_core
 * @enable: flag indicating whether to enable/disable cpe clocks
 */
static int wcd_cpe_enable_cpe_clks(struct wcd_cpe_core *core, bool enable)
{
	int ret, ret1;

	if (!core || !core->cpe_cdc_cb ||
	    !core->cpe_cdc_cb->cpe_clk_en ||
	    !core->cpe_cdc_cb->cdc_clk_en) {
		pr_err("%s: invalid handle\n",
		       __func__);
		return -EINVAL;
	}

	ret = core->cpe_cdc_cb->cdc_clk_en(core->component, enable);
	if (ret) {
		dev_err(core->dev, "%s: Failed to enable RCO\n",
			__func__);
		return ret;
	}

	if (!enable && core->cpe_clk_ref > 0)
		core->cpe_clk_ref--;

	/*
	 * CPE clk will be enabled at the first time
	 * and be disabled at the last time.
	 */
	if (core->cpe_clk_ref == 0) {
		ret = core->cpe_cdc_cb->cpe_clk_en(core->component, enable);
		if (ret) {
			dev_err(core->dev,
				"%s: cpe_clk_en() failed, err = %d\n",
				__func__, ret);
			goto cpe_clk_fail;
		}
	}

	if (enable)
		core->cpe_clk_ref++;

	return 0;

cpe_clk_fail:
	/* Release the codec clk if CPE clk enable failed */
	if (enable) {
		ret1 = core->cpe_cdc_cb->cdc_clk_en(core->component, !enable);
		if (ret1)
			dev_err(core->dev,
				"%s: Fail to release codec clk, err = %d\n",
				__func__, ret1);
	}

	return ret;
}

/*
 * wcd_cpe_bus_vote_max_bw: Function to vote for max bandwidth on codec bus
 * @core: handle to core for cpe
 * @vote: flag to indicate enable/disable of vote
 *
 * This function will try to use the codec provided callback to
 * vote/unvote for the max bandwidth of the bus that is used by
 * the codec for register reads/writes.
 */
static int wcd_cpe_bus_vote_max_bw(struct wcd_cpe_core *core,
		bool vote)
{
	if (!core || !core->cpe_cdc_cb) {
		pr_err("%s: Invalid handle to %s\n",
		       __func__,
		       (!core) ? "core" : "codec callbacks");
		return -EINVAL;
	}

	if (core->cpe_cdc_cb->bus_vote_bw) {
		dev_dbg(core->dev, "%s: %s cdc bus max bandwidth\n",
			__func__, vote ? "Vote" : "Unvote");
		core->cpe_cdc_cb->bus_vote_bw(core->component, vote);
	}

	return 0;
}

/*
 * wcd_cpe_load_fw: Function to load the fw image
 * @core: cpe core pointer
 * @load_type: indicates whether to load to data section
 *	       or the instruction section
 *
 * Parse the mdt file to look for program headers, load each
 * split file corresponding to the program headers.
 */
static int wcd_cpe_load_fw(struct wcd_cpe_core *core,
	unsigned int load_type)
{

	int ret, phdr_idx;
	struct snd_soc_component *component = NULL;
	struct wcd9xxx *wcd9xxx = NULL;
	const struct elf32_hdr *ehdr;
	const struct elf32_phdr *phdr;
	const struct firmware *fw;
	const u8 *elf_ptr;
	char mdt_name[64];
	bool img_dload_fail = false;
	bool load_segment;

	if (!core || !core->cpe_handle) {
		pr_err("%s: Error CPE core %pK\n", __func__,
		       core);
		return -EINVAL;
	}
	component = core->component;
	wcd9xxx = dev_get_drvdata(component->dev->parent);
	snprintf(mdt_name, sizeof(mdt_name), "%s.mdt", core->fname);
	ret = request_firmware(&fw, mdt_name, core->dev);
	if (ret < 0) {
		dev_err(core->dev, "firmware %s not found\n", mdt_name);
		return ret;
	}

	ehdr = (struct elf32_hdr *) fw->data;
	if (!wcd_cpe_is_valid_elf_hdr(core, fw->size, ehdr)) {
		dev_err(core->dev, "%s: fw mdt %s is invalid\n",
			__func__, mdt_name);
		ret = -EINVAL;
		goto done;
	}

	elf_ptr = fw->data + sizeof(*ehdr);
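	/*
	 * Note: the program header table is assumed to start immediately
	 * after the ELF header (e_phoff is not consulted); this code relies
	 * on that layout holding for the .mdt metadata images it consumes.
	 */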

	if (load_type == ELF_FLAG_EXECUTE) {
		/* Reset CPE first */
		ret = cpe_svc_reset(core->cpe_handle);
		if (ret < 0) {
			dev_err(core->dev,
				"%s: Failed to reset CPE with error %d\n",
				__func__, ret);
			goto done;
		}
	}

	dev_dbg(core->dev, "%s: start image dload, name = %s, load_type = 0x%x\n",
		__func__, core->fname, load_type);

	wcd_cpe_bus_vote_max_bw(core, true);

	/* parse every program header and request corresponding firmware */
	for (phdr_idx = 0; phdr_idx < ehdr->e_phnum; phdr_idx++) {
		phdr = (struct elf32_phdr *)elf_ptr;
		load_segment = false;

		dev_dbg(core->dev,
			"index = %d, vaddr = 0x%x, paddr = 0x%x, filesz = 0x%x, memsz = 0x%x, flags = 0x%x\n",
			phdr_idx, phdr->p_vaddr, phdr->p_paddr,
			phdr->p_filesz, phdr->p_memsz, phdr->p_flags);

		switch (load_type) {
		case ELF_FLAG_EXECUTE:
			if (phdr->p_flags & load_type)
				load_segment = true;
			break;
		case ELF_FLAG_RW:
			if (!(phdr->p_flags & ELF_FLAG_EXECUTE) &&
			    (phdr->p_flags & load_type))
				load_segment = true;
			break;
		default:
			pr_err("%s: Invalid load_type 0x%x\n",
			       __func__, load_type);
			ret = -EINVAL;
			goto rel_bus_vote;
		}

		if (load_segment) {
			ret = wcd_cpe_load_each_segment(core,
					phdr_idx, phdr);
			if (ret < 0) {
				dev_err(core->dev,
					"Failed to load segment %d, aborting img dload\n",
					phdr_idx);
				img_dload_fail = true;
				goto rel_bus_vote;
			}
		} else {
			dev_dbg(core->dev,
				"%s: skipped segment with index %d\n",
				__func__, phdr_idx);
		}

		elf_ptr = elf_ptr + sizeof(*phdr);
	}
	if (load_type == ELF_FLAG_EXECUTE)
		core->ssr_type = WCD_CPE_IMEM_DOWNLOADED;

rel_bus_vote:
	wcd_cpe_bus_vote_max_bw(core, false);

done:
	release_firmware(fw);
	return ret;
}
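
/*
 * Load sequencing, as wired up elsewhere in this file: the instruction
 * sections (ELF_FLAG_EXECUTE) are downloaded from wcd_cpe_load_fw_image()
 * at probe and from wcd_cpe_boot_ssr() after SSR, while the data sections
 * (ELF_FLAG_RW) are downloaded from wcd_cpe_enable() before each CPE boot.
 */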

/*
 * wcd_cpe_change_online_state - mark cpe online/offline state
 * @core: core session to mark
 * @online: whether online or offline
 *
 */
static void wcd_cpe_change_online_state(struct wcd_cpe_core *core,
		int online)
{
	struct wcd_cpe_ssr_entry *ssr_entry = NULL;
	unsigned long ret;

	if (!core) {
		pr_err("%s: Invalid core handle\n",
		       __func__);
		return;
	}

	ssr_entry = &core->ssr_entry;
	WCD_CPE_GRAB_LOCK(&core->ssr_lock, "SSR");
	ssr_entry->offline = !online;

	/* Make sure write to offline state is completed. */
	wmb();
	ret = xchg(&ssr_entry->offline_change, 1);
	wake_up_interruptible(&ssr_entry->offline_poll_wait);
	WCD_CPE_REL_LOCK(&core->ssr_lock, "SSR");
	pr_debug("%s: change state 0x%x offline_change 0x%x\n"
		 " core->offline 0x%x, ret = %ld\n",
		 __func__, online,
		 ssr_entry->offline_change,
		 core->ssr_entry.offline, ret);
}

/*
 * wcd_cpe_load_fw_image: work function to load the fw image
 * @work: work that is scheduled to perform the image loading
 *
 * Parse the mdt file to look for program headers, load each
 * split file corresponding to the program headers.
 */
static void wcd_cpe_load_fw_image(struct work_struct *work)
{
	struct wcd_cpe_core *core;
	int ret = 0;

	core = container_of(work, struct wcd_cpe_core, load_fw_work);
	ret = wcd_cpe_load_fw(core, ELF_FLAG_EXECUTE);
	if (!ret)
		wcd_cpe_change_online_state(core, 1);
	else
		pr_err("%s: failed to load instruction section, err = %d\n",
		       __func__, ret);
}

/*
 * wcd_cpe_get_core_handle: get the handle to wcd_cpe_core
 * @component: codec from which this handle is to be obtained
 * Codec driver should provide a callback function to obtain
 * handle to wcd_cpe_core during initialization of wcd_cpe_core
 */
void *wcd_cpe_get_core_handle(
	struct snd_soc_component *component)
{
	struct wcd_cpe_core *core = NULL;

	if (!component) {
		pr_err("%s: Invalid codec handle\n",
		       __func__);
		goto done;
	}

	if (!wcd_get_cpe_core) {
		dev_err(component->dev,
			"%s: codec callback not available\n",
			__func__);
		goto done;
	}

	core = wcd_get_cpe_core(component);

	if (!core)
		dev_err(component->dev,
			"%s: handle to core not available\n",
			__func__);
done:
	return core;
}
EXPORT_SYMBOL(wcd_cpe_get_core_handle);

/*
 * svass_engine_irq: threaded interrupt handler for svass engine irq
 * @irq: interrupt number
 * @data: data pointer passed during irq registration
 */
static irqreturn_t svass_engine_irq(int irq, void *data)
{
	struct wcd_cpe_core *core = data;
	int ret = 0;

	if (!core) {
		pr_err("%s: Invalid data for interrupt handler\n",
		       __func__);
		goto done;
	}

	ret = cpe_svc_process_irq(core->cpe_handle, CPE_IRQ_OUTBOX_IRQ);
	if (ret < 0)
		dev_err(core->dev,
			"%s: Error processing irq from cpe_services\n",
			__func__);
done:
	return IRQ_HANDLED;
}

/*
 * wcd_cpe_state_read - update read status in procfs
 * @entry: snd_info_entry
 * @buf: buffer where the read status is updated.
 *
 */
static ssize_t wcd_cpe_state_read(struct snd_info_entry *entry,
	void *file_private_data, struct file *file,
	char __user *buf, size_t count, loff_t pos)
{
	int len = 0;
	char buffer[WCD_CPE_STATE_MAX_LEN];
	struct wcd_cpe_core *core = NULL;
	struct wcd_cpe_ssr_entry *ssr_entry = NULL;

	core = (struct wcd_cpe_core *) entry->private_data;
	if (!core) {
		pr_err("%s: CPE core NULL\n", __func__);
		return -EINVAL;
	}
	ssr_entry = &core->ssr_entry;

	/* Make sure read from ssr_entry is completed. */
	rmb();
	dev_dbg(core->dev,
		"%s: Offline 0x%x\n", __func__,
		ssr_entry->offline);

	WCD_CPE_GRAB_LOCK(&core->ssr_lock, "SSR");
	len = snprintf(buffer, sizeof(buffer), "%s\n",
		       ssr_entry->offline ? "OFFLINE" : "ONLINE");
	WCD_CPE_REL_LOCK(&core->ssr_lock, "SSR");

	return simple_read_from_buffer(buf, count, &pos, buffer, len);
}

/*
 * wcd_cpe_state_poll - polls for change state
 * @entry: snd_info_entry
 * @wait: wait for duration for poll wait
 *
 */
static unsigned int wcd_cpe_state_poll(struct snd_info_entry *entry,
	void *private_data, struct file *file,
	poll_table *wait)
{
	struct wcd_cpe_core *core = NULL;
	struct wcd_cpe_ssr_entry *ssr_entry = NULL;
	int ret = 0;

	core = (struct wcd_cpe_core *) entry->private_data;
	if (!core) {
		pr_err("%s: CPE core NULL\n", __func__);
		return -EINVAL;
	}

	ssr_entry = &core->ssr_entry;

	dev_dbg(core->dev, "%s: CPE Poll wait\n",
		__func__);
	poll_wait(file, &ssr_entry->offline_poll_wait, wait);
	dev_dbg(core->dev, "%s: Wake-up Poll wait\n",
		__func__);
	WCD_CPE_GRAB_LOCK(&core->ssr_lock, "SSR");

	if (xchg(&ssr_entry->offline_change, 0))
		ret = POLLIN | POLLPRI | POLLRDNORM;

	WCD_CPE_REL_LOCK(&core->ssr_lock, "SSR");

	dev_dbg(core->dev, "%s: ret (%d) from poll_wait\n",
		__func__, ret);
	return ret;
}

/*
 * wcd_cpe_is_online_state - return true if CPE is in online state
 * @core_handle: handle to the core to query
 */
static bool wcd_cpe_is_online_state(void *core_handle)
{
	struct wcd_cpe_core *core = core_handle;

	if (core_handle) {
		return !core->ssr_entry.offline;
	} else {
		pr_err("%s: Core handle NULL\n", __func__);
		/* treat CPE as offline if core handle is null */
		return false;
	}
}

static struct snd_info_entry_ops wcd_cpe_state_proc_ops = {
	.read = wcd_cpe_state_read,
	.poll = wcd_cpe_state_poll,
};

static int wcd_cpe_check_new_image(struct wcd_cpe_core *core)
{
	int rc = 0;
	char temp_img_name[WCD_CPE_IMAGE_FNAME_MAX];

	if (!strcmp(core->fname, core->dyn_fname) &&
	    core->ssr_type != WCD_CPE_INITIALIZED) {
		dev_dbg(core->dev,
			"%s: Firmware unchanged, fname = %s, ssr_type 0x%x\n",
			__func__, core->fname, core->ssr_type);
		goto done;
	}

	/*
	 * Different firmware name requested,
	 * Re-load the instruction section
	 */
	strlcpy(temp_img_name, core->fname,
		WCD_CPE_IMAGE_FNAME_MAX);
	strlcpy(core->fname, core->dyn_fname,
		WCD_CPE_IMAGE_FNAME_MAX);

	rc = wcd_cpe_load_fw(core, ELF_FLAG_EXECUTE);
	if (rc) {
		dev_err(core->dev,
			"%s: Failed to dload new image %s, err = %d\n",
			__func__, core->fname, rc);
		/* If new image download failed, revert back to old image */
		strlcpy(core->fname, temp_img_name,
			WCD_CPE_IMAGE_FNAME_MAX);
		rc = wcd_cpe_load_fw(core, ELF_FLAG_EXECUTE);
		if (rc)
			dev_err(core->dev,
				"%s: Failed to re-dload image %s, err = %d\n",
				__func__, core->fname, rc);
	} else {
		dev_info(core->dev, "%s: fw changed to %s\n",
			 __func__, core->fname);
	}
done:
	return rc;
}

static int wcd_cpe_enable(struct wcd_cpe_core *core,
		bool enable)
{
	int ret = 0;

	if (enable) {
		/* Reset CPE first */
		ret = cpe_svc_reset(core->cpe_handle);
		if (ret < 0) {
			dev_err(core->dev,
				"%s: CPE Reset failed, error = %d\n",
				__func__, ret);
			goto done;
		}

		ret = wcd_cpe_setup_irqs(core);
		if (ret) {
			dev_err(core->dev,
				"%s: CPE IRQs setup failed, error = %d\n",
				__func__, ret);
			goto done;
		}
		ret = wcd_cpe_check_new_image(core);
		if (ret)
			goto fail_boot;

		/* Dload data section */
		ret = wcd_cpe_load_fw(core, ELF_FLAG_RW);
		if (ret) {
			dev_err(core->dev,
				"%s: Failed to dload data section, err = %d\n",
				__func__, ret);
			goto fail_boot;
		}

		ret = wcd_cpe_enable_cpe_clks(core, true);
		if (ret < 0) {
			dev_err(core->dev,
				"%s: CPE clk enable failed, err = %d\n",
				__func__, ret);
			goto fail_boot;
		}

		ret = cpe_svc_boot(core->cpe_handle,
				   core->cpe_debug_mode);
		if (ret < 0) {
			dev_err(core->dev,
				"%s: Failed to boot CPE\n",
				__func__);
			goto fail_boot;
		}

		/* wait for CPE to be online */
		dev_dbg(core->dev,
			"%s: waiting for CPE bootup\n",
			__func__);

		wait_for_completion(&core->online_compl);

		dev_dbg(core->dev,
			"%s: CPE bootup done\n",
			__func__);

		core->ssr_type = WCD_CPE_ENABLED;
	} else {
		if (core->ssr_type == WCD_CPE_BUS_DOWN_EVENT ||
		    core->ssr_type == WCD_CPE_SSR_EVENT) {
			/*
			 * If this disable vote is when
			 * SSR is in progress, do not disable CPE here,
			 * instead SSR handler will control CPE.
			 */
			wcd_cpe_enable_cpe_clks(core, false);
			wcd_cpe_cleanup_irqs(core);
			goto done;
		}

		ret = cpe_svc_shutdown(core->cpe_handle);
		if (ret < 0) {
			dev_err(core->dev,
				"%s: CPE shutdown failed, error %d\n",
				__func__, ret);
			goto done;
		}

		wcd_cpe_enable_cpe_clks(core, false);
		wcd_cpe_cleanup_irqs(core);
		core->ssr_type = WCD_CPE_IMEM_DOWNLOADED;
	}

	return ret;

fail_boot:
	wcd_cpe_cleanup_irqs(core);

done:
	return ret;
}

/*
 * wcd_cpe_boot_ssr: Load the images to CPE after ssr and bootup cpe
 * @core: handle to the core
 */
static int wcd_cpe_boot_ssr(struct wcd_cpe_core *core)
{
	int rc = 0;

	if (!core || !core->cpe_handle) {
		pr_err("%s: Invalid handle\n", __func__);
		rc = -EINVAL;
		goto fail;
	}
	/* Load the instruction section and mark CPE as online */
	rc = wcd_cpe_load_fw(core, ELF_FLAG_EXECUTE);
	if (rc) {
		dev_err(core->dev,
			"%s: Failed to load instruction, err = %d\n",
			__func__, rc);
		goto fail;
	} else {
		wcd_cpe_change_online_state(core, 1);
	}

fail:
	return rc;
}

/*
 * wcd_cpe_clr_ready_status:
 *	Clear the value from the ready status for CPE
 * @core: handle to the core
 * @value: flag/bitmask that is to be cleared
 *
 * This function should not be invoked with ssr_lock acquired
 */
static void wcd_cpe_clr_ready_status(struct wcd_cpe_core *core,
		u8 value)
{
	WCD_CPE_GRAB_LOCK(&core->ssr_lock, "SSR");
	core->ready_status &= ~(value);
	dev_dbg(core->dev,
		"%s: ready_status = 0x%x\n",
		__func__, core->ready_status);
	WCD_CPE_REL_LOCK(&core->ssr_lock, "SSR");
}

/*
 * wcd_cpe_set_and_complete:
 *	Set the ready status with the provided value and
 *	flag the completion object if ready status moves
 *	to ready to download
 * @core: handle to the core
 * @value: flag/bitmask that is to be set
 */
static void wcd_cpe_set_and_complete(struct wcd_cpe_core *core,
		u8 value)
{
	WCD_CPE_GRAB_LOCK(&core->ssr_lock, "SSR");
	core->ready_status |= value;
	if ((core->ready_status & WCD_CPE_READY_TO_DLOAD) ==
	    WCD_CPE_READY_TO_DLOAD) {
		dev_dbg(core->dev,
			"%s: marking ready, status = 0x%x\n",
			__func__, core->ready_status);
		complete(&core->ready_compl);
	}
	WCD_CPE_REL_LOCK(&core->ssr_lock, "SSR");
}
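
/*
 * Handshake sketch (assuming WCD_CPE_READY_TO_DLOAD, defined in
 * wcd_cpe_core.h, is the union of WCD_CPE_BUS_READY and WCD_CPE_BLK_READY):
 * the SSR path and the bus-up path each contribute one bit through
 * wcd_cpe_set_and_complete(), and only once both bits are present does
 * ready_compl fire, letting wcd_cpe_ssr_work() proceed to re-download.
 */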

/*
 * wcd_cpe_ssr_work: work function to handle CPE SSR
 * @work: work that is scheduled to perform CPE shutdown
 *	  and restart
 */
static void wcd_cpe_ssr_work(struct work_struct *work)
{

	int rc = 0;
	u32 irq = 0;
	struct wcd_cpe_core *core = NULL;
	u8 status = 0;

	core = container_of(work, struct wcd_cpe_core, ssr_work);
	if (!core) {
		pr_err("%s: Core handle NULL\n", __func__);
		return;
	}

	/* Hold a pm_qos request to keep the CPU out of deep idle/suspend */
	pm_qos_add_request(&core->pm_qos_req,
			   PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);
	pm_qos_update_request(&core->pm_qos_req,
			      msm_cpuidle_get_deep_idle_latency());

	dev_dbg(core->dev,
		"%s: CPE SSR with event %d\n",
		__func__, core->ssr_type);

	if (core->ssr_type == WCD_CPE_SSR_EVENT) {
		if (CPE_ERR_IRQ_CB(core))
			core->cpe_cdc_cb->cpe_err_irq_control(
					core->component,
					CPE_ERR_IRQ_STATUS,
					&status);
		if (status & core->irq_info.cpe_fatal_irqs)
			irq = CPE_IRQ_WDOG_BITE;
	} else {
		/* If bus is down, cdc reg cannot be read */
		irq = CPE_IRQ_WDOG_BITE;
	}

	if (core->cpe_users > 0) {
		rc = cpe_svc_process_irq(core->cpe_handle, irq);
		if (rc < 0)
			/*
			 * Even if process_irq fails,
			 * wait for cpe to move to offline state
			 */
			dev_err(core->dev,
				"%s: irq processing failed, error = %d\n",
				__func__, rc);

		rc = wait_for_completion_timeout(&core->offline_compl,
						 CPE_OFFLINE_WAIT_TIMEOUT);
		if (!rc) {
			dev_err(core->dev,
				"%s: wait for cpe offline timed out\n",
				__func__);
			goto err_ret;
		}
		if (core->ssr_type != WCD_CPE_BUS_DOWN_EVENT) {
			wcd_cpe_get_sfr_dump(core);

			/*
			 * Ramdump has to be explicitly enabled
			 * through debugfs and cannot be collected
			 * when bus is down.
			 */
			if (ramdump_enable)
				wcd_cpe_collect_ramdump(core);
		}
	} else {
		pr_err("%s: no cpe users, mark as offline\n", __func__);
		wcd_cpe_change_online_state(core, 0);
		wcd_cpe_set_and_complete(core,
					 WCD_CPE_BLK_READY);
	}

	rc = wait_for_completion_timeout(&core->ready_compl,
					 CPE_READY_WAIT_TIMEOUT);
	if (!rc) {
		dev_err(core->dev,
			"%s: ready to online timed out, status = %u\n",
			__func__, core->ready_status);
		goto err_ret;
	}

	rc = wcd_cpe_boot_ssr(core);

	/* Once images are downloaded make sure all
	 * error interrupts are cleared
	 */
	if (CPE_ERR_IRQ_CB(core))
		core->cpe_cdc_cb->cpe_err_irq_control(core->component,
					CPE_ERR_IRQ_CLEAR, NULL);

err_ret:
	/* remove after default pm qos */
	pm_qos_update_request(&core->pm_qos_req,
			      PM_QOS_DEFAULT_VALUE);
	pm_qos_remove_request(&core->pm_qos_req);
}

/*
 * wcd_cpe_ssr_event: handle SSR events here.
 * @core_handle: handle to the cpe core
 * @event: indicates the SSR or bus up/down event to be handled
 */
int wcd_cpe_ssr_event(void *core_handle,
		enum wcd_cpe_ssr_state_event event)
{
	struct wcd_cpe_core *core = core_handle;

	if (!core) {
		pr_err("%s: Invalid handle to core\n",
		       __func__);
		return -EINVAL;
	}

	/*
	 * If CPE is not even enabled, the SSR event for
	 * CPE needs to be ignored
	 */
	if (core->ssr_type == WCD_CPE_INITIALIZED) {
		dev_info(core->dev,
			 "%s: CPE initialized but not enabled, skip CPE ssr\n",
			 __func__);
		return 0;
	}

	dev_dbg(core->dev,
		"%s: Schedule ssr work, event = %d\n",
		__func__, core->ssr_type);

	switch (event) {
	case WCD_CPE_BUS_DOWN_EVENT:
		/*
		 * If bus down, then CPE block is also
		 * treated to be down
		 */
		wcd_cpe_clr_ready_status(core, WCD_CPE_READY_TO_DLOAD);
		core->ssr_type = event;
		schedule_work(&core->ssr_work);
		break;

	case WCD_CPE_SSR_EVENT:
		wcd_cpe_clr_ready_status(core, WCD_CPE_BLK_READY);
		core->ssr_type = event;
		schedule_work(&core->ssr_work);
		break;

	case WCD_CPE_BUS_UP_EVENT:
		wcd_cpe_set_and_complete(core, WCD_CPE_BUS_READY);
		/*
		 * In case of bus up event ssr_type will be changed
		 * to WCD_CPE_ACTIVE once CPE is online
		 */
		break;

	default:
		dev_err(core->dev,
			"%s: unhandled SSR event %d\n",
			__func__, event);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(wcd_cpe_ssr_event);

/*
 * svass_exception_irq: threaded irq handler for sva error interrupts
 * @irq: interrupt number
 * @data: data pointer passed during irq registration
 *
 * Once an error interrupt is received, it is not cleared, since
 * clearing this interrupt will raise spurious interrupts unless
 * CPE is reset.
 */
static irqreturn_t svass_exception_irq(int irq, void *data)
{
	struct wcd_cpe_core *core = data;
	u8 status = 0;

	if (!core || !CPE_ERR_IRQ_CB(core)) {
		pr_err("%s: Invalid %s\n",
		       __func__,
		       (!core) ? "core" : "cdc control");
		return IRQ_HANDLED;
	}

	core->cpe_cdc_cb->cpe_err_irq_control(core->component,
			CPE_ERR_IRQ_STATUS, &status);

	while (status != 0) {
		if (status & core->irq_info.cpe_fatal_irqs) {
			dev_err(core->dev,
				"%s: CPE SSR event,err_status = 0x%02x\n",
				__func__, status);
			wcd_cpe_ssr_event(core, WCD_CPE_SSR_EVENT);
			/*
			 * If fatal interrupt is received,
			 * trigger SSR and stop processing
			 * further interrupts
			 */
			break;
		}
		/*
		 * Mask the interrupt that was raised to
		 * avoid spurious interrupts
		 */
		core->cpe_cdc_cb->cpe_err_irq_control(core->component,
				CPE_ERR_IRQ_MASK, &status);

		/* Clear only the interrupt that was raised */
		core->cpe_cdc_cb->cpe_err_irq_control(core->component,
				CPE_ERR_IRQ_CLEAR, &status);
		dev_err(core->dev,
			"%s: err_interrupt status = 0x%x\n",
			__func__, status);

		/* Read status for pending interrupts */
		core->cpe_cdc_cb->cpe_err_irq_control(core->component,
				CPE_ERR_IRQ_STATUS, &status);
	}

	return IRQ_HANDLED;
}

/*
 * wcd_cpe_cmi_afe_cb: callback called on response to afe commands
 * @param: parameter containing the response code, etc
 *
 * Process the response to the command sent to CPE and wake up the
 * command send wait.
 */
static void wcd_cpe_cmi_afe_cb(const struct cmi_api_notification *param)
{
	struct cmi_hdr *hdr;
	struct wcd_cmi_afe_port_data *afe_port_d;
	u8 port_id;

	if (!param) {
		pr_err("%s: param is null\n", __func__);
		return;
	}

	if (param->event != CMI_API_MSG) {
		pr_err("%s: unhandled event 0x%x\n",
		       __func__, param->event);
		return;
	}

	pr_debug("%s: param->result = %d\n",
		 __func__, param->result);

	hdr = (struct cmi_hdr *) param->message;

	/*
	 * for AFE cmd response, port id is
	 * stored at session id field of header
	 */
	port_id = CMI_HDR_GET_SESSION_ID(hdr);
	if (port_id > WCD_CPE_AFE_MAX_PORTS) {
		pr_err("%s: invalid port_id %d\n",
		       __func__, port_id);
		return;
	}

	afe_port_d = &(afe_ports[port_id]);

	if (hdr->opcode == CPE_CMI_BASIC_RSP_OPCODE) {

		u8 *payload = ((u8 *)param->message) + (sizeof(struct cmi_hdr));
		u8 result = payload[0];

		afe_port_d->cmd_result = result;
		complete(&afe_port_d->afe_cmd_complete);

	} else if (hdr->opcode == CPE_AFE_PORT_CMDRSP_SHARED_MEM_ALLOC) {

		struct cpe_cmdrsp_shmem_alloc *cmdrsp_shmem_alloc =
			(struct cpe_cmdrsp_shmem_alloc *) param->message;

		if (cmdrsp_shmem_alloc->addr == 0) {
			pr_err("%s: Failed AFE shared mem alloc\n", __func__);
			afe_port_d->cmd_result = CMI_SHMEM_ALLOC_FAILED;
		} else {
			pr_debug("%s AFE shared mem addr = 0x%x\n",
				 __func__, cmdrsp_shmem_alloc->addr);
			afe_port_d->mem_handle = cmdrsp_shmem_alloc->addr;
			afe_port_d->cmd_result = 0;
		}
		complete(&afe_port_d->afe_cmd_complete);
	}
}

/*
 * wcd_cpe_initialize_afe_port_data: Initialize all AFE ports
 *
 * Initialize the data for all the afe ports. Assign the
 * afe port state to INIT state.
 */
static void wcd_cpe_initialize_afe_port_data(void)
{
	struct wcd_cmi_afe_port_data *afe_port_d;
	int i;

	for (i = 0; i <= WCD_CPE_AFE_MAX_PORTS; i++) {
		afe_port_d = &afe_ports[i];
		afe_port_d->port_id = i;
		init_completion(&afe_port_d->afe_cmd_complete);
		afe_port_d->port_state = AFE_PORT_STATE_INIT;
		mutex_init(&afe_port_d->afe_lock);
	}
}

/*
 * wcd_cpe_deinitialize_afe_port_data: De-initialize all AFE ports
 *
 * De-Initialize the data for all the afe ports. Assign the
 * afe port state to DEINIT state.
 */
static void wcd_cpe_deinitialize_afe_port_data(void)
{
	struct wcd_cmi_afe_port_data *afe_port_d;
	int i;

	for (i = 0; i <= WCD_CPE_AFE_MAX_PORTS; i++) {
		afe_port_d = &afe_ports[i];
		afe_port_d->port_state = AFE_PORT_STATE_DEINIT;
		mutex_destroy(&afe_port_d->afe_lock);
	}
}

/*
 * wcd_cpe_svc_event_cb: callback from cpe services, indicating
 * CPE is online or offline.
 * @param: parameter / payload for event to be notified
 */
static void wcd_cpe_svc_event_cb(const struct cpe_svc_notification *param)
{
	struct snd_soc_component *component;
	struct wcd_cpe_core *core;
	struct cpe_svc_boot_event *boot_data;
	bool active_sessions;

	if (!param) {
		pr_err("%s: Invalid event\n", __func__);
		return;
	}

	component = param->private_data;
	if (!component) {
		pr_err("%s: Invalid handle to codec\n",
		       __func__);
		return;
	}

	core = wcd_cpe_get_core_handle(component);
	if (!core) {
		pr_err("%s: Invalid handle to core\n",
		       __func__);
		return;
	}

	dev_dbg(core->dev,
		"%s: event = 0x%x, ssr_type = 0x%x\n",
		__func__, param->event, core->ssr_type);

	switch (param->event) {
	case CPE_SVC_BOOT:
		boot_data = (struct cpe_svc_boot_event *)
				param->payload;
		core->sfr_buf_addr = boot_data->debug_address;
		core->sfr_buf_size = boot_data->debug_buffer_size;
		dev_dbg(core->dev,
			"%s: CPE booted, sfr_addr = %d, sfr_size = %zu\n",
			__func__, core->sfr_buf_addr,
			core->sfr_buf_size);
		break;
	case CPE_SVC_ONLINE:
		core->ssr_type = WCD_CPE_ACTIVE;
		dev_dbg(core->dev, "%s CPE is now online\n",
			__func__);
		complete(&core->online_compl);
		break;
	case CPE_SVC_OFFLINE:
		/*
		 * offline can happen during normal shutdown,
		 * but we are interested in offline only during
		 * SSR.
		 */
		if (core->ssr_type != WCD_CPE_SSR_EVENT &&
		    core->ssr_type != WCD_CPE_BUS_DOWN_EVENT)
			break;

		active_sessions = wcd_cpe_lsm_session_active();
		wcd_cpe_change_online_state(core, 0);
		complete(&core->offline_compl);
		dev_err(core->dev, "%s: CPE is now offline\n",
			__func__);
		break;
	case CPE_SVC_CMI_CLIENTS_DEREG:

		/*
		 * Only when either CPE SSR is in progress,
		 * or the bus is down, we need to mark the CPE
		 * as ready. In all other cases, this event is
		 * ignored
		 */
		if (core->ssr_type == WCD_CPE_SSR_EVENT ||
		    core->ssr_type == WCD_CPE_BUS_DOWN_EVENT)
			wcd_cpe_set_and_complete(core,
						 WCD_CPE_BLK_READY);
		break;
	default:
		dev_err(core->dev,
			"%s: unhandled notification\n",
			__func__);
		break;
	}
}

/*
 * wcd_cpe_cleanup_irqs: free the irq resources required by cpe
 * @core: handle to the cpe core
 *
 * This API will free the IRQs for CPE but does not mask the
 * CPE interrupts. If masking is needed, it has to be done
 * explicitly by the caller.
 */
static void wcd_cpe_cleanup_irqs(struct wcd_cpe_core *core)
{

	struct snd_soc_component *component = core->component;
	struct wcd9xxx *wcd9xxx = dev_get_drvdata(component->dev->parent);
	struct wcd9xxx_core_resource *core_res = &wcd9xxx->core_res;

	wcd9xxx_free_irq(core_res,
			 core->irq_info.cpe_engine_irq,
			 core);
	wcd9xxx_free_irq(core_res,
			 core->irq_info.cpe_err_irq,
			 core);

}

/*
 * wcd_cpe_setup_irqs: setup the irqs for CPE
 * @core: handle to wcd_cpe_core
 * All interrupts needed for CPE are acquired. If any
 * request_irq fails, then all irqs are free'd
 */
static int wcd_cpe_setup_irqs(struct wcd_cpe_core *core)
{
	int ret;
	struct snd_soc_component *component = core->component;
	struct wcd9xxx *wcd9xxx = dev_get_drvdata(component->dev->parent);
	struct wcd9xxx_core_resource *core_res = &wcd9xxx->core_res;

	ret = wcd9xxx_request_irq(core_res,
				  core->irq_info.cpe_engine_irq,
				  svass_engine_irq, "SVASS_Engine", core);
	if (ret) {
		dev_err(core->dev,
			"%s: Failed to request svass engine irq\n",
			__func__);
		goto fail_engine_irq;
	}

	/* Make sure all error interrupts are cleared */
	if (CPE_ERR_IRQ_CB(core))
		core->cpe_cdc_cb->cpe_err_irq_control(
					core->component,
					CPE_ERR_IRQ_CLEAR,
					NULL);

	/* Enable required error interrupts */
	if (CPE_ERR_IRQ_CB(core))
		core->cpe_cdc_cb->cpe_err_irq_control(
					core->component,
					CPE_ERR_IRQ_UNMASK,
					NULL);

	ret = wcd9xxx_request_irq(core_res,
				  core->irq_info.cpe_err_irq,
				  svass_exception_irq, "SVASS_Exception", core);
	if (ret) {
		dev_err(core->dev,
			"%s: Failed to request svass err irq\n",
			__func__);
		goto fail_exception_irq;
	}

	return 0;

fail_exception_irq:
	wcd9xxx_free_irq(core_res,
			 core->irq_info.cpe_engine_irq, core);

fail_engine_irq:
	return ret;
}

static int wcd_cpe_get_cal_index(int32_t cal_type)
{
	int cal_index = -EINVAL;

	if (cal_type == ULP_AFE_CAL_TYPE)
		cal_index = WCD_CPE_LSM_CAL_AFE;
	else if (cal_type == ULP_LSM_CAL_TYPE)
		cal_index = WCD_CPE_LSM_CAL_LSM;
	else if (cal_type == ULP_LSM_TOPOLOGY_ID_CAL_TYPE)
		cal_index = WCD_CPE_LSM_CAL_TOPOLOGY_ID;
	else
		pr_err("%s: invalid cal_type %d\n",
		       __func__, cal_type);

	return cal_index;
}

static int wcd_cpe_alloc_cal(int32_t cal_type, size_t data_size, void *data)
{
	int ret = 0;
	int cal_index;

	cal_index = wcd_cpe_get_cal_index(cal_type);
	if (cal_index < 0) {
		pr_err("%s: invalid caltype %d\n",
		       __func__, cal_type);
		return -EINVAL;
	}

	ret = cal_utils_alloc_cal(data_size, data,
				  core_d->cal_data[cal_index],
				  0, NULL);
	if (ret < 0)
		pr_err("%s: cal_utils_alloc_cal failed, ret = %d, cal type = %d!\n",
		       __func__, ret, cal_type);
	return ret;
}

static int wcd_cpe_dealloc_cal(int32_t cal_type, size_t data_size,
			       void *data)
{
	int ret = 0;
	int cal_index;

	cal_index = wcd_cpe_get_cal_index(cal_type);
	if (cal_index < 0) {
		pr_err("%s: invalid caltype %d\n",
		       __func__, cal_type);
		return -EINVAL;
	}

	ret = cal_utils_dealloc_cal(data_size, data,
				    core_d->cal_data[cal_index]);
	if (ret < 0)
		pr_err("%s: cal_utils_dealloc_cal failed, ret = %d, cal type = %d!\n",
		       __func__, ret, cal_type);
	return ret;
}

static int wcd_cpe_set_cal(int32_t cal_type, size_t data_size, void *data)
{
	int ret = 0;
	int cal_index;

	cal_index = wcd_cpe_get_cal_index(cal_type);
	if (cal_index < 0) {
		pr_err("%s: invalid caltype %d\n",
		       __func__, cal_type);
		return -EINVAL;
	}

	ret = cal_utils_set_cal(data_size, data,
				core_d->cal_data[cal_index],
				0, NULL);
	if (ret < 0)
		pr_err("%s: cal_utils_set_cal failed, ret = %d, cal type = %d!\n",
		       __func__, ret, cal_type);
	return ret;
}

static int wcd_cpe_cal_init(struct wcd_cpe_core *core)
{
	int ret = 0;

	struct cal_type_info cal_type_info[] = {
		{{ULP_AFE_CAL_TYPE,
		  {wcd_cpe_alloc_cal, wcd_cpe_dealloc_cal, NULL,
		   wcd_cpe_set_cal, NULL, NULL} },
		 {NULL, NULL, cal_utils_match_buf_num} },

		{{ULP_LSM_CAL_TYPE,
		  {wcd_cpe_alloc_cal, wcd_cpe_dealloc_cal, NULL,
		   wcd_cpe_set_cal, NULL, NULL} },
		 {NULL, NULL, cal_utils_match_buf_num} },

		{{ULP_LSM_TOPOLOGY_ID_CAL_TYPE,
		  {wcd_cpe_alloc_cal, wcd_cpe_dealloc_cal, NULL,
		   wcd_cpe_set_cal, NULL, NULL} },
		 {NULL, NULL, cal_utils_match_buf_num} },
	};

	ret = cal_utils_create_cal_types(WCD_CPE_LSM_CAL_MAX,
					 core->cal_data,
					 cal_type_info);
	if (ret < 0)
		pr_err("%s: could not create cal type!\n",
		       __func__);
	return ret;
}

/*
 * wcd_cpe_vote: vote/unvote for CPE usage; the first voter enables
 * CPE (interrupt setup, image download and bootup) and the last
 * voter disables it.
 * @core: handle to cpe core structure
 * @enable: flag indicating whether this is a vote or an unvote
 */
static int wcd_cpe_vote(struct wcd_cpe_core *core,
		bool enable)
{
	int ret = 0;

	if (!core) {
		pr_err("%s: Invalid handle to core\n",
		       __func__);
		ret = -EINVAL;
		goto done;
	}

	dev_dbg(core->dev,
		"%s: enter, enable = %s, cpe_users = %u\n",
		__func__, (enable ? "true" : "false"),
		core->cpe_users);

	if (enable) {
		core->cpe_users++;
		if (core->cpe_users == 1) {
			ret = wcd_cpe_enable(core, enable);
			if (ret) {
				dev_err(core->dev,
					"%s: CPE enable failed, err = %d\n",
					__func__, ret);
				goto done;
			}
		} else {
			dev_dbg(core->dev,
				"%s: cpe already enabled, users = %u\n",
				__func__, core->cpe_users);
			goto done;
		}
	} else {
		core->cpe_users--;
		if (core->cpe_users == 0) {
			ret = wcd_cpe_enable(core, enable);
			if (ret) {
				dev_err(core->dev,
					"%s: CPE disable failed, err = %d\n",
					__func__, ret);
				goto done;
			}
		} else {
			dev_dbg(core->dev,
				"%s: %u valid users on cpe\n",
				__func__, core->cpe_users);
			goto done;
		}
	}

	dev_dbg(core->dev,
		"%s: leave, enable = %s, cpe_users = %u\n",
		__func__, (enable ? "true" : "false"),
		core->cpe_users);

done:
	return ret;
}

static int wcd_cpe_debugfs_init(struct wcd_cpe_core *core)
{
	int rc = 0;

	struct dentry *dir = debugfs_create_dir("wcd_cpe", NULL);

	if (IS_ERR_OR_NULL(dir)) {
		dir = NULL;
		rc = -ENODEV;
		goto err_create_dir;
	}

	if (!debugfs_create_u32("ramdump_enable", 0644,
				dir, &ramdump_enable)) {
		dev_err(core->dev, "%s: Failed to create debugfs node %s\n",
			__func__, "ramdump_enable");
		rc = -ENODEV;
		goto err_create_entry;
	}

	if (!debugfs_create_file("cpe_ftm_test_trigger", 0200,
				 dir, core, &cpe_ftm_test_trigger_fops)) {
		dev_err(core->dev, "%s: Failed to create debugfs node %s\n",
			__func__, "cpe_ftm_test_trigger");
		rc = -ENODEV;
		goto err_create_entry;
	}

	if (!debugfs_create_u32("cpe_ftm_test_status", 0444,
				dir, &cpe_ftm_test_status)) {
		dev_err(core->dev, "%s: Failed to create debugfs node %s\n",
			__func__, "cpe_ftm_test_status");
		rc = -ENODEV;
		goto err_create_entry;
	}

	return 0;

err_create_entry:
	debugfs_remove(dir);

err_create_dir:
	return rc;
}
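
/*
 * With debugfs mounted at the usual /sys/kernel/debug, the nodes created
 * above land at /sys/kernel/debug/wcd_cpe/{ramdump_enable,
 * cpe_ftm_test_trigger, cpe_ftm_test_status}; ramdump_enable must be set
 * non-zero before an SSR for wcd_cpe_ssr_work() to collect a CPE ramdump.
 */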

static ssize_t fw_name_show(struct wcd_cpe_core *core, char *buf)
{
	return snprintf(buf, WCD_CPE_IMAGE_FNAME_MAX, "%s",
			core->dyn_fname);
}

static ssize_t fw_name_store(struct wcd_cpe_core *core,
		const char *buf, ssize_t count)
{
	int copy_count = count;
	const char *pos;

	pos = memchr(buf, '\n', count);
	if (pos)
		copy_count = pos - buf;

	if (copy_count > (WCD_CPE_IMAGE_FNAME_MAX - 1)) {
		dev_err(core->dev,
			"%s: Invalid length %d, max allowed %d\n",
			__func__, copy_count, WCD_CPE_IMAGE_FNAME_MAX - 1);
		return -EINVAL;
	}

	strlcpy(core->dyn_fname, buf, copy_count + 1);

	return count;
}

WCD_CPE_ATTR(fw_name, 0660, fw_name_show, fw_name_store);

static ssize_t wcd_cpe_sysfs_show(struct kobject *kobj,
		struct attribute *attr, char *buf)
{
	struct wcd_cpe_attribute *cpe_attr = to_wcd_cpe_attr(attr);
	struct wcd_cpe_core *core = kobj_to_cpe_core(kobj);
	ssize_t ret = -EINVAL;

	if (core && cpe_attr->show)
		ret = cpe_attr->show(core, buf);

	return ret;
}

static ssize_t wcd_cpe_sysfs_store(struct kobject *kobj,
		struct attribute *attr, const char *buf,
		size_t count)
{
	struct wcd_cpe_attribute *cpe_attr = to_wcd_cpe_attr(attr);
	struct wcd_cpe_core *core = kobj_to_cpe_core(kobj);
	ssize_t ret = -EINVAL;

	if (core && cpe_attr->store)
		ret = cpe_attr->store(core, buf, count);

	return ret;
}

static const struct sysfs_ops wcd_cpe_sysfs_ops = {
	.show = wcd_cpe_sysfs_show,
	.store = wcd_cpe_sysfs_store,
};

static struct kobj_type wcd_cpe_ktype = {
	.sysfs_ops = &wcd_cpe_sysfs_ops,
};

static int wcd_cpe_sysfs_init(struct wcd_cpe_core *core, int id)
{
	char sysfs_dir_name[WCD_CPE_SYSFS_DIR_MAX_LENGTH];
	int rc = 0;

	snprintf(sysfs_dir_name, WCD_CPE_SYSFS_DIR_MAX_LENGTH,
		 "%s%d", "wcd_cpe", id);

	rc = kobject_init_and_add(&core->cpe_kobj, &wcd_cpe_ktype,
				  kernel_kobj,
				  sysfs_dir_name);
	if (unlikely(rc)) {
		dev_err(core->dev,
			"%s: Failed to add kobject %s, err = %d\n",
			__func__, sysfs_dir_name, rc);
		goto done;
	}

	rc = sysfs_create_file(&core->cpe_kobj, &cpe_attr_fw_name.attr);
	if (rc) {
		dev_err(core->dev,
			"%s: Failed to create fw_name sysfs entry in %s\n",
			__func__, sysfs_dir_name);
		goto fail_create_file;
	}

	return 0;

fail_create_file:
	kobject_put(&core->cpe_kobj);
done:
	return rc;
}
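
/*
 * Since the kobject is parented to kernel_kobj, the attribute lands at
 * /sys/kernel/wcd_cpe<id>/fw_name (id is 0 in wcd_cpe_init() below).
 * Illustrative use: echo <image_base_name> > /sys/kernel/wcd_cpe0/fw_name
 * selects the firmware picked up by wcd_cpe_check_new_image() on the
 * next CPE enable.
 */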

static ssize_t cpe_ftm_test_trigger(struct file *file,
				    const char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct wcd_cpe_core *core = file->private_data;
	int ret = 0;

	/* Enable the clks for cpe */
	ret = wcd_cpe_enable_cpe_clks(core, true);
	if (ret < 0) {
		dev_err(core->dev,
			"%s: CPE clk enable failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	/* Get the CPE_STATUS */
	ret = cpe_svc_ftm_test(core->cpe_handle, &cpe_ftm_test_status);
	if (ret < 0) {
		dev_err(core->dev,
			"%s: CPE FTM test failed, err = %d\n",
			__func__, ret);
		if (ret == CPE_SVC_BUSY) {
			cpe_ftm_test_status = 1;
			ret = 0;
		}
	}

	/* Disable the clks for cpe */
	ret = wcd_cpe_enable_cpe_clks(core, false);
	if (ret < 0) {
		dev_err(core->dev,
			"%s: CPE clk disable failed, err = %d\n",
			__func__, ret);
	}

done:
	if (ret < 0)
		return ret;
	else
		return count;
}

static int wcd_cpe_validate_params(
	struct snd_soc_component *component,
	struct wcd_cpe_params *params)
{

	if (!component) {
		pr_err("%s: Invalid codec\n", __func__);
		return -EINVAL;
	}

	if (!params) {
		dev_err(component->dev,
			"%s: No params supplied for codec %s\n",
			__func__, component->name);
		return -EINVAL;
	}

	if (!params->component || !params->get_cpe_core ||
	    !params->cdc_cb) {
		dev_err(component->dev,
			"%s: Invalid params for codec %s\n",
			__func__, component->name);
		return -EINVAL;
	}

	return 0;
}

/*
 * wcd_cpe_init: Initialize CPE related structures
 * @img_fname: filename for firmware image
 * @component: handle to codec requesting for image download
 * @params: parameter structure passed from caller
 *
 * This API will initialize the cpe core but will not
 * download the image or boot the cpe core.
 */
struct wcd_cpe_core *wcd_cpe_init(const char *img_fname,
	struct snd_soc_component *component,
	struct wcd_cpe_params *params)
{
	struct wcd_cpe_core *core;
	int ret = 0;
	struct snd_card *card = NULL;
	struct snd_info_entry *entry = NULL;
	char proc_name[WCD_CPE_STATE_MAX_LEN];
	const char *cpe_name = "cpe";
	const char *state_name = "_state";
	const struct cpe_svc_hw_cfg *hw_info;
	int id = 0;

	if (wcd_cpe_validate_params(component, params))
		return NULL;

	core = kzalloc(sizeof(struct wcd_cpe_core), GFP_KERNEL);
	if (!core)
		return NULL;

	snprintf(core->fname, sizeof(core->fname), "%s", img_fname);
	strlcpy(core->dyn_fname, core->fname, WCD_CPE_IMAGE_FNAME_MAX);

	wcd_get_cpe_core = params->get_cpe_core;

	core->component = params->component;
	core->dev = params->component->dev;
	core->cpe_debug_mode = params->dbg_mode;

	core->cdc_info.major_version = params->cdc_major_ver;
	core->cdc_info.minor_version = params->cdc_minor_ver;
	core->cdc_info.id = params->cdc_id;

	core->cpe_cdc_cb = params->cdc_cb;

	memcpy(&core->irq_info, &params->cdc_irq_info,
	       sizeof(core->irq_info));

	INIT_WORK(&core->load_fw_work, wcd_cpe_load_fw_image);
	INIT_WORK(&core->ssr_work, wcd_cpe_ssr_work);
	init_completion(&core->offline_compl);
	init_completion(&core->ready_compl);
	init_completion(&core->online_compl);
	init_waitqueue_head(&core->ssr_entry.offline_poll_wait);
	mutex_init(&core->ssr_lock);
	mutex_init(&core->session_lock);
	core->cpe_users = 0;
	core->cpe_clk_ref = 0;

	/*
	 * By default, during probe, it is assumed that
	 * both CPE hardware block and underlying bus to codec
	 * are ready
	 */
	core->ready_status = WCD_CPE_READY_TO_DLOAD;

	core->cpe_handle = cpe_svc_initialize(NULL, &core->cdc_info,
					      params->cpe_svc_params);
	if (!core->cpe_handle) {
		dev_err(core->dev,
			"%s: failed to initialize cpe services\n",
			__func__);
		goto fail_cpe_initialize;
	}

	core->cpe_reg_handle = cpe_svc_register(core->cpe_handle,
					wcd_cpe_svc_event_cb,
					CPE_SVC_ONLINE | CPE_SVC_OFFLINE |
					CPE_SVC_BOOT |
					CPE_SVC_CMI_CLIENTS_DEREG,
					"codec cpe handler");
	if (!core->cpe_reg_handle) {
		dev_err(core->dev,
			"%s: failed to register cpe service\n",
			__func__);
		goto fail_cpe_register;
	}

	card = component->card->snd_card;
	snprintf(proc_name, (sizeof("cpe") + sizeof("_state") +
			     sizeof(id) - 2), "%s%d%s", cpe_name, id, state_name);
	entry = snd_info_create_card_entry(card, proc_name,
					   card->proc_root);
	if (entry) {
		core->ssr_entry.entry = entry;
		core->ssr_entry.offline = 1;
		entry->size = WCD_CPE_STATE_MAX_LEN;
		entry->content = SNDRV_INFO_CONTENT_DATA;
		entry->c.ops = &wcd_cpe_state_proc_ops;
		entry->private_data = core;
		ret = snd_info_register(entry);
		if (ret < 0) {
			dev_err(core->dev,
				"%s: snd_info_register failed (%d)\n",
				__func__, ret);
			snd_info_free_entry(entry);
			entry = NULL;
		}
	} else {
		dev_err(core->dev,
			"%s: Failed to create CPE SSR status entry\n",
			__func__);
		/*
		 * Even if SSR entry creation fails, continue
		 * with image download
		 */
	}

	core_d = core;
	ret = wcd_cpe_cal_init(core);
	if (ret < 0) {
		dev_err(core->dev,
			"%s: CPE calibration init failed, err = %d\n",
			__func__, ret);
		goto fail_cpe_reset;
	}

	wcd_cpe_debugfs_init(core);

	wcd_cpe_sysfs_init(core, id);

	hw_info = cpe_svc_get_hw_cfg(core->cpe_handle);
	if (!hw_info) {
		dev_err(core->dev,
			"%s: hw info not available\n",
			__func__);
		goto schedule_dload_work;
	} else {
		core->hw_info.dram_offset = hw_info->DRAM_offset;
		core->hw_info.dram_size = hw_info->DRAM_size;
		core->hw_info.iram_offset = hw_info->IRAM_offset;
		core->hw_info.iram_size = hw_info->IRAM_size;
	}

	/* Setup the ramdump device and buffer */
	core->cpe_ramdump_dev = create_ramdump_device("cpe",
						      core->dev);
	if (!core->cpe_ramdump_dev) {
		dev_err(core->dev,
			"%s: Failed to create ramdump device\n",
			__func__);
		goto schedule_dload_work;
	}

	arch_setup_dma_ops(core->dev, 0, 0, NULL, 0);
	core->cpe_dump_v_addr = dma_alloc_coherent(core->dev,
						   core->hw_info.dram_size,
						   &core->cpe_dump_addr,
						   GFP_KERNEL);
	if (!core->cpe_dump_v_addr) {
		dev_err(core->dev,
			"%s: Failed to alloc memory for cpe dump, size = %zd\n",
			__func__, core->hw_info.dram_size);
		goto schedule_dload_work;
	} else {
		memset(core->cpe_dump_v_addr, 0, core->hw_info.dram_size);
	}

schedule_dload_work:
	core->ssr_type = WCD_CPE_INITIALIZED;
	schedule_work(&core->load_fw_work);
	return core;

fail_cpe_reset:
	cpe_svc_deregister(core->cpe_handle, core->cpe_reg_handle);

fail_cpe_register:
	cpe_svc_deinitialize(core->cpe_handle);

fail_cpe_initialize:
	kfree(core);
	return NULL;
}
2067EXPORT_SYMBOL(wcd_cpe_init);
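
/*
 * Note: wcd_cpe_init() intentionally returns before any firmware is on
 * the CPE. The image download runs asynchronously from load_fw_work
 * (scheduled above with ssr_type = WCD_CPE_INITIALIZED), so callers
 * must not assume the core is booted when this function returns.
 */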

/*
 * wcd_cpe_cmi_lsm_callback: callback called from cpe services
 *			     to notify command response for lsm service
 * @param: param containing the response code and status
 *
 * This callback is registered with cpe services while registering
 * the LSM service
 */
static void wcd_cpe_cmi_lsm_callback(const struct cmi_api_notification *param)
{
	struct cmi_hdr *hdr;
	struct cpe_lsm_session *lsm_session;
	u8 session_id;

	if (!param) {
		pr_err("%s: param is null\n", __func__);
		return;
	}

	if (param->event != CMI_API_MSG) {
		pr_err("%s: unhandled event 0x%x\n", __func__, param->event);
		return;
	}

	hdr = (struct cmi_hdr *) param->message;
	session_id = CMI_HDR_GET_SESSION_ID(hdr);

	if (session_id > WCD_CPE_LSM_MAX_SESSIONS) {
		pr_err("%s: invalid lsm session id = %d\n",
			__func__, session_id);
		return;
	}

	lsm_session = lsm_sessions[session_id];

	if (hdr->opcode == CPE_CMI_BASIC_RSP_OPCODE) {
		u8 *payload = ((u8 *)param->message) + (sizeof(struct cmi_hdr));
		u8 result = payload[0];

		lsm_session->cmd_err_code = result;
		complete(&lsm_session->cmd_comp);
	} else if (hdr->opcode == CPE_LSM_SESSION_CMDRSP_SHARED_MEM_ALLOC) {
		struct cpe_cmdrsp_shmem_alloc *cmdrsp_shmem_alloc =
			(struct cpe_cmdrsp_shmem_alloc *) param->message;

		if (cmdrsp_shmem_alloc->addr == 0) {
			pr_err("%s: Failed LSM shared mem alloc\n", __func__);
			lsm_session->cmd_err_code = CMI_SHMEM_ALLOC_FAILED;
		} else {
			pr_debug("%s LSM shared mem addr = 0x%x\n",
				 __func__, cmdrsp_shmem_alloc->addr);
			lsm_session->lsm_mem_handle = cmdrsp_shmem_alloc->addr;
			lsm_session->cmd_err_code = 0;
		}

		complete(&lsm_session->cmd_comp);
	} else if (hdr->opcode == CPE_LSM_SESSION_EVENT_DETECTION_STATUS_V2) {
		struct cpe_lsm_event_detect_v2 *event_detect_v2 =
			(struct cpe_lsm_event_detect_v2 *) param->message;

		if (!lsm_session->priv_d) {
			pr_err("%s: private data is not present\n",
				__func__);
			return;
		}

		pr_debug("%s: event payload, status = %u, size = %u\n",
			 __func__, event_detect_v2->detection_status,
			 event_detect_v2->size);

		if (lsm_session->event_cb)
			lsm_session->event_cb(
				lsm_session->priv_d,
				event_detect_v2->detection_status,
				event_detect_v2->size,
				event_detect_v2->payload);
	}
}

/*
 * wcd_cpe_cmi_send_lsm_msg: send a message to lsm service
 * @core: handle to cpe core
 * @session: session on which to send the message
 * @message: actual message containing header and payload
 *
 * Sends a message to the LSM service for the specified session and
 * waits for the response. Must be called after acquiring the
 * session-specific mutex.
 */
static int wcd_cpe_cmi_send_lsm_msg(
	struct wcd_cpe_core *core,
	struct cpe_lsm_session *session,
	void *message)
{
	int ret = 0;
	struct cmi_hdr *hdr = message;

	pr_debug("%s: sending message with opcode 0x%x\n",
		 __func__, hdr->opcode);

	if (unlikely(!wcd_cpe_is_online_state(core))) {
		dev_err(core->dev,
			"%s: MSG not sent, CPE offline\n",
			__func__);
		goto done;
	}

	if (CMI_HDR_GET_OBM_FLAG(hdr))
		wcd_cpe_bus_vote_max_bw(core, true);

	reinit_completion(&session->cmd_comp);
	ret = cmi_send_msg(message);
	if (ret) {
		pr_err("%s: msg opcode (0x%x) send failed (%d)\n",
			__func__, hdr->opcode, ret);
		goto rel_bus_vote;
	}

	ret = wait_for_completion_timeout(&session->cmd_comp,
					  CMI_CMD_TIMEOUT);
	if (ret > 0) {
		pr_debug("%s: command 0x%x, received response 0x%x\n",
			 __func__, hdr->opcode, session->cmd_err_code);
		if (session->cmd_err_code == CMI_SHMEM_ALLOC_FAILED)
			session->cmd_err_code = CPE_ENOMEMORY;
		if (session->cmd_err_code > 0)
			pr_err("%s: CPE returned error[%s]\n",
				__func__, cpe_err_get_err_str(
				session->cmd_err_code));
		ret = cpe_err_get_lnx_err_code(session->cmd_err_code);
		goto rel_bus_vote;
	} else {
		pr_err("%s: command (0x%x) send timed out\n",
			__func__, hdr->opcode);
		ret = -ETIMEDOUT;
		goto rel_bus_vote;
	}

rel_bus_vote:
	if (CMI_HDR_GET_OBM_FLAG(hdr))
		wcd_cpe_bus_vote_max_bw(core, false);

done:
	return ret;
}
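
/*
 * Note on the send path above: every LSM command is strictly
 * synchronous. The session completion is re-armed before cmi_send_msg()
 * and the CPE-side status byte delivered by wcd_cpe_cmi_lsm_callback()
 * is translated into a Linux error code via cpe_err_get_lnx_err_code().
 * Out-of-band (OBM) messages additionally hold a maximum bus bandwidth
 * vote for the duration of the transfer.
 */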

/*
 * fill_cmi_header: fill the cmi header with specified values
 *
 * @hdr: header to be updated with values
 * @session_id: session id of the header,
 *		in case of AFE service it is port_id
 * @service_id: afe/lsm, etc
 * @version: update the version field in header
 * @payload_size: size of the payload following after header
 * @opcode: opcode of the message
 * @obm_flag: indicates if this header is for obm message
 */
static int fill_cmi_header(struct cmi_hdr *hdr,
			   u8 session_id, u8 service_id,
			   bool version, u8 payload_size,
			   u16 opcode, bool obm_flag)
{
	/* sanitize the data */
	if (!IS_VALID_SESSION_ID(session_id) ||
	    !IS_VALID_SERVICE_ID(service_id) ||
	    !IS_VALID_PLD_SIZE(payload_size)) {
		pr_err("Invalid header creation request\n");
		return -EINVAL;
	}

	CMI_HDR_SET_SESSION(hdr, session_id);
	CMI_HDR_SET_SERVICE(hdr, service_id);
	if (version)
		CMI_HDR_SET_VERSION(hdr, 1);
	else
		CMI_HDR_SET_VERSION(hdr, 0);

	CMI_HDR_SET_PAYLOAD_SIZE(hdr, payload_size);

	hdr->opcode = opcode;

	if (obm_flag)
		CMI_HDR_SET_OBM(hdr, CMI_OBM_FLAG_OUT_BAND);
	else
		CMI_HDR_SET_OBM(hdr, CMI_OBM_FLAG_IN_BAND);

	return 0;
}
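
/*
 * Illustrative use of fill_cmi_header() (sketch only, not live code):
 * composing a version 0, in-band header for an LSM session command:
 *
 *	struct cmi_hdr hdr;
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	if (fill_cmi_header(&hdr, session->id, CMI_CPE_LSM_SERVICE_ID,
 *			    false, payload_size,
 *			    CPE_LSM_SESSION_CMD_SET_PARAMS, false))
 *		return -EINVAL;
 *
 * This is exactly what fill_lsm_cmd_header_v0_inband() below wraps.
 */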

/*
 * fill_lsm_cmd_header_v0_inband:
 *	Given the header, fill the header with information
 *	for lsm service, version 0 and inband message
 * @hdr: the cmi header to be filled.
 * @session_id: ID for the lsm session
 * @payload_size: size for cmi message payload
 * @opcode: opcode for cmi message
 */
static int fill_lsm_cmd_header_v0_inband(struct cmi_hdr *hdr,
		u8 session_id, u8 payload_size, u16 opcode)
{
	return fill_cmi_header(hdr, session_id,
			       CMI_CPE_LSM_SERVICE_ID, false,
			       payload_size, opcode, false);
}

/*
 * wcd_cpe_is_valid_lsm_session:
 *	Check session parameters to identify validity for the session
 * @core: handle to cpe core
 * @session: handle to the lsm session
 * @func: invoking function to be printed in error logs
 */
static int wcd_cpe_is_valid_lsm_session(struct wcd_cpe_core *core,
		struct cpe_lsm_session *session,
		const char *func)
{
	if (unlikely(IS_ERR_OR_NULL(core))) {
		pr_err("%s: invalid handle to core\n",
			func);
		return -EINVAL;
	}

	if (unlikely(IS_ERR_OR_NULL(session))) {
		dev_err(core->dev, "%s: invalid session\n",
			func);
		return -EINVAL;
	}

	if (session->id > WCD_CPE_LSM_MAX_SESSIONS) {
		dev_err(core->dev, "%s: invalid session id (%u)\n",
			func, session->id);
		return -EINVAL;
	}

	dev_dbg(core->dev, "%s: session_id = %u\n",
		func, session->id);
	return 0;
}

static int wcd_cpe_cmd_lsm_open_tx_v2(
	struct wcd_cpe_core *core,
	struct cpe_lsm_session *session)
{
	struct cpe_lsm_cmd_open_tx_v2 cmd_open_tx_v2;
	struct cal_block_data *top_cal = NULL;
	struct audio_cal_info_lsm_top *lsm_top;
	int ret = 0;

	ret = wcd_cpe_is_valid_lsm_session(core, session,
					   __func__);
	if (ret)
		return ret;

	if (core->cal_data[WCD_CPE_LSM_CAL_TOPOLOGY_ID] == NULL) {
		dev_err(core->dev,
			"%s: LSM_TOPOLOGY cal not allocated!\n",
			__func__);
		return -EINVAL;
	}

	mutex_lock(&core->cal_data[WCD_CPE_LSM_CAL_TOPOLOGY_ID]->lock);
	top_cal = cal_utils_get_only_cal_block(
			core->cal_data[WCD_CPE_LSM_CAL_TOPOLOGY_ID]);
	if (!top_cal) {
		dev_err(core->dev,
			"%s: Failed to get LSM TOPOLOGY cal block\n",
			__func__);
		ret = -EINVAL;
		goto unlock_cal_mutex;
	}

	lsm_top = (struct audio_cal_info_lsm_top *)
			top_cal->cal_info;
	if (!lsm_top) {
		dev_err(core->dev,
			"%s: cal_info for LSM_TOPOLOGY not found\n",
			__func__);
		ret = -EINVAL;
		goto unlock_cal_mutex;
	}

	dev_dbg(core->dev,
		"%s: topology_id = 0x%x, acdb_id = 0x%x, app_type = 0x%x\n",
		__func__, lsm_top->topology, lsm_top->acdb_id,
		lsm_top->app_type);

	if (lsm_top->topology == 0) {
		dev_err(core->dev,
			"%s: topology id not sent for app_type 0x%x\n",
			__func__, lsm_top->app_type);
		ret = -EINVAL;
		goto unlock_cal_mutex;
	}

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");

	memset(&cmd_open_tx_v2, 0, sizeof(struct cpe_lsm_cmd_open_tx_v2));
	if (fill_lsm_cmd_header_v0_inband(&cmd_open_tx_v2.hdr,
				session->id, OPEN_V2_CMD_PAYLOAD_SIZE,
				CPE_LSM_SESSION_CMD_OPEN_TX_V2)) {
		ret = -EINVAL;
		goto end_ret;
	}

	cmd_open_tx_v2.topology_id = lsm_top->topology;
	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_open_tx_v2);
	if (ret)
		dev_err(core->dev,
			"%s: failed to send open_tx_v2 cmd, err = %d\n",
			__func__, ret);
	else
		session->is_topology_used = true;
end_ret:
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");

unlock_cal_mutex:
	mutex_unlock(&core->cal_data[WCD_CPE_LSM_CAL_TOPOLOGY_ID]->lock);
	return ret;
}

/*
 * wcd_cpe_cmd_lsm_open_tx: compose and send lsm open command
 * @core_handle: handle to cpe core
 * @session: session for which the command needs to be sent
 * @app_id: application id part of the command
 * @sample_rate: sample rate for this session
 */
static int wcd_cpe_cmd_lsm_open_tx(void *core_handle,
		struct cpe_lsm_session *session,
		u16 app_id, u16 sample_rate)
{
	struct cpe_lsm_cmd_open_tx cmd_open_tx;
	struct wcd_cpe_core *core = core_handle;
	int ret = 0;

	ret = wcd_cpe_is_valid_lsm_session(core, session,
					   __func__);
	if (ret)
		return ret;

	/* Try to open with topology first */
	ret = wcd_cpe_cmd_lsm_open_tx_v2(core, session);
	if (!ret)
		goto done;

	dev_dbg(core->dev, "%s: Try open_tx without topology\n",
		__func__);

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");

	memset(&cmd_open_tx, 0, sizeof(struct cpe_lsm_cmd_open_tx));
	if (fill_lsm_cmd_header_v0_inband(&cmd_open_tx.hdr,
				session->id, OPEN_CMD_PAYLOAD_SIZE,
				CPE_LSM_SESSION_CMD_OPEN_TX)) {
		ret = -EINVAL;
		goto end_ret;
	}

	cmd_open_tx.app_id = app_id;
	cmd_open_tx.sampling_rate = sample_rate;

	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_open_tx);
	if (ret)
		dev_err(core->dev,
			"%s: failed to send open_tx cmd, err = %d\n",
			__func__, ret);
end_ret:
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
done:
	return ret;
}

/*
 * wcd_cpe_cmd_lsm_close_tx: compose and send lsm close command
 * @core_handle: handle to cpe core
 * @session: session for which the command needs to be sent
 */
static int wcd_cpe_cmd_lsm_close_tx(void *core_handle,
		struct cpe_lsm_session *session)
{
	struct cmi_hdr cmd_close_tx;
	struct wcd_cpe_core *core = core_handle;
	int ret = 0;

	ret = wcd_cpe_is_valid_lsm_session(core, session,
					   __func__);
	if (ret)
		return ret;

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");

	memset(&cmd_close_tx, 0, sizeof(cmd_close_tx));
	if (fill_lsm_cmd_header_v0_inband(&cmd_close_tx, session->id,
					  0, CPE_LSM_SESSION_CMD_CLOSE_TX)) {
		ret = -EINVAL;
		goto end_ret;
	}

	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_close_tx);
	if (ret)
		dev_err(core->dev,
			"%s: lsm close_tx cmd failed, err = %d\n",
			__func__, ret);
	else
		session->is_topology_used = false;
end_ret:
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
	return ret;
}

/*
 * wcd_cpe_cmd_lsm_shmem_alloc: compose and send lsm shared
 *				memory allocation command
 * @core_handle: handle to cpe core
 * @session: session for which the command needs to be sent
 * @size: size of memory to be allocated
 */
static int wcd_cpe_cmd_lsm_shmem_alloc(void *core_handle,
		struct cpe_lsm_session *session,
		u32 size)
{
	struct cpe_cmd_shmem_alloc cmd_shmem_alloc;
	struct wcd_cpe_core *core = core_handle;
	int ret = 0;

	ret = wcd_cpe_is_valid_lsm_session(core, session,
					   __func__);
	if (ret)
		return ret;

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");

	memset(&cmd_shmem_alloc, 0, sizeof(cmd_shmem_alloc));
	if (fill_lsm_cmd_header_v0_inband(&cmd_shmem_alloc.hdr, session->id,
				SHMEM_ALLOC_CMD_PLD_SIZE,
				CPE_LSM_SESSION_CMD_SHARED_MEM_ALLOC)) {
		ret = -EINVAL;
		goto end_ret;
	}

	cmd_shmem_alloc.size = size;
	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_shmem_alloc);
	if (ret)
		dev_err(core->dev,
			"%s: lsm_shmem_alloc cmd send fail, %d\n",
			__func__, ret);
end_ret:
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
	return ret;
}

/*
 * wcd_cpe_cmd_lsm_shmem_dealloc: deallocate the shared memory
 *				  for the specified session
 * @core_handle: handle to cpe core
 * @session: session for which memory needs to be deallocated.
 */
static int wcd_cpe_cmd_lsm_shmem_dealloc(void *core_handle,
		struct cpe_lsm_session *session)
{
	struct cpe_cmd_shmem_dealloc cmd_dealloc;
	struct wcd_cpe_core *core = core_handle;
	int ret = 0;

	ret = wcd_cpe_is_valid_lsm_session(core, session,
					   __func__);
	if (ret)
		return ret;

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");

	memset(&cmd_dealloc, 0, sizeof(cmd_dealloc));
	if (fill_lsm_cmd_header_v0_inband(&cmd_dealloc.hdr, session->id,
				SHMEM_DEALLOC_CMD_PLD_SIZE,
				CPE_LSM_SESSION_CMD_SHARED_MEM_DEALLOC)) {
		ret = -EINVAL;
		goto end_ret;
	}

	cmd_dealloc.addr = session->lsm_mem_handle;
	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_dealloc);
	if (ret) {
		dev_err(core->dev,
			"%s: lsm_shmem_dealloc cmd failed, rc %d\n",
			__func__, ret);
		goto end_ret;
	}

	memset(&session->lsm_mem_handle, 0,
	       sizeof(session->lsm_mem_handle));

end_ret:
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
	return ret;
}

/*
 * wcd_cpe_send_lsm_cal: send the calibration for lsm service
 *			 from acdb to the cpe
 * @core: handle to cpe core
 * @session: session for which the calibration needs to be set.
 */
static int wcd_cpe_send_lsm_cal(
	struct wcd_cpe_core *core,
	struct cpe_lsm_session *session)
{
	u8 *msg_pld;
	struct cmi_hdr *hdr;
	struct cal_block_data *lsm_cal = NULL;
	void *inb_msg;
	int rc = 0;

	if (core->cal_data[WCD_CPE_LSM_CAL_LSM] == NULL) {
		pr_err("%s: LSM cal not allocated!\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&core->cal_data[WCD_CPE_LSM_CAL_LSM]->lock);
	lsm_cal = cal_utils_get_only_cal_block(
			core->cal_data[WCD_CPE_LSM_CAL_LSM]);
	if (!lsm_cal) {
		pr_err("%s: failed to get lsm cal block\n", __func__);
		rc = -EINVAL;
		goto unlock_cal_mutex;
	}

	if (lsm_cal->cal_data.size == 0) {
		dev_dbg(core->dev, "%s: No LSM cal to send\n",
			__func__);
		rc = 0;
		goto unlock_cal_mutex;
	}

	inb_msg = kzalloc(sizeof(struct cmi_hdr) + lsm_cal->cal_data.size,
			  GFP_KERNEL);
	if (!inb_msg) {
		rc = -ENOMEM;
		goto unlock_cal_mutex;
	}

	hdr = (struct cmi_hdr *) inb_msg;

	rc = fill_lsm_cmd_header_v0_inband(hdr, session->id,
					   lsm_cal->cal_data.size,
					   CPE_LSM_SESSION_CMD_SET_PARAMS);
	if (rc) {
		pr_err("%s: invalid params for header, err = %d\n",
			__func__, rc);
		goto free_msg;
	}

	msg_pld = ((u8 *) inb_msg) + sizeof(struct cmi_hdr);
	memcpy(msg_pld, lsm_cal->cal_data.kvaddr,
	       lsm_cal->cal_data.size);

	rc = wcd_cpe_cmi_send_lsm_msg(core, session, inb_msg);
	if (rc)
		pr_err("%s: acdb lsm_params send failed, err = %d\n",
			__func__, rc);

free_msg:
	kfree(inb_msg);

unlock_cal_mutex:
	mutex_unlock(&core->cal_data[WCD_CPE_LSM_CAL_LSM]->lock);
	return rc;
}

static void wcd_cpe_set_param_data(struct cpe_param_data *param_d,
		struct cpe_lsm_ids *ids, u32 p_size,
		u32 set_param_cmd)
{
	param_d->module_id = ids->module_id;
	param_d->param_id = ids->param_id;

	switch (set_param_cmd) {
	case CPE_LSM_SESSION_CMD_SET_PARAMS_V2:
		param_d->p_size.param_size = p_size;
		break;
	case CPE_LSM_SESSION_CMD_SET_PARAMS:
	default:
		param_d->p_size.sr.param_size = (u16) p_size;
		param_d->p_size.sr.reserved = 0;
		break;
	}
}
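
/*
 * wcd_cpe_set_param_data() encodes the parameter size differently per
 * set_param opcode: SET_PARAMS_V2 carries a full 32-bit param_size,
 * while the older SET_PARAMS packs a 16-bit size plus a reserved
 * 16-bit field. Callers only pass the byte count; the union layout in
 * struct cpe_param_data hides the difference.
 */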

static int wcd_cpe_send_param_epd_thres(struct wcd_cpe_core *core,
		struct cpe_lsm_session *session,
		void *data, struct cpe_lsm_ids *ids)
{
	struct snd_lsm_ep_det_thres *ep_det_data;
	struct cpe_lsm_param_epd_thres epd_cmd;
	struct cmi_hdr *msg_hdr = &epd_cmd.hdr;
	struct cpe_param_data *param_d = &epd_cmd.param;
	int rc;

	memset(&epd_cmd, 0, sizeof(epd_cmd));
	ep_det_data = (struct snd_lsm_ep_det_thres *) data;
	if (fill_lsm_cmd_header_v0_inband(msg_hdr,
				session->id,
				CPE_CMD_EPD_THRES_PLD_SIZE,
				CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
		rc = -EINVAL;
		goto err_ret;
	}

	wcd_cpe_set_param_data(param_d, ids,
			       CPE_EPD_THRES_PARAM_SIZE,
			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);

	epd_cmd.minor_version = 1;
	epd_cmd.epd_begin = ep_det_data->epd_begin;
	epd_cmd.epd_end = ep_det_data->epd_end;

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
	rc = wcd_cpe_cmi_send_lsm_msg(core, session, &epd_cmd);
	if (unlikely(rc))
		dev_err(core->dev,
			"%s: set_param(EPD Threshold) failed, rc %d\n",
			__func__, rc);
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
err_ret:
	return rc;
}

static int wcd_cpe_send_param_opmode(struct wcd_cpe_core *core,
		struct cpe_lsm_session *session,
		void *data, struct cpe_lsm_ids *ids)
{
	struct snd_lsm_detect_mode *opmode_d;
	struct cpe_lsm_param_opmode opmode_cmd;
	struct cmi_hdr *msg_hdr = &opmode_cmd.hdr;
	struct cpe_param_data *param_d = &opmode_cmd.param;
	int rc;

	memset(&opmode_cmd, 0, sizeof(opmode_cmd));
	opmode_d = (struct snd_lsm_detect_mode *) data;
	if (fill_lsm_cmd_header_v0_inband(msg_hdr,
				session->id,
				CPE_CMD_OPMODE_PLD_SIZE,
				CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
		rc = -EINVAL;
		goto err_ret;
	}

	wcd_cpe_set_param_data(param_d, ids,
			       CPE_OPMODE_PARAM_SIZE,
			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);

	opmode_cmd.minor_version = 1;
	if (opmode_d->mode == LSM_MODE_KEYWORD_ONLY_DETECTION)
		opmode_cmd.mode = 1;
	else
		opmode_cmd.mode = 3;

	if (opmode_d->detect_failure)
		opmode_cmd.mode |= 0x04;

	opmode_cmd.reserved = 0;

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
	rc = wcd_cpe_cmi_send_lsm_msg(core, session, &opmode_cmd);
	if (unlikely(rc))
		dev_err(core->dev,
			"%s: set_param(operation_mode) failed, rc %d\n",
			__func__, rc);
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
err_ret:
	return rc;
}

static int wcd_cpe_send_param_gain(struct wcd_cpe_core *core,
		struct cpe_lsm_session *session,
		void *data, struct cpe_lsm_ids *ids)
{
	struct snd_lsm_gain *gain_d;
	struct cpe_lsm_param_gain gain_cmd;
	struct cmi_hdr *msg_hdr = &gain_cmd.hdr;
	struct cpe_param_data *param_d = &gain_cmd.param;
	int rc;

	memset(&gain_cmd, 0, sizeof(gain_cmd));
	gain_d = (struct snd_lsm_gain *) data;
	if (fill_lsm_cmd_header_v0_inband(msg_hdr,
				session->id,
				CPE_CMD_GAIN_PLD_SIZE,
				CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
		rc = -EINVAL;
		goto err_ret;
	}

	wcd_cpe_set_param_data(param_d, ids,
			       CPE_GAIN_PARAM_SIZE,
			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);

	gain_cmd.minor_version = 1;
	gain_cmd.gain = gain_d->gain;
	gain_cmd.reserved = 0;

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
	rc = wcd_cpe_cmi_send_lsm_msg(core, session, &gain_cmd);
	if (unlikely(rc))
		dev_err(core->dev,
			"%s: set_param(lsm_gain) failed, rc %d\n",
			__func__, rc);
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
err_ret:
	return rc;
}

static int wcd_cpe_send_param_connectport(struct wcd_cpe_core *core,
		struct cpe_lsm_session *session,
		void *data, struct cpe_lsm_ids *ids, u16 port_id)
{
	struct cpe_lsm_param_connectport con_port_cmd;
	struct cmi_hdr *msg_hdr = &con_port_cmd.hdr;
	struct cpe_param_data *param_d = &con_port_cmd.param;
	int rc;

	memset(&con_port_cmd, 0, sizeof(con_port_cmd));
	if (fill_lsm_cmd_header_v0_inband(msg_hdr,
				session->id,
				CPE_CMD_CONNECTPORT_PLD_SIZE,
				CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
		rc = -EINVAL;
		goto err_ret;
	}

	wcd_cpe_set_param_data(param_d, ids,
			       CPE_CONNECTPORT_PARAM_SIZE,
			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);

	con_port_cmd.minor_version = 1;
	con_port_cmd.afe_port_id = port_id;
	con_port_cmd.reserved = 0;

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
	rc = wcd_cpe_cmi_send_lsm_msg(core, session, &con_port_cmd);
	if (unlikely(rc))
		dev_err(core->dev,
			"%s: set_param(connect_port) failed, rc %d\n",
			__func__, rc);
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
err_ret:
	return rc;
}

static int wcd_cpe_send_param_conf_levels(
		struct wcd_cpe_core *core,
		struct cpe_lsm_session *session,
		struct cpe_lsm_ids *ids)
{
	struct cpe_lsm_conf_level conf_level_data;
	struct cmi_hdr *hdr = &(conf_level_data.hdr);
	struct cpe_param_data *param_d = &(conf_level_data.param);
	u8 pld_size = 0;
	u8 pad_bytes = 0;
	void *message;
	int ret = 0;

	memset(&conf_level_data, 0, sizeof(conf_level_data));

	pld_size = (sizeof(struct cpe_lsm_conf_level) - sizeof(struct cmi_hdr));
	pld_size += session->num_confidence_levels;
	pad_bytes = ((4 - (pld_size % 4)) % 4);
	pld_size += pad_bytes;

	fill_cmi_header(hdr, session->id, CMI_CPE_LSM_SERVICE_ID,
			false, pld_size,
			CPE_LSM_SESSION_CMD_SET_PARAMS_V2, false);

	wcd_cpe_set_param_data(param_d, ids,
			       pld_size - sizeof(struct cpe_param_data),
			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);

	conf_level_data.num_active_models = session->num_confidence_levels;

	message = kzalloc(sizeof(struct cpe_lsm_conf_level) +
			  conf_level_data.num_active_models + pad_bytes,
			  GFP_KERNEL);
	if (!message) {
		pr_err("%s: no memory for conf_level\n", __func__);
		return -ENOMEM;
	}

	memcpy(message, &conf_level_data,
	       sizeof(struct cpe_lsm_conf_level));
	memcpy(((u8 *) message) + sizeof(struct cpe_lsm_conf_level),
	       session->conf_levels, conf_level_data.num_active_models);

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
	ret = wcd_cpe_cmi_send_lsm_msg(core, session, message);
	if (ret)
		pr_err("%s: lsm_set_conf_levels failed, err = %d\n",
			__func__, ret);
	kfree(message);
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
	return ret;
}
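
/*
 * Padding example for the confidence-level payload above: if the
 * un-padded payload size works out to 14 bytes, pad_bytes =
 * (4 - (14 % 4)) % 4 = 2, so 16 bytes are sent and the CMI payload
 * stays 32-bit aligned; an already aligned size gets pad_bytes = 0.
 */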

static int wcd_cpe_send_param_snd_model(struct wcd_cpe_core *core,
		struct cpe_lsm_session *session, struct cpe_lsm_ids *ids)
{
	int ret = 0;
	struct cmi_obm_msg obm_msg;
	struct cpe_param_data *param_d;

	ret = fill_cmi_header(&obm_msg.hdr, session->id,
			      CMI_CPE_LSM_SERVICE_ID, 0, 20,
			      CPE_LSM_SESSION_CMD_SET_PARAMS_V2, true);
	if (ret) {
		dev_err(core->dev,
			"%s: Invalid parameters, rc = %d\n",
			__func__, ret);
		goto err_ret;
	}

	obm_msg.pld.version = 0;
	obm_msg.pld.size = session->snd_model_size;
	obm_msg.pld.data_ptr.kvaddr = session->snd_model_data;
	obm_msg.pld.mem_handle = session->lsm_mem_handle;

	param_d = (struct cpe_param_data *) session->snd_model_data;
	wcd_cpe_set_param_data(param_d, ids,
			(session->snd_model_size - sizeof(*param_d)),
			CPE_LSM_SESSION_CMD_SET_PARAMS_V2);

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &obm_msg);
	if (ret)
		dev_err(core->dev,
			"%s: snd_model_register failed, %d\n",
			__func__, ret);
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");

err_ret:
	return ret;
}

static int wcd_cpe_send_param_dereg_model(
	struct wcd_cpe_core *core,
	struct cpe_lsm_session *session,
	struct cpe_lsm_ids *ids)
{
	struct cmi_hdr *hdr;
	struct cpe_param_data *param_d;
	u8 *message;
	u32 pld_size;
	int rc = 0;

	pld_size = sizeof(*hdr) + sizeof(*param_d);

	message = kzalloc(pld_size, GFP_KERNEL);
	if (!message)
		return -ENOMEM;

	hdr = (struct cmi_hdr *) message;
	param_d = (struct cpe_param_data *)
			(((u8 *) message) + sizeof(*hdr));

	if (fill_lsm_cmd_header_v0_inband(hdr,
				session->id,
				sizeof(*param_d),
				CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
		rc = -EINVAL;
		goto err_ret;
	}
	wcd_cpe_set_param_data(param_d, ids, 0,
			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);
	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
	rc = wcd_cpe_cmi_send_lsm_msg(core, session, message);
	if (rc)
		dev_err(core->dev,
			"%s: snd_model_deregister failed, %d\n",
			__func__, rc);
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
err_ret:
	kfree(message);
	return rc;
}

static int wcd_cpe_send_custom_param(
	struct wcd_cpe_core *core,
	struct cpe_lsm_session *session,
	void *data, u32 msg_size)
{
	u8 *msg;
	struct cmi_hdr *hdr;
	u8 *msg_pld;
	int rc;

	if (msg_size > CMI_INBAND_MESSAGE_SIZE) {
		dev_err(core->dev,
			"%s: out of band custom params not supported\n",
			__func__);
		return -EINVAL;
	}

	msg = kzalloc(sizeof(*hdr) + msg_size, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = (struct cmi_hdr *) msg;
	msg_pld = msg + sizeof(struct cmi_hdr);

	if (fill_lsm_cmd_header_v0_inband(hdr,
				session->id,
				msg_size,
				CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
		rc = -EINVAL;
		goto err_ret;
	}

	memcpy(msg_pld, data, msg_size);
	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
	rc = wcd_cpe_cmi_send_lsm_msg(core, session, msg);
	if (rc)
		dev_err(core->dev,
			"%s: custom params send failed, err = %d\n",
			__func__, rc);
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
err_ret:
	kfree(msg);
	return rc;
}
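
/*
 * Custom params are only sent in-band: the message is a single
 * allocation laid out as [struct cmi_hdr][payload of msg_size bytes],
 * which is why anything larger than CMI_INBAND_MESSAGE_SIZE is
 * rejected instead of being routed out-of-band.
 */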

static int wcd_cpe_set_one_param(void *core_handle,
		struct cpe_lsm_session *session, struct lsm_params_info *p_info,
		void *data, uint32_t param_type)
{
	struct wcd_cpe_core *core = core_handle;
	int rc = 0;
	struct cpe_lsm_ids ids;

	memset(&ids, 0, sizeof(ids));
	ids.module_id = p_info->module_id;
	ids.param_id = p_info->param_id;

	switch (param_type) {
	case LSM_ENDPOINT_DETECT_THRESHOLD:
		rc = wcd_cpe_send_param_epd_thres(core, session,
						  data, &ids);
		break;
	case LSM_OPERATION_MODE:
		rc = wcd_cpe_send_param_opmode(core, session, data, &ids);
		break;
	case LSM_GAIN:
		rc = wcd_cpe_send_param_gain(core, session, data, &ids);
		break;
	case LSM_MIN_CONFIDENCE_LEVELS:
		rc = wcd_cpe_send_param_conf_levels(core, session, &ids);
		break;
	case LSM_REG_SND_MODEL:
		rc = wcd_cpe_send_param_snd_model(core, session, &ids);
		break;
	case LSM_DEREG_SND_MODEL:
		rc = wcd_cpe_send_param_dereg_model(core, session, &ids);
		break;
	case LSM_CUSTOM_PARAMS:
		rc = wcd_cpe_send_custom_param(core, session,
					       data, p_info->param_size);
		break;
	default:
		pr_err("%s: wrong param_type 0x%x\n",
			__func__, param_type);
	}

	if (rc)
		dev_err(core->dev,
			"%s: send_param(%d) failed, err %d\n",
			__func__, param_type, rc);
	return rc;
}

/*
 * wcd_cpe_lsm_set_params: set the parameters for lsm service
 * @core: handle to cpe core
 * @session: session for which the parameters are to be set
 * @detect_mode: mode for detection
 * @detect_failure: flag indicating failure detection enabled/disabled
 */
static int wcd_cpe_lsm_set_params(struct wcd_cpe_core *core,
		struct cpe_lsm_session *session,
		enum lsm_detection_mode detect_mode, bool detect_failure)
{
	struct cpe_lsm_ids ids;
	struct snd_lsm_detect_mode det_mode;
	int ret = 0;

	/* Send lsm calibration */
	ret = wcd_cpe_send_lsm_cal(core, session);
	if (ret) {
		pr_err("%s: failed to send acdb cal, err = %d\n",
			__func__, ret);
		goto err_ret;
	}

	/* Send operation mode */
	ids.module_id = CPE_LSM_MODULE_ID_VOICE_WAKEUP;
	ids.param_id = CPE_LSM_PARAM_ID_OPERATION_MODE;
	det_mode.mode = detect_mode;
	det_mode.detect_failure = detect_failure;
	ret = wcd_cpe_send_param_opmode(core, session,
					&det_mode, &ids);
	if (ret)
		dev_err(core->dev,
			"%s: Failed to set opmode, err=%d\n",
			__func__, ret);

err_ret:
	return ret;
}

static int wcd_cpe_lsm_set_data(void *core_handle,
		struct cpe_lsm_session *session,
		enum lsm_detection_mode detect_mode,
		bool detect_failure)
{
	struct wcd_cpe_core *core = core_handle;
	struct cpe_lsm_ids ids;
	int ret = 0;

	if (session->num_confidence_levels > 0) {
		ret = wcd_cpe_lsm_set_params(core, session, detect_mode,
					     detect_failure);
		if (ret) {
			dev_err(core->dev,
				"%s: lsm set params failed, rc = %d\n",
				__func__, ret);
			goto err_ret;
		}

		ids.module_id = CPE_LSM_MODULE_ID_VOICE_WAKEUP;
		ids.param_id = CPE_LSM_PARAM_ID_MIN_CONFIDENCE_LEVELS;
		ret = wcd_cpe_send_param_conf_levels(core, session, &ids);
		if (ret) {
			dev_err(core->dev,
				"%s: lsm confidence levels failed, rc = %d\n",
				__func__, ret);
			goto err_ret;
		}
	} else {
		dev_dbg(core->dev,
			"%s: no conf levels to set\n",
			__func__);
	}

err_ret:
	return ret;
}

/*
 * wcd_cpe_lsm_reg_snd_model: register the sound model for listen
 * @session: session for which to register the sound model
 * @detect_mode: detection mode, user dependent/independent
 * @detect_failure: flag to indicate if failure detection is enabled
 *
 * The memory required for the sound model should be pre-allocated on
 * CPE before this function is invoked.
 */
static int wcd_cpe_lsm_reg_snd_model(void *core_handle,
		struct cpe_lsm_session *session,
		enum lsm_detection_mode detect_mode,
		bool detect_failure)
{
	int ret = 0;
	struct cmi_obm_msg obm_msg;
	struct wcd_cpe_core *core = core_handle;

	ret = wcd_cpe_is_valid_lsm_session(core, session,
					   __func__);
	if (ret)
		return ret;

	ret = wcd_cpe_lsm_set_data(core_handle, session,
				   detect_mode, detect_failure);
	if (ret) {
		dev_err(core->dev,
			"%s: fail to set lsm data, err = %d\n",
			__func__, ret);
		return ret;
	}

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");

	ret = fill_cmi_header(&obm_msg.hdr, session->id,
			      CMI_CPE_LSM_SERVICE_ID, 0, 20,
			      CPE_LSM_SESSION_CMD_REGISTER_SOUND_MODEL, true);
	if (ret) {
		dev_err(core->dev,
			"%s: Invalid parameters, rc = %d\n",
			__func__, ret);
		goto err_ret;
	}

	obm_msg.pld.version = 0;
	obm_msg.pld.size = session->snd_model_size;
	obm_msg.pld.data_ptr.kvaddr = session->snd_model_data;
	obm_msg.pld.mem_handle = session->lsm_mem_handle;

	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &obm_msg);
	if (ret)
		dev_err(core->dev,
			"%s: snd_model_register failed, %d\n",
			__func__, ret);
err_ret:
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
	return ret;
}

/*
 * wcd_cpe_lsm_dereg_snd_model: deregister the sound model for listen
 * @core_handle: handle to cpe core
 * @session: session for which to deregister the sound model
 */
static int wcd_cpe_lsm_dereg_snd_model(void *core_handle,
		struct cpe_lsm_session *session)
{
	struct cmi_hdr cmd_dereg_snd_model;
	struct wcd_cpe_core *core = core_handle;
	int ret = 0;

	ret = wcd_cpe_is_valid_lsm_session(core, session,
					   __func__);
	if (ret)
		return ret;

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");

	memset(&cmd_dereg_snd_model, 0, sizeof(cmd_dereg_snd_model));
	if (fill_lsm_cmd_header_v0_inband(&cmd_dereg_snd_model, session->id,
			0, CPE_LSM_SESSION_CMD_DEREGISTER_SOUND_MODEL)) {
		ret = -EINVAL;
		goto end_ret;
	}

	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_dereg_snd_model);
	if (ret)
		dev_err(core->dev,
			"%s: failed to send dereg_snd_model cmd\n",
			__func__);
end_ret:
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
	return ret;
}

/*
 * wcd_cpe_lsm_get_afe_out_port_id: get afe output port id
 * @core_handle: handle to the CPE core
 * @session: session for which the afe output port id is retrieved
 */
static int wcd_cpe_lsm_get_afe_out_port_id(void *core_handle,
		struct cpe_lsm_session *session)
{
	struct wcd_cpe_core *core = core_handle;
	struct snd_soc_component *component;
	int rc = 0;

	if (!core || !core->component) {
		pr_err("%s: Invalid handle to %s\n",
			__func__,
			(!core) ? "core" : "codec");
		rc = -EINVAL;
		goto done;
	}

	if (!session) {
		dev_err(core->dev, "%s: Invalid session\n",
			__func__);
		rc = -EINVAL;
		goto done;
	}

	if (!core->cpe_cdc_cb ||
	    !core->cpe_cdc_cb->get_afe_out_port_id) {
		session->afe_out_port_id = WCD_CPE_AFE_OUT_PORT_2;
		dev_dbg(core->dev,
			"%s: callback not defined, default port_id = %d\n",
			__func__, session->afe_out_port_id);
		goto done;
	}

	component = core->component;
	rc = core->cpe_cdc_cb->get_afe_out_port_id(component,
						   &session->afe_out_port_id);
	if (rc) {
		dev_err(core->dev,
			"%s: failed to get port id, err = %d\n",
			__func__, rc);
		goto done;
	}
	dev_dbg(core->dev, "%s: port_id: %d\n", __func__,
		session->afe_out_port_id);

done:
	return rc;
}

/*
 * wcd_cpe_cmd_lsm_start: send the start command to lsm
 * @core_handle: handle to the CPE core
 * @session: session for which start command to be sent
 */
static int wcd_cpe_cmd_lsm_start(void *core_handle,
		struct cpe_lsm_session *session)
{
	struct cmi_hdr cmd_lsm_start;
	struct wcd_cpe_core *core = core_handle;
	int ret = 0;

	ret = wcd_cpe_is_valid_lsm_session(core, session,
					   __func__);
	if (ret)
		return ret;

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");

	memset(&cmd_lsm_start, 0, sizeof(struct cmi_hdr));
	if (fill_lsm_cmd_header_v0_inband(&cmd_lsm_start, session->id, 0,
					  CPE_LSM_SESSION_CMD_START)) {
		ret = -EINVAL;
		goto end_ret;
	}

	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_lsm_start);
	if (ret)
		dev_err(core->dev, "%s: failed to send lsm_start cmd\n",
			__func__);
end_ret:
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
	return ret;
}

/*
 * wcd_cpe_cmd_lsm_stop: send the stop command for LSM service
 * @core_handle: handle to the cpe core
 * @session: session for which stop command to be sent
 */
static int wcd_cpe_cmd_lsm_stop(void *core_handle,
		struct cpe_lsm_session *session)
{
	struct cmi_hdr cmd_lsm_stop;
	struct wcd_cpe_core *core = core_handle;
	int ret = 0;

	ret = wcd_cpe_is_valid_lsm_session(core, session,
					   __func__);
	if (ret)
		return ret;

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");

	memset(&cmd_lsm_stop, 0, sizeof(struct cmi_hdr));
	if (fill_lsm_cmd_header_v0_inband(&cmd_lsm_stop, session->id, 0,
					  CPE_LSM_SESSION_CMD_STOP)) {
		ret = -EINVAL;
		goto end_ret;
	}

	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_lsm_stop);
	if (ret)
		dev_err(core->dev,
			"%s: failed to send lsm_stop cmd\n",
			__func__);
end_ret:
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
	return ret;
}

/*
 * wcd_cpe_alloc_lsm_session: allocate a lsm session
 * @core_handle: handle to wcd_cpe_core
 * @client_data: private data of the client, passed back in event_cb
 * @event_cb: callback invoked on LSM detection events
 */
static struct cpe_lsm_session *wcd_cpe_alloc_lsm_session(
	void *core_handle, void *client_data,
	void (*event_cb)(void *, u8, u8, u8 *))
{
	struct cpe_lsm_session *session;
	int i, session_id = -1;
	struct wcd_cpe_core *core = core_handle;
	bool afe_register_service = false;
	int ret = 0;

	/*
	 * Even if multiple listen sessions can be
	 * allocated, the AFE service registration
	 * should be done only once as CPE can only
	 * have one instance of AFE service.
	 *
	 * If this is the first session to be allocated,
	 * only then register the afe service.
	 */
	WCD_CPE_GRAB_LOCK(&core->session_lock, "session_lock");
	if (!wcd_cpe_lsm_session_active())
		afe_register_service = true;

	for (i = 1; i <= WCD_CPE_LSM_MAX_SESSIONS; i++) {
		if (!lsm_sessions[i]) {
			session_id = i;
			break;
		}
	}

	if (session_id < 0) {
		dev_err(core->dev,
			"%s: max allowed sessions already allocated\n",
			__func__);
		WCD_CPE_REL_LOCK(&core->session_lock, "session_lock");
		return NULL;
	}

	ret = wcd_cpe_vote(core, true);
	if (ret) {
		dev_err(core->dev,
			"%s: Failed to enable cpe, err = %d\n",
			__func__, ret);
		WCD_CPE_REL_LOCK(&core->session_lock, "session_lock");
		return NULL;
	}

	session = kzalloc(sizeof(struct cpe_lsm_session), GFP_KERNEL);
	if (!session)
		goto err_session_alloc;

	session->id = session_id;
	session->event_cb = event_cb;
	session->cmi_reg_handle = cmi_register(wcd_cpe_cmi_lsm_callback,
					       CMI_CPE_LSM_SERVICE_ID);
	if (!session->cmi_reg_handle) {
		dev_err(core->dev,
			"%s: Failed to register LSM service with CMI\n",
			__func__);
		goto err_ret;
	}
	session->priv_d = client_data;
	mutex_init(&session->lsm_lock);
	if (afe_register_service) {
		/* Register for AFE Service */
		core->cmi_afe_handle = cmi_register(wcd_cpe_cmi_afe_cb,
						    CMI_CPE_AFE_SERVICE_ID);
		wcd_cpe_initialize_afe_port_data();
		if (!core->cmi_afe_handle) {
			dev_err(core->dev,
				"%s: Failed to register AFE service with CMI\n",
				__func__);
			goto err_afe_svc_reg;
		}

		/* Once AFE service is registered, send the mode command */
		ret = wcd_cpe_afe_svc_cmd_mode(core,
					       AFE_SVC_EXPLICIT_PORT_START);
		if (ret)
			goto err_afe_mode_cmd;
	}

	session->lsm_mem_handle = 0;
	init_completion(&session->cmd_comp);

	lsm_sessions[session_id] = session;

	WCD_CPE_REL_LOCK(&core->session_lock, "session_lock");
	return session;

err_afe_mode_cmd:
	cmi_deregister(core->cmi_afe_handle);

err_afe_svc_reg:
	cmi_deregister(session->cmi_reg_handle);
	mutex_destroy(&session->lsm_lock);

err_ret:
	kfree(session);

err_session_alloc:
	wcd_cpe_vote(core, false);
	WCD_CPE_REL_LOCK(&core->session_lock, "session_lock");
	return NULL;
}
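
/*
 * Lifecycle note: the first wcd_cpe_alloc_lsm_session() call also
 * registers the (single) AFE service instance and sends the AFE mode
 * command; wcd_cpe_dealloc_lsm_session() below tears the AFE service
 * down again once the last session is freed. Both paths run under
 * core->session_lock.
 */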

/*
 * wcd_cpe_lsm_config_lab_latency: send lab latency value
 * @core: handle to wcd_cpe_core
 * @session: lsm session
 * @latency: the value of latency for lab setup in msec
 */
static int wcd_cpe_lsm_config_lab_latency(
		struct wcd_cpe_core *core,
		struct cpe_lsm_session *session,
		u32 latency)
{
	int ret = 0, pld_size = CPE_PARAM_LSM_LAB_LATENCY_SIZE;
	struct cpe_lsm_lab_latency_config cpe_lab_latency;
	struct cpe_lsm_lab_config *lab_lat = &cpe_lab_latency.latency_cfg;
	struct cpe_param_data *param_d = &lab_lat->param;
	struct cpe_lsm_ids ids;

	if (fill_lsm_cmd_header_v0_inband(&cpe_lab_latency.hdr, session->id,
		(u8) pld_size, CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
		pr_err("%s: Failed to create header\n", __func__);
		return -EINVAL;
	}
	if (latency == 0x00 || latency > WCD_CPE_LAB_MAX_LATENCY) {
		pr_err("%s: Invalid latency %u\n",
			__func__, latency);
		return -EINVAL;
	}

	lab_lat->latency = latency;
	lab_lat->minor_ver = 1;
	ids.module_id = CPE_LSM_MODULE_ID_LAB;
	ids.param_id = CPE_LSM_PARAM_ID_LAB_CONFIG;
	wcd_cpe_set_param_data(param_d, &ids,
			       PARAM_SIZE_LSM_LATENCY_SIZE,
			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);

	pr_debug("%s: Module 0x%x Param 0x%x size %zu pld_size 0x%x\n",
		 __func__, lab_lat->param.module_id,
		 lab_lat->param.param_id, PARAM_SIZE_LSM_LATENCY_SIZE,
		 pld_size);

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cpe_lab_latency);
	if (ret != 0)
		pr_err("%s: lsm_set_params failed, error = %d\n",
			__func__, ret);
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
	return ret;
}

/*
 * wcd_cpe_lsm_lab_control: enable/disable lab
 * @core_handle: handle to wcd_cpe_core
 * @session: lsm session
 * @enable: indicates whether to enable / disable lab
 */
static int wcd_cpe_lsm_lab_control(
		void *core_handle,
		struct cpe_lsm_session *session,
		bool enable)
{
	struct wcd_cpe_core *core = core_handle;
	int ret = 0, pld_size = CPE_PARAM_SIZE_LSM_LAB_CONTROL;
	struct cpe_lsm_control_lab cpe_lab_enable;
	struct cpe_lsm_lab_enable *lab_enable = &cpe_lab_enable.lab_enable;
	struct cpe_param_data *param_d = &lab_enable->param;
	struct cpe_lsm_ids ids;

	pr_debug("%s: enter payload_size = %d Enable %d\n",
		 __func__, pld_size, enable);

	memset(&cpe_lab_enable, 0, sizeof(cpe_lab_enable));

	if (fill_lsm_cmd_header_v0_inband(&cpe_lab_enable.hdr, session->id,
		(u8) pld_size, CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
		return -EINVAL;
	}
	if (enable)
		lab_enable->enable = 1;
	else
		lab_enable->enable = 0;

	ids.module_id = CPE_LSM_MODULE_ID_LAB;
	ids.param_id = CPE_LSM_PARAM_ID_LAB_ENABLE;
	wcd_cpe_set_param_data(param_d, &ids,
			       PARAM_SIZE_LSM_CONTROL_SIZE,
			       CPE_LSM_SESSION_CMD_SET_PARAMS_V2);

	pr_debug("%s: Module 0x%x, Param 0x%x size %zu pld_size 0x%x\n",
		 __func__, lab_enable->param.module_id,
		 lab_enable->param.param_id, PARAM_SIZE_LSM_CONTROL_SIZE,
		 pld_size);

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cpe_lab_enable);
	if (ret != 0) {
		pr_err("%s: lsm_set_params failed, error = %d\n",
			__func__, ret);
		WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
		goto done;
	}
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");

	if (lab_enable->enable)
		ret = wcd_cpe_lsm_config_lab_latency(core, session,
						     WCD_CPE_LAB_MAX_LATENCY);
done:
	return ret;
}

/*
 * wcd_cpe_lsm_eob: stop lab
 * @core: handle to wcd_cpe_core
 * @session: lsm session for which lab buffering is stopped
 */
static int wcd_cpe_lsm_eob(
		struct wcd_cpe_core *core,
		struct cpe_lsm_session *session)
{
	int ret = 0;
	struct cmi_hdr lab_eob;

	if (fill_lsm_cmd_header_v0_inband(&lab_eob, session->id,
					  0, CPE_LSM_SESSION_CMD_EOB)) {
		return -EINVAL;
	}

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &lab_eob);
	if (ret != 0)
		pr_err("%s: lsm_set_params failed\n", __func__);
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");

	return ret;
}

/*
 * wcd_cpe_dealloc_lsm_session: deallocate lsm session
 * @core_handle: handle to wcd_cpe_core
 * @session: lsm session to be deallocated
 */
static int wcd_cpe_dealloc_lsm_session(void *core_handle,
		struct cpe_lsm_session *session)
{
	struct wcd_cpe_core *core = core_handle;
	int ret = 0;

	WCD_CPE_GRAB_LOCK(&core->session_lock, "session_lock");
	if (!session) {
		dev_err(core->dev,
			"%s: Invalid lsm session\n", __func__);
		WCD_CPE_REL_LOCK(&core->session_lock, "session_lock");
		return -EINVAL;
	}

	dev_dbg(core->dev, "%s: session %d being deallocated\n",
		__func__, session->id);
	if (session->id > WCD_CPE_LSM_MAX_SESSIONS) {
		dev_err(core->dev,
			"%s: Wrong session id %d max allowed = %d\n",
			__func__, session->id,
			WCD_CPE_LSM_MAX_SESSIONS);
		WCD_CPE_REL_LOCK(&core->session_lock, "session_lock");
		return -EINVAL;
	}

	cmi_deregister(session->cmi_reg_handle);
	mutex_destroy(&session->lsm_lock);
	lsm_sessions[session->id] = NULL;
	kfree(session);

	if (!wcd_cpe_lsm_session_active()) {
		cmi_deregister(core->cmi_afe_handle);
		core->cmi_afe_handle = NULL;
		wcd_cpe_deinitialize_afe_port_data();
	}

	ret = wcd_cpe_vote(core, false);
	if (ret)
		dev_dbg(core->dev,
			"%s: Failed to un-vote cpe, err = %d\n",
			__func__, ret);

	WCD_CPE_REL_LOCK(&core->session_lock, "session_lock");
	return ret;
}

static int wcd_cpe_lab_ch_setup(void *core_handle,
		struct cpe_lsm_session *session,
		enum wcd_cpe_event event)
{
	struct wcd_cpe_core *core = core_handle;
	struct snd_soc_component *component;
	int rc = 0;
	u8 cpe_intr_bits;

	if (!core || !core->component) {
		pr_err("%s: Invalid handle to %s\n",
			__func__,
			(!core) ? "core" : "codec");
		rc = -EINVAL;
		goto done;
	}

	if (!core->cpe_cdc_cb ||
	    !core->cpe_cdc_cb->cdc_ext_clk ||
	    !core->cpe_cdc_cb->lab_cdc_ch_ctl) {
		dev_err(core->dev,
			"%s: Invalid codec callbacks\n",
			__func__);
		rc = -EINVAL;
		goto done;
	}

	component = core->component;
	dev_dbg(core->dev,
		"%s: event = 0x%x\n",
		__func__, event);

	switch (event) {
	case WCD_CPE_PRE_ENABLE:
		rc = core->cpe_cdc_cb->cdc_ext_clk(component, true, false);
		if (rc) {
			dev_err(core->dev,
				"%s: failed to enable cdc clk, err = %d\n",
				__func__, rc);
			goto done;
		}

		rc = core->cpe_cdc_cb->lab_cdc_ch_ctl(component,
						      true);
		if (rc) {
			dev_err(core->dev,
				"%s: failed to enable cdc port, err = %d\n",
				__func__, rc);
			rc = core->cpe_cdc_cb->cdc_ext_clk(
					component, false, false);
			goto done;
		}

		break;

	case WCD_CPE_POST_ENABLE:
		rc = cpe_svc_toggle_lab(core->cpe_handle, true);
		if (rc)
			dev_err(core->dev,
				"%s: Failed to enable lab\n", __func__);
		break;

	case WCD_CPE_PRE_DISABLE:
		/*
		 * Mask the non-fatal interrupts in CPE as they will
		 * be generated during lab teardown and may flood.
		 */
		cpe_intr_bits = ~(core->irq_info.cpe_fatal_irqs & 0xFF);
		if (CPE_ERR_IRQ_CB(core))
			core->cpe_cdc_cb->cpe_err_irq_control(
					core->component,
					CPE_ERR_IRQ_MASK,
					&cpe_intr_bits);

		rc = core->cpe_cdc_cb->lab_cdc_ch_ctl(component,
						      false);
		if (rc)
			dev_err(core->dev,
				"%s: failed to disable cdc port, err = %d\n",
				__func__, rc);
		break;

	case WCD_CPE_POST_DISABLE:
		rc = wcd_cpe_lsm_eob(core, session);
		if (rc)
			dev_err(core->dev,
				"%s: eob send failed, err = %d\n",
				__func__, rc);

		/* Continue teardown even if eob failed */
		rc = cpe_svc_toggle_lab(core->cpe_handle, false);
		if (rc)
			dev_err(core->dev,
				"%s: Failed to disable lab\n", __func__);

		/* Continue with disabling even if toggle lab fails */
		rc = core->cpe_cdc_cb->cdc_ext_clk(component, false, false);
		if (rc)
			dev_err(core->dev,
				"%s: failed to disable cdc clk, err = %d\n",
				__func__, rc);

		/* Unmask non-fatal CPE interrupts */
		cpe_intr_bits = ~(core->irq_info.cpe_fatal_irqs & 0xFF);
		if (CPE_ERR_IRQ_CB(core))
			core->cpe_cdc_cb->cpe_err_irq_control(
					core->component,
					CPE_ERR_IRQ_UNMASK,
					&cpe_intr_bits);
		break;

	default:
		dev_err(core->dev,
			"%s: Invalid event 0x%x\n",
			__func__, event);
		rc = -EINVAL;
		break;
	}

done:
	return rc;
}
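
/*
 * Expected lab_ch_setup() event order for one listen-and-buffer run:
 * WCD_CPE_PRE_ENABLE (codec clk + port on) -> WCD_CPE_POST_ENABLE
 * (CPE lab on) -> WCD_CPE_PRE_DISABLE (mask non-fatal IRQs, port off)
 * -> WCD_CPE_POST_DISABLE (EOB, lab off, clk off, unmask IRQs).
 */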

static int wcd_cpe_lsm_set_fmt_cfg(void *core_handle,
		struct cpe_lsm_session *session)
{
	int ret;
	struct cpe_lsm_output_format_cfg out_fmt_cfg;
	struct wcd_cpe_core *core = core_handle;

	ret = wcd_cpe_is_valid_lsm_session(core, session, __func__);
	if (ret)
		goto done;

	WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");

	memset(&out_fmt_cfg, 0, sizeof(out_fmt_cfg));
	if (fill_lsm_cmd_header_v0_inband(&out_fmt_cfg.hdr,
			session->id, OUT_FMT_CFG_CMD_PAYLOAD_SIZE,
			CPE_LSM_SESSION_CMD_TX_BUFF_OUTPUT_CONFIG)) {
		ret = -EINVAL;
		goto err_ret;
	}

	out_fmt_cfg.format = session->out_fmt_cfg.format;
	out_fmt_cfg.packing = session->out_fmt_cfg.pack_mode;
	out_fmt_cfg.data_path_events = session->out_fmt_cfg.data_path_events;

	ret = wcd_cpe_cmi_send_lsm_msg(core, session, &out_fmt_cfg);
	if (ret)
		dev_err(core->dev,
			"%s: lsm_set_output_format_cfg failed, err = %d\n",
			__func__, ret);

err_ret:
	WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
done:
	return ret;
}

static void wcd_cpe_snd_model_offset(void *core_handle,
		struct cpe_lsm_session *session, size_t *offset)
{
	*offset = sizeof(struct cpe_param_data);
}
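
/*
 * The sound model offset above reserves room for one struct
 * cpe_param_data at the front of the client's model buffer;
 * wcd_cpe_send_param_snd_model() later writes the module/param ids
 * and size into that header in place before registering the model.
 */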
3830
3831static int wcd_cpe_lsm_set_media_fmt_params(void *core_handle,
3832 struct cpe_lsm_session *session,
3833 struct lsm_hw_params *param)
3834{
3835 struct cpe_lsm_media_fmt_param media_fmt;
3836 struct cmi_hdr *msg_hdr = &media_fmt.hdr;
3837 struct wcd_cpe_core *core = core_handle;
3838 struct cpe_param_data *param_d = &media_fmt.param;
3839 struct cpe_lsm_ids ids;
3840 int ret;
3841
3842 memset(&media_fmt, 0, sizeof(media_fmt));
3843 if (fill_lsm_cmd_header_v0_inband(msg_hdr,
3844 session->id,
3845 CPE_MEDIA_FMT_PLD_SIZE,
3846 CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
3847 ret = -EINVAL;
3848 goto done;
3849 }
3850
3851 memset(&ids, 0, sizeof(ids));
3852 ids.module_id = CPE_LSM_MODULE_FRAMEWORK;
3853 ids.param_id = CPE_LSM_PARAM_ID_MEDIA_FMT;
3854
3855 wcd_cpe_set_param_data(param_d, &ids, CPE_MEDIA_FMT_PARAM_SIZE,
3856 CPE_LSM_SESSION_CMD_SET_PARAMS_V2);
3857
3858 media_fmt.minor_version = 1;
3859 media_fmt.sample_rate = param->sample_rate;
3860 media_fmt.num_channels = param->num_chs;
3861 media_fmt.bit_width = param->bit_width;
3862
3863 WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
3864 ret = wcd_cpe_cmi_send_lsm_msg(core, session, &media_fmt);
3865 if (ret)
3866 dev_err(core->dev,
3867 "%s: Set_param(media_format) failed, err=%d\n",
3868 __func__, ret);
3869 WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
3870done:
3871 return ret;
3872}
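
/*
 * Illustrative usage (editor's sketch, not compiled into this driver):
 * a client programs the session media format through the ops table. The
 * 16 kHz/16-bit mono values are example parameters only; lsm_ops, core
 * and session stand for handles the client already holds.
 *
 *	struct lsm_hw_params params = {
 *		.sample_rate = 16000,
 *		.num_chs = 1,
 *		.bit_width = 16,
 *	};
 *
 *	if (lsm_ops->lsm_set_media_fmt_params(core, session, &params))
 *		pr_err("media format setup failed\n");
 */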

static int wcd_cpe_lsm_set_port(void *core_handle,
			struct cpe_lsm_session *session, void *data)
{
	u32 port_id;
	int ret;
	struct cpe_lsm_ids ids;
	struct wcd_cpe_core *core = core_handle;

	ret = wcd_cpe_is_valid_lsm_session(core, session, __func__);
	if (ret)
		goto done;

	if (!data) {
		dev_err(core->dev, "%s: data is NULL\n", __func__);
		ret = -EINVAL;
		goto done;
	}
	port_id = *(u32 *)data;
	dev_dbg(core->dev, "%s: port_id: %d\n", __func__, port_id);

	memset(&ids, 0, sizeof(ids));
	ids.module_id = LSM_MODULE_ID_FRAMEWORK;
	ids.param_id = LSM_PARAM_ID_CONNECT_TO_PORT;

	ret = wcd_cpe_send_param_connectport(core, session, NULL,
					     &ids, port_id);
	if (ret)
		dev_err(core->dev,
			"%s: send_param_connectport failed, err %d\n",
			__func__, ret);
done:
	return ret;
}

/*
 * wcd_cpe_get_lsm_ops: populate the LSM callback table
 * @lsm_ops: structure to be filled with the lsm callbacks
 */
int wcd_cpe_get_lsm_ops(struct wcd_cpe_lsm_ops *lsm_ops)
{
	lsm_ops->lsm_alloc_session = wcd_cpe_alloc_lsm_session;
	lsm_ops->lsm_dealloc_session = wcd_cpe_dealloc_lsm_session;
	lsm_ops->lsm_open_tx = wcd_cpe_cmd_lsm_open_tx;
	lsm_ops->lsm_close_tx = wcd_cpe_cmd_lsm_close_tx;
	lsm_ops->lsm_shmem_alloc = wcd_cpe_cmd_lsm_shmem_alloc;
	lsm_ops->lsm_shmem_dealloc = wcd_cpe_cmd_lsm_shmem_dealloc;
	lsm_ops->lsm_register_snd_model = wcd_cpe_lsm_reg_snd_model;
	lsm_ops->lsm_deregister_snd_model = wcd_cpe_lsm_dereg_snd_model;
	lsm_ops->lsm_get_afe_out_port_id = wcd_cpe_lsm_get_afe_out_port_id;
	lsm_ops->lsm_start = wcd_cpe_cmd_lsm_start;
	lsm_ops->lsm_stop = wcd_cpe_cmd_lsm_stop;
	lsm_ops->lsm_lab_control = wcd_cpe_lsm_lab_control;
	lsm_ops->lab_ch_setup = wcd_cpe_lab_ch_setup;
	lsm_ops->lsm_set_data = wcd_cpe_lsm_set_data;
	lsm_ops->lsm_set_fmt_cfg = wcd_cpe_lsm_set_fmt_cfg;
	lsm_ops->lsm_set_one_param = wcd_cpe_set_one_param;
	lsm_ops->lsm_get_snd_model_offset = wcd_cpe_snd_model_offset;
	lsm_ops->lsm_set_media_fmt_params = wcd_cpe_lsm_set_media_fmt_params;
	lsm_ops->lsm_set_port = wcd_cpe_lsm_set_port;

	return 0;
}
EXPORT_SYMBOL(wcd_cpe_get_lsm_ops);
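
/*
 * Illustrative usage (editor's sketch, not compiled into this driver):
 * a client typically keeps a static ops table and populates it once at
 * probe time; the individual callbacks then drive its LSM sessions.
 *
 *	static struct wcd_cpe_lsm_ops lsm_ops;
 *
 *	if (wcd_cpe_get_lsm_ops(&lsm_ops))
 *		return -EINVAL;
 *	// lsm_ops.lsm_alloc_session, lsm_ops.lsm_open_tx, etc. may now
 *	// be invoked
 */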

static int fill_afe_cmd_header(struct cmi_hdr *hdr, u8 port_id,
			       u16 opcode, u8 pld_size,
			       bool obm_flag)
{
	CMI_HDR_SET_SESSION(hdr, port_id);
	CMI_HDR_SET_SERVICE(hdr, CMI_CPE_AFE_SERVICE_ID);

	CMI_HDR_SET_PAYLOAD_SIZE(hdr, pld_size);

	hdr->opcode = opcode;

	if (obm_flag)
		CMI_HDR_SET_OBM(hdr, CMI_OBM_FLAG_OUT_BAND);
	else
		CMI_HDR_SET_OBM(hdr, CMI_OBM_FLAG_IN_BAND);

	return 0;
}
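
/*
 * Illustrative usage (editor's sketch): building an in-band, zero-payload
 * AFE command header, exactly as the port start/stop/suspend/resume
 * helpers below do. Port id 1 is an example value.
 *
 *	struct cmi_hdr hdr;
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	fill_afe_cmd_header(&hdr, 1, CPE_AFE_PORT_CMD_START, 0, false);
 *	// hdr is now ready to be passed to wcd_cpe_cmi_send_afe_msg()
 */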

/*
 * wcd_cpe_cmi_send_afe_msg: send message to AFE service
 * @core: wcd cpe core handle
 * @port_d: data for the afe port for which this
 *	    message is to be sent
 * @message: actual message with header and payload
 *
 * The port-specific lock must be held before this
 * function is invoked.
 */
static int wcd_cpe_cmi_send_afe_msg(
	struct wcd_cpe_core *core,
	struct wcd_cmi_afe_port_data *port_d,
	void *message)
{
	int ret = 0;
	struct cmi_hdr *hdr = message;

	pr_debug("%s: sending message with opcode 0x%x\n",
		 __func__, hdr->opcode);

	if (unlikely(!wcd_cpe_is_online_state(core))) {
		dev_err(core->dev, "%s: CPE offline\n", __func__);
		return 0;
	}

	if (CMI_HDR_GET_OBM_FLAG(hdr))
		wcd_cpe_bus_vote_max_bw(core, true);

	ret = cmi_send_msg(message);
	if (ret) {
		pr_err("%s: cmd 0x%x send failed, err = %d\n",
		       __func__, hdr->opcode, ret);
		goto rel_bus_vote;
	}

	ret = wait_for_completion_timeout(&port_d->afe_cmd_complete,
					  CMI_CMD_TIMEOUT);
	if (ret > 0) {
		pr_debug("%s: command 0x%x, received response 0x%x\n",
			 __func__, hdr->opcode, port_d->cmd_result);
		if (port_d->cmd_result == CMI_SHMEM_ALLOC_FAILED)
			port_d->cmd_result = CPE_ENOMEMORY;
		if (port_d->cmd_result > 0)
			pr_err("%s: CPE returned error[%s]\n",
			       __func__, cpe_err_get_err_str(
			       port_d->cmd_result));
		ret = cpe_err_get_lnx_err_code(port_d->cmd_result);
		goto rel_bus_vote;
	} else {
		pr_err("%s: command 0x%x send timed out\n",
		       __func__, hdr->opcode);
		ret = -ETIMEDOUT;
		goto rel_bus_vote;
	}

rel_bus_vote:
	reinit_completion(&port_d->afe_cmd_complete);

	if (CMI_HDR_GET_OBM_FLAG(hdr))
		wcd_cpe_bus_vote_max_bw(core, false);

	return ret;
}
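
/*
 * Illustrative usage (editor's sketch): per-port traffic is serialized
 * with the port lock before a command is issued, matching the pattern of
 * every AFE helper in this file; msg stands for any filled CMI message.
 *
 *	WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");
 *	ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &msg);
 *	WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
 */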

/*
 * wcd_cpe_afe_shmem_alloc: allocate the cpe memory for afe service
 * @core: handle to cpe core
 * @port_d: data for the port which needs memory
 *	    to be allocated on CPE
 * @size: size of the memory to be allocated
 */
static int wcd_cpe_afe_shmem_alloc(
	struct wcd_cpe_core *core,
	struct wcd_cmi_afe_port_data *port_d,
	u32 size)
{
	struct cpe_cmd_shmem_alloc cmd_shmem_alloc;
	int ret = 0;

	pr_debug("%s: enter: size = %d\n", __func__, size);

	memset(&cmd_shmem_alloc, 0, sizeof(cmd_shmem_alloc));
	if (fill_afe_cmd_header(&cmd_shmem_alloc.hdr, port_d->port_id,
				CPE_AFE_PORT_CMD_SHARED_MEM_ALLOC,
				SHMEM_ALLOC_CMD_PLD_SIZE, false)) {
		ret = -EINVAL;
		goto end_ret;
	}

	cmd_shmem_alloc.size = size;

	ret = wcd_cpe_cmi_send_afe_msg(core, port_d, &cmd_shmem_alloc);
	if (ret) {
		pr_err("%s: afe_shmem_alloc fail, ret = %d\n",
		       __func__, ret);
		goto end_ret;
	}

	pr_debug("%s: completed %s, mem_handle = 0x%x\n",
		 __func__, "CPE_AFE_CMD_SHARED_MEM_ALLOC",
		 port_d->mem_handle);

end_ret:
	return ret;
}
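
/*
 * Illustrative usage (editor's sketch): CPE shared memory is allocated
 * per port and must be released with the matching dealloc once the
 * out-of-band payload has been consumed, as wcd_cpe_send_afe_cal()
 * below does; payload_size stands for the caller's buffer size.
 *
 *	if (!wcd_cpe_afe_shmem_alloc(core, port_d, payload_size)) {
 *		// ... send an OBM message referencing port_d->mem_handle ...
 *		wcd_cpe_afe_shmem_dealloc(core, port_d);
 *	}
 */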

/*
 * wcd_cpe_afe_shmem_dealloc: deallocate the cpe memory for
 *			      afe service
 * @core: handle to cpe core
 * @port_d: data for the port which needs memory
 *	    to be deallocated on CPE
 * The memory handle to be de-allocated is saved in the
 * port data
 */
static int wcd_cpe_afe_shmem_dealloc(
	struct wcd_cpe_core *core,
	struct wcd_cmi_afe_port_data *port_d)
{
	struct cpe_cmd_shmem_dealloc cmd_dealloc;
	int ret = 0;

	pr_debug("%s: enter, port_id = %d\n",
		 __func__, port_d->port_id);

	memset(&cmd_dealloc, 0, sizeof(cmd_dealloc));
	if (fill_afe_cmd_header(&cmd_dealloc.hdr, port_d->port_id,
				CPE_AFE_PORT_CMD_SHARED_MEM_DEALLOC,
				SHMEM_DEALLOC_CMD_PLD_SIZE, false)) {
		ret = -EINVAL;
		goto end_ret;
	}

	cmd_dealloc.addr = port_d->mem_handle;
	ret = wcd_cpe_cmi_send_afe_msg(core, port_d, &cmd_dealloc);
	if (ret) {
		pr_err("%s: failed to send shmem_dealloc cmd\n", __func__);
		goto end_ret;
	}
	port_d->mem_handle = 0;

end_ret:
	return ret;
}

/*
 * wcd_cpe_send_afe_cal: send the acdb calibration to AFE port
 * @core_handle: handle to cpe core
 * @port_d: data for the port for which the
 *	    calibration needs to be applied
 */
static int wcd_cpe_send_afe_cal(void *core_handle,
				struct wcd_cmi_afe_port_data *port_d)
{
	struct cal_block_data *afe_cal = NULL;
	struct wcd_cpe_core *core = core_handle;
	struct cmi_obm_msg obm_msg;
	void *inb_msg = NULL;
	void *msg;
	int rc = 0;
	bool is_obm_msg;

	if (core->cal_data[WCD_CPE_LSM_CAL_AFE] == NULL) {
		pr_err("%s: LSM cal not allocated!\n",
		       __func__);
		/* cal data is absent; the mutex was never taken */
		return -EINVAL;
	}

	mutex_lock(&core->cal_data[WCD_CPE_LSM_CAL_AFE]->lock);
	afe_cal = cal_utils_get_only_cal_block(
			core->cal_data[WCD_CPE_LSM_CAL_AFE]);
	if (!afe_cal) {
		pr_err("%s: failed to get afe cal block\n",
		       __func__);
		rc = -EINVAL;
		goto rel_cal_mutex;
	}

	if (afe_cal->cal_data.size == 0) {
		dev_dbg(core->dev, "%s: No AFE cal to send\n",
			__func__);
		rc = 0;
		goto rel_cal_mutex;
	}

	is_obm_msg = afe_cal->cal_data.size > CMI_INBAND_MESSAGE_SIZE;

	if (is_obm_msg) {
		struct cmi_hdr *hdr = &(obm_msg.hdr);
		struct cmi_obm *pld = &(obm_msg.pld);

		rc = wcd_cpe_afe_shmem_alloc(core, port_d,
					     afe_cal->cal_data.size);
		if (rc) {
			dev_err(core->dev,
				"%s: AFE shmem alloc fail %d\n",
				__func__, rc);
			goto rel_cal_mutex;
		}

		rc = fill_afe_cmd_header(hdr, port_d->port_id,
					 CPE_AFE_CMD_SET_PARAM,
					 CPE_AFE_PARAM_PAYLOAD_SIZE,
					 true);
		if (rc) {
			dev_err(core->dev,
				"%s: invalid params for header, err = %d\n",
				__func__, rc);
			wcd_cpe_afe_shmem_dealloc(core, port_d);
			goto rel_cal_mutex;
		}

		pld->version = 0;
		pld->size = afe_cal->cal_data.size;
		pld->data_ptr.kvaddr = afe_cal->cal_data.kvaddr;
		pld->mem_handle = port_d->mem_handle;
		msg = &obm_msg;

	} else {
		u8 *msg_pld;
		struct cmi_hdr *hdr;

		inb_msg = kzalloc(sizeof(struct cmi_hdr) +
				  afe_cal->cal_data.size,
				  GFP_KERNEL);
		if (!inb_msg) {
			dev_err(core->dev,
				"%s: no memory for afe cal inband\n",
				__func__);
			rc = -ENOMEM;
			goto rel_cal_mutex;
		}

		hdr = (struct cmi_hdr *) inb_msg;

		rc = fill_afe_cmd_header(hdr, port_d->port_id,
					 CPE_AFE_CMD_SET_PARAM,
					 CPE_AFE_PARAM_PAYLOAD_SIZE,
					 false);
		if (rc) {
			dev_err(core->dev,
				"%s: invalid params for header, err = %d\n",
				__func__, rc);
			kfree(inb_msg);
			inb_msg = NULL;
			goto rel_cal_mutex;
		}

		msg_pld = ((u8 *) inb_msg) + sizeof(struct cmi_hdr);
		memcpy(msg_pld, afe_cal->cal_data.kvaddr,
		       afe_cal->cal_data.size);

		msg = inb_msg;
	}

	rc = wcd_cpe_cmi_send_afe_msg(core, port_d, msg);
	if (rc)
		pr_err("%s: afe cal for listen failed, rc = %d\n",
		       __func__, rc);

	if (is_obm_msg) {
		wcd_cpe_afe_shmem_dealloc(core, port_d);
		port_d->mem_handle = 0;
	} else {
		kfree(inb_msg);
		inb_msg = NULL;
	}

rel_cal_mutex:
	mutex_unlock(&core->cal_data[WCD_CPE_LSM_CAL_AFE]->lock);
	return rc;
}

/*
 * wcd_cpe_is_valid_port: check validity of afe port id
 * @core: handle to core to check for validity
 * @afe_cfg: client provided afe configuration
 * @func: function name invoking this validity check,
 *	  used for logging purpose only.
 */
static int wcd_cpe_is_valid_port(struct wcd_cpe_core *core,
		struct wcd_cpe_afe_port_cfg *afe_cfg,
		const char *func)
{
	if (unlikely(IS_ERR_OR_NULL(core))) {
		pr_err("%s: Invalid core handle\n", func);
		return -EINVAL;
	}

	if (afe_cfg->port_id > WCD_CPE_AFE_MAX_PORTS) {
		dev_err(core->dev,
			"%s: invalid afe port (%u)\n",
			func, afe_cfg->port_id);
		return -EINVAL;
	}

	dev_dbg(core->dev,
		"%s: port_id = %u\n",
		func, afe_cfg->port_id);

	return 0;
}

static int wcd_cpe_afe_svc_cmd_mode(void *core_handle,
				    u8 mode)
{
	struct cpe_afe_svc_cmd_mode afe_mode;
	struct wcd_cpe_core *core = core_handle;
	struct wcd_cmi_afe_port_data *afe_port_d;
	int ret;

	afe_port_d = &afe_ports[0];
	/*
	 * AFE SVC mode command is for the service and not port
	 * specific, hence use AFE port as 0 so the command will
	 * be applied to all AFE ports on CPE.
	 */
	afe_port_d->port_id = 0;

	WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");
	memset(&afe_mode, 0, sizeof(afe_mode));
	if (fill_afe_cmd_header(&afe_mode.hdr, afe_port_d->port_id,
				CPE_AFE_SVC_CMD_LAB_MODE,
				CPE_AFE_CMD_MODE_PAYLOAD_SIZE,
				false)) {
		ret = -EINVAL;
		goto err_ret;
	}

	afe_mode.mode = mode;

	ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &afe_mode);
	if (ret)
		dev_err(core->dev,
			"%s: afe_svc_mode cmd failed, err = %d\n",
			__func__, ret);

err_ret:
	WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
	return ret;
}

static int wcd_cpe_afe_cmd_port_cfg(void *core_handle,
		struct wcd_cpe_afe_port_cfg *afe_cfg)
{
	struct cpe_afe_cmd_port_cfg port_cfg_cmd;
	struct wcd_cpe_core *core = core_handle;
	struct wcd_cmi_afe_port_data *afe_port_d;
	int ret;

	ret = wcd_cpe_is_valid_port(core, afe_cfg, __func__);
	if (ret)
		goto done;

	afe_port_d = &afe_ports[afe_cfg->port_id];
	afe_port_d->port_id = afe_cfg->port_id;

	WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");
	memset(&port_cfg_cmd, 0, sizeof(port_cfg_cmd));
	if (fill_afe_cmd_header(&port_cfg_cmd.hdr,
				afe_cfg->port_id,
				CPE_AFE_PORT_CMD_GENERIC_CONFIG,
				CPE_AFE_CMD_PORT_CFG_PAYLOAD_SIZE,
				false)) {
		ret = -EINVAL;
		goto err_ret;
	}

	port_cfg_cmd.bit_width = afe_cfg->bit_width;
	port_cfg_cmd.num_channels = afe_cfg->num_channels;
	port_cfg_cmd.sample_rate = afe_cfg->sample_rate;

	if (afe_port_d->port_id == CPE_AFE_PORT_3_TX)
		port_cfg_cmd.buffer_size = WCD_CPE_EC_PP_BUF_SIZE;
	else
		port_cfg_cmd.buffer_size = AFE_OUT_BUF_SIZE(afe_cfg->bit_width,
							afe_cfg->sample_rate);

	ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &port_cfg_cmd);
	if (ret)
		dev_err(core->dev,
			"%s: afe_port_config failed, err = %d\n",
			__func__, ret);

err_ret:
	WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
done:
	return ret;
}

/*
 * wcd_cpe_afe_set_params: set the parameters for afe port
 * @core_handle: handle to the cpe core
 * @afe_cfg: configuration data for the port for which the
 *	     parameters are to be set
 * @afe_mad_ctl: whether hardware MAD is to be enabled on this port
 */
static int wcd_cpe_afe_set_params(void *core_handle,
		struct wcd_cpe_afe_port_cfg *afe_cfg, bool afe_mad_ctl)
{
	struct cpe_afe_params afe_params;
	struct cpe_afe_hw_mad_ctrl *hw_mad_ctrl = &afe_params.hw_mad_ctrl;
	struct cpe_afe_port_cfg *port_cfg = &afe_params.port_cfg;
	struct wcd_cpe_core *core = core_handle;
	struct wcd_cmi_afe_port_data *afe_port_d;
	int ret = 0, pld_size = 0;

	ret = wcd_cpe_is_valid_port(core, afe_cfg, __func__);
	if (ret)
		return ret;

	afe_port_d = &afe_ports[afe_cfg->port_id];
	afe_port_d->port_id = afe_cfg->port_id;

	WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");

	ret = wcd_cpe_send_afe_cal(core, afe_port_d);
	if (ret) {
		dev_err(core->dev,
			"%s: afe acdb cal send failed, err = %d\n",
			__func__, ret);
		goto err_ret;
	}

	pld_size = CPE_AFE_PARAM_PAYLOAD_SIZE;
	memset(&afe_params, 0, sizeof(afe_params));

	if (fill_afe_cmd_header(&afe_params.hdr,
				afe_cfg->port_id,
				CPE_AFE_CMD_SET_PARAM,
				(u8) pld_size, false)) {
		ret = -EINVAL;
		goto err_ret;
	}

	hw_mad_ctrl->param.module_id = CPE_AFE_MODULE_HW_MAD;
	hw_mad_ctrl->param.param_id = CPE_AFE_PARAM_ID_HW_MAD_CTL;
	hw_mad_ctrl->param.p_size.sr.param_size = PARAM_SIZE_AFE_HW_MAD_CTRL;
	hw_mad_ctrl->param.p_size.sr.reserved = 0;
	hw_mad_ctrl->minor_version = 1;
	hw_mad_ctrl->mad_type = MAD_TYPE_AUDIO;
	hw_mad_ctrl->mad_enable = afe_mad_ctl;

	port_cfg->param.module_id = CPE_AFE_MODULE_AUDIO_DEV_INTERFACE;
	port_cfg->param.param_id = CPE_AFE_PARAM_ID_GENERIC_PORT_CONFIG;
	port_cfg->param.p_size.sr.param_size = PARAM_SIZE_AFE_PORT_CFG;
	port_cfg->param.p_size.sr.reserved = 0;
	port_cfg->minor_version = 1;
	port_cfg->bit_width = afe_cfg->bit_width;
	port_cfg->num_channels = afe_cfg->num_channels;
	port_cfg->sample_rate = afe_cfg->sample_rate;

	ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &afe_params);
	if (ret)
		dev_err(core->dev,
			"%s: afe_set_params failed, err = %d\n",
			__func__, ret);
err_ret:
	WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
	return ret;
}

/*
 * wcd_cpe_afe_port_start: send the start command to afe service
 * @core_handle: handle to the cpe core
 * @port_cfg: configuration data for the afe port which needs
 *	      to be started.
 */
static int wcd_cpe_afe_port_start(void *core_handle,
		struct wcd_cpe_afe_port_cfg *port_cfg)
{
	struct cmi_hdr hdr;
	struct wcd_cpe_core *core = core_handle;
	struct wcd_cmi_afe_port_data *afe_port_d;
	int ret = 0;

	ret = wcd_cpe_is_valid_port(core, port_cfg, __func__);
	if (ret)
		return ret;

	afe_port_d = &afe_ports[port_cfg->port_id];
	afe_port_d->port_id = port_cfg->port_id;

	WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");

	memset(&hdr, 0, sizeof(hdr));
	fill_afe_cmd_header(&hdr, port_cfg->port_id,
			    CPE_AFE_PORT_CMD_START,
			    0, false);
	ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &hdr);
	if (ret)
		dev_err(core->dev,
			"%s: afe_port_start cmd failed, err = %d\n",
			__func__, ret);
	WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
	return ret;
}

/*
 * wcd_cpe_afe_port_stop: send stop command to afe service
 * @core_handle: handle to the cpe core
 * @port_cfg: configuration data for the afe port which needs
 *	      to be stopped.
 */
static int wcd_cpe_afe_port_stop(void *core_handle,
		struct wcd_cpe_afe_port_cfg *port_cfg)
{
	struct cmi_hdr hdr;
	struct wcd_cpe_core *core = core_handle;
	struct wcd_cmi_afe_port_data *afe_port_d;
	int ret = 0;

	ret = wcd_cpe_is_valid_port(core, port_cfg, __func__);
	if (ret)
		return ret;

	afe_port_d = &afe_ports[port_cfg->port_id];
	afe_port_d->port_id = port_cfg->port_id;

	WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");

	memset(&hdr, 0, sizeof(hdr));
	fill_afe_cmd_header(&hdr, port_cfg->port_id,
			    CPE_AFE_PORT_CMD_STOP,
			    0, false);
	ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &hdr);
	if (ret)
		dev_err(core->dev,
			"%s: afe_stop cmd failed, err = %d\n",
			__func__, ret);

	WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
	return ret;
}

/*
 * wcd_cpe_afe_port_suspend: send suspend command to afe service
 * @core_handle: handle to the cpe core
 * @port_cfg: configuration data for the afe port which needs
 *	      to be suspended.
 */
static int wcd_cpe_afe_port_suspend(void *core_handle,
		struct wcd_cpe_afe_port_cfg *port_cfg)
{
	struct cmi_hdr hdr;
	struct wcd_cpe_core *core = core_handle;
	struct wcd_cmi_afe_port_data *afe_port_d;
	int ret = 0;

	ret = wcd_cpe_is_valid_port(core, port_cfg, __func__);
	if (ret)
		return ret;

	afe_port_d = &afe_ports[port_cfg->port_id];
	afe_port_d->port_id = port_cfg->port_id;

	WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");

	memset(&hdr, 0, sizeof(hdr));
	fill_afe_cmd_header(&hdr, port_cfg->port_id,
			    CPE_AFE_PORT_CMD_SUSPEND,
			    0, false);
	ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &hdr);
	if (ret)
		dev_err(core->dev,
			"%s: afe_suspend cmd failed, err = %d\n",
			__func__, ret);
	WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
	return ret;
}

/*
 * wcd_cpe_afe_port_resume: send the resume command to afe service
 * @core_handle: handle to the cpe core
 * @port_cfg: configuration data for the afe port which needs
 *	      to be resumed.
 */
static int wcd_cpe_afe_port_resume(void *core_handle,
		struct wcd_cpe_afe_port_cfg *port_cfg)
{
	struct cmi_hdr hdr;
	struct wcd_cpe_core *core = core_handle;
	struct wcd_cmi_afe_port_data *afe_port_d;
	int ret = 0;

	ret = wcd_cpe_is_valid_port(core, port_cfg, __func__);
	if (ret)
		return ret;

	afe_port_d = &afe_ports[port_cfg->port_id];
	afe_port_d->port_id = port_cfg->port_id;

	WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");

	memset(&hdr, 0, sizeof(hdr));
	fill_afe_cmd_header(&hdr, port_cfg->port_id,
			    CPE_AFE_PORT_CMD_RESUME,
			    0, false);
	ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &hdr);
	if (ret)
		dev_err(core->dev,
			"%s: afe_resume cmd failed, err = %d\n",
			__func__, ret);
	WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
	return ret;
}

/*
 * wcd_cpe_get_afe_ops: populate the AFE callback table
 * @afe_ops: structure to be filled with the afe callbacks
 */
int wcd_cpe_get_afe_ops(struct wcd_cpe_afe_ops *afe_ops)
{
	afe_ops->afe_set_params = wcd_cpe_afe_set_params;
	afe_ops->afe_port_start = wcd_cpe_afe_port_start;
	afe_ops->afe_port_stop = wcd_cpe_afe_port_stop;
	afe_ops->afe_port_suspend = wcd_cpe_afe_port_suspend;
	afe_ops->afe_port_resume = wcd_cpe_afe_port_resume;
	afe_ops->afe_port_cmd_cfg = wcd_cpe_afe_cmd_port_cfg;

	return 0;
}
EXPORT_SYMBOL(wcd_cpe_get_afe_ops);
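
/*
 * Illustrative usage (editor's sketch, not compiled into this driver):
 * driving one AFE port through the ops table. Port id 1 and the
 * 16 kHz/16-bit mono settings are example values; core stands for the
 * client's cpe core handle.
 *
 *	static struct wcd_cpe_afe_ops afe_ops;
 *	struct wcd_cpe_afe_port_cfg cfg = {
 *		.port_id = 1,
 *		.bit_width = 16,
 *		.num_channels = 1,
 *		.sample_rate = 16000,
 *	};
 *
 *	wcd_cpe_get_afe_ops(&afe_ops);
 *	if (!afe_ops.afe_set_params(core, &cfg, false) &&
 *	    !afe_ops.afe_port_start(core, &cfg)) {
 *		// ... capture runs ...
 *		afe_ops.afe_port_stop(core, &cfg);
 *	}
 */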

MODULE_DESCRIPTION("WCD CPE Core");
MODULE_LICENSE("GPL v2");