/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/elf.h>
#include <linux/wait.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/pm_qos.h>
#include <linux/dma-mapping.h>
#include <sound/soc.h>
#include <sound/info.h>
#include <sound/lsm_params.h>
#include <soc/qcom/pm.h>
#include <dsp/audio_cal_utils.h>
#include "core.h"
#include "cpe_core.h"
#include "cpe_err.h"
#include "cpe_cmi.h"
#include "wcd_cpe_core.h"
#include "wcd_cpe_services.h"
#include "wcd_cmi_api.h"
#include "wcd9xxx-irq.h"

#define CMI_CMD_TIMEOUT (10 * HZ)
#define WCD_CPE_LSM_MAX_SESSIONS 2
#define WCD_CPE_AFE_MAX_PORTS 4
#define AFE_SVC_EXPLICIT_PORT_START 1
#define WCD_CPE_EC_PP_BUF_SIZE 480 /* 5 msec buffer */
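
/*
 * Illustrative sizing note (not from the original sources): 480 bytes
 * corresponds to 240 16-bit samples, i.e. 5 msec of mono data at 48 kHz.
 */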

#define ELF_FLAG_EXECUTE (1 << 0)
#define ELF_FLAG_WRITE (1 << 1)
#define ELF_FLAG_READ (1 << 2)

#define ELF_FLAG_RW (ELF_FLAG_READ | ELF_FLAG_WRITE)
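
/*
 * These values mirror the standard ELF program header permission bits
 * (PF_X = 0x1, PF_W = 0x2, PF_R = 0x4), so phdr->p_flags can be tested
 * against them directly when selecting segments for download.
 */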

#define WCD_CPE_GRAB_LOCK(lock, name) \
{ \
	pr_debug("%s: %s lock acquire\n", \
		 __func__, name); \
	mutex_lock(lock); \
}

#define WCD_CPE_REL_LOCK(lock, name) \
{ \
	pr_debug("%s: %s lock release\n", \
		 __func__, name); \
	mutex_unlock(lock); \
}
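
/*
 * Usage sketch: both macros take a pointer to a struct mutex and a
 * human-readable name for the debug logs, e.g.
 *
 *	WCD_CPE_GRAB_LOCK(&core->ssr_lock, "SSR");
 *	...critical section...
 *	WCD_CPE_REL_LOCK(&core->ssr_lock, "SSR");
 */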

#define WCD_CPE_STATE_MAX_LEN 11
#define CPE_OFFLINE_WAIT_TIMEOUT (2 * HZ)
#define CPE_READY_WAIT_TIMEOUT (3 * HZ)
#define WCD_CPE_SYSFS_DIR_MAX_LENGTH 32

#define CPE_ERR_IRQ_CB(core) \
	(core->cpe_cdc_cb->cpe_err_irq_control)

/*
 * AFE output buffer size is always
 * (sample_rate * number of bytes per sample) / (2 * 1000),
 * i.e. half a millisecond worth of data
 */
#define AFE_OUT_BUF_SIZE(bit_width, sample_rate) \
	(((sample_rate) * (bit_width / BITS_PER_BYTE))/(2*1000))
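
/*
 * Worked example (illustrative): for 16-bit samples at 16 kHz,
 * AFE_OUT_BUF_SIZE(16, 16000) = (16000 * (16 / 8)) / 2000 = 16 bytes,
 * which is 8 samples, i.e. 0.5 msec of data.
 */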

enum afe_port_state {
	AFE_PORT_STATE_DEINIT = 0,
	AFE_PORT_STATE_INIT,
	AFE_PORT_STATE_CONFIG,
	AFE_PORT_STATE_STARTED,
	AFE_PORT_STATE_SUSPENDED,
};
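
/*
 * Expected port lifecycle (an assumption drawn from the state names;
 * the transition code is outside this excerpt): DEINIT -> INIT at
 * driver init, INIT -> CONFIG once configured, CONFIG -> STARTED when
 * streaming begins, and STARTED <-> SUSPENDED across suspend/resume.
 */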

struct wcd_cmi_afe_port_data {
	u8 port_id;
	struct mutex afe_lock;
	struct completion afe_cmd_complete;
	enum afe_port_state port_state;
	u8 cmd_result;
	u32 mem_handle;
};

struct cpe_lsm_ids {
	u32 module_id;
	u32 param_id;
};

static struct wcd_cpe_core *core_d;
static struct cpe_lsm_session
	*lsm_sessions[WCD_CPE_LSM_MAX_SESSIONS + 1];
struct wcd_cpe_core * (*wcd_get_cpe_core)(struct snd_soc_codec *);
static struct wcd_cmi_afe_port_data afe_ports[WCD_CPE_AFE_MAX_PORTS + 1];
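
/*
 * Both arrays are sized MAX + 1 because LSM session ids and AFE port
 * ids are 1-based in the CMI headers (see the "session starts from
 * index 1" loop below and AFE_SVC_EXPLICIT_PORT_START), so index 0 is
 * effectively reserved.
 */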
static void wcd_cpe_svc_event_cb(const struct cpe_svc_notification *param);
static int wcd_cpe_setup_irqs(struct wcd_cpe_core *core);
static void wcd_cpe_cleanup_irqs(struct wcd_cpe_core *core);
static ssize_t cpe_ftm_test_trigger(struct file *file,
				    const char __user *user_buf,
				    size_t count, loff_t *ppos);
static u32 ramdump_enable;
static u32 cpe_ftm_test_status;
static const struct file_operations cpe_ftm_test_trigger_fops = {
	.open = simple_open,
	.write = cpe_ftm_test_trigger,
};

static int wcd_cpe_afe_svc_cmd_mode(void *core_handle,
				    u8 mode);
struct wcd_cpe_attribute {
	struct attribute attr;
	ssize_t (*show)(struct wcd_cpe_core *core, char *buf);
	ssize_t (*store)(struct wcd_cpe_core *core, const char *buf,
			 ssize_t count);
};

#define WCD_CPE_ATTR(_name, _mode, _show, _store) \
static struct wcd_cpe_attribute cpe_attr_##_name = { \
	.attr = {.name = __stringify(_name), .mode = _mode}, \
	.show = _show, \
	.store = _store, \
}
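
/*
 * Example (used later in this file): WCD_CPE_ATTR(fw_name, 0660,
 * fw_name_show, fw_name_store) expands to a struct wcd_cpe_attribute
 * named cpe_attr_fw_name, whose .attr member is registered through
 * sysfs_create_file() in wcd_cpe_sysfs_init().
 */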

#define to_wcd_cpe_attr(a) \
	container_of((a), struct wcd_cpe_attribute, attr)

#define kobj_to_cpe_core(kobj) \
	container_of((kobj), struct wcd_cpe_core, cpe_kobj)

/* wcd_cpe_lsm_session_active: check if any session is active
 * return true if any session is active.
 */
static bool wcd_cpe_lsm_session_active(void)
{
	int index = 1;
	bool lsm_active = false;

	/* session starts from index 1 */
	for (; index <= WCD_CPE_LSM_MAX_SESSIONS; index++) {
		if (lsm_sessions[index] != NULL) {
			lsm_active = true;
			break;
		} else {
			lsm_active = false;
		}
	}
	return lsm_active;
}

static int wcd_cpe_get_sfr_dump(struct wcd_cpe_core *core)
{
	struct cpe_svc_mem_segment dump_seg;
	int rc;
	u8 *sfr_dump;

	sfr_dump = kzalloc(core->sfr_buf_size, GFP_KERNEL);
	if (!sfr_dump)
		goto done;

	dump_seg.type = CPE_SVC_DATA_MEM;
	dump_seg.cpe_addr = core->sfr_buf_addr;
	dump_seg.size = core->sfr_buf_size;
	dump_seg.data = sfr_dump;
	dev_dbg(core->dev,
		"%s: reading SFR from CPE, size = %zu\n",
		__func__, core->sfr_buf_size);

	rc = cpe_svc_ramdump(core->cpe_handle, &dump_seg);
	if (rc < 0) {
		dev_err(core->dev,
			"%s: Failed to read cpe sfr_dump, err = %d\n",
			__func__, rc);
		goto free_sfr_dump;
	}

	dev_info(core->dev,
		 "%s: cpe_sfr = %s\n", __func__, sfr_dump);

free_sfr_dump:
	kfree(sfr_dump);
done:
	/* Even if SFR dump failed, do not return error */
	return 0;
}

static int wcd_cpe_collect_ramdump(struct wcd_cpe_core *core)
{
	struct cpe_svc_mem_segment dump_seg;
	int rc;

	if (!core->cpe_ramdump_dev || !core->cpe_dump_v_addr ||
	    core->hw_info.dram_size == 0) {
		dev_err(core->dev,
			"%s: Ramdump devices not set up, size = %zu\n",
			__func__, core->hw_info.dram_size);
		return -EINVAL;
	}

	dump_seg.type = CPE_SVC_DATA_MEM;
	dump_seg.cpe_addr = core->hw_info.dram_offset;
	dump_seg.size = core->hw_info.dram_size;
	dump_seg.data = core->cpe_dump_v_addr;

	dev_dbg(core->dev,
		"%s: Reading ramdump from CPE\n",
		__func__);

	rc = cpe_svc_ramdump(core->cpe_handle, &dump_seg);
	if (rc < 0) {
		dev_err(core->dev,
			"%s: Failed to read CPE ramdump, err = %d\n",
			__func__, rc);
		return rc;
	}

	dev_dbg(core->dev,
		"%s: completed reading ramdump from CPE\n",
		__func__);

	core->cpe_ramdump_seg.address = (unsigned long) core->cpe_dump_addr;
	core->cpe_ramdump_seg.size = core->hw_info.dram_size;
	core->cpe_ramdump_seg.v_address = core->cpe_dump_v_addr;

	rc = do_ramdump(core->cpe_ramdump_dev,
			&core->cpe_ramdump_seg, 1);
	if (rc)
		dev_err(core->dev,
			"%s: fail to dump cpe ram to device, err = %d\n",
			__func__, rc);
	return rc;
}

/* wcd_cpe_is_valid_elf_hdr: check if the ELF header is valid
 * @core: handle to wcd_cpe_core
 * @fw_size: size of firmware from request_firmware
 * @ehdr: the elf header to be checked for
 * return true if all checks pass, false if any elf check fails
 */
static bool wcd_cpe_is_valid_elf_hdr(struct wcd_cpe_core *core, size_t fw_size,
				     const struct elf32_hdr *ehdr)
{
	if (fw_size < sizeof(*ehdr)) {
		dev_err(core->dev, "%s: Firmware too small\n", __func__);
		goto elf_check_fail;
	}

	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) {
		dev_err(core->dev, "%s: Not an ELF file\n", __func__);
		goto elf_check_fail;
	}

	if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN) {
		dev_err(core->dev, "%s: Not an executable image\n", __func__);
		goto elf_check_fail;
	}

	if (ehdr->e_phnum == 0) {
		dev_err(core->dev, "%s: no segments to load\n", __func__);
		goto elf_check_fail;
	}

	if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
	    sizeof(struct elf32_hdr) > fw_size) {
		dev_err(core->dev, "%s: Too small MDT file\n", __func__);
		goto elf_check_fail;
	}

	return true;

elf_check_fail:
	return false;
}

/*
 * wcd_cpe_load_each_segment: download segment to CPE
 * @core: handle to struct wcd_cpe_core
 * @file_idx: index of split firmware image file name
 * @phdr: program header from metadata
 */
static int wcd_cpe_load_each_segment(struct wcd_cpe_core *core,
				     int file_idx, const struct elf32_phdr *phdr)
{
	const struct firmware *split_fw;
	char split_fname[32];
	int ret = 0;
	struct cpe_svc_mem_segment *segment;

	if (!core || !phdr) {
		pr_err("%s: Invalid params\n", __func__);
		return -EINVAL;
	}

	/* file size can be 0 for bss segments */
	if (phdr->p_filesz == 0 || phdr->p_memsz == 0)
		return 0;

	segment = kzalloc(sizeof(struct cpe_svc_mem_segment), GFP_KERNEL);
	if (!segment)
		return -ENOMEM;

	snprintf(split_fname, sizeof(split_fname), "%s.b%02d",
		 core->fname, file_idx);
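	/* e.g. a (hypothetical) fname "cpe" with file_idx 2 requests "cpe.b02" */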

	ret = request_firmware(&split_fw, split_fname, core->dev);
	if (ret) {
		dev_err(core->dev, "firmware %s not found\n",
			split_fname);
		ret = -EIO;
		goto fw_req_fail;
	}

	if (phdr->p_flags & ELF_FLAG_EXECUTE)
		segment->type = CPE_SVC_INSTRUCTION_MEM;
	else if (phdr->p_flags & ELF_FLAG_RW)
		segment->type = CPE_SVC_DATA_MEM;
	else {
		dev_err(core->dev, "%s invalid flags 0x%x\n",
			__func__, phdr->p_flags);
		goto done;
	}

	segment->cpe_addr = phdr->p_paddr;
	segment->size = phdr->p_filesz;
	segment->data = (u8 *) split_fw->data;

	dev_dbg(core->dev,
		"%s: cpe segment type %s read from firmware\n", __func__,
		(segment->type == CPE_SVC_INSTRUCTION_MEM) ?
		"INSTRUCTION" : "DATA");

	ret = cpe_svc_download_segment(core->cpe_handle, segment);
	if (ret) {
		dev_err(core->dev,
			"%s: Failed to download %s, error = %d\n",
			__func__, split_fname, ret);
		goto done;
	}

done:
	release_firmware(split_fw);

fw_req_fail:
	kfree(segment);
	return ret;
}

/*
 * wcd_cpe_enable_cpe_clks: enable the clocks for CPE
 * @core: handle to wcd_cpe_core
 * @enable: flag indicating whether to enable/disable cpe clocks
 */
static int wcd_cpe_enable_cpe_clks(struct wcd_cpe_core *core, bool enable)
{
	int ret, ret1;

	if (!core || !core->cpe_cdc_cb ||
	    !core->cpe_cdc_cb->cpe_clk_en) {
		pr_err("%s: invalid handle\n",
		       __func__);
		return -EINVAL;
	}

	ret = core->cpe_cdc_cb->cdc_clk_en(core->codec, enable);
	if (ret) {
		dev_err(core->dev, "%s: Failed to enable RCO\n",
			__func__);
		return ret;
	}

	if (!enable && core->cpe_clk_ref > 0)
		core->cpe_clk_ref--;

	/*
	 * The CPE clk is reference counted: it is enabled on the first
	 * enable vote and disabled only on the last disable vote.
	 */
	if (core->cpe_clk_ref == 0) {
		ret = core->cpe_cdc_cb->cpe_clk_en(core->codec, enable);
		if (ret) {
			dev_err(core->dev,
				"%s: cpe_clk_en() failed, err = %d\n",
				__func__, ret);
			goto cpe_clk_fail;
		}
	}

	if (enable)
		core->cpe_clk_ref++;

	return 0;

cpe_clk_fail:
	/* Release the codec clk if CPE clk enable failed */
	if (enable) {
		ret1 = core->cpe_cdc_cb->cdc_clk_en(core->codec, !enable);
		if (ret1)
			dev_err(core->dev,
				"%s: Fail to release codec clk, err = %d\n",
				__func__, ret1);
	}

	return ret;
}

/*
 * wcd_cpe_bus_vote_max_bw: Function to vote for max bandwidth on codec bus
 * @core: handle to core for cpe
 * @vote: flag to indicate enable/disable of vote
 *
 * This function will try to use the codec provided callback to
 * vote/unvote for the max bandwidth of the bus that is used by
 * the codec for register reads/writes.
 */
static int wcd_cpe_bus_vote_max_bw(struct wcd_cpe_core *core,
				   bool vote)
{
	if (!core || !core->cpe_cdc_cb) {
		pr_err("%s: Invalid handle to %s\n",
		       __func__,
		       (!core) ? "core" : "codec callbacks");
		return -EINVAL;
	}

	if (core->cpe_cdc_cb->bus_vote_bw) {
		dev_dbg(core->dev, "%s: %s cdc bus max bandwidth\n",
			__func__, vote ? "Vote" : "Unvote");
		core->cpe_cdc_cb->bus_vote_bw(core->codec, vote);
	}

	return 0;
}

/*
 * wcd_cpe_load_fw: Function to load the fw image
 * @core: cpe core pointer
 * @load_type: indicates whether to load to data section
 *	       or the instruction section
 *
 * Parse the mdt file to look for program headers, load each
 * split file corresponding to the program headers.
 */
static int wcd_cpe_load_fw(struct wcd_cpe_core *core,
			   unsigned int load_type)
{
	int ret, phdr_idx;
	struct snd_soc_codec *codec = NULL;
	struct wcd9xxx *wcd9xxx = NULL;
	const struct elf32_hdr *ehdr;
	const struct elf32_phdr *phdr;
	const struct firmware *fw;
	const u8 *elf_ptr;
	char mdt_name[64];
	bool img_dload_fail = false;
	bool load_segment;

	if (!core || !core->cpe_handle) {
		pr_err("%s: Error CPE core %pK\n", __func__,
		       core);
		return -EINVAL;
	}
	codec = core->codec;
	wcd9xxx = dev_get_drvdata(codec->dev->parent);
	snprintf(mdt_name, sizeof(mdt_name), "%s.mdt", core->fname);
	ret = request_firmware(&fw, mdt_name, core->dev);
	if (ret < 0) {
		dev_err(core->dev, "firmware %s not found\n", mdt_name);
		return ret;
	}

	ehdr = (struct elf32_hdr *) fw->data;
	if (!wcd_cpe_is_valid_elf_hdr(core, fw->size, ehdr)) {
		dev_err(core->dev, "%s: fw mdt %s is invalid\n",
			__func__, mdt_name);
		ret = -EINVAL;
		goto done;
	}

	elf_ptr = fw->data + sizeof(*ehdr);

	if (load_type == ELF_FLAG_EXECUTE) {
		/* Reset CPE first */
		ret = cpe_svc_reset(core->cpe_handle);
		if (ret < 0) {
			dev_err(core->dev,
				"%s: Failed to reset CPE with error %d\n",
				__func__, ret);
			goto done;
		}
	}

	dev_dbg(core->dev, "%s: start image dload, name = %s, load_type = 0x%x\n",
		__func__, core->fname, load_type);

	wcd_cpe_bus_vote_max_bw(core, true);

	/* parse every program header and request corresponding firmware */
	for (phdr_idx = 0; phdr_idx < ehdr->e_phnum; phdr_idx++) {
		phdr = (struct elf32_phdr *)elf_ptr;
		load_segment = false;

		dev_dbg(core->dev,
			"index = %d, vaddr = 0x%x, paddr = 0x%x, filesz = 0x%x, memsz = 0x%x, flags = 0x%x\n",
			phdr_idx, phdr->p_vaddr, phdr->p_paddr,
			phdr->p_filesz, phdr->p_memsz, phdr->p_flags);

		switch (load_type) {
		case ELF_FLAG_EXECUTE:
			if (phdr->p_flags & load_type)
				load_segment = true;
			break;
		case ELF_FLAG_RW:
			if (!(phdr->p_flags & ELF_FLAG_EXECUTE) &&
			    (phdr->p_flags & load_type))
				load_segment = true;
			break;
		default:
			pr_err("%s: Invalid load_type 0x%x\n",
			       __func__, load_type);
			ret = -EINVAL;
			goto rel_bus_vote;
		}

		if (load_segment) {
			ret = wcd_cpe_load_each_segment(core,
							phdr_idx, phdr);
			if (ret < 0) {
				dev_err(core->dev,
					"Failed to load segment %d, aborting img dload\n",
					phdr_idx);
				img_dload_fail = true;
				goto rel_bus_vote;
			}
		} else {
			dev_dbg(core->dev,
				"%s: skipped segment with index %d\n",
				__func__, phdr_idx);
		}

		elf_ptr = elf_ptr + sizeof(*phdr);
	}
	if (load_type == ELF_FLAG_EXECUTE)
		core->ssr_type = WCD_CPE_IMEM_DOWNLOADED;

rel_bus_vote:
	wcd_cpe_bus_vote_max_bw(core, false);

done:
	release_firmware(fw);
	return ret;
}

/*
 * wcd_cpe_change_online_state - mark cpe online/offline state
 * @core: core session to mark
 * @online: whether online or offline
 */
static void wcd_cpe_change_online_state(struct wcd_cpe_core *core,
					int online)
{
	struct wcd_cpe_ssr_entry *ssr_entry = NULL;
	unsigned long ret;

	if (!core) {
		pr_err("%s: Invalid core handle\n",
		       __func__);
		return;
	}

	ssr_entry = &core->ssr_entry;
	WCD_CPE_GRAB_LOCK(&core->ssr_lock, "SSR");
	ssr_entry->offline = !online;

	/* Make sure write to offline state is completed. */
	wmb();
	ret = xchg(&ssr_entry->offline_change, 1);
	wake_up_interruptible(&ssr_entry->offline_poll_wait);
	WCD_CPE_REL_LOCK(&core->ssr_lock, "SSR");
	pr_debug("%s: change state 0x%x offline_change 0x%x\n"
		 " core->offline 0x%x, ret = %ld\n",
		 __func__, online,
		 ssr_entry->offline_change,
		 core->ssr_entry.offline, ret);
}

/*
 * wcd_cpe_load_fw_image: work function to load the fw image
 * @work: work that is scheduled to perform the image loading
 *
 * Parse the mdt file to look for program headers, load each
 * split file corresponding to the program headers.
 */
static void wcd_cpe_load_fw_image(struct work_struct *work)
{
	struct wcd_cpe_core *core;
	int ret = 0;

	core = container_of(work, struct wcd_cpe_core, load_fw_work);
	ret = wcd_cpe_load_fw(core, ELF_FLAG_EXECUTE);
	if (!ret)
		wcd_cpe_change_online_state(core, 1);
	else
		pr_err("%s: failed to load instruction section, err = %d\n",
		       __func__, ret);
}

/*
 * wcd_cpe_get_core_handle: get the handle to wcd_cpe_core
 * @codec: codec from which this handle is to be obtained
 * Codec driver should provide a callback function to obtain
 * handle to wcd_cpe_core during initialization of wcd_cpe_core
 */
void *wcd_cpe_get_core_handle(
	struct snd_soc_codec *codec)
{
	struct wcd_cpe_core *core = NULL;

	if (!codec) {
		pr_err("%s: Invalid codec handle\n",
		       __func__);
		goto done;
	}

	if (!wcd_get_cpe_core) {
		dev_err(codec->dev,
			"%s: codec callback not available\n",
			__func__);
		goto done;
	}

	core = wcd_get_cpe_core(codec);

	if (!core)
		dev_err(codec->dev,
			"%s: handle to core not available\n",
			__func__);
done:
	return core;
}
EXPORT_SYMBOL(wcd_cpe_get_core_handle);

/*
 * svass_engine_irq: threaded interrupt handler for svass engine irq
 * @irq: interrupt number
 * @data: data pointer passed during irq registration
 */
static irqreturn_t svass_engine_irq(int irq, void *data)
{
	struct wcd_cpe_core *core = data;
	int ret = 0;

	if (!core) {
		pr_err("%s: Invalid data for interrupt handler\n",
		       __func__);
		goto done;
	}

	ret = cpe_svc_process_irq(core->cpe_handle, CPE_IRQ_OUTBOX_IRQ);
	if (ret < 0)
		dev_err(core->dev,
			"%s: Error processing irq from cpe services\n",
			__func__);
done:
	return IRQ_HANDLED;
}

/*
 * wcd_cpe_state_read - update read status in procfs
 * @entry: snd_info_entry
 * @buf: buffer where the read status is updated.
 */
static ssize_t wcd_cpe_state_read(struct snd_info_entry *entry,
				  void *file_private_data, struct file *file,
				  char __user *buf, size_t count, loff_t pos)
{
	int len = 0;
	char buffer[WCD_CPE_STATE_MAX_LEN];
	struct wcd_cpe_core *core = NULL;
	struct wcd_cpe_ssr_entry *ssr_entry = NULL;

	core = (struct wcd_cpe_core *) entry->private_data;
	if (!core) {
		pr_err("%s: CPE core NULL\n", __func__);
		return -EINVAL;
	}
	ssr_entry = &core->ssr_entry;

	/* Make sure read from ssr_entry is completed. */
	rmb();
	dev_dbg(core->dev,
		"%s: Offline 0x%x\n", __func__,
		ssr_entry->offline);

	WCD_CPE_GRAB_LOCK(&core->ssr_lock, "SSR");
	len = snprintf(buffer, sizeof(buffer), "%s\n",
		       ssr_entry->offline ? "OFFLINE" : "ONLINE");
	WCD_CPE_REL_LOCK(&core->ssr_lock, "SSR");

	return simple_read_from_buffer(buf, count, &pos, buffer, len);
}

/*
 * wcd_cpe_state_poll - polls for a change in state
 * @entry: snd_info_entry
 * @wait: poll table for the poll wait
 */
static unsigned int wcd_cpe_state_poll(struct snd_info_entry *entry,
				       void *private_data, struct file *file,
				       poll_table *wait)
{
	struct wcd_cpe_core *core = NULL;
	struct wcd_cpe_ssr_entry *ssr_entry = NULL;
	int ret = 0;

	core = (struct wcd_cpe_core *) entry->private_data;
	if (!core) {
		pr_err("%s: CPE core NULL\n", __func__);
		return -EINVAL;
	}

	ssr_entry = &core->ssr_entry;

	dev_dbg(core->dev, "%s: CPE Poll wait\n",
		__func__);
	poll_wait(file, &ssr_entry->offline_poll_wait, wait);
	dev_dbg(core->dev, "%s: Wake-up Poll wait\n",
		__func__);
	WCD_CPE_GRAB_LOCK(&core->ssr_lock, "SSR");

	if (xchg(&ssr_entry->offline_change, 0))
		ret = POLLIN | POLLPRI | POLLRDNORM;

	WCD_CPE_REL_LOCK(&core->ssr_lock, "SSR");

	dev_dbg(core->dev, "%s: ret (%d) from poll_wait\n",
		__func__, ret);
	return ret;
}

/*
 * wcd_cpe_is_online_state - return true if CPE is in online state
 * @core_handle: handle to the core to query
 */
static bool wcd_cpe_is_online_state(void *core_handle)
{
	struct wcd_cpe_core *core = core_handle;

	if (core_handle) {
		return !core->ssr_entry.offline;
	} else {
		pr_err("%s: Core handle NULL\n", __func__);
		/* treat as offline if core ptr is null */
		return false;
	}
}

static struct snd_info_entry_ops wcd_cpe_state_proc_ops = {
	.read = wcd_cpe_state_read,
	.poll = wcd_cpe_state_poll,
};

static int wcd_cpe_check_new_image(struct wcd_cpe_core *core)
{
	int rc = 0;
	char temp_img_name[WCD_CPE_IMAGE_FNAME_MAX];

	if (!strcmp(core->fname, core->dyn_fname) &&
	    core->ssr_type != WCD_CPE_INITIALIZED) {
		dev_dbg(core->dev,
			"%s: Firmware unchanged, fname = %s, ssr_type 0x%x\n",
			__func__, core->fname, core->ssr_type);
		goto done;
	}

	/*
	 * Different firmware name requested,
	 * Re-load the instruction section
	 */
	strlcpy(temp_img_name, core->fname,
		WCD_CPE_IMAGE_FNAME_MAX);
	strlcpy(core->fname, core->dyn_fname,
		WCD_CPE_IMAGE_FNAME_MAX);

	rc = wcd_cpe_load_fw(core, ELF_FLAG_EXECUTE);
	if (rc) {
		dev_err(core->dev,
			"%s: Failed to dload new image %s, err = %d\n",
			__func__, core->fname, rc);
		/* If new image download failed, revert back to old image */
		strlcpy(core->fname, temp_img_name,
			WCD_CPE_IMAGE_FNAME_MAX);
		rc = wcd_cpe_load_fw(core, ELF_FLAG_EXECUTE);
		if (rc)
			dev_err(core->dev,
				"%s: Failed to re-dload image %s, err = %d\n",
				__func__, core->fname, rc);
	} else {
		dev_info(core->dev, "%s: fw changed to %s\n",
			 __func__, core->fname);
	}
done:
	return rc;
}

static int wcd_cpe_enable(struct wcd_cpe_core *core,
			  bool enable)
{
	int ret = 0;

	if (enable) {
		/* Reset CPE first */
		ret = cpe_svc_reset(core->cpe_handle);
		if (ret < 0) {
			dev_err(core->dev,
				"%s: CPE Reset failed, error = %d\n",
				__func__, ret);
			goto done;
		}

		ret = wcd_cpe_setup_irqs(core);
		if (ret) {
			dev_err(core->dev,
				"%s: CPE IRQs setup failed, error = %d\n",
				__func__, ret);
			goto done;
		}
		ret = wcd_cpe_check_new_image(core);
		if (ret)
			goto fail_boot;

		/* Dload data section */
		ret = wcd_cpe_load_fw(core, ELF_FLAG_RW);
		if (ret) {
			dev_err(core->dev,
				"%s: Failed to dload data section, err = %d\n",
				__func__, ret);
			goto fail_boot;
		}

		ret = wcd_cpe_enable_cpe_clks(core, true);
		if (ret < 0) {
			dev_err(core->dev,
				"%s: CPE clk enable failed, err = %d\n",
				__func__, ret);
			goto fail_boot;
		}

		ret = cpe_svc_boot(core->cpe_handle,
				   core->cpe_debug_mode);
		if (ret < 0) {
			dev_err(core->dev,
				"%s: Failed to boot CPE\n",
				__func__);
			goto fail_boot;
		}

		/* wait for CPE to be online */
		dev_dbg(core->dev,
			"%s: waiting for CPE bootup\n",
			__func__);

		wait_for_completion(&core->online_compl);

		dev_dbg(core->dev,
			"%s: CPE bootup done\n",
			__func__);

		core->ssr_type = WCD_CPE_ENABLED;
	} else {
		if (core->ssr_type == WCD_CPE_BUS_DOWN_EVENT ||
		    core->ssr_type == WCD_CPE_SSR_EVENT) {
			/*
			 * If this disable vote is when
			 * SSR is in progress, do not disable CPE here,
			 * instead SSR handler will control CPE.
			 */
			wcd_cpe_enable_cpe_clks(core, false);
			wcd_cpe_cleanup_irqs(core);
			goto done;
		}

		ret = cpe_svc_shutdown(core->cpe_handle);
		if (ret < 0) {
			dev_err(core->dev,
				"%s: CPE shutdown failed, error %d\n",
				__func__, ret);
			goto done;
		}

		wcd_cpe_enable_cpe_clks(core, false);
		wcd_cpe_cleanup_irqs(core);
		core->ssr_type = WCD_CPE_IMEM_DOWNLOADED;
	}

	return ret;

fail_boot:
	wcd_cpe_cleanup_irqs(core);

done:
	return ret;
}

/*
 * wcd_cpe_boot_ssr: Load the images to CPE after ssr and bootup cpe
 * @core: handle to the core
 */
static int wcd_cpe_boot_ssr(struct wcd_cpe_core *core)
{
	int rc = 0;

	if (!core || !core->cpe_handle) {
		pr_err("%s: Invalid handle\n", __func__);
		rc = -EINVAL;
		goto fail;
	}
	/* Load the instruction section and mark CPE as online */
	rc = wcd_cpe_load_fw(core, ELF_FLAG_EXECUTE);
	if (rc) {
		dev_err(core->dev,
			"%s: Failed to load instruction, err = %d\n",
			__func__, rc);
		goto fail;
	} else {
		wcd_cpe_change_online_state(core, 1);
	}

fail:
	return rc;
}

/*
 * wcd_cpe_clr_ready_status:
 *	Clear the value from the ready status for CPE
 * @core: handle to the core
 * @value: flag/bitmask that is to be cleared
 *
 * This function should not be invoked with ssr_lock acquired
 */
static void wcd_cpe_clr_ready_status(struct wcd_cpe_core *core,
				     u8 value)
{
	WCD_CPE_GRAB_LOCK(&core->ssr_lock, "SSR");
	core->ready_status &= ~(value);
	dev_dbg(core->dev,
		"%s: ready_status = 0x%x\n",
		__func__, core->ready_status);
	WCD_CPE_REL_LOCK(&core->ssr_lock, "SSR");
}

/*
 * wcd_cpe_set_and_complete:
 *	Set the ready status with the provided value and
 *	flag the completion object if ready status moves
 *	to ready to download
 * @core: handle to the core
 * @value: flag/bitmask that is to be set
 */
static void wcd_cpe_set_and_complete(struct wcd_cpe_core *core,
				     u8 value)
{
	WCD_CPE_GRAB_LOCK(&core->ssr_lock, "SSR");
	core->ready_status |= value;
	if ((core->ready_status & WCD_CPE_READY_TO_DLOAD) ==
	    WCD_CPE_READY_TO_DLOAD) {
		dev_dbg(core->dev,
			"%s: marking ready, status = 0x%x\n",
			__func__, core->ready_status);
		complete(&core->ready_compl);
	}
	WCD_CPE_REL_LOCK(&core->ssr_lock, "SSR");
}
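
/*
 * As used above, ready_status is a bitmask: firmware re-download after
 * SSR proceeds only once both the bus (WCD_CPE_BUS_READY) and the CPE
 * block (WCD_CPE_BLK_READY) have been marked ready, which together
 * make up WCD_CPE_READY_TO_DLOAD (an assumption based on the check in
 * wcd_cpe_set_and_complete() and the SSR event handling below).
 */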

/*
 * wcd_cpe_ssr_work: work function to handle CPE SSR
 * @work: work that is scheduled to perform CPE shutdown
 *	and restart
 */
static void wcd_cpe_ssr_work(struct work_struct *work)
{
	int rc = 0;
	u32 irq = 0;
	struct wcd_cpe_core *core = NULL;
	u8 status = 0;

	core = container_of(work, struct wcd_cpe_core, ssr_work);
	if (!core) {
		pr_err("%s: Core handle NULL\n", __func__);
		return;
	}

	/* Hold a PM QoS request so the CPU stays out of deep idle */
	pm_qos_add_request(&core->pm_qos_req,
			   PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);
	pm_qos_update_request(&core->pm_qos_req,
			      msm_cpuidle_get_deep_idle_latency());

	dev_dbg(core->dev,
		"%s: CPE SSR with event %d\n",
		__func__, core->ssr_type);

	if (core->ssr_type == WCD_CPE_SSR_EVENT) {
		if (CPE_ERR_IRQ_CB(core))
			core->cpe_cdc_cb->cpe_err_irq_control(
					core->codec,
					CPE_ERR_IRQ_STATUS,
					&status);
		if (status & core->irq_info.cpe_fatal_irqs)
			irq = CPE_IRQ_WDOG_BITE;
	} else {
		/* If bus is down, cdc reg cannot be read */
		irq = CPE_IRQ_WDOG_BITE;
	}

	if (core->cpe_users > 0) {
		rc = cpe_svc_process_irq(core->cpe_handle, irq);
		if (rc < 0)
			/*
			 * Even if process_irq fails,
			 * wait for cpe to move to offline state
			 */
			dev_err(core->dev,
				"%s: irq processing failed, error = %d\n",
				__func__, rc);

		rc = wait_for_completion_timeout(&core->offline_compl,
						 CPE_OFFLINE_WAIT_TIMEOUT);
		if (!rc) {
			dev_err(core->dev,
				"%s: wait for cpe offline timed out\n",
				__func__);
			goto err_ret;
		}
		if (core->ssr_type != WCD_CPE_BUS_DOWN_EVENT) {
			wcd_cpe_get_sfr_dump(core);

			/*
			 * Ramdump has to be explicitly enabled
			 * through debugfs and cannot be collected
			 * when bus is down.
			 */
			if (ramdump_enable)
				wcd_cpe_collect_ramdump(core);
		}
	} else {
		pr_err("%s: no cpe users, mark as offline\n", __func__);
		wcd_cpe_change_online_state(core, 0);
		wcd_cpe_set_and_complete(core,
					 WCD_CPE_BLK_READY);
	}

	rc = wait_for_completion_timeout(&core->ready_compl,
					 CPE_READY_WAIT_TIMEOUT);
	if (!rc) {
		dev_err(core->dev,
			"%s: ready to online timed out, status = %u\n",
			__func__, core->ready_status);
		goto err_ret;
	}

	rc = wcd_cpe_boot_ssr(core);

	/*
	 * Once the image is downloaded, make sure all
	 * error interrupts are cleared
	 */
	if (CPE_ERR_IRQ_CB(core))
		core->cpe_cdc_cb->cpe_err_irq_control(core->codec,
					CPE_ERR_IRQ_CLEAR, NULL);

err_ret:
	/* restore default PM QoS and remove the request */
	pm_qos_update_request(&core->pm_qos_req,
			      PM_QOS_DEFAULT_VALUE);
	pm_qos_remove_request(&core->pm_qos_req);
}

/*
 * wcd_cpe_ssr_event: handle SSR events here.
 * @core_handle: handle to the cpe core
 * @event: indicates the type of SSR/bus event
 */
int wcd_cpe_ssr_event(void *core_handle,
		      enum wcd_cpe_ssr_state_event event)
{
	struct wcd_cpe_core *core = core_handle;

	if (!core) {
		pr_err("%s: Invalid handle to core\n",
		       __func__);
		return -EINVAL;
	}

	/*
	 * If CPE is not even enabled, the SSR event for
	 * CPE needs to be ignored
	 */
	if (core->ssr_type == WCD_CPE_INITIALIZED) {
		dev_info(core->dev,
			 "%s: CPE initialized but not enabled, skip CPE ssr\n",
			 __func__);
		return 0;
	}

	dev_dbg(core->dev,
		"%s: Schedule ssr work, event = %d\n",
		__func__, core->ssr_type);

	switch (event) {
	case WCD_CPE_BUS_DOWN_EVENT:
		/*
		 * If bus down, then CPE block is also
		 * treated to be down
		 */
		wcd_cpe_clr_ready_status(core, WCD_CPE_READY_TO_DLOAD);
		core->ssr_type = event;
		schedule_work(&core->ssr_work);
		break;

	case WCD_CPE_SSR_EVENT:
		wcd_cpe_clr_ready_status(core, WCD_CPE_BLK_READY);
		core->ssr_type = event;
		schedule_work(&core->ssr_work);
		break;

	case WCD_CPE_BUS_UP_EVENT:
		wcd_cpe_set_and_complete(core, WCD_CPE_BUS_READY);
		/*
		 * In case of bus up event ssr_type will be changed
		 * to WCD_CPE_ACTIVE once CPE is online
		 */
		break;

	default:
		dev_err(core->dev,
			"%s: unhandled SSR event %d\n",
			__func__, event);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(wcd_cpe_ssr_event);

/*
 * svass_exception_irq: threaded irq handler for SVA error interrupts
 * @irq: interrupt number
 * @data: data pointer passed during irq registration
 *
 * Once an error interrupt is received, it is not cleared, since
 * clearing this interrupt will raise spurious interrupts unless
 * CPE is reset.
 */
static irqreturn_t svass_exception_irq(int irq, void *data)
{
	struct wcd_cpe_core *core = data;
	u8 status = 0;

	if (!core || !CPE_ERR_IRQ_CB(core)) {
		pr_err("%s: Invalid %s\n",
		       __func__,
		       (!core) ? "core" : "cdc control");
		return IRQ_HANDLED;
	}

	core->cpe_cdc_cb->cpe_err_irq_control(core->codec,
			CPE_ERR_IRQ_STATUS, &status);

	while (status != 0) {
		if (status & core->irq_info.cpe_fatal_irqs) {
			dev_err(core->dev,
				"%s: CPE SSR event, err_status = 0x%02x\n",
				__func__, status);
			wcd_cpe_ssr_event(core, WCD_CPE_SSR_EVENT);
			/*
			 * If fatal interrupt is received,
			 * trigger SSR and stop processing
			 * further interrupts
			 */
			break;
		}
		/*
		 * Mask the interrupt that was raised to
		 * avoid spurious interrupts
		 */
		core->cpe_cdc_cb->cpe_err_irq_control(core->codec,
				CPE_ERR_IRQ_MASK, &status);

		/* Clear only the interrupt that was raised */
		core->cpe_cdc_cb->cpe_err_irq_control(core->codec,
				CPE_ERR_IRQ_CLEAR, &status);
		dev_err(core->dev,
			"%s: err_interrupt status = 0x%x\n",
			__func__, status);

		/* Read status for pending interrupts */
		core->cpe_cdc_cb->cpe_err_irq_control(core->codec,
				CPE_ERR_IRQ_STATUS, &status);
	}

	return IRQ_HANDLED;
}

/*
 * wcd_cpe_cmi_afe_cb: callback called on response to afe commands
 * @param: parameter containing the response code, etc
 *
 * Process the response to the command sent to the CPE and wake up
 * the command-send wait.
 */
static void wcd_cpe_cmi_afe_cb(const struct cmi_api_notification *param)
{
	struct cmi_hdr *hdr;
	struct wcd_cmi_afe_port_data *afe_port_d;
	u8 port_id;

	if (!param) {
		pr_err("%s: param is null\n", __func__);
		return;
	}

	if (param->event != CMI_API_MSG) {
		pr_err("%s: unhandled event 0x%x\n",
		       __func__, param->event);
		return;
	}

	pr_debug("%s: param->result = %d\n",
		 __func__, param->result);

	hdr = (struct cmi_hdr *) param->message;

	/*
	 * for AFE cmd response, port id is
	 * stored at session id field of header
	 */
	port_id = CMI_HDR_GET_SESSION_ID(hdr);
	if (port_id > WCD_CPE_AFE_MAX_PORTS) {
		pr_err("%s: invalid port_id %d\n",
		       __func__, port_id);
		return;
	}

	afe_port_d = &(afe_ports[port_id]);

	if (hdr->opcode == CPE_CMI_BASIC_RSP_OPCODE) {

		u8 *payload = ((u8 *)param->message) + (sizeof(struct cmi_hdr));
		u8 result = payload[0];

		afe_port_d->cmd_result = result;
		complete(&afe_port_d->afe_cmd_complete);

	} else if (hdr->opcode == CPE_AFE_PORT_CMDRSP_SHARED_MEM_ALLOC) {

		struct cpe_cmdrsp_shmem_alloc *cmdrsp_shmem_alloc =
			(struct cpe_cmdrsp_shmem_alloc *) param->message;

		if (cmdrsp_shmem_alloc->addr == 0) {
			pr_err("%s: Failed AFE shared mem alloc\n", __func__);
			afe_port_d->cmd_result = CMI_SHMEM_ALLOC_FAILED;
		} else {
			pr_debug("%s AFE shared mem addr = 0x%x\n",
				 __func__, cmdrsp_shmem_alloc->addr);
			afe_port_d->mem_handle = cmdrsp_shmem_alloc->addr;
			afe_port_d->cmd_result = 0;
		}
		complete(&afe_port_d->afe_cmd_complete);
	}
}

/*
 * wcd_cpe_initialize_afe_port_data: Initialize all AFE ports
 *
 * Initialize the data for all the afe ports. Assign the
 * afe port state to INIT state.
 */
static void wcd_cpe_initialize_afe_port_data(void)
{
	struct wcd_cmi_afe_port_data *afe_port_d;
	int i;

	for (i = 0; i <= WCD_CPE_AFE_MAX_PORTS; i++) {
		afe_port_d = &afe_ports[i];
		afe_port_d->port_id = i;
		init_completion(&afe_port_d->afe_cmd_complete);
		afe_port_d->port_state = AFE_PORT_STATE_INIT;
		mutex_init(&afe_port_d->afe_lock);
	}
}

/*
 * wcd_cpe_deinitialize_afe_port_data: De-initialize all AFE ports
 *
 * De-initialize the data for all the afe ports. Assign the
 * afe port state to DEINIT state.
 */
static void wcd_cpe_deinitialize_afe_port_data(void)
{
	struct wcd_cmi_afe_port_data *afe_port_d;
	int i;

	for (i = 0; i <= WCD_CPE_AFE_MAX_PORTS; i++) {
		afe_port_d = &afe_ports[i];
		afe_port_d->port_state = AFE_PORT_STATE_DEINIT;
		mutex_destroy(&afe_port_d->afe_lock);
	}
}

/*
 * wcd_cpe_svc_event_cb: callback from cpe services, indicating
 * CPE is online or offline.
 * @param: parameter / payload for event to be notified
 */
static void wcd_cpe_svc_event_cb(const struct cpe_svc_notification *param)
{
	struct snd_soc_codec *codec;
	struct wcd_cpe_core *core;
	struct cpe_svc_boot_event *boot_data;
	bool active_sessions;

	if (!param) {
		pr_err("%s: Invalid event\n", __func__);
		return;
	}

	codec = param->private_data;
	if (!codec) {
		pr_err("%s: Invalid handle to codec\n",
		       __func__);
		return;
	}

	core = wcd_cpe_get_core_handle(codec);
	if (!core) {
		pr_err("%s: Invalid handle to core\n",
		       __func__);
		return;
	}

	dev_dbg(core->dev,
		"%s: event = 0x%x, ssr_type = 0x%x\n",
		__func__, param->event, core->ssr_type);

	switch (param->event) {
	case CPE_SVC_BOOT:
		boot_data = (struct cpe_svc_boot_event *)
				param->payload;
		core->sfr_buf_addr = boot_data->debug_address;
		core->sfr_buf_size = boot_data->debug_buffer_size;
		dev_dbg(core->dev,
			"%s: CPE booted, sfr_addr = %d, sfr_size = %zu\n",
			__func__, core->sfr_buf_addr,
			core->sfr_buf_size);
		break;
	case CPE_SVC_ONLINE:
		core->ssr_type = WCD_CPE_ACTIVE;
		dev_dbg(core->dev, "%s CPE is now online\n",
			__func__);
		complete(&core->online_compl);
		break;
	case CPE_SVC_OFFLINE:
		/*
		 * offline can happen during normal shutdown,
		 * but we are interested in offline only during
		 * SSR.
		 */
		if (core->ssr_type != WCD_CPE_SSR_EVENT &&
		    core->ssr_type != WCD_CPE_BUS_DOWN_EVENT)
			break;

		active_sessions = wcd_cpe_lsm_session_active();
		wcd_cpe_change_online_state(core, 0);
		complete(&core->offline_compl);
		dev_err(core->dev, "%s: CPE is now offline\n",
			__func__);
		break;
	case CPE_SVC_CMI_CLIENTS_DEREG:
		/*
		 * Only when either CPE SSR is in progress,
		 * or the bus is down, we need to mark the CPE
		 * as ready. In all other cases, this event is
		 * ignored
		 */
		if (core->ssr_type == WCD_CPE_SSR_EVENT ||
		    core->ssr_type == WCD_CPE_BUS_DOWN_EVENT)
			wcd_cpe_set_and_complete(core,
						 WCD_CPE_BLK_READY);
		break;
	default:
		dev_err(core->dev,
			"%s: unhandled notification\n",
			__func__);
		break;
	}
}

/*
 * wcd_cpe_cleanup_irqs: free the irq resources required by cpe
 * @core: handle to the cpe core
 *
 * This API will free the IRQs for CPE but does not mask the
 * CPE interrupts. If masking is needed, it has to be done
 * explicitly by the caller.
 */
static void wcd_cpe_cleanup_irqs(struct wcd_cpe_core *core)
{
	struct snd_soc_codec *codec = core->codec;
	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
	struct wcd9xxx_core_resource *core_res = &wcd9xxx->core_res;

	wcd9xxx_free_irq(core_res,
			 core->irq_info.cpe_engine_irq,
			 core);
	wcd9xxx_free_irq(core_res,
			 core->irq_info.cpe_err_irq,
			 core);
}

/*
 * wcd_cpe_setup_irqs: setup the irqs for CPE
 * @core: handle to wcd_cpe_core
 * All interrupts needed for CPE are acquired. If any
 * request_irq fails, then all irqs are free'd
 */
static int wcd_cpe_setup_irqs(struct wcd_cpe_core *core)
{
	int ret;
	struct snd_soc_codec *codec = core->codec;
	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
	struct wcd9xxx_core_resource *core_res = &wcd9xxx->core_res;

	ret = wcd9xxx_request_irq(core_res,
				  core->irq_info.cpe_engine_irq,
				  svass_engine_irq, "SVASS_Engine", core);
	if (ret) {
		dev_err(core->dev,
			"%s: Failed to request svass engine irq\n",
			__func__);
		goto fail_engine_irq;
	}

	/* Make sure all error interrupts are cleared */
	if (CPE_ERR_IRQ_CB(core))
		core->cpe_cdc_cb->cpe_err_irq_control(
					core->codec,
					CPE_ERR_IRQ_CLEAR,
					NULL);

	/* Enable required error interrupts */
	if (CPE_ERR_IRQ_CB(core))
		core->cpe_cdc_cb->cpe_err_irq_control(
					core->codec,
					CPE_ERR_IRQ_UNMASK,
					NULL);

	ret = wcd9xxx_request_irq(core_res,
				  core->irq_info.cpe_err_irq,
				  svass_exception_irq, "SVASS_Exception", core);
	if (ret) {
		dev_err(core->dev,
			"%s: Failed to request svass err irq\n",
			__func__);
		goto fail_exception_irq;
	}

	return 0;

fail_exception_irq:
	wcd9xxx_free_irq(core_res,
			 core->irq_info.cpe_engine_irq, core);

fail_engine_irq:
	return ret;
}

static int wcd_cpe_get_cal_index(int32_t cal_type)
{
	int cal_index = -EINVAL;

	if (cal_type == ULP_AFE_CAL_TYPE)
		cal_index = WCD_CPE_LSM_CAL_AFE;
	else if (cal_type == ULP_LSM_CAL_TYPE)
		cal_index = WCD_CPE_LSM_CAL_LSM;
	else if (cal_type == ULP_LSM_TOPOLOGY_ID_CAL_TYPE)
		cal_index = WCD_CPE_LSM_CAL_TOPOLOGY_ID;
	else
		pr_err("%s: invalid cal_type %d\n",
		       __func__, cal_type);

	return cal_index;
}

static int wcd_cpe_alloc_cal(int32_t cal_type, size_t data_size, void *data)
{
	int ret = 0;
	int cal_index;

	cal_index = wcd_cpe_get_cal_index(cal_type);
	if (cal_index < 0) {
		pr_err("%s: invalid caltype %d\n",
		       __func__, cal_type);
		return -EINVAL;
	}

	ret = cal_utils_alloc_cal(data_size, data,
				  core_d->cal_data[cal_index],
				  0, NULL);
	if (ret < 0)
		pr_err("%s: cal_utils_alloc_block failed, ret = %d, cal type = %d!\n",
		       __func__, ret, cal_type);
	return ret;
}

static int wcd_cpe_dealloc_cal(int32_t cal_type, size_t data_size,
			       void *data)
{
	int ret = 0;
	int cal_index;

	cal_index = wcd_cpe_get_cal_index(cal_type);
	if (cal_index < 0) {
		pr_err("%s: invalid caltype %d\n",
		       __func__, cal_type);
		return -EINVAL;
	}

	ret = cal_utils_dealloc_cal(data_size, data,
				    core_d->cal_data[cal_index]);
	if (ret < 0)
		pr_err("%s: cal_utils_dealloc_block failed, ret = %d, cal type = %d!\n",
		       __func__, ret, cal_type);
	return ret;
}

static int wcd_cpe_set_cal(int32_t cal_type, size_t data_size, void *data)
{
	int ret = 0;
	int cal_index;

	cal_index = wcd_cpe_get_cal_index(cal_type);
	if (cal_index < 0) {
		pr_err("%s: invalid caltype %d\n",
		       __func__, cal_type);
		return -EINVAL;
	}

	ret = cal_utils_set_cal(data_size, data,
				core_d->cal_data[cal_index],
				0, NULL);
	if (ret < 0)
		pr_err("%s: cal_utils_set_cal failed, ret = %d, cal type = %d!\n",
		       __func__, ret, cal_type);
	return ret;
}

static int wcd_cpe_cal_init(struct wcd_cpe_core *core)
{
	int ret = 0;

	struct cal_type_info cal_type_info[] = {
		{{ULP_AFE_CAL_TYPE,
		  {wcd_cpe_alloc_cal, wcd_cpe_dealloc_cal, NULL,
		   wcd_cpe_set_cal, NULL, NULL} },
		 {NULL, NULL, cal_utils_match_buf_num} },

		{{ULP_LSM_CAL_TYPE,
		  {wcd_cpe_alloc_cal, wcd_cpe_dealloc_cal, NULL,
		   wcd_cpe_set_cal, NULL, NULL} },
		 {NULL, NULL, cal_utils_match_buf_num} },

		{{ULP_LSM_TOPOLOGY_ID_CAL_TYPE,
		  {wcd_cpe_alloc_cal, wcd_cpe_dealloc_cal, NULL,
		   wcd_cpe_set_cal, NULL, NULL} },
		 {NULL, NULL, cal_utils_match_buf_num} },
	};

	ret = cal_utils_create_cal_types(WCD_CPE_LSM_CAL_MAX,
					 core->cal_data,
					 cal_type_info);
	if (ret < 0)
		pr_err("%s: could not create cal type!\n",
		       __func__);
	return ret;
}

/*
 * wcd_cpe_vote: vote/unvote for CPE usage; CPE is enabled when the
 * first user votes it on and disabled when the last user votes it off.
 * @core: handle to cpe core structure
 * @enable: flag indicating vote (true) or unvote (false)
 */
static int wcd_cpe_vote(struct wcd_cpe_core *core,
			bool enable)
{
	int ret = 0;

	if (!core) {
		pr_err("%s: Invalid handle to core\n",
		       __func__);
		ret = -EINVAL;
		goto done;
	}

	dev_dbg(core->dev,
		"%s: enter, enable = %s, cpe_users = %u\n",
		__func__, (enable ? "true" : "false"),
		core->cpe_users);

	if (enable) {
		core->cpe_users++;
		if (core->cpe_users == 1) {
			ret = wcd_cpe_enable(core, enable);
			if (ret) {
				dev_err(core->dev,
					"%s: CPE enable failed, err = %d\n",
					__func__, ret);
				goto done;
			}
		} else {
			dev_dbg(core->dev,
				"%s: cpe already enabled, users = %u\n",
				__func__, core->cpe_users);
			goto done;
		}
	} else {
		core->cpe_users--;
		if (core->cpe_users == 0) {
			ret = wcd_cpe_enable(core, enable);
			if (ret) {
				dev_err(core->dev,
					"%s: CPE disable failed, err = %d\n",
					__func__, ret);
				goto done;
			}
		} else {
			dev_dbg(core->dev,
				"%s: %u valid users on cpe\n",
				__func__, core->cpe_users);
			goto done;
		}
	}

	dev_dbg(core->dev,
		"%s: leave, enable = %s, cpe_users = %u\n",
		__func__, (enable ? "true" : "false"),
		core->cpe_users);

done:
	return ret;
}

static int wcd_cpe_debugfs_init(struct wcd_cpe_core *core)
{
	int rc = 0;

	struct dentry *dir = debugfs_create_dir("wcd_cpe", NULL);

	if (IS_ERR_OR_NULL(dir)) {
		dir = NULL;
		rc = -ENODEV;
		goto err_create_dir;
	}

	if (!debugfs_create_u32("ramdump_enable", 0644,
				dir, &ramdump_enable)) {
		dev_err(core->dev, "%s: Failed to create debugfs node %s\n",
			__func__, "ramdump_enable");
		rc = -ENODEV;
		goto err_create_entry;
	}

	if (!debugfs_create_file("cpe_ftm_test_trigger", 0200,
				 dir, core, &cpe_ftm_test_trigger_fops)) {
		dev_err(core->dev, "%s: Failed to create debugfs node %s\n",
			__func__, "cpe_ftm_test_trigger");
		rc = -ENODEV;
		goto err_create_entry;
	}

	if (!debugfs_create_u32("cpe_ftm_test_status", 0444,
				dir, &cpe_ftm_test_status)) {
		dev_err(core->dev, "%s: Failed to create debugfs node %s\n",
			__func__, "cpe_ftm_test_status");
		rc = -ENODEV;
		goto err_create_entry;
	}

	/* All entries created; keep the directory on success */
	return 0;

err_create_entry:
	debugfs_remove(dir);

err_create_dir:
	return rc;
}

static ssize_t fw_name_show(struct wcd_cpe_core *core, char *buf)
{
	return snprintf(buf, WCD_CPE_IMAGE_FNAME_MAX, "%s",
			core->dyn_fname);
}

static ssize_t fw_name_store(struct wcd_cpe_core *core,
			     const char *buf, ssize_t count)
{
	int copy_count = count;
	const char *pos;

	pos = memchr(buf, '\n', count);
	if (pos)
		copy_count = pos - buf;

	if (copy_count > (WCD_CPE_IMAGE_FNAME_MAX - 1)) {
		dev_err(core->dev,
			"%s: Invalid length %d, max allowed %d\n",
			__func__, copy_count, WCD_CPE_IMAGE_FNAME_MAX - 1);
		return -EINVAL;
	}

	strlcpy(core->dyn_fname, buf, copy_count + 1);

	return count;
}

WCD_CPE_ATTR(fw_name, 0660, fw_name_show, fw_name_store);
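
/*
 * The attribute above is registered under the kobject created in
 * wcd_cpe_sysfs_init(), so it should appear as
 * /sys/kernel/wcd_cpe<id>/fw_name (derived from kernel_kobj and the
 * "wcd_cpe%d" directory name below). Hypothetical usage: writing e.g.
 * "cpe_new_img" to this file selects a new firmware name, which
 * wcd_cpe_check_new_image() picks up on the next CPE enable.
 */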

static ssize_t wcd_cpe_sysfs_show(struct kobject *kobj,
				  struct attribute *attr, char *buf)
{
	struct wcd_cpe_attribute *cpe_attr = to_wcd_cpe_attr(attr);
	struct wcd_cpe_core *core = kobj_to_cpe_core(kobj);
	ssize_t ret = -EINVAL;

	if (core && cpe_attr->show)
		ret = cpe_attr->show(core, buf);

	return ret;
}

static ssize_t wcd_cpe_sysfs_store(struct kobject *kobj,
				   struct attribute *attr, const char *buf,
				   size_t count)
{
	struct wcd_cpe_attribute *cpe_attr = to_wcd_cpe_attr(attr);
	struct wcd_cpe_core *core = kobj_to_cpe_core(kobj);
	ssize_t ret = -EINVAL;

	if (core && cpe_attr->store)
		ret = cpe_attr->store(core, buf, count);

	return ret;
}

static const struct sysfs_ops wcd_cpe_sysfs_ops = {
	.show = wcd_cpe_sysfs_show,
	.store = wcd_cpe_sysfs_store,
};

static struct kobj_type wcd_cpe_ktype = {
	.sysfs_ops = &wcd_cpe_sysfs_ops,
};

static int wcd_cpe_sysfs_init(struct wcd_cpe_core *core, int id)
{
	char sysfs_dir_name[WCD_CPE_SYSFS_DIR_MAX_LENGTH];
	int rc = 0;

	snprintf(sysfs_dir_name, WCD_CPE_SYSFS_DIR_MAX_LENGTH,
		 "%s%d", "wcd_cpe", id);

	rc = kobject_init_and_add(&core->cpe_kobj, &wcd_cpe_ktype,
				  kernel_kobj,
				  sysfs_dir_name);
	if (unlikely(rc)) {
		dev_err(core->dev,
			"%s: Failed to add kobject %s, err = %d\n",
			__func__, sysfs_dir_name, rc);
		goto done;
	}

	rc = sysfs_create_file(&core->cpe_kobj, &cpe_attr_fw_name.attr);
	if (rc) {
		dev_err(core->dev,
			"%s: Failed to add fw_name sysfs entry to %s\n",
			__func__, sysfs_dir_name);
		goto fail_create_file;
	}

	return 0;

fail_create_file:
	kobject_put(&core->cpe_kobj);
done:
	return rc;
}

static ssize_t cpe_ftm_test_trigger(struct file *file,
				    const char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct wcd_cpe_core *core = file->private_data;
	int ret = 0;

	/* Enable the clks for cpe */
	ret = wcd_cpe_enable_cpe_clks(core, true);
	if (ret < 0) {
		dev_err(core->dev,
			"%s: CPE clk enable failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	/* Get the CPE_STATUS */
	ret = cpe_svc_ftm_test(core->cpe_handle, &cpe_ftm_test_status);
	if (ret < 0) {
		dev_err(core->dev,
			"%s: CPE FTM test failed, err = %d\n",
			__func__, ret);
		if (ret == CPE_SVC_BUSY) {
			cpe_ftm_test_status = 1;
			ret = 0;
		}
	}

	/* Disable the clks for cpe */
	ret = wcd_cpe_enable_cpe_clks(core, false);
	if (ret < 0) {
		dev_err(core->dev,
			"%s: CPE clk disable failed, err = %d\n",
			__func__, ret);
	}

done:
	if (ret < 0)
		return ret;
	else
		return count;
}

static int wcd_cpe_validate_params(
	struct snd_soc_codec *codec,
	struct wcd_cpe_params *params)
{
	if (!codec) {
		pr_err("%s: Invalid codec\n", __func__);
		return -EINVAL;
	}

	if (!params) {
		dev_err(codec->dev,
			"%s: No params supplied for codec %s\n",
			__func__, codec->component.name);
		return -EINVAL;
	}

	if (!params->codec || !params->get_cpe_core ||
	    !params->cdc_cb) {
		dev_err(codec->dev,
			"%s: Invalid params for codec %s\n",
			__func__, codec->component.name);
		return -EINVAL;
	}

	return 0;
}

/*
 * wcd_cpe_init: Initialize CPE related structures
 * @img_fname: filename for firmware image
 * @codec: handle to codec requesting for image download
 * @params: parameter structure passed from caller
 *
 * This API will initialize the cpe core but will not
 * download the image or boot the cpe core.
 */
struct wcd_cpe_core *wcd_cpe_init(const char *img_fname,
				  struct snd_soc_codec *codec,
				  struct wcd_cpe_params *params)
{
	struct wcd_cpe_core *core;
	int ret = 0;
	struct snd_card *card = NULL;
	struct snd_info_entry *entry = NULL;
	char proc_name[WCD_CPE_STATE_MAX_LEN];
	const char *cpe_name = "cpe";
	const char *state_name = "_state";
	const struct cpe_svc_hw_cfg *hw_info;
	int id = 0;

	if (wcd_cpe_validate_params(codec, params))
		return NULL;

	core = kzalloc(sizeof(struct wcd_cpe_core), GFP_KERNEL);
	if (!core)
		return NULL;

	snprintf(core->fname, sizeof(core->fname), "%s", img_fname);
	strlcpy(core->dyn_fname, core->fname, WCD_CPE_IMAGE_FNAME_MAX);

	wcd_get_cpe_core = params->get_cpe_core;

	core->codec = params->codec;
	core->dev = params->codec->dev;
	core->cpe_debug_mode = params->dbg_mode;

	core->cdc_info.major_version = params->cdc_major_ver;
	core->cdc_info.minor_version = params->cdc_minor_ver;
	core->cdc_info.id = params->cdc_id;

	core->cpe_cdc_cb = params->cdc_cb;

	memcpy(&core->irq_info, &params->cdc_irq_info,
	       sizeof(core->irq_info));

	INIT_WORK(&core->load_fw_work, wcd_cpe_load_fw_image);
	INIT_WORK(&core->ssr_work, wcd_cpe_ssr_work);
	init_completion(&core->offline_compl);
	init_completion(&core->ready_compl);
	init_completion(&core->online_compl);
	init_waitqueue_head(&core->ssr_entry.offline_poll_wait);
	mutex_init(&core->ssr_lock);
	core->cpe_users = 0;
	core->cpe_clk_ref = 0;

	/*
	 * By default, during probe, it is assumed that
	 * both CPE hardware block and underlying bus to codec
	 * are ready
	 */
	core->ready_status = WCD_CPE_READY_TO_DLOAD;

	core->cpe_handle = cpe_svc_initialize(NULL, &core->cdc_info,
					      params->cpe_svc_params);
	if (!core->cpe_handle) {
		dev_err(core->dev,
			"%s: failed to initialize cpe services\n",
			__func__);
		goto fail_cpe_initialize;
	}

	core->cpe_reg_handle = cpe_svc_register(core->cpe_handle,
					wcd_cpe_svc_event_cb,
					CPE_SVC_ONLINE | CPE_SVC_OFFLINE |
					CPE_SVC_BOOT |
					CPE_SVC_CMI_CLIENTS_DEREG,
					"codec cpe handler");
	if (!core->cpe_reg_handle) {
		dev_err(core->dev,
			"%s: failed to register cpe service\n",
			__func__);
		goto fail_cpe_register;
	}

	card = codec->component.card->snd_card;
	snprintf(proc_name, (sizeof("cpe") + sizeof("_state") +
			     sizeof(id) - 2), "%s%d%s", cpe_name, id, state_name);
	entry = snd_info_create_card_entry(card, proc_name,
					   card->proc_root);
	if (entry) {
		core->ssr_entry.entry = entry;
		core->ssr_entry.offline = 1;
		entry->size = WCD_CPE_STATE_MAX_LEN;
		entry->content = SNDRV_INFO_CONTENT_DATA;
		entry->c.ops = &wcd_cpe_state_proc_ops;
		entry->private_data = core;
		ret = snd_info_register(entry);
		if (ret < 0) {
			dev_err(core->dev,
				"%s: snd_info_register failed (%d)\n",
				__func__, ret);
			snd_info_free_entry(entry);
			entry = NULL;
		}
	} else {
		dev_err(core->dev,
			"%s: Failed to create CPE SSR status entry\n",
			__func__);
		/*
		 * Even if SSR entry creation fails, continue
		 * with image download
		 */
	}

	core_d = core;
	ret = wcd_cpe_cal_init(core);
	if (ret < 0) {
		dev_err(core->dev,
			"%s: CPE calibration init failed, err = %d\n",
			__func__, ret);
		goto fail_cpe_reset;
	}

	wcd_cpe_debugfs_init(core);

	wcd_cpe_sysfs_init(core, id);

	hw_info = cpe_svc_get_hw_cfg(core->cpe_handle);
	if (!hw_info) {
		dev_err(core->dev,
			"%s: hw info not available\n",
			__func__);
		goto schedule_dload_work;
	} else {
		core->hw_info.dram_offset = hw_info->DRAM_offset;
		core->hw_info.dram_size = hw_info->DRAM_size;
		core->hw_info.iram_offset = hw_info->IRAM_offset;
		core->hw_info.iram_size = hw_info->IRAM_size;
	}

	/* Setup the ramdump device and buffer */
	core->cpe_ramdump_dev = create_ramdump_device("cpe",
						      core->dev);
	if (!core->cpe_ramdump_dev) {
		dev_err(core->dev,
			"%s: Failed to create ramdump device\n",
			__func__);
		goto schedule_dload_work;
	}

	arch_setup_dma_ops(core->dev, 0, 0, NULL, 0);
	core->cpe_dump_v_addr = dma_alloc_coherent(core->dev,
						   core->hw_info.dram_size,
						   &core->cpe_dump_addr,
						   GFP_KERNEL);
	if (!core->cpe_dump_v_addr) {
		dev_err(core->dev,
			"%s: Failed to alloc memory for cpe dump, size = %zd\n",
			__func__, core->hw_info.dram_size);
		goto schedule_dload_work;
	} else {
		memset(core->cpe_dump_v_addr, 0, core->hw_info.dram_size);
	}

schedule_dload_work:
	core->ssr_type = WCD_CPE_INITIALIZED;
	schedule_work(&core->load_fw_work);
	return core;

fail_cpe_reset:
	cpe_svc_deregister(core->cpe_handle, core->cpe_reg_handle);

fail_cpe_register:
	cpe_svc_deinitialize(core->cpe_handle);

fail_cpe_initialize:
	kfree(core);
	return NULL;
}
EXPORT_SYMBOL(wcd_cpe_init);
2067
2068/*
2069 * wcd_cpe_cmi_lsm_callback: callback called from cpe services
2070 * to notify command response for lsm
2071 * service
2072 * @param: param containing the response code and status
2073 *
2074 * This callback is registered with cpe services while registering
2075 * the LSM service
2076 */
2077static void wcd_cpe_cmi_lsm_callback(const struct cmi_api_notification *param)
2078{
2079 struct cmi_hdr *hdr;
2080 struct cpe_lsm_session *lsm_session;
2081 u8 session_id;
2082
2083 if (!param) {
2084 pr_err("%s: param is null\n", __func__);
2085 return;
2086 }
2087
2088 if (param->event != CMI_API_MSG) {
2089 pr_err("%s: unhandled event 0x%x\n", __func__, param->event);
2090 return;
2091 }
2092
2093 hdr = (struct cmi_hdr *) param->message;
2094 session_id = CMI_HDR_GET_SESSION_ID(hdr);
2095
2096 if (session_id > WCD_CPE_LSM_MAX_SESSIONS) {
2097 pr_err("%s: invalid lsm session id = %d\n",
2098 __func__, session_id);
2099 return;
2100 }
2101
	lsm_session = lsm_sessions[session_id];
	if (!lsm_session) {
		pr_err("%s: session %u not allocated\n",
			__func__, session_id);
		return;
	}

2104 if (hdr->opcode == CPE_CMI_BASIC_RSP_OPCODE) {
2105
2106 u8 *payload = ((u8 *)param->message) + (sizeof(struct cmi_hdr));
2107 u8 result = payload[0];
2108
2109 lsm_session->cmd_err_code = result;
2110 complete(&lsm_session->cmd_comp);
2111
2112 } else if (hdr->opcode == CPE_LSM_SESSION_CMDRSP_SHARED_MEM_ALLOC) {
2113
2114 struct cpe_cmdrsp_shmem_alloc *cmdrsp_shmem_alloc =
2115 (struct cpe_cmdrsp_shmem_alloc *) param->message;
2116
2117 if (cmdrsp_shmem_alloc->addr == 0) {
2118 pr_err("%s: Failed LSM shared mem alloc\n", __func__);
2119 lsm_session->cmd_err_code = CMI_SHMEM_ALLOC_FAILED;
2120
2121 } else {
2122
2123 pr_debug("%s LSM shared mem addr = 0x%x\n",
2124 __func__, cmdrsp_shmem_alloc->addr);
2125 lsm_session->lsm_mem_handle = cmdrsp_shmem_alloc->addr;
2126 lsm_session->cmd_err_code = 0;
2127 }
2128
2129 complete(&lsm_session->cmd_comp);
2130
2131 } else if (hdr->opcode == CPE_LSM_SESSION_EVENT_DETECTION_STATUS_V2) {
2132
2133 struct cpe_lsm_event_detect_v2 *event_detect_v2 =
2134 (struct cpe_lsm_event_detect_v2 *) param->message;
2135
2136 if (!lsm_session->priv_d) {
2137 pr_err("%s: private data is not present\n",
2138 __func__);
2139 return;
2140 }
2141
2142 pr_debug("%s: event payload, status = %u, size = %u\n",
2143 __func__, event_detect_v2->detection_status,
2144 event_detect_v2->size);
2145
2146 if (lsm_session->event_cb)
2147 lsm_session->event_cb(
2148 lsm_session->priv_d,
2149 event_detect_v2->detection_status,
2150 event_detect_v2->size,
2151 event_detect_v2->payload);
2152 }
2153}
2154
2155/*
2156 * wcd_cpe_cmi_send_lsm_msg: send a message to lsm service
2157 * @core: handle to cpe core
2158 * @session: session on which to send the message
2159 * @message: actual message containing header and payload
2160 *
 * Sends the message to the lsm service for the specified session
 * and waits for a response to the message.
 * Must be called with the session specific mutex held.
2164 */
2165static int wcd_cpe_cmi_send_lsm_msg(
2166 struct wcd_cpe_core *core,
2167 struct cpe_lsm_session *session,
2168 void *message)
2169{
2170 int ret = 0;
2171 struct cmi_hdr *hdr = message;
2172
2173 pr_debug("%s: sending message with opcode 0x%x\n",
2174 __func__, hdr->opcode);
2175
2176 if (unlikely(!wcd_cpe_is_online_state(core))) {
2177 dev_err(core->dev,
2178 "%s: MSG not sent, CPE offline\n",
2179 __func__);
2180 goto done;
2181 }
2182
2183 if (CMI_HDR_GET_OBM_FLAG(hdr))
2184 wcd_cpe_bus_vote_max_bw(core, true);
2185
2186 reinit_completion(&session->cmd_comp);
2187 ret = cmi_send_msg(message);
2188 if (ret) {
2189 pr_err("%s: msg opcode (0x%x) send failed (%d)\n",
2190 __func__, hdr->opcode, ret);
2191 goto rel_bus_vote;
2192 }
2193
2194 ret = wait_for_completion_timeout(&session->cmd_comp,
2195 CMI_CMD_TIMEOUT);
2196 if (ret > 0) {
2197 pr_debug("%s: command 0x%x, received response 0x%x\n",
2198 __func__, hdr->opcode, session->cmd_err_code);
2199 if (session->cmd_err_code == CMI_SHMEM_ALLOC_FAILED)
2200 session->cmd_err_code = CPE_ENOMEMORY;
2201 if (session->cmd_err_code > 0)
2202 pr_err("%s: CPE returned error[%s]\n",
2203 __func__, cpe_err_get_err_str(
2204 session->cmd_err_code));
2205 ret = cpe_err_get_lnx_err_code(session->cmd_err_code);
2206 goto rel_bus_vote;
2207 } else {
2208 pr_err("%s: command (0x%x) send timed out\n",
2209 __func__, hdr->opcode);
2210 ret = -ETIMEDOUT;
2211 goto rel_bus_vote;
2212 }
2213
2214
2215rel_bus_vote:
2216
2217 if (CMI_HDR_GET_OBM_FLAG(hdr))
2218 wcd_cpe_bus_vote_max_bw(core, false);
2219
2220done:
2221 return ret;
2222}
2223
2224
2225/*
2226 * fill_cmi_header: fill the cmi header with specified values
2227 *
2228 * @hdr: header to be updated with values
 * @session_id: session id for the header;
 *		for the AFE service this is the port_id
 * @service_id: id of the service (AFE, LSM, etc.)
2232 * @version: update the version field in header
2233 * @payload_size: size of the payload following after header
2234 * @opcode: opcode of the message
2235 * @obm_flag: indicates if this header is for obm message
2236 *
2237 */
2238static int fill_cmi_header(struct cmi_hdr *hdr,
2239 u8 session_id, u8 service_id,
2240 bool version, u8 payload_size,
2241 u16 opcode, bool obm_flag)
2242{
2243 /* sanitize the data */
2244 if (!IS_VALID_SESSION_ID(session_id) ||
2245 !IS_VALID_SERVICE_ID(service_id) ||
2246 !IS_VALID_PLD_SIZE(payload_size)) {
2247 pr_err("Invalid header creation request\n");
2248 return -EINVAL;
2249 }
2250
2251 CMI_HDR_SET_SESSION(hdr, session_id);
2252 CMI_HDR_SET_SERVICE(hdr, service_id);
2253 if (version)
2254 CMI_HDR_SET_VERSION(hdr, 1);
2255 else
2256 CMI_HDR_SET_VERSION(hdr, 0);
2257
2258 CMI_HDR_SET_PAYLOAD_SIZE(hdr, payload_size);
2259
2260 hdr->opcode = opcode;
2261
2262 if (obm_flag)
2263 CMI_HDR_SET_OBM(hdr, CMI_OBM_FLAG_OUT_BAND);
2264 else
2265 CMI_HDR_SET_OBM(hdr, CMI_OBM_FLAG_IN_BAND);
2266
2267 return 0;
2268}
2269
2270/*
2271 * fill_lsm_cmd_header_v0_inband:
2272 * Given the header, fill the header with information
2273 * for lsm service, version 0 and inband message
2274 * @hdr: the cmi header to be filled.
2275 * @session_id: ID for the lsm session
2276 * @payload_size: size for cmi message payload
2277 * @opcode: opcode for cmi message
2278 */
2279static int fill_lsm_cmd_header_v0_inband(struct cmi_hdr *hdr,
2280 u8 session_id, u8 payload_size, u16 opcode)
2281{
2282 return fill_cmi_header(hdr, session_id,
2283 CMI_CPE_LSM_SERVICE_ID, false,
2284 payload_size, opcode, false);
2285}
2286
2287/*
2288 * wcd_cpe_is_valid_lsm_session:
 * Check session parameters to identify validity for the session
2290 * @core: handle to cpe core
2291 * @session: handle to the lsm session
2292 * @func: invoking function to be printed in error logs
2293 */
2294static int wcd_cpe_is_valid_lsm_session(struct wcd_cpe_core *core,
2295 struct cpe_lsm_session *session,
2296 const char *func)
2297{
2298 if (unlikely(IS_ERR_OR_NULL(core))) {
2299 pr_err("%s: invalid handle to core\n",
2300 func);
2301 return -EINVAL;
2302 }
2303
2304 if (unlikely(IS_ERR_OR_NULL(session))) {
2305 dev_err(core->dev, "%s: invalid session\n",
2306 func);
2307 return -EINVAL;
2308 }
2309
2310 if (session->id > WCD_CPE_LSM_MAX_SESSIONS) {
2311 dev_err(core->dev, "%s: invalid session id (%u)\n",
2312 func, session->id);
2313 return -EINVAL;
2314 }
2315
2316 dev_dbg(core->dev, "%s: session_id = %u\n",
2317 func, session->id);
2318 return 0;
2319}
2320
2321static int wcd_cpe_cmd_lsm_open_tx_v2(
2322 struct wcd_cpe_core *core,
2323 struct cpe_lsm_session *session)
2324{
2325 struct cpe_lsm_cmd_open_tx_v2 cmd_open_tx_v2;
2326 struct cal_block_data *top_cal = NULL;
2327 struct audio_cal_info_lsm_top *lsm_top;
2328 int ret = 0;
2329
2330 ret = wcd_cpe_is_valid_lsm_session(core, session,
2331 __func__);
2332 if (ret)
2333 return ret;
2334
2335 if (core->cal_data[WCD_CPE_LSM_CAL_TOPOLOGY_ID] == NULL) {
2336 dev_err(core->dev,
2337 "%s: LSM_TOPOLOGY cal not allocated!\n",
2338 __func__);
2339 return -EINVAL;
2340 }
2341
2342 mutex_lock(&core->cal_data[WCD_CPE_LSM_CAL_TOPOLOGY_ID]->lock);
2343 top_cal = cal_utils_get_only_cal_block(
2344 core->cal_data[WCD_CPE_LSM_CAL_TOPOLOGY_ID]);
2345 if (!top_cal) {
2346 dev_err(core->dev,
2347 "%s: Failed to get LSM TOPOLOGY cal block\n",
2348 __func__);
2349 ret = -EINVAL;
2350 goto unlock_cal_mutex;
2351 }
2352
2353 lsm_top = (struct audio_cal_info_lsm_top *)
2354 top_cal->cal_info;
2355
2356 if (!lsm_top) {
2357 dev_err(core->dev,
2358 "%s: cal_info for LSM_TOPOLOGY not found\n",
2359 __func__);
2360 ret = -EINVAL;
2361 goto unlock_cal_mutex;
2362 }
2363
2364 dev_dbg(core->dev,
2365 "%s: topology_id = 0x%x, acdb_id = 0x%x, app_type = 0x%x\n",
2366 __func__, lsm_top->topology, lsm_top->acdb_id,
2367 lsm_top->app_type);
2368
2369 if (lsm_top->topology == 0) {
2370 dev_err(core->dev,
2371 "%s: topology id not sent for app_type 0x%x\n",
2372 __func__, lsm_top->app_type);
2373 ret = -EINVAL;
2374 goto unlock_cal_mutex;
2375 }
2376
2377 WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
2378
2379 memset(&cmd_open_tx_v2, 0, sizeof(struct cpe_lsm_cmd_open_tx_v2));
2380 if (fill_lsm_cmd_header_v0_inband(&cmd_open_tx_v2.hdr,
2381 session->id, OPEN_V2_CMD_PAYLOAD_SIZE,
2382 CPE_LSM_SESSION_CMD_OPEN_TX_V2)) {
2383 ret = -EINVAL;
2384 goto end_ret;
2385 }
2386
2387 cmd_open_tx_v2.topology_id = lsm_top->topology;
2388 ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_open_tx_v2);
2389 if (ret)
2390 dev_err(core->dev,
2391 "%s: failed to send open_tx_v2 cmd, err = %d\n",
2392 __func__, ret);
2393 else
2394 session->is_topology_used = true;
2395end_ret:
2396 WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
2397
2398unlock_cal_mutex:
2399 mutex_unlock(&core->cal_data[WCD_CPE_LSM_CAL_TOPOLOGY_ID]->lock);
2400 return ret;
2401}
2402
2403/*
2404 * wcd_cpe_cmd_lsm_open_tx: compose and send lsm open command
2405 * @core_handle: handle to cpe core
2406 * @session: session for which the command needs to be sent
2407 * @app_id: application id part of the command
2408 * @sample_rate: sample rate for this session
2409 */
2410static int wcd_cpe_cmd_lsm_open_tx(void *core_handle,
2411 struct cpe_lsm_session *session,
2412 u16 app_id, u16 sample_rate)
2413{
2414 struct cpe_lsm_cmd_open_tx cmd_open_tx;
2415 struct wcd_cpe_core *core = core_handle;
2416 int ret = 0;
2417
2418 ret = wcd_cpe_is_valid_lsm_session(core, session,
2419 __func__);
2420 if (ret)
2421 return ret;
2422
2423 /* Try to open with topology first */
2424 ret = wcd_cpe_cmd_lsm_open_tx_v2(core, session);
2425 if (!ret)
2426 goto done;
2427
2428 dev_dbg(core->dev, "%s: Try open_tx without topology\n",
2429 __func__);
2430
2431 WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
2432
2433 memset(&cmd_open_tx, 0, sizeof(struct cpe_lsm_cmd_open_tx));
2434 if (fill_lsm_cmd_header_v0_inband(&cmd_open_tx.hdr,
2435 session->id, OPEN_CMD_PAYLOAD_SIZE,
2436 CPE_LSM_SESSION_CMD_OPEN_TX)) {
2437 ret = -EINVAL;
2438 goto end_ret;
2439 }
2440
2441 cmd_open_tx.app_id = app_id;
2442 cmd_open_tx.sampling_rate = sample_rate;
2443
2444 ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_open_tx);
2445 if (ret)
2446 dev_err(core->dev,
2447 "%s: failed to send open_tx cmd, err = %d\n",
2448 __func__, ret);
2449end_ret:
2450 WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
2451done:
2452 return ret;
2453}
2454
2455/*
 * wcd_cpe_cmd_lsm_close_tx: compose and send lsm close command
2457 * @core_handle: handle to cpe core
2458 * @session: session for which the command needs to be sent
2459 */
2460static int wcd_cpe_cmd_lsm_close_tx(void *core_handle,
2461 struct cpe_lsm_session *session)
2462{
2463 struct cmi_hdr cmd_close_tx;
2464 struct wcd_cpe_core *core = core_handle;
2465 int ret = 0;
2466
2467 ret = wcd_cpe_is_valid_lsm_session(core, session,
2468 __func__);
2469 if (ret)
2470 return ret;
2471
2472 WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
2473
2474 memset(&cmd_close_tx, 0, sizeof(cmd_close_tx));
2475 if (fill_lsm_cmd_header_v0_inband(&cmd_close_tx, session->id,
2476 0, CPE_LSM_SESSION_CMD_CLOSE_TX)) {
2477 ret = -EINVAL;
2478 goto end_ret;
2479 }
2480
2481 ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_close_tx);
2482 if (ret)
2483 dev_err(core->dev,
2484 "%s: lsm close_tx cmd failed, err = %d\n",
2485 __func__, ret);
2486 else
2487 session->is_topology_used = false;
2488end_ret:
2489 WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
2490 return ret;
2491}
2492
2493/*
2494 * wcd_cpe_cmd_shmem_alloc: compose and send lsm shared
2495 * memory allocation command
2496 * @core_handle: handle to cpe core
2497 * @session: session for which the command needs to be sent
2498 * @size: size of memory to be allocated
2499 */
2500static int wcd_cpe_cmd_lsm_shmem_alloc(void *core_handle,
2501 struct cpe_lsm_session *session,
2502 u32 size)
2503{
2504 struct cpe_cmd_shmem_alloc cmd_shmem_alloc;
2505 struct wcd_cpe_core *core = core_handle;
2506 int ret = 0;
2507
2508 ret = wcd_cpe_is_valid_lsm_session(core, session,
2509 __func__);
2510 if (ret)
2511 return ret;
2512
2513 WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
2514
2515 memset(&cmd_shmem_alloc, 0, sizeof(cmd_shmem_alloc));
2516 if (fill_lsm_cmd_header_v0_inband(&cmd_shmem_alloc.hdr, session->id,
2517 SHMEM_ALLOC_CMD_PLD_SIZE,
2518 CPE_LSM_SESSION_CMD_SHARED_MEM_ALLOC)) {
2519 ret = -EINVAL;
2520 goto end_ret;
2521 }
2522
2523 cmd_shmem_alloc.size = size;
2524 ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_shmem_alloc);
2525 if (ret)
2526 dev_err(core->dev,
2527 "%s: lsm_shmem_alloc cmd send fail, %d\n",
2528 __func__, ret);
2529end_ret:
2530 WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
2531 return ret;
2532}
2533
2534/*
2535 * wcd_cpe_cmd_lsm_shmem_dealloc: deallocate the shared memory
2536 * for the specified session
2537 * @core_handle: handle to cpe core
2538 * @session: session for which memory needs to be deallocated.
2539 */
2540static int wcd_cpe_cmd_lsm_shmem_dealloc(void *core_handle,
2541 struct cpe_lsm_session *session)
2542{
2543 struct cpe_cmd_shmem_dealloc cmd_dealloc;
2544 struct wcd_cpe_core *core = core_handle;
2545 int ret = 0;
2546
2547 ret = wcd_cpe_is_valid_lsm_session(core, session,
2548 __func__);
2549 if (ret)
2550 return ret;
2551
2552 WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
2553
2554 memset(&cmd_dealloc, 0, sizeof(cmd_dealloc));
2555 if (fill_lsm_cmd_header_v0_inband(&cmd_dealloc.hdr, session->id,
2556 SHMEM_DEALLOC_CMD_PLD_SIZE,
2557 CPE_LSM_SESSION_CMD_SHARED_MEM_DEALLOC)) {
2558 ret = -EINVAL;
2559 goto end_ret;
2560 }
2561
2562 cmd_dealloc.addr = session->lsm_mem_handle;
2563 ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_dealloc);
2564 if (ret) {
2565 dev_err(core->dev,
2566 "%s: lsm_shmem_dealloc cmd failed, rc %d\n",
2567 __func__, ret);
2568 goto end_ret;
2569 }
2570
2571 memset(&session->lsm_mem_handle, 0,
2572 sizeof(session->lsm_mem_handle));
2573
2574end_ret:
2575 WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
2576 return ret;
2577}
2578
2579/*
2580 * wcd_cpe_send_lsm_cal: send the calibration for lsm service
2581 * from acdb to the cpe
2582 * @core: handle to cpe core
2583 * @session: session for which the calibration needs to be set.
2584 */
2585static int wcd_cpe_send_lsm_cal(
2586 struct wcd_cpe_core *core,
2587 struct cpe_lsm_session *session)
2588{
2589
2590 u8 *msg_pld;
2591 struct cmi_hdr *hdr;
2592 struct cal_block_data *lsm_cal = NULL;
2593 void *inb_msg;
2594 int rc = 0;
2595
2596 if (core->cal_data[WCD_CPE_LSM_CAL_LSM] == NULL) {
2597 pr_err("%s: LSM cal not allocated!\n", __func__);
2598 return -EINVAL;
2599 }
2600
2601 mutex_lock(&core->cal_data[WCD_CPE_LSM_CAL_LSM]->lock);
2602 lsm_cal = cal_utils_get_only_cal_block(
2603 core->cal_data[WCD_CPE_LSM_CAL_LSM]);
2604 if (!lsm_cal) {
2605 pr_err("%s: failed to get lsm cal block\n", __func__);
2606 rc = -EINVAL;
2607 goto unlock_cal_mutex;
2608 }
2609
2610 if (lsm_cal->cal_data.size == 0) {
2611 dev_dbg(core->dev, "%s: No LSM cal to send\n",
2612 __func__);
2613 rc = 0;
2614 goto unlock_cal_mutex;
2615 }
2616
2617 inb_msg = kzalloc(sizeof(struct cmi_hdr) + lsm_cal->cal_data.size,
2618 GFP_KERNEL);
2619 if (!inb_msg) {
2620 rc = -ENOMEM;
2621 goto unlock_cal_mutex;
2622 }
2623
2624 hdr = (struct cmi_hdr *) inb_msg;
2625
2626 rc = fill_lsm_cmd_header_v0_inband(hdr, session->id,
2627 lsm_cal->cal_data.size,
2628 CPE_LSM_SESSION_CMD_SET_PARAMS);
2629 if (rc) {
2630 pr_err("%s: invalid params for header, err = %d\n",
2631 __func__, rc);
2632 goto free_msg;
2633 }
2634
2635 msg_pld = ((u8 *) inb_msg) + sizeof(struct cmi_hdr);
2636 memcpy(msg_pld, lsm_cal->cal_data.kvaddr,
2637 lsm_cal->cal_data.size);
2638
2639 rc = wcd_cpe_cmi_send_lsm_msg(core, session, inb_msg);
2640 if (rc)
2641 pr_err("%s: acdb lsm_params send failed, err = %d\n",
2642 __func__, rc);
2643
2644free_msg:
2645 kfree(inb_msg);
2646
2647unlock_cal_mutex:
2648 mutex_unlock(&core->cal_data[WCD_CPE_LSM_CAL_LSM]->lock);
	return rc;
}
2652
2653static void wcd_cpe_set_param_data(struct cpe_param_data *param_d,
2654 struct cpe_lsm_ids *ids, u32 p_size,
2655 u32 set_param_cmd)
2656{
2657 param_d->module_id = ids->module_id;
2658 param_d->param_id = ids->param_id;
2659
2660 switch (set_param_cmd) {
2661 case CPE_LSM_SESSION_CMD_SET_PARAMS_V2:
2662 param_d->p_size.param_size = p_size;
2663 break;
2664 case CPE_LSM_SESSION_CMD_SET_PARAMS:
2665 default:
2666 param_d->p_size.sr.param_size =
2667 (u16) p_size;
2668 param_d->p_size.sr.reserved = 0;
2669 break;
2670 }
2671}
2672
2673static int wcd_cpe_send_param_epd_thres(struct wcd_cpe_core *core,
2674 struct cpe_lsm_session *session,
2675 void *data, struct cpe_lsm_ids *ids)
2676{
2677 struct snd_lsm_ep_det_thres *ep_det_data;
2678 struct cpe_lsm_param_epd_thres epd_cmd;
2679 struct cmi_hdr *msg_hdr = &epd_cmd.hdr;
2680 struct cpe_param_data *param_d =
2681 &epd_cmd.param;
2682 int rc;
2683
2684 memset(&epd_cmd, 0, sizeof(epd_cmd));
2685 ep_det_data = (struct snd_lsm_ep_det_thres *) data;
2686 if (fill_lsm_cmd_header_v0_inband(msg_hdr,
2687 session->id,
2688 CPE_CMD_EPD_THRES_PLD_SIZE,
2689 CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
2690 rc = -EINVAL;
2691 goto err_ret;
2692 }
2693
2694 wcd_cpe_set_param_data(param_d, ids,
2695 CPE_EPD_THRES_PARAM_SIZE,
2696 CPE_LSM_SESSION_CMD_SET_PARAMS_V2);
2697
2698 epd_cmd.minor_version = 1;
2699 epd_cmd.epd_begin = ep_det_data->epd_begin;
2700 epd_cmd.epd_end = ep_det_data->epd_end;
2701
2702 WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
2703 rc = wcd_cpe_cmi_send_lsm_msg(core, session, &epd_cmd);
2704 if (unlikely(rc))
2705 dev_err(core->dev,
2706 "%s: set_param(EPD Threshold) failed, rc %dn",
2707 __func__, rc);
2708 WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
2709err_ret:
2710 return rc;
2711}
2712
2713static int wcd_cpe_send_param_opmode(struct wcd_cpe_core *core,
2714 struct cpe_lsm_session *session,
2715 void *data, struct cpe_lsm_ids *ids)
2716{
2717 struct snd_lsm_detect_mode *opmode_d;
2718 struct cpe_lsm_param_opmode opmode_cmd;
2719 struct cmi_hdr *msg_hdr = &opmode_cmd.hdr;
2720 struct cpe_param_data *param_d =
2721 &opmode_cmd.param;
2722 int rc;
2723
2724 memset(&opmode_cmd, 0, sizeof(opmode_cmd));
2725 opmode_d = (struct snd_lsm_detect_mode *) data;
2726 if (fill_lsm_cmd_header_v0_inband(msg_hdr,
2727 session->id,
2728 CPE_CMD_OPMODE_PLD_SIZE,
2729 CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
2730 rc = -EINVAL;
2731 goto err_ret;
2732 }
2733
2734 wcd_cpe_set_param_data(param_d, ids,
2735 CPE_OPMODE_PARAM_SIZE,
2736 CPE_LSM_SESSION_CMD_SET_PARAMS_V2);
2737
2738 opmode_cmd.minor_version = 1;
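	/*
	 * Mode encoding (mirrors the assignments below): 1 selects
	 * keyword-only detection, 3 selects keyword + user detection,
	 * and bit 2 (0x04) additionally enables failure detection.
	 */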
2739 if (opmode_d->mode == LSM_MODE_KEYWORD_ONLY_DETECTION)
2740 opmode_cmd.mode = 1;
2741 else
2742 opmode_cmd.mode = 3;
2743
2744 if (opmode_d->detect_failure)
2745 opmode_cmd.mode |= 0x04;
2746
2747 opmode_cmd.reserved = 0;
2748
2749 WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
2750 rc = wcd_cpe_cmi_send_lsm_msg(core, session, &opmode_cmd);
2751 if (unlikely(rc))
2752 dev_err(core->dev,
2753 "%s: set_param(operation_mode) failed, rc %dn",
2754 __func__, rc);
2755 WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
2756err_ret:
2757 return rc;
2758}
2759
2760static int wcd_cpe_send_param_gain(struct wcd_cpe_core *core,
2761 struct cpe_lsm_session *session,
2762 void *data, struct cpe_lsm_ids *ids)
2763{
2764 struct snd_lsm_gain *gain_d;
2765 struct cpe_lsm_param_gain gain_cmd;
2766 struct cmi_hdr *msg_hdr = &gain_cmd.hdr;
2767 struct cpe_param_data *param_d =
2768 &gain_cmd.param;
2769 int rc;
2770
2771 memset(&gain_cmd, 0, sizeof(gain_cmd));
2772 gain_d = (struct snd_lsm_gain *) data;
2773 if (fill_lsm_cmd_header_v0_inband(msg_hdr,
2774 session->id,
2775 CPE_CMD_GAIN_PLD_SIZE,
2776 CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
2777 rc = -EINVAL;
2778 goto err_ret;
2779 }
2780
2781 wcd_cpe_set_param_data(param_d, ids,
2782 CPE_GAIN_PARAM_SIZE,
2783 CPE_LSM_SESSION_CMD_SET_PARAMS_V2);
2784
2785 gain_cmd.minor_version = 1;
2786 gain_cmd.gain = gain_d->gain;
2787 gain_cmd.reserved = 0;
2788
2789 WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
2790 rc = wcd_cpe_cmi_send_lsm_msg(core, session, &gain_cmd);
2791 if (unlikely(rc))
2792 dev_err(core->dev,
2793 "%s: set_param(lsm_gain) failed, rc %dn",
2794 __func__, rc);
2795 WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
2796err_ret:
2797 return rc;
2798}
2799
2800static int wcd_cpe_send_param_connectport(struct wcd_cpe_core *core,
2801 struct cpe_lsm_session *session,
2802 void *data, struct cpe_lsm_ids *ids, u16 port_id)
2803{
2804 struct cpe_lsm_param_connectport con_port_cmd;
2805 struct cmi_hdr *msg_hdr = &con_port_cmd.hdr;
2806 struct cpe_param_data *param_d =
2807 &con_port_cmd.param;
2808 int rc;
2809
2810 memset(&con_port_cmd, 0, sizeof(con_port_cmd));
2811 if (fill_lsm_cmd_header_v0_inband(msg_hdr,
2812 session->id,
2813 CPE_CMD_CONNECTPORT_PLD_SIZE,
2814 CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
2815 rc = -EINVAL;
2816 goto err_ret;
2817 }
2818
2819 wcd_cpe_set_param_data(param_d, ids,
2820 CPE_CONNECTPORT_PARAM_SIZE,
2821 CPE_LSM_SESSION_CMD_SET_PARAMS_V2);
2822
2823 con_port_cmd.minor_version = 1;
2824 con_port_cmd.afe_port_id = port_id;
2825 con_port_cmd.reserved = 0;
2826
2827 WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
2828 rc = wcd_cpe_cmi_send_lsm_msg(core, session, &con_port_cmd);
2829 if (unlikely(rc))
2830 dev_err(core->dev,
2831 "%s: set_param(connect_port) failed, rc %dn",
2832 __func__, rc);
2833 WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
2834err_ret:
2835 return rc;
2836}
2837
2838static int wcd_cpe_send_param_conf_levels(
2839 struct wcd_cpe_core *core,
2840 struct cpe_lsm_session *session,
2841 struct cpe_lsm_ids *ids)
2842{
2843 struct cpe_lsm_conf_level conf_level_data;
2844 struct cmi_hdr *hdr = &(conf_level_data.hdr);
2845 struct cpe_param_data *param_d = &(conf_level_data.param);
2846 u8 pld_size = 0;
2847 u8 pad_bytes = 0;
2848 void *message;
2849 int ret = 0;
2850
2851 memset(&conf_level_data, 0, sizeof(conf_level_data));
2852
2853 pld_size = (sizeof(struct cpe_lsm_conf_level) - sizeof(struct cmi_hdr));
2854 pld_size += session->num_confidence_levels;
2855 pad_bytes = ((4 - (pld_size % 4)) % 4);
2856 pld_size += pad_bytes;
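	/*
	 * The payload is the base conf_level struct (minus the CMI
	 * header) plus one byte per confidence level, padded up to
	 * the next 4-byte boundary to keep the size 32-bit aligned.
	 */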
2857
2858 fill_cmi_header(hdr, session->id, CMI_CPE_LSM_SERVICE_ID,
2859 false, pld_size,
2860 CPE_LSM_SESSION_CMD_SET_PARAMS_V2, false);
2861
2862 wcd_cpe_set_param_data(param_d, ids,
2863 pld_size - sizeof(struct cpe_param_data),
2864 CPE_LSM_SESSION_CMD_SET_PARAMS_V2);
2865
2866 conf_level_data.num_active_models = session->num_confidence_levels;
2867
2868 message = kzalloc(sizeof(struct cpe_lsm_conf_level) +
2869 conf_level_data.num_active_models + pad_bytes,
2870 GFP_KERNEL);
	if (!message)
		return -ENOMEM;
2875
2876 memcpy(message, &conf_level_data,
2877 sizeof(struct cpe_lsm_conf_level));
2878 memcpy(((u8 *) message) + sizeof(struct cpe_lsm_conf_level),
2879 session->conf_levels, conf_level_data.num_active_models);
2880
2881 WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
2882 ret = wcd_cpe_cmi_send_lsm_msg(core, session, message);
2883 if (ret)
2884 pr_err("%s: lsm_set_conf_levels failed, err = %d\n",
2885 __func__, ret);
2886 kfree(message);
2887 WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
2888 return ret;
2889}
2890
2891static int wcd_cpe_send_param_snd_model(struct wcd_cpe_core *core,
2892 struct cpe_lsm_session *session, struct cpe_lsm_ids *ids)
2893{
2894 int ret = 0;
2895 struct cmi_obm_msg obm_msg;
	struct cpe_param_data *param_d;

2899 ret = fill_cmi_header(&obm_msg.hdr, session->id,
2900 CMI_CPE_LSM_SERVICE_ID, 0, 20,
2901 CPE_LSM_SESSION_CMD_SET_PARAMS_V2, true);
2902 if (ret) {
2903 dev_err(core->dev,
2904 "%s: Invalid parameters, rc = %d\n",
2905 __func__, ret);
2906 goto err_ret;
2907 }
2908
2909 obm_msg.pld.version = 0;
2910 obm_msg.pld.size = session->snd_model_size;
2911 obm_msg.pld.data_ptr.kvaddr = session->snd_model_data;
2912 obm_msg.pld.mem_handle = session->lsm_mem_handle;
2913
2914 param_d = (struct cpe_param_data *) session->snd_model_data;
2915 wcd_cpe_set_param_data(param_d, ids,
2916 (session->snd_model_size - sizeof(*param_d)),
2917 CPE_LSM_SESSION_CMD_SET_PARAMS_V2);
2918
2919 WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
2920 ret = wcd_cpe_cmi_send_lsm_msg(core, session, &obm_msg);
2921 if (ret)
2922 dev_err(core->dev,
2923 "%s: snd_model_register failed, %d\n",
2924 __func__, ret);
2925 WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
2926
2927err_ret:
2928 return ret;
2929}
2930
2931static int wcd_cpe_send_param_dereg_model(
2932 struct wcd_cpe_core *core,
2933 struct cpe_lsm_session *session,
2934 struct cpe_lsm_ids *ids)
2935{
2936 struct cmi_hdr *hdr;
2937 struct cpe_param_data *param_d;
2938 u8 *message;
2939 u32 pld_size;
2940 int rc = 0;
2941
2942 pld_size = sizeof(*hdr) + sizeof(*param_d);
2943
2944 message = kzalloc(pld_size, GFP_KERNEL);
2945 if (!message)
2946 return -ENOMEM;
2947
2948 hdr = (struct cmi_hdr *) message;
2949 param_d = (struct cpe_param_data *)
2950 (((u8 *) message) + sizeof(*hdr));
2951
2952 if (fill_lsm_cmd_header_v0_inband(hdr,
2953 session->id,
2954 sizeof(*param_d),
2955 CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
2956 rc = -EINVAL;
2957 goto err_ret;
2958 }
2959 wcd_cpe_set_param_data(param_d, ids, 0,
2960 CPE_LSM_SESSION_CMD_SET_PARAMS_V2);
2961 WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
2962 rc = wcd_cpe_cmi_send_lsm_msg(core, session, message);
2963 if (rc)
2964 dev_err(core->dev,
2965 "%s: snd_model_deregister failed, %d\n",
2966 __func__, rc);
2967 WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
2968err_ret:
2969 kfree(message);
2970 return rc;
2971}
2972
2973static int wcd_cpe_send_custom_param(
2974 struct wcd_cpe_core *core,
2975 struct cpe_lsm_session *session,
2976 void *data, u32 msg_size)
2977{
2978 u8 *msg;
2979 struct cmi_hdr *hdr;
2980 u8 *msg_pld;
2981 int rc;
2982
2983 if (msg_size > CMI_INBAND_MESSAGE_SIZE) {
2984 dev_err(core->dev,
2985 "%s: out of band custom params not supported\n",
2986 __func__);
2987 return -EINVAL;
2988 }
2989
2990 msg = kzalloc(sizeof(*hdr) + msg_size, GFP_KERNEL);
2991 if (!msg)
2992 return -ENOMEM;
2993
2994 hdr = (struct cmi_hdr *) msg;
2995 msg_pld = msg + sizeof(struct cmi_hdr);
2996
2997 if (fill_lsm_cmd_header_v0_inband(hdr,
2998 session->id,
2999 msg_size,
3000 CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
3001 rc = -EINVAL;
3002 goto err_ret;
3003 }
3004
3005 memcpy(msg_pld, data, msg_size);
3006 WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
3007 rc = wcd_cpe_cmi_send_lsm_msg(core, session, msg);
3008 if (rc)
3009 dev_err(core->dev,
3010 "%s: custom params send failed, err = %d\n",
3011 __func__, rc);
3012 WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
3013err_ret:
3014 kfree(msg);
3015 return rc;
3016}
3017
3018static int wcd_cpe_set_one_param(void *core_handle,
3019 struct cpe_lsm_session *session, struct lsm_params_info *p_info,
3020 void *data, uint32_t param_type)
3021{
3022 struct wcd_cpe_core *core = core_handle;
3023 int rc = 0;
3024 struct cpe_lsm_ids ids;
3025
3026 memset(&ids, 0, sizeof(ids));
3027 ids.module_id = p_info->module_id;
3028 ids.param_id = p_info->param_id;
3029
3030 switch (param_type) {
3031 case LSM_ENDPOINT_DETECT_THRESHOLD:
3032 rc = wcd_cpe_send_param_epd_thres(core, session,
3033 data, &ids);
3034 break;
3035 case LSM_OPERATION_MODE:
3036 rc = wcd_cpe_send_param_opmode(core, session, data, &ids);
3037 break;
3038 case LSM_GAIN:
3039 rc = wcd_cpe_send_param_gain(core, session, data, &ids);
3040 break;
3041 case LSM_MIN_CONFIDENCE_LEVELS:
3042 rc = wcd_cpe_send_param_conf_levels(core, session, &ids);
3043 break;
3044 case LSM_REG_SND_MODEL:
3045 rc = wcd_cpe_send_param_snd_model(core, session, &ids);
3046 break;
3047 case LSM_DEREG_SND_MODEL:
3048 rc = wcd_cpe_send_param_dereg_model(core, session, &ids);
3049 break;
3050 case LSM_CUSTOM_PARAMS:
3051 rc = wcd_cpe_send_custom_param(core, session,
3052 data, p_info->param_size);
3053 break;
3054 default:
3055 pr_err("%s: wrong param_type 0x%x\n",
3056 __func__, param_type);
3057 }
3058
3059 if (rc)
3060 dev_err(core->dev,
3061 "%s: send_param(%d) failed, err %d\n",
3062 __func__, param_type, rc);
3063 return rc;
3064}
3065
3066/*
3067 * wcd_cpe_lsm_set_params: set the parameters for lsm service
3068 * @core: handle to cpe core
3069 * @session: session for which the parameters are to be set
3070 * @detect_mode: mode for detection
3071 * @detect_failure: flag indicating failure detection enabled/disabled
3072 *
3073 */
3074static int wcd_cpe_lsm_set_params(struct wcd_cpe_core *core,
3075 struct cpe_lsm_session *session,
3076 enum lsm_detection_mode detect_mode, bool detect_failure)
3077{
3078 struct cpe_lsm_ids ids;
3079 struct snd_lsm_detect_mode det_mode;
3080
3081 int ret = 0;
3082
3083 /* Send lsm calibration */
3084 ret = wcd_cpe_send_lsm_cal(core, session);
3085 if (ret) {
3086 pr_err("%s: fail to sent acdb cal, err = %d",
3087 __func__, ret);
3088 goto err_ret;
3089 }
3090
3091 /* Send operation mode */
3092 ids.module_id = CPE_LSM_MODULE_ID_VOICE_WAKEUP;
3093 ids.param_id = CPE_LSM_PARAM_ID_OPERATION_MODE;
3094 det_mode.mode = detect_mode;
3095 det_mode.detect_failure = detect_failure;
3096 ret = wcd_cpe_send_param_opmode(core, session,
3097 &det_mode, &ids);
3098 if (ret)
3099 dev_err(core->dev,
3100 "%s: Failed to set opmode, err=%d\n",
3101 __func__, ret);
3102
3103err_ret:
3104 return ret;
3105}
3106
3107static int wcd_cpe_lsm_set_data(void *core_handle,
3108 struct cpe_lsm_session *session,
3109 enum lsm_detection_mode detect_mode,
3110 bool detect_failure)
3111{
3112 struct wcd_cpe_core *core = core_handle;
3113 struct cpe_lsm_ids ids;
3114 int ret = 0;
3115
3116 if (session->num_confidence_levels > 0) {
3117 ret = wcd_cpe_lsm_set_params(core, session, detect_mode,
3118 detect_failure);
3119 if (ret) {
3120 dev_err(core->dev,
3121 "%s: lsm set params failed, rc = %d\n",
3122 __func__, ret);
3123 goto err_ret;
3124 }
3125
3126 ids.module_id = CPE_LSM_MODULE_ID_VOICE_WAKEUP;
3127 ids.param_id = CPE_LSM_PARAM_ID_MIN_CONFIDENCE_LEVELS;
3128 ret = wcd_cpe_send_param_conf_levels(core, session, &ids);
3129 if (ret) {
3130 dev_err(core->dev,
3131 "%s: lsm confidence levels failed, rc = %d\n",
3132 __func__, ret);
3133 goto err_ret;
3134 }
3135 } else {
3136 dev_dbg(core->dev,
3137 "%s: no conf levels to set\n",
3138 __func__);
3139 }
3140
3141err_ret:
3142 return ret;
3143}
3144
3145/*
3146 * wcd_cpe_lsm_reg_snd_model: register the sound model for listen
 * @core_handle: handle to cpe core
 * @session: session for which to register the sound model
3148 * @detect_mode: detection mode, user dependent/independent
3149 * @detect_failure: flag to indicate if failure detection is enabled
3150 *
3151 * The memory required for sound model should be pre-allocated on CPE
3152 * before this function is invoked.
3153 */
3154static int wcd_cpe_lsm_reg_snd_model(void *core_handle,
3155 struct cpe_lsm_session *session,
3156 enum lsm_detection_mode detect_mode,
3157 bool detect_failure)
3158{
3159 int ret = 0;
3160 struct cmi_obm_msg obm_msg;
3161 struct wcd_cpe_core *core = core_handle;
3162
3163 ret = wcd_cpe_is_valid_lsm_session(core, session,
3164 __func__);
3165 if (ret)
3166 return ret;
3167
3168 ret = wcd_cpe_lsm_set_data(core_handle, session,
3169 detect_mode, detect_failure);
3170 if (ret) {
3171 dev_err(core->dev,
3172 "%s: fail to set lsm data, err = %d\n",
3173 __func__, ret);
3174 return ret;
3175 }
3176
3177 WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
3178
3179 ret = fill_cmi_header(&obm_msg.hdr, session->id,
3180 CMI_CPE_LSM_SERVICE_ID, 0, 20,
3181 CPE_LSM_SESSION_CMD_REGISTER_SOUND_MODEL, true);
3182 if (ret) {
3183 dev_err(core->dev,
3184 "%s: Invalid parameters, rc = %d\n",
3185 __func__, ret);
3186 goto err_ret;
3187 }
3188
3189 obm_msg.pld.version = 0;
3190 obm_msg.pld.size = session->snd_model_size;
3191 obm_msg.pld.data_ptr.kvaddr = session->snd_model_data;
3192 obm_msg.pld.mem_handle = session->lsm_mem_handle;
3193
3194 ret = wcd_cpe_cmi_send_lsm_msg(core, session, &obm_msg);
3195 if (ret)
3196 dev_err(core->dev,
3197 "%s: snd_model_register failed, %d\n",
3198 __func__, ret);
3199err_ret:
3200 WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
3201 return ret;
3202}
3203
3204/*
3205 * wcd_cpe_lsm_dereg_snd_model: deregister the sound model for listen
3206 * @core_handle: handle to cpe core
3207 * @session: session for which to deregister the sound model
3208 *
3209 */
3210static int wcd_cpe_lsm_dereg_snd_model(void *core_handle,
3211 struct cpe_lsm_session *session)
3212{
3213 struct cmi_hdr cmd_dereg_snd_model;
3214 struct wcd_cpe_core *core = core_handle;
3215 int ret = 0;
3216
3217 ret = wcd_cpe_is_valid_lsm_session(core, session,
3218 __func__);
3219 if (ret)
3220 return ret;
3221
3222 WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
3223
3224 memset(&cmd_dereg_snd_model, 0, sizeof(cmd_dereg_snd_model));
3225 if (fill_lsm_cmd_header_v0_inband(&cmd_dereg_snd_model, session->id,
3226 0, CPE_LSM_SESSION_CMD_DEREGISTER_SOUND_MODEL)) {
3227 ret = -EINVAL;
3228 goto end_ret;
3229 }
3230
3231 ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_dereg_snd_model);
3232 if (ret)
3233 dev_err(core->dev,
3234 "%s: failed to send dereg_snd_model cmd\n",
3235 __func__);
3236end_ret:
3237 WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
3238 return ret;
3239}
3240
3241/*
3242 * wcd_cpe_lsm_get_afe_out_port_id: get afe output port id
3243 * @core_handle: handle to the CPE core
 * @session: session for which the afe out port id is queried
3245 */
3246static int wcd_cpe_lsm_get_afe_out_port_id(void *core_handle,
3247 struct cpe_lsm_session *session)
3248{
3249 struct wcd_cpe_core *core = core_handle;
3250 struct snd_soc_codec *codec;
3251 int rc = 0;
3252
3253 if (!core || !core->codec) {
3254 pr_err("%s: Invalid handle to %s\n",
3255 __func__,
3256 (!core) ? "core" : "codec");
3257 rc = -EINVAL;
3258 goto done;
3259 }
3260
3261 if (!session) {
3262 dev_err(core->dev, "%s: Invalid session\n",
3263 __func__);
3264 rc = -EINVAL;
3265 goto done;
3266 }
3267
3268 if (!core->cpe_cdc_cb ||
3269 !core->cpe_cdc_cb->get_afe_out_port_id) {
3270 session->afe_out_port_id = WCD_CPE_AFE_OUT_PORT_2;
3271 dev_dbg(core->dev,
3272 "%s: callback not defined, default port_id = %d\n",
3273 __func__, session->afe_out_port_id);
3274 goto done;
3275 }
3276
3277 codec = core->codec;
3278 rc = core->cpe_cdc_cb->get_afe_out_port_id(codec,
3279 &session->afe_out_port_id);
3280 if (rc) {
3281 dev_err(core->dev,
3282 "%s: failed to get port id, err = %d\n",
3283 __func__, rc);
3284 goto done;
3285 }
3286 dev_dbg(core->dev, "%s: port_id: %d\n", __func__,
3287 session->afe_out_port_id);
3288
3289done:
3290 return rc;
3291}
3292
3293/*
3294 * wcd_cpe_cmd_lsm_start: send the start command to lsm
3295 * @core_handle: handle to the CPE core
3296 * @session: session for which start command to be sent
3297 *
3298 */
3299static int wcd_cpe_cmd_lsm_start(void *core_handle,
3300 struct cpe_lsm_session *session)
3301{
3302 struct cmi_hdr cmd_lsm_start;
3303 struct wcd_cpe_core *core = core_handle;
3304 int ret = 0;
3305
3306 ret = wcd_cpe_is_valid_lsm_session(core, session,
3307 __func__);
3308 if (ret)
3309 return ret;
3310
3311 WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
3312
3313 memset(&cmd_lsm_start, 0, sizeof(struct cmi_hdr));
3314 if (fill_lsm_cmd_header_v0_inband(&cmd_lsm_start, session->id, 0,
3315 CPE_LSM_SESSION_CMD_START)) {
3316 ret = -EINVAL;
3317 goto end_ret;
3318 }
3319
3320 ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_lsm_start);
3321 if (ret)
3322 dev_err(core->dev, "failed to send lsm_start cmd\n");
3323end_ret:
3324 WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
3325 return ret;
3326}
3327
3328/*
3329 * wcd_cpe_cmd_lsm_stop: send the stop command for LSM service
3330 * @core_handle: handle to the cpe core
3331 * @session: session for which stop command to be sent
3332 *
3333 */
3334static int wcd_cpe_cmd_lsm_stop(void *core_handle,
3335 struct cpe_lsm_session *session)
3336{
3337 struct cmi_hdr cmd_lsm_stop;
3338 struct wcd_cpe_core *core = core_handle;
3339 int ret = 0;
3340
3341 ret = wcd_cpe_is_valid_lsm_session(core, session,
3342 __func__);
3343 if (ret)
3344 return ret;
3345
3346 WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
3347
3348 memset(&cmd_lsm_stop, 0, sizeof(struct cmi_hdr));
3349 if (fill_lsm_cmd_header_v0_inband(&cmd_lsm_stop, session->id, 0,
3350 CPE_LSM_SESSION_CMD_STOP)) {
3351 ret = -EINVAL;
3352 goto end_ret;
3353 }
3354
3355 ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cmd_lsm_stop);
3356 if (ret)
3357 dev_err(core->dev,
3358 "%s: failed to send lsm_stop cmd\n",
3359 __func__);
3360end_ret:
3361 WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
	return ret;
}
3365
3366/*
3367 * wcd_cpe_alloc_lsm_session: allocate a lsm session
 * @core_handle: handle to wcd_cpe_core
 * @client_data: client private data, passed back in the event callback
 * @event_cb: callback invoked to deliver detection events
3370 */
3371static struct cpe_lsm_session *wcd_cpe_alloc_lsm_session(
3372 void *core_handle, void *client_data,
3373 void (*event_cb)(void *, u8, u8, u8 *))
3374{
3375 struct cpe_lsm_session *session;
3376 int i, session_id = -1;
3377 struct wcd_cpe_core *core = core_handle;
3378 bool afe_register_service = false;
3379 int ret = 0;
3380
3381 /*
3382 * Even if multiple listen sessions can be
3383 * allocated, the AFE service registration
3384 * should be done only once as CPE can only
3385 * have one instance of AFE service.
3386 *
3387 * If this is the first session to be allocated,
3388 * only then register the afe service.
3389 */
3390 if (!wcd_cpe_lsm_session_active())
3391 afe_register_service = true;
3392
3393 for (i = 1; i <= WCD_CPE_LSM_MAX_SESSIONS; i++) {
3394 if (!lsm_sessions[i]) {
3395 session_id = i;
3396 break;
3397 }
3398 }
3399
3400 if (session_id < 0) {
3401 dev_err(core->dev,
3402 "%s: max allowed sessions already allocated\n",
3403 __func__);
3404 return NULL;
3405 }
3406
3407 ret = wcd_cpe_vote(core, true);
3408 if (ret) {
3409 dev_err(core->dev,
3410 "%s: Failed to enable cpe, err = %d\n",
3411 __func__, ret);
3412 return NULL;
3413 }
3414
3415 session = kzalloc(sizeof(struct cpe_lsm_session), GFP_KERNEL);
3416 if (!session)
3417 goto err_session_alloc;
3418
3419 session->id = session_id;
3420 session->event_cb = event_cb;
3421 session->cmi_reg_handle = cmi_register(wcd_cpe_cmi_lsm_callback,
3422 CMI_CPE_LSM_SERVICE_ID);
3423 if (!session->cmi_reg_handle) {
3424 dev_err(core->dev,
3425 "%s: Failed to register LSM service with CMI\n",
3426 __func__);
3427 goto err_ret;
3428 }
3429 session->priv_d = client_data;
3430 mutex_init(&session->lsm_lock);
3431 if (afe_register_service) {
3432 /* Register for AFE Service */
3433 core->cmi_afe_handle = cmi_register(wcd_cpe_cmi_afe_cb,
3434 CMI_CPE_AFE_SERVICE_ID);
3435 wcd_cpe_initialize_afe_port_data();
3436 if (!core->cmi_afe_handle) {
3437 dev_err(core->dev,
3438 "%s: Failed to register AFE service with CMI\n",
3439 __func__);
3440 goto err_afe_svc_reg;
3441 }
3442
3443 /* Once AFE service is registered, send the mode command */
3444 ret = wcd_cpe_afe_svc_cmd_mode(core,
3445 AFE_SVC_EXPLICIT_PORT_START);
3446 if (ret)
3447 goto err_afe_mode_cmd;
3448 }
3449
3450 session->lsm_mem_handle = 0;
3451 init_completion(&session->cmd_comp);
3452
3453 lsm_sessions[session_id] = session;
3454 return session;
3455
3456err_afe_mode_cmd:
3457 cmi_deregister(core->cmi_afe_handle);
3458
3459err_afe_svc_reg:
3460 cmi_deregister(session->cmi_reg_handle);
3461 mutex_destroy(&session->lsm_lock);
3462
3463err_ret:
3464 kfree(session);
3465
3466err_session_alloc:
3467 wcd_cpe_vote(core, false);
3468 return NULL;
3469}
3470
3471/*
3472 * wcd_cpe_lsm_config_lab_latency: send lab latency value
3473 * @core: handle to wcd_cpe_core
3474 * @session: lsm session
3475 * @latency: the value of latency for lab setup in msec
3476 */
3477static int wcd_cpe_lsm_config_lab_latency(
3478 struct wcd_cpe_core *core,
3479 struct cpe_lsm_session *session,
3480 u32 latency)
3481{
3482 int ret = 0, pld_size = CPE_PARAM_LSM_LAB_LATENCY_SIZE;
3483 struct cpe_lsm_lab_latency_config cpe_lab_latency;
3484 struct cpe_lsm_lab_config *lab_lat = &cpe_lab_latency.latency_cfg;
3485 struct cpe_param_data *param_d = &lab_lat->param;
3486 struct cpe_lsm_ids ids;
3487
	/* Zero the command so no uninitialized padding is sent */
	memset(&cpe_lab_latency, 0, sizeof(cpe_lab_latency));

	if (latency == 0x00 || latency > WCD_CPE_LAB_MAX_LATENCY) {
		pr_err("%s: Invalid latency %u\n",
			__func__, latency);
		return -EINVAL;
	}

	if (fill_lsm_cmd_header_v0_inband(&cpe_lab_latency.hdr, session->id,
		(u8) pld_size, CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
		pr_err("%s: Failed to create header\n", __func__);
		return -EINVAL;
	}
3498
3499 lab_lat->latency = latency;
3500 lab_lat->minor_ver = 1;
3501 ids.module_id = CPE_LSM_MODULE_ID_LAB;
3502 ids.param_id = CPE_LSM_PARAM_ID_LAB_CONFIG;
3503 wcd_cpe_set_param_data(param_d, &ids,
3504 PARAM_SIZE_LSM_LATENCY_SIZE,
3505 CPE_LSM_SESSION_CMD_SET_PARAMS_V2);
3506
3507 pr_debug("%s: Module 0x%x Param 0x%x size %zu pld_size 0x%x\n",
3508 __func__, lab_lat->param.module_id,
3509 lab_lat->param.param_id, PARAM_SIZE_LSM_LATENCY_SIZE,
3510 pld_size);
3511
3512 WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
3513 ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cpe_lab_latency);
3514 if (ret != 0)
3515 pr_err("%s: lsm_set_params failed, error = %d\n",
3516 __func__, ret);
3517 WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
3518 return ret;
3519}
3520
3521/*
3522 * wcd_cpe_lsm_lab_control: enable/disable lab
3523 * @core: handle to wcd_cpe_core
3524 * @session: lsm session
3525 * @enable: Indicates whether to enable / disable lab
3526 */
3527static int wcd_cpe_lsm_lab_control(
3528 void *core_handle,
3529 struct cpe_lsm_session *session,
3530 bool enable)
3531{
3532 struct wcd_cpe_core *core = core_handle;
3533 int ret = 0, pld_size = CPE_PARAM_SIZE_LSM_LAB_CONTROL;
3534 struct cpe_lsm_control_lab cpe_lab_enable;
3535 struct cpe_lsm_lab_enable *lab_enable = &cpe_lab_enable.lab_enable;
3536 struct cpe_param_data *param_d = &lab_enable->param;
3537 struct cpe_lsm_ids ids;
3538
3539 pr_debug("%s: enter payload_size = %d Enable %d\n",
3540 __func__, pld_size, enable);
3541
3542 memset(&cpe_lab_enable, 0, sizeof(cpe_lab_enable));
3543
3544 if (fill_lsm_cmd_header_v0_inband(&cpe_lab_enable.hdr, session->id,
3545 (u8) pld_size, CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
3546 return -EINVAL;
3547 }
	lab_enable->enable = enable ? 1 : 0;
3552
3553 ids.module_id = CPE_LSM_MODULE_ID_LAB;
3554 ids.param_id = CPE_LSM_PARAM_ID_LAB_ENABLE;
3555 wcd_cpe_set_param_data(param_d, &ids,
3556 PARAM_SIZE_LSM_CONTROL_SIZE,
3557 CPE_LSM_SESSION_CMD_SET_PARAMS_V2);
3558
3559 pr_debug("%s: Module 0x%x, Param 0x%x size %zu pld_size 0x%x\n",
3560 __func__, lab_enable->param.module_id,
3561 lab_enable->param.param_id, PARAM_SIZE_LSM_CONTROL_SIZE,
3562 pld_size);
3563
3564 WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
3565 ret = wcd_cpe_cmi_send_lsm_msg(core, session, &cpe_lab_enable);
3566 if (ret != 0) {
3567 pr_err("%s: lsm_set_params failed, error = %d\n",
3568 __func__, ret);
3569 WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
3570 goto done;
3571 }
3572 WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
3573
3574 if (lab_enable->enable)
3575 ret = wcd_cpe_lsm_config_lab_latency(core, session,
3576 WCD_CPE_LAB_MAX_LATENCY);
3577done:
3578 return ret;
3579}
3580
3581/*
 * wcd_cpe_lsm_eob: send the end-of-buffer command to stop lab
 * @core: handle to wcd_cpe_core
 * @session: lsm session for which lab is being stopped
3585 */
3586static int wcd_cpe_lsm_eob(
3587 struct wcd_cpe_core *core,
3588 struct cpe_lsm_session *session)
3589{
3590 int ret = 0;
3591 struct cmi_hdr lab_eob;
3592
3593 if (fill_lsm_cmd_header_v0_inband(&lab_eob, session->id,
3594 0, CPE_LSM_SESSION_CMD_EOB)) {
3595 return -EINVAL;
3596 }
3597
3598 WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
3599 ret = wcd_cpe_cmi_send_lsm_msg(core, session, &lab_eob);
3600 if (ret != 0)
3601 pr_err("%s: lsm_set_params failed\n", __func__);
3602 WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
3603
3604 return ret;
3605}
3606
3607/*
3608 * wcd_cpe_dealloc_lsm_session: deallocate lsm session
3609 * @core: handle to wcd_cpe_core
3610 * @session: lsm session to be deallocated
3611 */
3612static int wcd_cpe_dealloc_lsm_session(void *core_handle,
3613 struct cpe_lsm_session *session)
3614{
3615 struct wcd_cpe_core *core = core_handle;
3616 int ret = 0;
3617
3618 if (!session) {
3619 dev_err(core->dev,
3620 "%s: Invalid lsm session\n", __func__);
3621 return -EINVAL;
3622 }
3623
3624 dev_dbg(core->dev, "%s: session %d being deallocated\n",
3625 __func__, session->id);
3626 if (session->id > WCD_CPE_LSM_MAX_SESSIONS) {
3627 dev_err(core->dev,
3628 "%s: Wrong session id %d max allowed = %d\n",
3629 __func__, session->id,
3630 WCD_CPE_LSM_MAX_SESSIONS);
3631 return -EINVAL;
3632 }
3633
3634 cmi_deregister(session->cmi_reg_handle);
3635 mutex_destroy(&session->lsm_lock);
3636 lsm_sessions[session->id] = NULL;
3637 kfree(session);
3638
3639 if (!wcd_cpe_lsm_session_active()) {
3640 cmi_deregister(core->cmi_afe_handle);
3641 core->cmi_afe_handle = NULL;
3642 wcd_cpe_deinitialize_afe_port_data();
3643 }
3644
3645 ret = wcd_cpe_vote(core, false);
3646 if (ret)
3647 dev_dbg(core->dev,
3648 "%s: Failed to un-vote cpe, err = %d\n",
3649 __func__, ret);
3650
3651 return ret;
3652}
3653
3654static int wcd_cpe_lab_ch_setup(void *core_handle,
3655 struct cpe_lsm_session *session,
3656 enum wcd_cpe_event event)
3657{
3658 struct wcd_cpe_core *core = core_handle;
3659 struct snd_soc_codec *codec;
3660 int rc = 0;
3661 u8 cpe_intr_bits;
3662
3663 if (!core || !core->codec) {
3664 pr_err("%s: Invalid handle to %s\n",
3665 __func__,
3666 (!core) ? "core" : "codec");
		rc = -EINVAL;
3668 goto done;
3669 }
3670
3671 if (!core->cpe_cdc_cb ||
3672 !core->cpe_cdc_cb->cdc_ext_clk ||
3673 !core->cpe_cdc_cb->lab_cdc_ch_ctl) {
3674 dev_err(core->dev,
3675 "%s: Invalid codec callbacks\n",
3676 __func__);
3677 rc = -EINVAL;
3678 goto done;
3679 }
3680
3681 codec = core->codec;
3682 dev_dbg(core->dev,
3683 "%s: event = 0x%x\n",
3684 __func__, event);
3685
3686 switch (event) {
3687 case WCD_CPE_PRE_ENABLE:
3688 rc = core->cpe_cdc_cb->cdc_ext_clk(codec, true, false);
3689 if (rc) {
3690 dev_err(core->dev,
3691 "%s: failed to enable cdc clk, err = %d\n",
3692 __func__, rc);
3693 goto done;
3694 }
3695
3696 rc = core->cpe_cdc_cb->lab_cdc_ch_ctl(codec,
3697 true);
3698 if (rc) {
3699 dev_err(core->dev,
3700 "%s: failed to enable cdc port, err = %d\n",
3701 __func__, rc);
3702 rc = core->cpe_cdc_cb->cdc_ext_clk(codec, false, false);
3703 goto done;
3704 }
3705
3706 break;
3707
3708 case WCD_CPE_POST_ENABLE:
3709 rc = cpe_svc_toggle_lab(core->cpe_handle, true);
3710 if (rc)
3711 dev_err(core->dev,
3712 "%s: Failed to enable lab\n", __func__);
3713 break;
3714
3715 case WCD_CPE_PRE_DISABLE:
3716 /*
3717 * Mask the non-fatal interrupts in CPE as they will
3718 * be generated during lab teardown and may flood.
3719 */
3720 cpe_intr_bits = ~(core->irq_info.cpe_fatal_irqs & 0xFF);
3721 if (CPE_ERR_IRQ_CB(core))
3722 core->cpe_cdc_cb->cpe_err_irq_control(
3723 core->codec,
3724 CPE_ERR_IRQ_MASK,
3725 &cpe_intr_bits);
3726
3727 rc = core->cpe_cdc_cb->lab_cdc_ch_ctl(codec,
3728 false);
3729 if (rc)
3730 dev_err(core->dev,
3731 "%s: failed to disable cdc port, err = %d\n",
3732 __func__, rc);
3733 break;
3734
3735 case WCD_CPE_POST_DISABLE:
3736 rc = wcd_cpe_lsm_eob(core, session);
3737 if (rc)
3738 dev_err(core->dev,
3739 "%s: eob send failed, err = %d\n",
3740 __func__, rc);
3741
3742 /* Continue teardown even if eob failed */
3743 rc = cpe_svc_toggle_lab(core->cpe_handle, false);
3744 if (rc)
3745 dev_err(core->dev,
3746 "%s: Failed to disable lab\n", __func__);
3747
3748 /* Continue with disabling even if toggle lab fails */
3749 rc = core->cpe_cdc_cb->cdc_ext_clk(codec, false, false);
3750 if (rc)
3751 dev_err(core->dev,
3752 "%s: failed to disable cdc clk, err = %d\n",
3753 __func__, rc);
3754
3755 /* Unmask non-fatal CPE interrupts */
3756 cpe_intr_bits = ~(core->irq_info.cpe_fatal_irqs & 0xFF);
3757 if (CPE_ERR_IRQ_CB(core))
3758 core->cpe_cdc_cb->cpe_err_irq_control(
3759 core->codec,
3760 CPE_ERR_IRQ_UNMASK,
3761 &cpe_intr_bits);
3762 break;
3763
3764 default:
3765 dev_err(core->dev,
3766 "%s: Invalid event 0x%x\n",
3767 __func__, event);
3768 rc = -EINVAL;
3769 break;
3770 }
3771
3772done:
3773 return rc;
3774}
3775
3776static int wcd_cpe_lsm_set_fmt_cfg(void *core_handle,
3777 struct cpe_lsm_session *session)
3778{
3779 int ret;
3780 struct cpe_lsm_output_format_cfg out_fmt_cfg;
3781 struct wcd_cpe_core *core = core_handle;
3782
3783 ret = wcd_cpe_is_valid_lsm_session(core, session, __func__);
3784 if (ret)
3785 goto done;
3786
3787 WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
3788
3789 memset(&out_fmt_cfg, 0, sizeof(out_fmt_cfg));
3790 if (fill_lsm_cmd_header_v0_inband(&out_fmt_cfg.hdr,
3791 session->id, OUT_FMT_CFG_CMD_PAYLOAD_SIZE,
3792 CPE_LSM_SESSION_CMD_TX_BUFF_OUTPUT_CONFIG)) {
3793 ret = -EINVAL;
3794 goto err_ret;
3795 }
3796
3797 out_fmt_cfg.format = session->out_fmt_cfg.format;
3798 out_fmt_cfg.packing = session->out_fmt_cfg.pack_mode;
3799 out_fmt_cfg.data_path_events = session->out_fmt_cfg.data_path_events;
3800
3801 ret = wcd_cpe_cmi_send_lsm_msg(core, session, &out_fmt_cfg);
3802 if (ret)
3803 dev_err(core->dev,
3804 "%s: lsm_set_output_format_cfg failed, err = %d\n",
3805 __func__, ret);
3806
3807err_ret:
3808 WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
3809done:
3810 return ret;
3811}
3812
3813static void wcd_cpe_snd_model_offset(void *core_handle,
3814 struct cpe_lsm_session *session, size_t *offset)
3815{
3816 *offset = sizeof(struct cpe_param_data);
3817}
3818
3819static int wcd_cpe_lsm_set_media_fmt_params(void *core_handle,
3820 struct cpe_lsm_session *session,
3821 struct lsm_hw_params *param)
3822{
3823 struct cpe_lsm_media_fmt_param media_fmt;
3824 struct cmi_hdr *msg_hdr = &media_fmt.hdr;
3825 struct wcd_cpe_core *core = core_handle;
3826 struct cpe_param_data *param_d = &media_fmt.param;
3827 struct cpe_lsm_ids ids;
3828 int ret;
3829
3830 memset(&media_fmt, 0, sizeof(media_fmt));
3831 if (fill_lsm_cmd_header_v0_inband(msg_hdr,
3832 session->id,
3833 CPE_MEDIA_FMT_PLD_SIZE,
3834 CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
3835 ret = -EINVAL;
3836 goto done;
3837 }
3838
3839 memset(&ids, 0, sizeof(ids));
3840 ids.module_id = CPE_LSM_MODULE_FRAMEWORK;
3841 ids.param_id = CPE_LSM_PARAM_ID_MEDIA_FMT;
3842
3843 wcd_cpe_set_param_data(param_d, &ids, CPE_MEDIA_FMT_PARAM_SIZE,
3844 CPE_LSM_SESSION_CMD_SET_PARAMS_V2);
3845
3846 media_fmt.minor_version = 1;
3847 media_fmt.sample_rate = param->sample_rate;
3848 media_fmt.num_channels = param->num_chs;
3849 media_fmt.bit_width = param->bit_width;
3850
3851 WCD_CPE_GRAB_LOCK(&session->lsm_lock, "lsm");
3852 ret = wcd_cpe_cmi_send_lsm_msg(core, session, &media_fmt);
3853 if (ret)
3854 dev_err(core->dev,
3855 "%s: Set_param(media_format) failed, err=%d\n",
3856 __func__, ret);
3857 WCD_CPE_REL_LOCK(&session->lsm_lock, "lsm");
3858done:
3859 return ret;
3860}
3861
3862static int wcd_cpe_lsm_set_port(void *core_handle,
3863 struct cpe_lsm_session *session, void *data)
3864{
3865 u32 port_id;
3866 int ret;
3867 struct cpe_lsm_ids ids;
3868 struct wcd_cpe_core *core = core_handle;
3869
3870 ret = wcd_cpe_is_valid_lsm_session(core, session, __func__);
3871 if (ret)
3872 goto done;
3873
3874 if (!data) {
3875 dev_err(core->dev, "%s: data is NULL\n", __func__);
3876 ret = -EINVAL;
3877 goto done;
3878 }
3879 port_id = *(u32 *)data;
3880 dev_dbg(core->dev, "%s: port_id: %d\n", __func__, port_id);
3881
3882 memset(&ids, 0, sizeof(ids));
3883 ids.module_id = LSM_MODULE_ID_FRAMEWORK;
3884 ids.param_id = LSM_PARAM_ID_CONNECT_TO_PORT;
3885
3886 ret = wcd_cpe_send_param_connectport(core, session, NULL,
3887 &ids, port_id);
3888 if (ret)
3889 dev_err(core->dev,
3890 "%s: send_param_connectport failed, err %d\n",
3891 __func__, ret);
3892done:
3893 return ret;
3894}
3895
3896/*
 * wcd_cpe_get_lsm_ops: populate the ops table used by lsm clients
 * @lsm_ops: structure to be filled with the lsm callbacks
3900 */
3901int wcd_cpe_get_lsm_ops(struct wcd_cpe_lsm_ops *lsm_ops)
3902{
3903 lsm_ops->lsm_alloc_session = wcd_cpe_alloc_lsm_session;
3904 lsm_ops->lsm_dealloc_session = wcd_cpe_dealloc_lsm_session;
3905 lsm_ops->lsm_open_tx = wcd_cpe_cmd_lsm_open_tx;
3906 lsm_ops->lsm_close_tx = wcd_cpe_cmd_lsm_close_tx;
3907 lsm_ops->lsm_shmem_alloc = wcd_cpe_cmd_lsm_shmem_alloc;
3908 lsm_ops->lsm_shmem_dealloc = wcd_cpe_cmd_lsm_shmem_dealloc;
3909 lsm_ops->lsm_register_snd_model = wcd_cpe_lsm_reg_snd_model;
3910 lsm_ops->lsm_deregister_snd_model = wcd_cpe_lsm_dereg_snd_model;
3911 lsm_ops->lsm_get_afe_out_port_id = wcd_cpe_lsm_get_afe_out_port_id;
3912 lsm_ops->lsm_start = wcd_cpe_cmd_lsm_start;
3913 lsm_ops->lsm_stop = wcd_cpe_cmd_lsm_stop;
3914 lsm_ops->lsm_lab_control = wcd_cpe_lsm_lab_control;
3915 lsm_ops->lab_ch_setup = wcd_cpe_lab_ch_setup;
3916 lsm_ops->lsm_set_data = wcd_cpe_lsm_set_data;
3917 lsm_ops->lsm_set_fmt_cfg = wcd_cpe_lsm_set_fmt_cfg;
3918 lsm_ops->lsm_set_one_param = wcd_cpe_set_one_param;
3919 lsm_ops->lsm_get_snd_model_offset = wcd_cpe_snd_model_offset;
3920 lsm_ops->lsm_set_media_fmt_params = wcd_cpe_lsm_set_media_fmt_params;
3921 lsm_ops->lsm_set_port = wcd_cpe_lsm_set_port;
3922
3923 return 0;
3924}
3925EXPORT_SYMBOL(wcd_cpe_get_lsm_ops);
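
/*
 * Illustrative usage sketch for the ops table (values such as the
 * app id and the 16 kHz sample rate are placeholders): a client
 * fills the table once and drives a session through it. Error
 * handling is omitted for brevity.
 *
 *	struct wcd_cpe_lsm_ops ops;
 *	struct cpe_lsm_session *session;
 *
 *	wcd_cpe_get_lsm_ops(&ops);
 *	session = ops.lsm_alloc_session(core, client_data, event_cb);
 *	ops.lsm_open_tx(core, session, app_id, 16000);
 *	...
 *	ops.lsm_close_tx(core, session);
 *	ops.lsm_dealloc_session(core, session);
 */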
3926
3927static int fill_afe_cmd_header(struct cmi_hdr *hdr, u8 port_id,
3928 u16 opcode, u8 pld_size,
3929 bool obm_flag)
3930{
3931 CMI_HDR_SET_SESSION(hdr, port_id);
3932 CMI_HDR_SET_SERVICE(hdr, CMI_CPE_AFE_SERVICE_ID);
3933
3934 CMI_HDR_SET_PAYLOAD_SIZE(hdr, pld_size);
3935
3936 hdr->opcode = opcode;
3937
3938 if (obm_flag)
3939 CMI_HDR_SET_OBM(hdr, CMI_OBM_FLAG_OUT_BAND);
3940 else
3941 CMI_HDR_SET_OBM(hdr, CMI_OBM_FLAG_IN_BAND);
3942
3943 return 0;
3944}
3945
3946/*
3947 * wcd_cpe_cmi_send_afe_msg: send message to AFE service
3948 * @core: wcd cpe core handle
3949 * @port_cfg: configuration data for the afe port
3950 * for which this message is to be sent
3951 * @message: actual message with header and payload
3952 *
3953 * Port specific lock needs to be acquired before this
3954 * function can be invoked
3955 */
3956static int wcd_cpe_cmi_send_afe_msg(
3957 struct wcd_cpe_core *core,
3958 struct wcd_cmi_afe_port_data *port_d,
3959 void *message)
3960{
3961 int ret = 0;
3962 struct cmi_hdr *hdr = message;
3963
3964 pr_debug("%s: sending message with opcode 0x%x\n",
3965 __func__, hdr->opcode);
3966
3967 if (unlikely(!wcd_cpe_is_online_state(core))) {
3968 dev_err(core->dev, "%s: CPE offline\n", __func__);
3969 return 0;
3970 }
3971
3972 if (CMI_HDR_GET_OBM_FLAG(hdr))
3973 wcd_cpe_bus_vote_max_bw(core, true);
3974
3975 ret = cmi_send_msg(message);
3976 if (ret) {
3977 pr_err("%s: cmd 0x%x send failed, err = %d\n",
3978 __func__, hdr->opcode, ret);
3979 goto rel_bus_vote;
3980 }
3981
3982 ret = wait_for_completion_timeout(&port_d->afe_cmd_complete,
3983 CMI_CMD_TIMEOUT);
3984 if (ret > 0) {
3985 pr_debug("%s: command 0x%x, received response 0x%x\n",
3986 __func__, hdr->opcode, port_d->cmd_result);
3987 if (port_d->cmd_result == CMI_SHMEM_ALLOC_FAILED)
3988 port_d->cmd_result = CPE_ENOMEMORY;
3989 if (port_d->cmd_result > 0)
3990 pr_err("%s: CPE returned error[%s]\n",
3991 __func__, cpe_err_get_err_str(
3992 port_d->cmd_result));
3993 ret = cpe_err_get_lnx_err_code(port_d->cmd_result);
3994 goto rel_bus_vote;
3995 } else {
3996 pr_err("%s: command 0x%x send timed out\n",
3997 __func__, hdr->opcode);
3998 ret = -ETIMEDOUT;
3999 goto rel_bus_vote;
4000 }
4001
4002rel_bus_vote:
4003 reinit_completion(&port_d->afe_cmd_complete);
4004
4005 if (CMI_HDR_GET_OBM_FLAG(hdr))
4006 wcd_cpe_bus_vote_max_bw(core, false);
4007
4008 return ret;
4009}
4010
4011
4012
4013/*
4014 * wcd_cpe_afe_shmem_alloc: allocate the cpe memory for afe service
4015 * @core: handle to cpe core
4016 * @port_cfg: configuration data for the port which needs
4017 * memory to be allocated on CPE
4018 * @size: size of the memory to be allocated
4019 */
static int wcd_cpe_afe_shmem_alloc(
        struct wcd_cpe_core *core,
        struct wcd_cmi_afe_port_data *port_d,
        u32 size)
{
        struct cpe_cmd_shmem_alloc cmd_shmem_alloc;
        int ret = 0;

        pr_debug("%s: enter: size = %d\n", __func__, size);

        memset(&cmd_shmem_alloc, 0, sizeof(cmd_shmem_alloc));
        if (fill_afe_cmd_header(&cmd_shmem_alloc.hdr, port_d->port_id,
                                CPE_AFE_PORT_CMD_SHARED_MEM_ALLOC,
                                SHMEM_ALLOC_CMD_PLD_SIZE, false)) {
                ret = -EINVAL;
                goto end_ret;
        }

        cmd_shmem_alloc.size = size;

        ret = wcd_cpe_cmi_send_afe_msg(core, port_d, &cmd_shmem_alloc);
        if (ret) {
                pr_err("%s: afe_shmem_alloc fail, ret = %d\n",
                       __func__, ret);
                goto end_ret;
        }

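        /*
         * On success, the response from the AFE service carries the
         * handle of the allocated memory, which is saved into
         * port_d->mem_handle before the command completion is
         * signaled (hence it can be logged below).
         */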
        pr_debug("%s: completed %s, mem_handle = 0x%x\n",
                 __func__, "CPE_AFE_CMD_SHARED_MEM_ALLOC",
                 port_d->mem_handle);

end_ret:
        return ret;
}

/*
 * wcd_cpe_afe_shmem_dealloc: deallocate the CPE memory for
 *                            the afe service
 * @core: handle to cpe core
 * @port_d: data for the afe port whose CPE memory
 *          is to be deallocated
 * The memory handle to be deallocated is taken from
 * the port data.
 */
static int wcd_cpe_afe_shmem_dealloc(
        struct wcd_cpe_core *core,
        struct wcd_cmi_afe_port_data *port_d)
{
        struct cpe_cmd_shmem_dealloc cmd_dealloc;
        int ret = 0;

        pr_debug("%s: enter, port_id = %d\n",
                 __func__, port_d->port_id);

        memset(&cmd_dealloc, 0, sizeof(cmd_dealloc));
        if (fill_afe_cmd_header(&cmd_dealloc.hdr, port_d->port_id,
                                CPE_AFE_PORT_CMD_SHARED_MEM_DEALLOC,
                                SHMEM_DEALLOC_CMD_PLD_SIZE, false)) {
                ret = -EINVAL;
                goto end_ret;
        }

        cmd_dealloc.addr = port_d->mem_handle;
        ret = wcd_cpe_cmi_send_afe_msg(core, port_d, &cmd_dealloc);
        if (ret) {
                pr_err("%s: failed to send shmem_dealloc cmd\n",
                       __func__);
                goto end_ret;
        }
        port_d->mem_handle = 0;

end_ret:
        return ret;
}

/*
 * wcd_cpe_send_afe_cal: send the acdb calibration to the AFE port
 * @core_handle: handle to cpe core
 * @port_d: data for the afe port for which the
 *          calibration needs to be applied
 */
static int wcd_cpe_send_afe_cal(void *core_handle,
                                struct wcd_cmi_afe_port_data *port_d)
{
        struct cal_block_data *afe_cal = NULL;
        struct wcd_cpe_core *core = core_handle;
        struct cmi_obm_msg obm_msg;
        void *inb_msg = NULL;
        void *msg;
        int rc = 0;
        bool is_obm_msg;

        if (core->cal_data[WCD_CPE_LSM_CAL_AFE] == NULL) {
                pr_err("%s: LSM cal not allocated!\n",
                       __func__);
                /* cal mutex is not yet held, do not jump to unlock */
                return -EINVAL;
        }

        mutex_lock(&core->cal_data[WCD_CPE_LSM_CAL_AFE]->lock);
        afe_cal = cal_utils_get_only_cal_block(
                        core->cal_data[WCD_CPE_LSM_CAL_AFE]);
        if (!afe_cal) {
                pr_err("%s: failed to get afe cal block\n",
                       __func__);
                rc = -EINVAL;
                goto rel_cal_mutex;
        }

        if (afe_cal->cal_data.size == 0) {
                dev_dbg(core->dev, "%s: No AFE cal to send\n",
                        __func__);
                rc = 0;
                goto rel_cal_mutex;
        }

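        /*
         * Calibration data larger than a single in-band CMI message
         * must be sent out-of-band: the payload is placed in CPE
         * shared memory and the message only carries a reference.
         */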
        is_obm_msg = afe_cal->cal_data.size > CMI_INBAND_MESSAGE_SIZE;

        if (is_obm_msg) {
                struct cmi_hdr *hdr = &(obm_msg.hdr);
                struct cmi_obm *pld = &(obm_msg.pld);

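                /*
                 * Out-of-band: allocate CPE shared memory for the cal
                 * data, then build a SET_PARAM header referencing it.
                 */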
                rc = wcd_cpe_afe_shmem_alloc(core, port_d,
                                             afe_cal->cal_data.size);
                if (rc) {
                        dev_err(core->dev,
                                "%s: AFE shmem alloc fail %d\n",
                                __func__, rc);
                        goto rel_cal_mutex;
                }

                rc = fill_afe_cmd_header(hdr, port_d->port_id,
                                         CPE_AFE_CMD_SET_PARAM,
                                         CPE_AFE_PARAM_PAYLOAD_SIZE,
                                         true);
                if (rc) {
                        dev_err(core->dev,
                                "%s: invalid params for header, err = %d\n",
                                __func__, rc);
                        wcd_cpe_afe_shmem_dealloc(core, port_d);
                        goto rel_cal_mutex;
                }

                pld->version = 0;
                pld->size = afe_cal->cal_data.size;
                pld->data_ptr.kvaddr = afe_cal->cal_data.kvaddr;
                pld->mem_handle = port_d->mem_handle;
                msg = &obm_msg;

        } else {
                u8 *msg_pld;
                struct cmi_hdr *hdr;

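                /*
                 * In-band: the header and the cal payload travel in
                 * one contiguous buffer.
                 */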
                inb_msg = kzalloc(sizeof(struct cmi_hdr) +
                                  afe_cal->cal_data.size,
                                  GFP_KERNEL);
                if (!inb_msg) {
                        dev_err(core->dev,
                                "%s: no memory for afe cal inband\n",
                                __func__);
                        rc = -ENOMEM;
                        goto rel_cal_mutex;
                }

                hdr = (struct cmi_hdr *) inb_msg;

                rc = fill_afe_cmd_header(hdr, port_d->port_id,
                                         CPE_AFE_CMD_SET_PARAM,
                                         CPE_AFE_PARAM_PAYLOAD_SIZE,
                                         false);
                if (rc) {
                        dev_err(core->dev,
                                "%s: invalid params for header, err = %d\n",
                                __func__, rc);
                        kfree(inb_msg);
                        inb_msg = NULL;
                        goto rel_cal_mutex;
                }

                msg_pld = ((u8 *) inb_msg) + sizeof(struct cmi_hdr);
                memcpy(msg_pld, afe_cal->cal_data.kvaddr,
                       afe_cal->cal_data.size);

                msg = inb_msg;
        }

        rc = wcd_cpe_cmi_send_afe_msg(core, port_d, msg);
        if (rc)
                pr_err("%s: afe cal for listen failed, rc = %d\n",
                       __func__, rc);

        if (is_obm_msg) {
                wcd_cpe_afe_shmem_dealloc(core, port_d);
                port_d->mem_handle = 0;
        } else {
                kfree(inb_msg);
                inb_msg = NULL;
        }

rel_cal_mutex:
        mutex_unlock(&core->cal_data[WCD_CPE_LSM_CAL_AFE]->lock);
        return rc;
}

/*
 * wcd_cpe_is_valid_port: check the validity of an afe port id
 * @core: handle to core to check for validity
 * @afe_cfg: client provided afe configuration
 * @func: name of the function invoking this validity check,
 *        used for logging purposes only
 */
static int wcd_cpe_is_valid_port(struct wcd_cpe_core *core,
                                 struct wcd_cpe_afe_port_cfg *afe_cfg,
                                 const char *func)
{
        if (unlikely(IS_ERR_OR_NULL(core))) {
                pr_err("%s: Invalid core handle\n", func);
                return -EINVAL;
        }

        if (afe_cfg->port_id > WCD_CPE_AFE_MAX_PORTS) {
                dev_err(core->dev,
                        "%s: invalid afe port (%u)\n",
                        func, afe_cfg->port_id);
                return -EINVAL;
        }

        dev_dbg(core->dev,
                "%s: port_id = %u\n",
                func, afe_cfg->port_id);

        return 0;
}

static int wcd_cpe_afe_svc_cmd_mode(void *core_handle,
                                    u8 mode)
{
        struct cpe_afe_svc_cmd_mode afe_mode;
        struct wcd_cpe_core *core = core_handle;
        struct wcd_cmi_afe_port_data *afe_port_d;
        int ret;

        afe_port_d = &afe_ports[0];
        /*
         * The AFE SVC mode command targets the service, not a
         * specific port; use AFE port 0 so the command is applied
         * to all AFE ports on the CPE.
         */
        afe_port_d->port_id = 0;

        WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");
        memset(&afe_mode, 0, sizeof(afe_mode));
        if (fill_afe_cmd_header(&afe_mode.hdr, afe_port_d->port_id,
                                CPE_AFE_SVC_CMD_LAB_MODE,
                                CPE_AFE_CMD_MODE_PAYLOAD_SIZE,
                                false)) {
                ret = -EINVAL;
                goto err_ret;
        }

        afe_mode.mode = mode;

        ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &afe_mode);
        if (ret)
                dev_err(core->dev,
                        "%s: afe_svc_mode cmd failed, err = %d\n",
                        __func__, ret);

err_ret:
        WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
        return ret;
}

static int wcd_cpe_afe_cmd_port_cfg(void *core_handle,
                                    struct wcd_cpe_afe_port_cfg *afe_cfg)
{
        struct cpe_afe_cmd_port_cfg port_cfg_cmd;
        struct wcd_cpe_core *core = core_handle;
        struct wcd_cmi_afe_port_data *afe_port_d;
        int ret;

        ret = wcd_cpe_is_valid_port(core, afe_cfg, __func__);
        if (ret)
                goto done;

        afe_port_d = &afe_ports[afe_cfg->port_id];
        afe_port_d->port_id = afe_cfg->port_id;

        WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");
        memset(&port_cfg_cmd, 0, sizeof(port_cfg_cmd));
        if (fill_afe_cmd_header(&port_cfg_cmd.hdr,
                                afe_cfg->port_id,
                                CPE_AFE_PORT_CMD_GENERIC_CONFIG,
                                CPE_AFE_CMD_PORT_CFG_PAYLOAD_SIZE,
                                false)) {
                ret = -EINVAL;
                goto err_ret;
        }

        port_cfg_cmd.bit_width = afe_cfg->bit_width;
        port_cfg_cmd.num_channels = afe_cfg->num_channels;
        port_cfg_cmd.sample_rate = afe_cfg->sample_rate;

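        /*
         * The EC PP port (CPE_AFE_PORT_3_TX) uses a fixed buffer size;
         * for all other ports the output buffer size is derived from
         * the bit width and sample rate via AFE_OUT_BUF_SIZE().
         */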
        if (afe_port_d->port_id == CPE_AFE_PORT_3_TX)
                port_cfg_cmd.buffer_size = WCD_CPE_EC_PP_BUF_SIZE;
        else
                port_cfg_cmd.buffer_size =
                        AFE_OUT_BUF_SIZE(afe_cfg->bit_width,
                                         afe_cfg->sample_rate);

        ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &port_cfg_cmd);
        if (ret)
                dev_err(core->dev,
                        "%s: afe_port_config failed, err = %d\n",
                        __func__, ret);

err_ret:
        WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
done:
        return ret;
}

/*
 * wcd_cpe_afe_set_params: set the parameters for an afe port
 * @core_handle: handle to the cpe core
 * @afe_cfg: configuration data for the port for which the
 *           parameters are to be set
 * @afe_mad_ctl: true to enable hardware MAD on the port,
 *               false to disable it
 */
static int wcd_cpe_afe_set_params(void *core_handle,
                                  struct wcd_cpe_afe_port_cfg *afe_cfg,
                                  bool afe_mad_ctl)
{
        struct cpe_afe_params afe_params;
        struct cpe_afe_hw_mad_ctrl *hw_mad_ctrl = &afe_params.hw_mad_ctrl;
        struct cpe_afe_port_cfg *port_cfg = &afe_params.port_cfg;
        struct wcd_cpe_core *core = core_handle;
        struct wcd_cmi_afe_port_data *afe_port_d;
        int ret = 0, pld_size = 0;

        ret = wcd_cpe_is_valid_port(core, afe_cfg, __func__);
        if (ret)
                return ret;

        afe_port_d = &afe_ports[afe_cfg->port_id];
        afe_port_d->port_id = afe_cfg->port_id;

        WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");

        ret = wcd_cpe_send_afe_cal(core, afe_port_d);
        if (ret) {
                dev_err(core->dev,
                        "%s: afe acdb cal send failed, err = %d\n",
                        __func__, ret);
                goto err_ret;
        }

        pld_size = CPE_AFE_PARAM_PAYLOAD_SIZE;
        memset(&afe_params, 0, sizeof(afe_params));

        if (fill_afe_cmd_header(&afe_params.hdr,
                                afe_cfg->port_id,
                                CPE_AFE_CMD_SET_PARAM,
                                (u8) pld_size, false)) {
                ret = -EINVAL;
                goto err_ret;
        }

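        /*
         * The SET_PARAM payload carries two parameters back to back:
         * the HW MAD control (enabling/disabling audio MAD) followed
         * by the generic port configuration.
         */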
        hw_mad_ctrl->param.module_id = CPE_AFE_MODULE_HW_MAD;
        hw_mad_ctrl->param.param_id = CPE_AFE_PARAM_ID_HW_MAD_CTL;
        hw_mad_ctrl->param.p_size.sr.param_size = PARAM_SIZE_AFE_HW_MAD_CTRL;
        hw_mad_ctrl->param.p_size.sr.reserved = 0;
        hw_mad_ctrl->minor_version = 1;
        hw_mad_ctrl->mad_type = MAD_TYPE_AUDIO;
        hw_mad_ctrl->mad_enable = afe_mad_ctl;

        port_cfg->param.module_id = CPE_AFE_MODULE_AUDIO_DEV_INTERFACE;
        port_cfg->param.param_id = CPE_AFE_PARAM_ID_GENERIC_PORT_CONFIG;
        port_cfg->param.p_size.sr.param_size = PARAM_SIZE_AFE_PORT_CFG;
        port_cfg->param.p_size.sr.reserved = 0;
        port_cfg->minor_version = 1;
        port_cfg->bit_width = afe_cfg->bit_width;
        port_cfg->num_channels = afe_cfg->num_channels;
        port_cfg->sample_rate = afe_cfg->sample_rate;

        ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &afe_params);
        if (ret)
                dev_err(core->dev,
                        "%s: afe_set_params failed, err = %d\n",
                        __func__, ret);
err_ret:
        WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
        return ret;
}

/*
 * wcd_cpe_afe_port_start: send the start command to afe service
 * @core_handle: handle to the cpe core
 * @port_cfg: configuration data for the afe port which needs
 *            to be started
 */
static int wcd_cpe_afe_port_start(void *core_handle,
                                  struct wcd_cpe_afe_port_cfg *port_cfg)
{
        struct cmi_hdr hdr;
        struct wcd_cpe_core *core = core_handle;
        struct wcd_cmi_afe_port_data *afe_port_d;
        int ret = 0;

        ret = wcd_cpe_is_valid_port(core, port_cfg, __func__);
        if (ret)
                return ret;

        afe_port_d = &afe_ports[port_cfg->port_id];
        afe_port_d->port_id = port_cfg->port_id;

        WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");

        memset(&hdr, 0, sizeof(hdr));
        fill_afe_cmd_header(&hdr, port_cfg->port_id,
                            CPE_AFE_PORT_CMD_START,
                            0, false);
        ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &hdr);
        if (ret)
                dev_err(core->dev,
                        "%s: afe_port_start cmd failed, err = %d\n",
                        __func__, ret);
        WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
        return ret;
}

/*
 * wcd_cpe_afe_port_stop: send the stop command to afe service
 * @core_handle: handle to the cpe core
 * @port_cfg: configuration data for the afe port which needs
 *            to be stopped
 */
static int wcd_cpe_afe_port_stop(void *core_handle,
                                 struct wcd_cpe_afe_port_cfg *port_cfg)
{
        struct cmi_hdr hdr;
        struct wcd_cpe_core *core = core_handle;
        struct wcd_cmi_afe_port_data *afe_port_d;
        int ret = 0;

        ret = wcd_cpe_is_valid_port(core, port_cfg, __func__);
        if (ret)
                return ret;

        afe_port_d = &afe_ports[port_cfg->port_id];
        afe_port_d->port_id = port_cfg->port_id;

        WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");

        memset(&hdr, 0, sizeof(hdr));
        fill_afe_cmd_header(&hdr, port_cfg->port_id,
                            CPE_AFE_PORT_CMD_STOP,
                            0, false);
        ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &hdr);
        if (ret)
                dev_err(core->dev,
                        "%s: afe_stop cmd failed, err = %d\n",
                        __func__, ret);

        WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
        return ret;
}

/*
 * wcd_cpe_afe_port_suspend: send the suspend command to afe service
 * @core_handle: handle to the cpe core
 * @port_cfg: configuration data for the afe port which needs
 *            to be suspended
 */
static int wcd_cpe_afe_port_suspend(void *core_handle,
                                    struct wcd_cpe_afe_port_cfg *port_cfg)
{
        struct cmi_hdr hdr;
        struct wcd_cpe_core *core = core_handle;
        struct wcd_cmi_afe_port_data *afe_port_d;
        int ret = 0;

        ret = wcd_cpe_is_valid_port(core, port_cfg, __func__);
        if (ret)
                return ret;

        afe_port_d = &afe_ports[port_cfg->port_id];
        afe_port_d->port_id = port_cfg->port_id;

        WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");

        memset(&hdr, 0, sizeof(hdr));
        fill_afe_cmd_header(&hdr, port_cfg->port_id,
                            CPE_AFE_PORT_CMD_SUSPEND,
                            0, false);
        ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &hdr);
        if (ret)
                dev_err(core->dev,
                        "%s: afe_suspend cmd failed, err = %d\n",
                        __func__, ret);
        WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
        return ret;
}

/*
 * wcd_cpe_afe_port_resume: send the resume command to afe service
 * @core_handle: handle to the cpe core
 * @port_cfg: configuration data for the afe port which needs
 *            to be resumed
 */
static int wcd_cpe_afe_port_resume(void *core_handle,
                                   struct wcd_cpe_afe_port_cfg *port_cfg)
{
        struct cmi_hdr hdr;
        struct wcd_cpe_core *core = core_handle;
        struct wcd_cmi_afe_port_data *afe_port_d;
        int ret = 0;

        ret = wcd_cpe_is_valid_port(core, port_cfg, __func__);
        if (ret)
                return ret;

        afe_port_d = &afe_ports[port_cfg->port_id];
        afe_port_d->port_id = port_cfg->port_id;

        WCD_CPE_GRAB_LOCK(&afe_port_d->afe_lock, "afe");

        memset(&hdr, 0, sizeof(hdr));
        fill_afe_cmd_header(&hdr, port_cfg->port_id,
                            CPE_AFE_PORT_CMD_RESUME,
                            0, false);
        ret = wcd_cpe_cmi_send_afe_msg(core, afe_port_d, &hdr);
        if (ret)
                dev_err(core->dev,
                        "%s: afe_resume cmd failed, err = %d\n",
                        __func__, ret);
        WCD_CPE_REL_LOCK(&afe_port_d->afe_lock, "afe");
        return ret;
}

/*
 * wcd_cpe_get_afe_ops: fill the client-visible afe callbacks
 * @afe_ops: structure to be filled with the afe service callbacks
 */
int wcd_cpe_get_afe_ops(struct wcd_cpe_afe_ops *afe_ops)
{
        afe_ops->afe_set_params = wcd_cpe_afe_set_params;
        afe_ops->afe_port_start = wcd_cpe_afe_port_start;
        afe_ops->afe_port_stop = wcd_cpe_afe_port_stop;
        afe_ops->afe_port_suspend = wcd_cpe_afe_port_suspend;
        afe_ops->afe_port_resume = wcd_cpe_afe_port_resume;
        afe_ops->afe_port_cmd_cfg = wcd_cpe_afe_cmd_port_cfg;

        return 0;
}
EXPORT_SYMBOL(wcd_cpe_get_afe_ops);
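
/*
 * Illustrative sketch only, not part of the driver: a client codec
 * driver would typically consume these ops roughly as below. The
 * port configuration values are assumptions for the example, and
 * 'core' stands for the cpe core handle the client already holds.
 *
 *      struct wcd_cpe_afe_ops afe_ops;
 *      struct wcd_cpe_afe_port_cfg cfg = {
 *              .port_id = 1,
 *              .bit_width = 16,
 *              .num_channels = 1,
 *              .sample_rate = 16000,
 *      };
 *      int ret;
 *
 *      wcd_cpe_get_afe_ops(&afe_ops);
 *      ret = afe_ops.afe_set_params(core, &cfg, true);
 *      if (!ret)
 *              ret = afe_ops.afe_port_start(core, &cfg);
 *      ...
 *      afe_ops.afe_port_stop(core, &cfg);
 */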

MODULE_DESCRIPTION("WCD CPE Core");
MODULE_LICENSE("GPL v2");