blob: e584cf05a6eff4a542ceab708a4130985c3b894f [file] [log] [blame]
Vaishnavi Kommaraju859c58f2018-01-23 18:10:21 +05301/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +05302 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/slab.h>
16#include <linux/sched.h>
17#include <linux/completion.h>
18#include <linux/kthread.h>
19#include <linux/delay.h>
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +053020#include <sound/soc.h>
Laxminath Kasam7e057cf2017-08-09 23:55:15 +053021#include "wcd9335_registers.h"
Laxminath Kasam605b42f2017-08-01 22:02:15 +053022#include "core.h"
23#include "cpe_cmi.h"
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +053024#include "wcd_cpe_services.h"
25#include "wcd_cmi_api.h"
26
27#define CPE_MSG_BUFFER_SIZE 132
28#define CPE_NO_SERVICE 0
29
30#define CMI_DRIVER_SUPPORTED_VERSION 0
31#define CMI_API_SUCCESS 0
32#define CMI_MSG_TRANSPORT (0x0002)
33#define CPE_SVC_INACTIVE_STATE_RETRIES_MAX 10
34
35#define TOMTOM_A_SVASS_SPE_DRAM_OFFSET 0x50000
36#define TOMTOM_A_SVASS_SPE_DRAM_SIZE 0x30000
37#define TOMTOM_A_SVASS_SPE_IRAM_OFFSET 0x80000
38#define TOMTOM_A_SVASS_SPE_IRAM_SIZE 0xC000
39#define TOMTOM_A_SVASS_SPE_INBOX_SIZE 12
40#define TOMTOM_A_SVASS_SPE_OUTBOX_SIZE 12
41
42#define MEM_ACCESS_NONE_VAL 0x0
43#define MEM_ACCESS_IRAM_VAL 0x1
44#define MEM_ACCESS_DRAM_VAL 0x2
45#define LISTEN_CTL_SPE_VAL 0x0
46#define LISTEN_CTL_MSM_VAL 0x1
47
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +053048#define WCD9335_CPE_SS_SPE_DRAM_OFFSET 0x48000
49#define WCD9335_CPE_SS_SPE_DRAM_SIZE 0x34000
50#define WCD9335_CPE_SS_SPE_IRAM_OFFSET 0x80000
51#define WCD9335_CPE_SS_SPE_IRAM_SIZE 0x20000
52
53#define WCD9335_CPE_SS_SPE_INBOX_SIZE 16
54#define WCD9335_CPE_SS_SPE_OUTBOX_SIZE 16
55#define WCD9335_CPE_SS_SPE_MEM_BANK_SIZ 16
56
57#define WCD9335_CPE_SS_SPE_INBOX1(N) (WCD9335_CPE_SS_INBOX1_0 + (N))
58#define WCD9335_CPE_SS_SPE_OUTBOX1(N) (WCD9335_CPE_SS_OUTBOX1_0 + (N))
59#define WCD9335_CPE_SS_MEM_BANK(N) (WCD9335_CPE_SS_MEM_BANK_0 + (N))
60
61#define CHUNK_SIZE 16
62
/*
 * Mutex lock/unlock helpers that trace every acquire and release;
 * "name" is a human-readable tag for the debug log.
 *
 * Wrapped in do { } while (0) so each macro expands as exactly one
 * statement.  The previous bare { } block form breaks constructs such
 * as "if (x) CPE_SVC_GRAB_LOCK(...); else ..." because the trailing
 * semicolon becomes a separate empty statement.
 */
#define CPE_SVC_GRAB_LOCK(lock, name) \
do { \
	pr_debug("%s: %s lock acquire\n", \
		 __func__, name); \
	mutex_lock(lock); \
} while (0)

#define CPE_SVC_REL_LOCK(lock, name) \
do { \
	pr_debug("%s: %s lock release\n", \
		__func__, name); \
	mutex_unlock(lock); \
} while (0)
76
/*
 * Per-codec CPE hardware descriptions, positional initialization of
 * struct cpe_svc_hw_cfg (declared in wcd_cpe_services.h).
 * NOTE(review): initializer order assumed to be DRAM size/offset,
 * IRAM size/offset, inbox size, outbox size, matching the macro
 * names used -- confirm against the struct declaration.
 */
static const struct cpe_svc_hw_cfg cpe_svc_tomtom_info = {
	TOMTOM_A_SVASS_SPE_DRAM_SIZE,
	TOMTOM_A_SVASS_SPE_DRAM_OFFSET,
	TOMTOM_A_SVASS_SPE_IRAM_SIZE,
	TOMTOM_A_SVASS_SPE_IRAM_OFFSET,
	TOMTOM_A_SVASS_SPE_INBOX_SIZE,
	TOMTOM_A_SVASS_SPE_OUTBOX_SIZE
};

static const struct cpe_svc_hw_cfg cpe_svc_wcd9335_info = {
	WCD9335_CPE_SS_SPE_DRAM_SIZE,
	WCD9335_CPE_SS_SPE_DRAM_OFFSET,
	WCD9335_CPE_SS_SPE_IRAM_SIZE,
	WCD9335_CPE_SS_SPE_IRAM_OFFSET,
	WCD9335_CPE_SS_SPE_INBOX_SIZE,
	WCD9335_CPE_SS_SPE_OUTBOX_SIZE
};
94
/* Top-level state of the CPE (Codec Processing Engine) state machine. */
enum cpe_state {
	CPE_STATE_UNINITIALIZED = 0,
	CPE_STATE_INITIALIZED,
	CPE_STATE_IDLE,			/* online, no command in flight */
	CPE_STATE_DOWNLOADING,		/* firmware segment download */
	CPE_STATE_BOOTING,		/* boot handshake in progress */
	CPE_STATE_SENDING_MSG,		/* CMI message sent, awaiting reply */
	CPE_STATE_OFFLINE,
	CPE_STATE_BUFFERING,
	CPE_STATE_BUFFERING_CANCELLED
};

/* Finer-grained phase within a cpe_state. */
enum cpe_substate {
	CPE_SS_IDLE = 0,
	CPE_SS_MSG_REQUEST_ACCESS,	/* waiting for DRAM access grant */
	CPE_SS_MSG_SEND_INBOX,		/* message written to the inbox */
	CPE_SS_MSG_SENT,
	CPE_SS_DL_DOWNLOADING,
	CPE_SS_DL_COMPLETED,
	CPE_SS_BOOT,
	CPE_SS_BOOT_INIT,
	CPE_SS_ONLINE
};

/* Commands dispatched through the worker thread / command processor. */
enum cpe_command {
	CPE_CMD_KILL_THREAD = 0,
	CPE_CMD_BOOT,
	CPE_CMD_BOOT_INITIALIZE,
	CPE_CMD_BOOT_COMPLETE,
	CPE_CMD_SEND_MSG,
	CPE_CMD_SEND_TRANS_MSG,
	CPE_CMD_SEND_MSG_COMPLETE,
	CPE_CMD_PROCESS_IRQ,
	CPE_CMD_RAMDUMP,
	CPE_CMD_DL_SEGMENT,
	CPE_CMD_SHUTDOWN,
	CPE_CMD_RESET,
	CPE_CMD_DEINITIALIZE,
	CPE_CMD_READ,
	CPE_CMD_ENABLE_LAB,
	CPE_CMD_DISABLE_LAB,
	CPE_CMD_SWAP_BUFFER,
	CPE_LAB_CFG_SB,
	CPE_CMD_CANCEL_MEMACCESS,
	CPE_CMD_PROC_INCOMING_MSG,
	CPE_CMD_FTM_TEST,
};

/*
 * Result of processing one command node.  CPE_PROC_QUEUED means the
 * command was deferred; its node must not be freed yet (see
 * cpe_cmd_received()).
 */
enum cpe_process_result {
	CPE_PROC_SUCCESS = 0,
	CPE_PROC_FAILED,
	CPE_PROC_KILLED,
	CPE_PROC_QUEUED,
};
149
/* One queued command for the worker thread. */
struct cpe_command_node {
	enum cpe_command command;
	enum cpe_svc_result result;
	void *data;		/* command payload; freed per-command, see cpe_command_cleanup() */
	struct list_head list;
};

/*
 * Per-instance CPE service context: command queue, worker thread,
 * state machine and registered notification clients.
 */
struct cpe_info {
	struct list_head main_queue;	/* cpe_command_node list, msg_lock held */
	struct completion cmd_complete;	/* wakes the worker thread */
	struct completion thread_comp;	/* worker exit handshake */
	void *thread_handler;		/* worker task, NULL when not running */
	bool stop_thread;		/* set under msg_lock to stop worker */
	struct mutex msg_lock;		/* protects main_queue and stop_thread */
	enum cpe_state state;
	enum cpe_substate substate;
	struct list_head client_list;	/* cpe_notif_node list, cpe_svc_lock held */
	/* codec-specific command dispatcher */
	enum cpe_process_result (*cpe_process_command)
			(struct cpe_command_node *command_node);
	/* validates whether a command is legal for this instance */
	enum cpe_svc_result (*cpe_cmd_validate)
				(const struct cpe_info *i,
				 enum cpe_command command);
	/* invoked once the CPE is fully online */
	enum cpe_svc_result (*cpe_start_notification)
			(struct cpe_info *i);
	u32 initialized;
	struct cpe_svc_tgt_abstraction *tgt;	/* codec hardware ops */
	void *pending;			/* in-flight cpe_send_msg, if any */
	void *data;
	void *client_context;
	u32 codec_id;
	struct work_struct clk_plan_work;	/* post-boot clock plan exchange */
	struct completion core_svc_cmd_compl;	/* core-service cmd response */
};

/* NOTE(review): presumably a wait-for-interrupt idle instruction
 * sequence for the target DSP -- confirm against target init code. */
struct cpe_tgt_waiti_info {
	u8 tgt_waiti_size;
	u8 *tgt_waiti_data;
};

/*
 * Codec-specific hardware abstraction: boot/reset control, mailbox and
 * RAM accessors, notification routing.  Populated by
 * cpe_tgt_tomtom_init() / cpe_tgt_wcd9335_init().
 */
struct cpe_svc_tgt_abstraction {
	enum cpe_svc_result (*tgt_boot)(int debug_mode);

	u32 (*tgt_cpar_init_done)(void);

	u32 (*tgt_is_active)(void);

	enum cpe_svc_result (*tgt_reset)(void);

	enum cpe_svc_result (*tgt_stop)(void);

	enum cpe_svc_result (*tgt_read_mailbox)
				(u8 *buffer, size_t size);

	enum cpe_svc_result (*tgt_write_mailbox)
				(u8 *buffer, size_t size);

	enum cpe_svc_result (*tgt_read_ram)
				(struct cpe_info *c,
				 struct cpe_svc_mem_segment *data);

	enum cpe_svc_result (*tgt_write_ram)
				(struct cpe_info *c,
				 const struct cpe_svc_mem_segment *data);

	enum cpe_svc_result (*tgt_route_notification)
				(enum cpe_svc_module module,
				 enum cpe_svc_route_dest dest);

	enum cpe_svc_result (*tgt_set_debug_mode)(u32 enable);
	const struct cpe_svc_hw_cfg *(*tgt_get_cpe_info)(void);
	enum cpe_svc_result (*tgt_deinit)
				(struct cpe_svc_tgt_abstraction *param);
	enum cpe_svc_result (*tgt_voice_tx_lab)
				(bool);
	u8 *inbox;		/* staging buffer for outgoing mailbox data */
	u8 *outbox;		/* staging buffer for incoming mailbox data */
	struct cpe_tgt_waiti_info *tgt_waiti_info;
};
228
229static enum cpe_svc_result cpe_tgt_tomtom_init(
230 struct cpe_svc_codec_info_v1 *codec_info,
231 struct cpe_svc_tgt_abstraction *param);
232
233static enum cpe_svc_result cpe_tgt_wcd9335_init(
234 struct cpe_svc_codec_info_v1 *codec_info,
235 struct cpe_svc_tgt_abstraction *param);
236
/* A CMI message being sent to the CPE, possibly via shared DRAM. */
struct cpe_send_msg {
	u8 *payload;
	u32 isobm;	/* non-zero if message references out-of-band memory */
	u32 address;	/* CPE-side DRAM address; 0 = send in-band via inbox */
	size_t size;
};

struct cpe_read_handle {
	void *registration;
	struct cpe_info t_info;
	struct list_head buffers;
	void *config;
};

/* Callback pair: generic service notification and/or CMI message. */
struct generic_notification {
	void (*notification)
		(const struct cpe_svc_notification *parameter);
	void (*cmi_notification)
		(const struct cmi_api_notification *parameter);
};

/* One registered notification client on cpe_info.client_list. */
struct cpe_notif_node {
	struct generic_notification notif;
	u32 mask;		/* bitmask of events the client wants */
	u32 service;		/* CMI service id the client handles */
	const struct cpe_info *context;
	const char *name;
	u32 disabled;		/* suppresses generic notifications when set */
	struct list_head list;
};

/* Driver-wide singleton state (instantiated as cpe_d). */
struct cpe_priv {
	struct cpe_info *cpe_default_handle;
	void (*cpe_irq_control_callback)(u32 enable);
	void (*cpe_query_freq_plans_cb)
		(void *cdc_priv,
		 struct cpe_svc_cfg_clk_plan *clk_freq);
	void (*cpe_change_freq_plan_cb)(void *cdc_priv,
			u32 clk_freq);
	u32 cpe_msg_buffer;	/* CPE DRAM address for large messages */
	void *cpe_cmi_handle;	/* registration handle from cmi_register() */
	struct mutex cpe_api_mutex;	/* serializes API / IRQ processing */
	struct mutex cpe_svc_lock;	/* protects client_list */
	struct cpe_svc_boot_event cpe_debug_vector;
	void *cdc_priv;		/* codec handle used for register I/O */
};
283
284static struct cpe_priv cpe_d;
285
286static enum cpe_svc_result __cpe_svc_shutdown(void *cpe_handle);
287
288static enum cpe_svc_result cpe_is_command_valid(
289 const struct cpe_info *t_info,
290 enum cpe_command command);
291
292static int cpe_register_read(u32 reg, u8 *val)
293{
294 *(val) = snd_soc_read(cpe_d.cdc_priv, reg);
295 return 0;
296}
297
298static enum cpe_svc_result cpe_update_bits(u32 reg,
299 u32 mask, u32 value)
300{
301 int ret = 0;
302
303 ret = snd_soc_update_bits(cpe_d.cdc_priv, reg,
304 mask, value);
305 if (ret < 0)
306 return CPE_SVC_FAILED;
307
308 return CPE_SVC_SUCCESS;
309}
310
311static int cpe_register_write(u32 reg, u32 val)
312{
313 int ret = 0;
314
Asish Bhattacharya84f7f732017-07-25 16:29:27 +0530315 if (reg != WCD9335_CPE_SS_MEM_BANK_0)
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530316 pr_debug("%s: reg = 0x%x, value = 0x%x\n",
317 __func__, reg, val);
318
319 ret = snd_soc_write(cpe_d.cdc_priv, reg, val);
320 if (ret < 0)
321 return CPE_SVC_FAILED;
322
323 return CPE_SVC_SUCCESS;
324}
325
/*
 * Stream @to_write bytes from @ptr into codec register @reg using the
 * SLIMbus repeated-write transfer (bulk path for firmware download).
 * Returns CPE_SVC_FAILED on error, CPE_SVC_SUCCESS otherwise.
 */
static int cpe_register_write_repeat(u32 reg, u8 *ptr, u32 to_write)
{
	struct snd_soc_codec *codec = cpe_d.cdc_priv;
	struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
	int ret = 0;

	ret = wcd9xxx_slim_write_repeat(wcd9xxx, reg, to_write, ptr);
	if (ret != 0)
		pr_err("%s: slim_write_repeat failed\n", __func__);

	/* NOTE(review): a positive ret is logged above yet still treated
	 * as success here; presumably the helper only returns 0 or a
	 * negative errno -- confirm. */
	if (ret < 0)
		return CPE_SVC_FAILED;

	return CPE_SVC_SUCCESS;
}
341
/* All supported codec buses can auto-increment the register address
 * on consecutive reads, so this always reports true. */
static bool cpe_register_read_autoinc_supported(void)
{
	return true;
}
346
347
/* Called under msgq locked context */
/*
 * Drain the command queue.  Node ownership: SUCCESS/FAILED nodes are
 * freed here; KILLED nodes are owned elsewhere; any other result
 * (e.g. QUEUED) re-queues the node at the head and the loop stops on
 * the next iteration because proc_rc is no longer SUCCESS.
 */
static void cpe_cmd_received(struct cpe_info *t_info)
{
	struct cpe_command_node *node = NULL;
	enum cpe_process_result proc_rc = CPE_PROC_SUCCESS;

	if (!t_info) {
		pr_err("%s: Invalid thread info\n",
			__func__);
		return;
	}

	while (!list_empty(&t_info->main_queue)) {
		/* stop draining once a command did not fully succeed */
		if (proc_rc != CPE_PROC_SUCCESS)
			break;
		node = list_first_entry(&t_info->main_queue,
					struct cpe_command_node, list);
		if (!node)
			break;
		list_del(&node->list);
		proc_rc = t_info->cpe_process_command(node);
		pr_debug("%s: process command return %d\n",
			 __func__, proc_rc);

		switch (proc_rc) {
		case CPE_PROC_SUCCESS:
			kfree(node);
			break;
		case CPE_PROC_FAILED:
			kfree(node);
			pr_err("%s: cmd failed\n", __func__);
			break;
		case CPE_PROC_KILLED:
			break;
		default:
			/* deferred (e.g. CPE_PROC_QUEUED): keep the node */
			list_add(&node->list, &(t_info->main_queue));

		}
	}
}
388
/*
 * Worker thread main loop: sleep until cpe_send_cmd_to_thread() (or
 * cleanup) signals cmd_complete, then drain the queue under msg_lock.
 * Exits when stop_thread is set, signalling thread_comp on the way
 * out so cpe_cleanup_worker_thread() can reap it.
 */
static int cpe_worker_thread(void *context)
{
	struct cpe_info *t_info = (struct cpe_info *)context;

	/*
	 * Thread will run until requested to stop explicitly
	 * by setting the t_info->stop_thread flag
	 */
	while (1) {
		/* Wait for command to be processed */
		wait_for_completion(&t_info->cmd_complete);

		CPE_SVC_GRAB_LOCK(&t_info->msg_lock, "msg_lock");
		cpe_cmd_received(t_info);
		reinit_completion(&t_info->cmd_complete);
		/* Check if thread needs to be stopped */
		if (t_info->stop_thread)
			goto unlock_and_exit;
		CPE_SVC_REL_LOCK(&t_info->msg_lock, "msg_lock");
	};

unlock_and_exit:
	/* still holding msg_lock here; release it before exiting */
	pr_debug("%s: thread stopped\n", __func__);
	CPE_SVC_REL_LOCK(&t_info->msg_lock, "msg_lock");
	complete_and_exit(&t_info->thread_comp, 0);
}
415
416static void cpe_create_worker_thread(struct cpe_info *t_info)
417{
418 INIT_LIST_HEAD(&t_info->main_queue);
419 init_completion(&t_info->cmd_complete);
420 init_completion(&t_info->thread_comp);
421 t_info->stop_thread = false;
422 t_info->thread_handler = kthread_run(cpe_worker_thread,
423 (void *)t_info, "cpe-worker-thread");
424 pr_debug("%s: Created new worker thread\n",
425 __func__);
426}
427
/*
 * Stop and reap the worker thread: set stop_thread under msg_lock,
 * wake the thread via cmd_complete, then block until the thread
 * signals thread_comp from complete_and_exit().
 */
static void cpe_cleanup_worker_thread(struct cpe_info *t_info)
{
	if (!t_info->thread_handler) {
		pr_err("%s: thread not created\n", __func__);
		return;
	}

	/*
	 * Wake up the command handler in case
	 * it is waiting for an command to be processed.
	 */
	CPE_SVC_GRAB_LOCK(&t_info->msg_lock, "msg_lock");
	t_info->stop_thread = true;
	complete(&t_info->cmd_complete);
	CPE_SVC_REL_LOCK(&t_info->msg_lock, "msg_lock");

	/* Wait for the thread to exit */
	wait_for_completion(&t_info->thread_comp);
	t_info->thread_handler = NULL;

	pr_debug("%s: Thread cleaned up successfully\n",
		 __func__);
}
451
452static enum cpe_svc_result
453cpe_send_cmd_to_thread(struct cpe_info *t_info,
454 enum cpe_command command, void *data,
455 bool high_prio)
456{
457 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
458 struct cpe_command_node *cmd = NULL;
459
460 rc = cpe_is_command_valid(t_info, command);
461 if (rc != CPE_SVC_SUCCESS) {
462 pr_err("%s: Invalid command %d\n",
463 __func__, command);
464 return rc;
465 }
466
467 cmd = kzalloc(sizeof(struct cpe_command_node),
468 GFP_ATOMIC);
469 if (!cmd)
470 return CPE_SVC_NO_MEMORY;
471
472 cmd->command = command;
473 cmd->data = data;
474
475 CPE_SVC_GRAB_LOCK(&t_info->msg_lock, "msg_lock");
476 if (high_prio)
477 list_add(&(cmd->list),
478 &(t_info->main_queue));
479 else
480 list_add_tail(&(cmd->list),
481 &(t_info->main_queue));
482 complete(&t_info->cmd_complete);
483 CPE_SVC_REL_LOCK(&t_info->msg_lock, "msg_lock");
484
485 return rc;
486}
487
488static enum cpe_svc_result cpe_change_state(
489 struct cpe_info *t_info,
490 enum cpe_state state, enum cpe_substate ss)
491{
492 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
493
494 if (!t_info)
495 t_info = cpe_d.cpe_default_handle;
496
497 t_info->state = state;
498 t_info->substate = ss;
499
500 pr_debug("%s: current state: %d,%d, new_state: %d,%d\n",
501 __func__, t_info->state, t_info->substate,
502 state, ss);
503
504 return rc;
505}
506
507static enum cpe_svc_result
508cpe_is_command_valid(const struct cpe_info *t_info,
509 enum cpe_command command)
510{
511 enum cpe_svc_result rc = CPE_SVC_INVALID_HANDLE;
512
513 if (t_info && t_info->cpe_cmd_validate)
514 rc = t_info->cpe_cmd_validate(t_info, command);
515 else
516 pr_err("%s: invalid handle or callback\n",
517 __func__);
518 return rc;
519}
520
/*
 * Deliver one notification to a single client whose event mask
 * matches payload->event.  The generic callback is skipped for
 * disabled clients; clients registered for CMI messages additionally
 * get the payload through their CMI callback.
 */
static void cpe_notify_client(struct cpe_notif_node *client,
		struct cpe_svc_notification *payload)
{
	if (!client || !payload) {
		pr_err("%s: invalid client or payload\n",
			__func__);
		return;
	}

	if (!(client->mask & payload->event)) {
		pr_debug("%s: client mask 0x%x not registered for event 0x%x\n",
			 __func__, client->mask, payload->event);
		return;
	}

	if (client->notif.notification && !client->disabled)
		client->notif.notification(payload);

	/* note: the CMI callback is invoked even for disabled clients */
	if ((client->mask & CPE_SVC_CMI_MSG) &&
	    client->notif.cmi_notification)
		client->notif.cmi_notification(
			(const struct cmi_api_notification *)payload);
}
544
/*
 * Deliver @payload to every registered non-CMI client, holding
 * cpe_svc_lock across the list walk.  CMI clients are serviced
 * separately by cpe_notify_cmi_client() based on the service id in
 * the message.  payload->private_data is overwritten with the codec
 * private handle before delivery.
 */
static void cpe_broadcast_notification(const struct cpe_info *t_info,
		struct cpe_svc_notification *payload)
{
	struct cpe_notif_node *n = NULL;

	if (!t_info || !payload) {
		pr_err("%s: invalid handle\n", __func__);
		return;
	}

	pr_debug("%s: notify clients, event = %d\n",
		 __func__, payload->event);
	payload->private_data = cpe_d.cdc_priv;

	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
	list_for_each_entry(n, &t_info->client_list, list) {
		if (!(n->mask & CPE_SVC_CMI_MSG))
			cpe_notify_client(n, payload);
	}
	CPE_SVC_REL_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
}
566
567static void *cpe_register_generic(struct cpe_info *t_info,
568 void notification_callback(
569 const struct cpe_svc_notification *parameter),
570 void cmi_callback(
571 const struct cmi_api_notification *parameter),
572 u32 mask, u32 service, const char *name)
573{
574 struct cpe_notif_node *n = NULL;
575
576 n = kzalloc(sizeof(struct cpe_notif_node),
577 GFP_KERNEL);
578 if (!n)
579 return NULL;
580 n->mask = mask;
581 n->service = service;
582 n->notif.notification = notification_callback;
583 n->notif.cmi_notification = cmi_callback;
584 n->context = t_info;
585 n->disabled = false;
586 n->name = name;
587
588 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
589 /* Make sure CPE core service is first */
590 if (service == CMI_CPE_CORE_SERVICE_ID)
591 list_add(&n->list, &t_info->client_list);
592 else
593 list_add_tail(&n->list, &t_info->client_list);
594 CPE_SVC_REL_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
595
596 return n;
597}
598
599static enum cpe_svc_result cpe_deregister_generic(struct cpe_info *t_info,
600 void *reg_handle)
601{
602 struct cpe_notif_node *n = (struct cpe_notif_node *)reg_handle;
603
604 if (!t_info || !reg_handle) {
605 pr_err("%s: invalid handle\n", __func__);
606 return CPE_SVC_INVALID_HANDLE;
607 }
608
Vaishnavi Kommaraju859c58f2018-01-23 18:10:21 +0530609 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530610 list_del(&(n->list));
611 kfree(reg_handle);
Vaishnavi Kommaraju859c58f2018-01-23 18:10:21 +0530612 CPE_SVC_REL_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530613
614 return CPE_SVC_SUCCESS;
615}
616
617static enum cpe_svc_result cpe_svc_tgt_init(struct cpe_svc_codec_info_v1 *i,
618 struct cpe_svc_tgt_abstraction *abs)
619{
620 if (!i || !abs) {
621 pr_err("%s: Incorrect information provided\n",
622 __func__);
623 return CPE_SVC_FAILED;
624 }
625
626 switch (i->id) {
627 case CPE_SVC_CODEC_TOMTOM:
628 return cpe_tgt_tomtom_init(i, abs);
629 case CPE_SVC_CODEC_WCD9335:
630 return cpe_tgt_wcd9335_init(i, abs);
631 default:
632 pr_err("%s: Codec type %d not supported\n",
633 __func__, i->id);
634 return CPE_SVC_FAILED;
635 }
636
637 return CPE_SVC_SUCCESS;
638}
639
/*
 * Route an incoming CMI message to the client registered for the
 * service id found in the message header.  Delivery stops at the
 * first matching client (one handler per service).
 */
static void cpe_notify_cmi_client(struct cpe_info *t_info, u8 *payload,
		enum cpe_svc_result result)
{
	struct cpe_notif_node *n = NULL;
	struct cmi_api_notification notif;
	struct cmi_hdr *hdr;
	u8 service = 0;

	if (!t_info || !payload) {
		pr_err("%s: invalid payload/handle\n",
			__func__);
		return;
	}

	hdr = CMI_GET_HEADER(payload);
	service = CMI_HDR_GET_SERVICE(hdr);

	notif.event = CMI_API_MSG;
	notif.result = result;
	notif.message = payload;

	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
	list_for_each_entry(n, &t_info->client_list, list) {

		if ((n->mask & CPE_SVC_CMI_MSG) &&
		    n->service == service &&
		    n->notif.cmi_notification) {
			n->notif.cmi_notification(&notif);
			break;
		}
	}
	CPE_SVC_REL_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
}
673
674static void cpe_toggle_irq_notification(struct cpe_info *t_info, u32 value)
675{
676 if (cpe_d.cpe_irq_control_callback)
677 cpe_d.cpe_irq_control_callback(value);
678}
679
/*
 * Free the heap payload attached to @command_node for the commands
 * that own their data.  NOTE(review): every other command hits the
 * error print, so callers are expected to invoke this only for
 * data-carrying commands -- confirm at the call sites.
 */
static void cpe_command_cleanup(struct cpe_command_node *command_node)
{
	switch (command_node->command) {
	case CPE_CMD_SEND_MSG:
	case CPE_CMD_SEND_TRANS_MSG:
	case CPE_CMD_SEND_MSG_COMPLETE:
	case CPE_CMD_SHUTDOWN:
	case CPE_CMD_READ:
		kfree(command_node->data);
		command_node->data = NULL;
		break;
	default:
		pr_err("%s: unhandled command\n",
			__func__);
		break;
	}
}
697
/*
 * Build a CMI message in the target inbox staging buffer and push it
 * through the CPE mailbox.
 *
 * @opcode selects one of the core-service commands constructed
 * in-band here (shared-mem alloc, DRAM access request, basic
 * response).  For any other opcode the caller-supplied @msg is used:
 *  - msg->address != 0: the payload (and any out-of-band buffer) is
 *    first copied into CPE DRAM via tgt_write_ram() and only a small
 *    CMI_MSG_TRANSPORT descriptor goes through the inbox;
 *  - msg->address == 0: the payload is copied into the inbox as-is
 *    (caller guarantees it fits inbox_size).
 *
 * NOTE(review): @msg may be NULL for the core-service opcodes; the
 * default branch assumes it is non-NULL -- confirm at call sites.
 */
static enum cpe_svc_result cpe_send_msg_to_inbox(
		struct cpe_info *t_info, u32 opcode,
		struct cpe_send_msg *msg)
{
	size_t bytes = 0;
	size_t inbox_size =
		t_info->tgt->tgt_get_cpe_info()->inbox_size;
	struct cmi_hdr *hdr;
	enum cpe_svc_result rc = CPE_SVC_SUCCESS;

	/* Common CMI header for all in-band core-service messages */
	memset(t_info->tgt->inbox, 0, inbox_size);
	hdr = CMI_GET_HEADER(t_info->tgt->inbox);
	CMI_HDR_SET_SESSION(hdr, 1);
	CMI_HDR_SET_SERVICE(hdr, CMI_CPE_CORE_SERVICE_ID);
	CMI_HDR_SET_VERSION(hdr, CMI_DRIVER_SUPPORTED_VERSION);
	CMI_HDR_SET_OBM(hdr, CMI_OBM_FLAG_IN_BAND);

	switch (opcode) {
	case CPE_CORE_SVC_CMD_SHARED_MEM_ALLOC: {
		struct cmi_core_svc_cmd_shared_mem_alloc *m;

		CMI_HDR_SET_OPCODE(hdr,
			CPE_CORE_SVC_CMD_SHARED_MEM_ALLOC);
		CMI_HDR_SET_PAYLOAD_SIZE(hdr,
			sizeof(struct cmi_core_svc_cmd_shared_mem_alloc));
		m = (struct cmi_core_svc_cmd_shared_mem_alloc *)
			CMI_GET_PAYLOAD(t_info->tgt->inbox);
		m->size = CPE_MSG_BUFFER_SIZE;
		pr_debug("send shared mem alloc msg to cpe inbox\n");
		}
		break;
	case CPE_CORE_SVC_CMD_DRAM_ACCESS_REQ:
		CMI_HDR_SET_OPCODE(hdr,
			CPE_CORE_SVC_CMD_DRAM_ACCESS_REQ);
		CMI_HDR_SET_PAYLOAD_SIZE(hdr, 0);
		pr_debug("%s: Creating DRAM acces request msg\n",
			 __func__);
		break;

	case CPE_CMI_BASIC_RSP_OPCODE: {
		struct cmi_basic_rsp_result *rsp;

		CMI_HDR_SET_OPCODE(hdr,
			CPE_CMI_BASIC_RSP_OPCODE);
		CMI_HDR_SET_PAYLOAD_SIZE(hdr,
			sizeof(struct cmi_basic_rsp_result));
		rsp = (struct cmi_basic_rsp_result *)
			CMI_GET_PAYLOAD(t_info->tgt->inbox);
		rsp->status = 0;
		pr_debug("%s: send basic response\n", __func__);
		}
		break;

	default:
		if (msg->address != 0) {
			struct cmi_msg_transport *m = NULL;
			struct cpe_svc_mem_segment mem_seg;

			mem_seg.type = CPE_SVC_DATA_MEM;
			if (msg->isobm) {
				/* copy the out-of-band buffer first */
				struct cmi_obm *obm = (struct cmi_obm *)
					CMI_GET_PAYLOAD(msg->payload);
				mem_seg.cpe_addr = obm->mem_handle;
				mem_seg.data = (u8 *)obm->data_ptr.kvaddr;
				mem_seg.size = obm->size;
				t_info->tgt->tgt_write_ram(t_info, &mem_seg);
			}

			/* then the message payload itself */
			mem_seg.cpe_addr = msg->address;
			mem_seg.data = msg->payload;
			mem_seg.size = msg->size;
			t_info->tgt->tgt_write_ram(t_info, &mem_seg);

			/* inbox only carries the transport descriptor */
			hdr = CMI_GET_HEADER(t_info->tgt->inbox);
			CMI_HDR_SET_OPCODE(hdr, CMI_MSG_TRANSPORT);
			m = (struct cmi_msg_transport *)
				CMI_GET_PAYLOAD(t_info->tgt->inbox);
			m->addr = msg->address;
			m->size = msg->size;
			CMI_HDR_SET_PAYLOAD_SIZE(hdr,
				sizeof(struct cmi_msg_transport));
		} else {
			/* small message: payload carries its own header */
			memcpy(t_info->tgt->inbox, msg->payload,
			       msg->size);
		}

		break;
	}

	pr_debug("%s: sending message to cpe inbox\n",
		 __func__);
	bytes = sizeof(struct cmi_hdr);
	hdr = CMI_GET_HEADER(t_info->tgt->inbox);
	bytes += CMI_HDR_GET_PAYLOAD_SIZE(hdr);
	rc = t_info->tgt->tgt_write_mailbox(t_info->tgt->inbox, bytes);

	return rc;
}
797
798static bool cpe_is_cmd_clk_req(void *cmd)
799{
800 struct cmi_hdr *hdr;
801
802 hdr = CMI_GET_HEADER(cmd);
803
804 if ((CMI_HDR_GET_SERVICE(hdr) ==
805 CMI_CPE_CORE_SERVICE_ID)) {
806 if (CMI_GET_OPCODE(cmd) ==
807 CPE_CORE_SVC_CMD_CLK_FREQ_REQUEST)
808 return true;
809 }
810
811 return false;
812}
813
814static enum cpe_svc_result cpe_process_clk_change_req(
815 struct cpe_info *t_info)
816{
817 struct cmi_core_svc_cmd_clk_freq_request *req;
818
819 req = (struct cmi_core_svc_cmd_clk_freq_request *)
820 CMI_GET_PAYLOAD(t_info->tgt->outbox);
821
822 if (!cpe_d.cpe_change_freq_plan_cb) {
823 pr_err("%s: No support for clk freq change\n",
824 __func__);
825 return CPE_SVC_FAILED;
826 }
827
828 cpe_d.cpe_change_freq_plan_cb(cpe_d.cdc_priv,
829 req->clk_freq);
830
831 /*send a basic response*/
832 cpe_send_msg_to_inbox(t_info,
833 CPE_CMI_BASIC_RSP_OPCODE, NULL);
834
835 return CPE_SVC_SUCCESS;
836}
837
/*
 * Handle a CPE interrupt.  Reads the outbox on mailbox IRQs, goes
 * offline / shuts down on error IRQs, then advances the boot or
 * message state machine according to the current state/substate.
 * Holds cpe_api_mutex for the whole sequence.
 */
static void cpe_process_irq_int(u32 irq,
		struct cpe_info *t_info)
{
	struct cpe_command_node temp_node;
	struct cpe_send_msg *m;
	u8 size = 0;
	bool err_irq = false;
	struct cmi_hdr *hdr;

	pr_debug("%s: irq = %u\n", __func__, irq);

	if (!t_info) {
		pr_err("%s: Invalid handle\n",
			__func__);
		return;
	}

	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
	switch (irq) {
	case CPE_IRQ_OUTBOX_IRQ:
		/* new message from the CPE: pull it into the outbox buffer */
		size = t_info->tgt->tgt_get_cpe_info()->outbox_size;
		t_info->tgt->tgt_read_mailbox(t_info->tgt->outbox, size);
		break;

	case CPE_IRQ_MEM_ACCESS_ERROR:
		err_irq = true;
		cpe_change_state(t_info, CPE_STATE_OFFLINE, CPE_SS_IDLE);
		break;

	case CPE_IRQ_WDOG_BITE:
	case CPE_IRQ_RCO_WDOG_INT:
		err_irq = true;
		__cpe_svc_shutdown(t_info);
		break;

	case CPE_IRQ_FLL_LOCK_LOST:
	default:
		err_irq = true;
		break;
	}

	if (err_irq) {
		pr_err("%s: CPE error IRQ %u occurred\n",
			__func__, irq);
		CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
		return;
	}

	switch (t_info->state) {
	case CPE_STATE_BOOTING:

		/* boot handshake: SS_BOOT -> SS_BOOT_INIT -> SS_ONLINE */
		switch (t_info->substate) {
		case CPE_SS_BOOT:
			temp_node.command = CPE_CMD_BOOT_INITIALIZE;
			temp_node.result = CPE_SVC_SUCCESS;
			t_info->substate = CPE_SS_BOOT_INIT;
			t_info->cpe_process_command(&temp_node);
			break;

		case CPE_SS_BOOT_INIT:
			temp_node.command = CPE_CMD_BOOT_COMPLETE;
			temp_node.result = CPE_SVC_SUCCESS;
			t_info->substate = CPE_SS_ONLINE;
			t_info->cpe_process_command(&temp_node);
			break;

		default:
			/* NOTE(review): format says "substate ... state" but
			 * the arguments are (state, substate) -- log-only bug */
			pr_debug("%s: unhandled substate %d for state %d\n",
				 __func__, t_info->state, t_info->substate);
			break;
		}
		break;

	case CPE_STATE_SENDING_MSG:
		hdr = CMI_GET_HEADER(t_info->tgt->outbox);
		/* an unsolicited detection event can arrive mid-send;
		 * deliver it without disturbing the pending message */
		if (CMI_GET_OPCODE(t_info->tgt->outbox) ==
		    CPE_LSM_SESSION_EVENT_DETECTION_STATUS_V2) {
			pr_debug("%s: session_id: %u, state: %d,%d, event received\n",
				 __func__, CMI_HDR_GET_SESSION_ID(hdr),
				 t_info->state, t_info->substate);
			temp_node.command = CPE_CMD_PROC_INCOMING_MSG;
			temp_node.data = NULL;
			t_info->cpe_process_command(&temp_node);
			break;
		}

		m = (struct cpe_send_msg *)t_info->pending;

		switch (t_info->substate) {
		case CPE_SS_MSG_REQUEST_ACCESS:
			/* DRAM access granted: send the transport message */
			cpe_send_cmd_to_thread(t_info,
				CPE_CMD_SEND_TRANS_MSG, m, true);
			break;

		case CPE_SS_MSG_SEND_INBOX:
			if (cpe_is_cmd_clk_req(t_info->tgt->outbox))
				cpe_process_clk_change_req(t_info);
			else
				cpe_send_cmd_to_thread(t_info,
					CPE_CMD_SEND_MSG_COMPLETE, m, true);
			break;

		default:
			pr_debug("%s: unhandled substate %d for state %d\n",
				 __func__, t_info->state, t_info->substate);
			break;
		}
		break;

	case CPE_STATE_IDLE:
		pr_debug("%s: Message received, notifying client\n",
			 __func__);
		temp_node.command = CPE_CMD_PROC_INCOMING_MSG;
		temp_node.data = NULL;
		t_info->cpe_process_command(&temp_node);
		break;

	default:
		pr_debug("%s: unhandled state %d\n",
			 __func__, t_info->state);
		break;
	}

	CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
}
963
964
965static void broacast_boot_failed(void)
966{
967 struct cpe_info *t_info = cpe_d.cpe_default_handle;
968 struct cpe_svc_notification payload;
969
970 payload.event = CPE_SVC_BOOT_FAILED;
971 payload.result = CPE_SVC_FAILED;
972 payload.payload = NULL;
973 if (t_info)
974 payload.private_data =
975 t_info->client_context;
976 cpe_broadcast_notification(t_info, &payload);
977}
978
979static enum cpe_svc_result broadcast_boot_event(
980 struct cpe_info *t_info)
981{
982 struct cpe_svc_notification payload;
983
984 payload.event = CPE_SVC_ONLINE;
985 payload.result = CPE_SVC_SUCCESS;
986 payload.payload = NULL;
987 if (t_info)
988 payload.private_data =
989 t_info->client_context;
990 cpe_broadcast_notification(t_info, &payload);
991
992 return CPE_SVC_SUCCESS;
993}
994
995static enum cpe_process_result cpe_boot_initialize(struct cpe_info *t_info,
996 enum cpe_svc_result *cpe_rc)
997{
998 enum cpe_process_result rc = CPE_SVC_FAILED;
999 struct cpe_svc_notification payload;
1000 struct cmi_core_svc_event_system_boot *p = NULL;
1001
1002 if (CMI_GET_OPCODE(t_info->tgt->outbox) !=
1003 CPE_CORE_SVC_EVENT_SYSTEM_BOOT) {
1004 broacast_boot_failed();
1005 return rc;
1006 }
1007
1008 p = (struct cmi_core_svc_event_system_boot *)
1009 CMI_GET_PAYLOAD(t_info->tgt->outbox);
1010 if (p->status != CPE_BOOT_SUCCESS) {
1011 pr_err("%s: cpe boot failed, status = %d\n",
1012 __func__, p->status);
1013 broacast_boot_failed();
1014 return rc;
1015 }
1016
1017 /* boot was successful */
1018 if (p->version ==
1019 CPE_CORE_VERSION_SYSTEM_BOOT_EVENT) {
1020 cpe_d.cpe_debug_vector.debug_address =
1021 p->sfr_buff_address;
1022 cpe_d.cpe_debug_vector.debug_buffer_size =
1023 p->sfr_buff_size;
1024 cpe_d.cpe_debug_vector.status = p->status;
1025 payload.event = CPE_SVC_BOOT;
1026 payload.result = CPE_SVC_SUCCESS;
1027 payload.payload = (void *)&cpe_d.cpe_debug_vector;
1028 payload.private_data = t_info->client_context;
1029 cpe_broadcast_notification(t_info, &payload);
1030 }
1031 cpe_change_state(t_info, CPE_STATE_BOOTING,
1032 CPE_SS_BOOT_INIT);
1033 (*cpe_rc) = cpe_send_msg_to_inbox(t_info,
1034 CPE_CORE_SVC_CMD_SHARED_MEM_ALLOC, NULL);
1035 rc = CPE_PROC_SUCCESS;
1036 return rc;
1037}
1038
1039static void cpe_svc_core_cmi_handler(
1040 const struct cmi_api_notification *parameter)
1041{
1042 struct cmi_hdr *hdr;
1043
1044 if (!parameter)
1045 return;
1046
1047 pr_debug("%s: event = %d\n",
1048 __func__, parameter->event);
1049
1050 if (parameter->event != CMI_API_MSG)
1051 return;
1052
1053 hdr = (struct cmi_hdr *) parameter->message;
1054
1055 if (hdr->opcode == CPE_CMI_BASIC_RSP_OPCODE) {
1056 struct cmi_basic_rsp_result *result;
1057
1058 result = (struct cmi_basic_rsp_result *)
1059 ((u8 *)parameter->message) + (sizeof(*hdr));
1060 if (result->status)
1061 pr_err("%s: error response, error code = %u\n",
1062 __func__, result->status);
1063 complete(&cpe_d.cpe_default_handle->core_svc_cmd_compl);
1064 }
1065}
1066
1067static void cpe_clk_plan_work(struct work_struct *work)
1068{
1069 struct cpe_info *t_info = NULL;
1070 size_t size = 0;
1071 struct cpe_svc_cfg_clk_plan plan;
1072 u8 *cmi_msg;
1073 struct cmi_hdr *hdr;
1074 int rc;
1075
1076 t_info = container_of(work, struct cpe_info, clk_plan_work);
1077 if (!t_info) {
1078 pr_err("%s: Invalid handle for cpe_info\n",
1079 __func__);
1080 return;
1081 }
1082
1083 /* Register the core service */
1084 cpe_d.cpe_cmi_handle = cmi_register(
1085 cpe_svc_core_cmi_handler,
1086 CMI_CPE_CORE_SERVICE_ID);
1087
1088 /* send the clk plan command */
1089 if (!cpe_d.cpe_query_freq_plans_cb) {
1090 pr_err("%s: No support for querying clk plans\n",
1091 __func__);
1092 return;
1093 }
1094
1095 cpe_d.cpe_query_freq_plans_cb(cpe_d.cdc_priv, &plan);
1096 size = sizeof(plan.current_clk_feq) +
1097 sizeof(plan.num_clk_freqs);
1098 size += plan.num_clk_freqs *
1099 sizeof(plan.clk_freqs[0]);
1100 cmi_msg = kzalloc(size + sizeof(struct cmi_hdr),
1101 GFP_KERNEL);
1102 if (!cmi_msg)
1103 return;
1104
1105 hdr = (struct cmi_hdr *) cmi_msg;
1106 CMI_HDR_SET_OPCODE(hdr,
1107 CPE_CORE_SVC_CMD_CFG_CLK_PLAN);
1108 CMI_HDR_SET_SERVICE(hdr, CMI_CPE_CORE_SERVICE_ID);
1109 CMI_HDR_SET_SESSION(hdr, 1);
1110 CMI_HDR_SET_VERSION(hdr, CMI_DRIVER_SUPPORTED_VERSION);
1111 CMI_HDR_SET_PAYLOAD_SIZE(hdr, size);
1112 memcpy(CMI_GET_PAYLOAD(cmi_msg), &plan,
1113 size);
1114 cmi_send_msg(cmi_msg);
1115
1116 /* Wait for clk plan command to complete */
1117 rc = wait_for_completion_timeout(&t_info->core_svc_cmd_compl,
1118 (10 * HZ));
1119 if (!rc) {
1120 pr_err("%s: clk plan cmd timed out\n",
1121 __func__);
1122 goto cmd_fail;
1123 }
1124
1125 /* clk plan cmd is successful, send start notification */
1126 if (t_info->cpe_start_notification)
1127 t_info->cpe_start_notification(t_info);
1128 else
1129 pr_err("%s: no start notification\n",
1130 __func__);
1131
1132cmd_fail:
1133 kfree(cmi_msg);
1134 cmi_deregister(cpe_d.cpe_cmi_handle);
1135}
1136
/*
 * Final stage of the boot handshake: consume the shared-mem-alloc
 * response from the outbox, record the CPE-side message buffer
 * address, move to IDLE and start the worker thread.  Non-TomTom
 * codecs additionally kick off the deferred clock-plan exchange,
 * which fires the start notification when it completes; TomTom
 * notifies immediately.
 */
static enum cpe_process_result cpe_boot_complete(
		struct cpe_info *t_info)
{
	struct cmi_core_svc_cmdrsp_shared_mem_alloc *p = NULL;

	if (CMI_GET_OPCODE(t_info->tgt->outbox) !=
		CPE_CORE_SVC_CMDRSP_SHARED_MEM_ALLOC) {
		broacast_boot_failed();
		return CPE_PROC_FAILED;
	}

	p = (struct cmi_core_svc_cmdrsp_shared_mem_alloc *)
		CMI_GET_PAYLOAD(t_info->tgt->outbox);
	cpe_d.cpe_msg_buffer = p->addr;

	if (cpe_d.cpe_msg_buffer == 0) {
		pr_err("%s: Invalid cpe buffer for message\n",
			__func__);
		broacast_boot_failed();
		return CPE_PROC_FAILED;
	}

	cpe_change_state(t_info, CPE_STATE_IDLE, CPE_SS_IDLE);
	cpe_create_worker_thread(t_info);

	if (t_info->codec_id != CPE_SVC_CODEC_TOMTOM) {
		schedule_work(&t_info->clk_plan_work);
	} else {
		if (t_info->cpe_start_notification)
			t_info->cpe_start_notification(t_info);
		else
			pr_err("%s: no start notification\n",
				__func__);
	}

	pr_debug("%s: boot complete\n", __func__);
	return CPE_PROC_SUCCESS;
}
1175
/*
 * cpe_process_send_msg - start transmission of a queued CMI message
 * @t_info: CPE service instance
 * @cpe_rc: out parameter for the send attempt's service result
 * @command_node: command whose ->data is the struct cpe_send_msg
 *
 * Only one message may be in flight (t_info->pending); later messages
 * stay queued.  Messages that fit the inbox are written directly;
 * larger ones (up to CPE_MSG_BUFFER_SIZE) go through the shared DRAM
 * buffer after a DRAM access request.
 *
 * Return: CPE_PROC_SUCCESS, CPE_PROC_QUEUED (message deferred) or
 * CPE_PROC_FAILED for an oversized message.
 */
static enum cpe_process_result cpe_process_send_msg(
	struct cpe_info *t_info,
	enum cpe_svc_result *cpe_rc,
	struct cpe_command_node *command_node)
{
	enum cpe_process_result rc = CPE_PROC_SUCCESS;
	struct cpe_send_msg *m =
		(struct cpe_send_msg *)command_node->data;
	u32 size = m->size;

	/* A message is already in flight; keep this one queued */
	if (t_info->pending) {
		pr_debug("%s: message queued\n", __func__);
		*cpe_rc = CPE_SVC_SUCCESS;
		return CPE_PROC_QUEUED;
	}

	pr_debug("%s: Send CMI message, size = %u\n",
		 __func__, size);

	if (size <= t_info->tgt->tgt_get_cpe_info()->inbox_size) {
		/* Small message: send in-band through the inbox */
		pr_debug("%s: Msg fits mailbox, size %u\n",
			 __func__, size);
		cpe_change_state(t_info, CPE_STATE_SENDING_MSG,
				 CPE_SS_MSG_SEND_INBOX);
		t_info->pending = m;
		*cpe_rc = cpe_send_msg_to_inbox(t_info, 0, m);
	} else if (size < CPE_MSG_BUFFER_SIZE) {
		/* Large message: request DRAM access first */
		m->address = cpe_d.cpe_msg_buffer;
		pr_debug("%s: Message req CMI mem access\n",
			 __func__);
		t_info->pending = m;
		cpe_change_state(t_info, CPE_STATE_SENDING_MSG,
				 CPE_SS_MSG_REQUEST_ACCESS);
		*cpe_rc = cpe_send_msg_to_inbox(t_info,
			CPE_CORE_SVC_CMD_DRAM_ACCESS_REQ, m);
	} else {
		/* Message cannot be delivered at all; drop it */
		pr_debug("%s: Invalid msg size %u\n",
			__func__, size);
		cpe_command_cleanup(command_node);
		rc = CPE_PROC_FAILED;
		cpe_change_state(t_info, CPE_STATE_IDLE,
			CPE_SS_IDLE);
	}

	return rc;
}
1222
/*
 * cpe_process_incoming - dispatch a message read from the CPE outbox
 * @t_info: CPE service instance
 *
 * Core-service messages (clock frequency requests, transport messages,
 * basic responses) are handled locally; anything addressed to another
 * CMI service is forwarded to the registered client.
 *
 * Return: CPE_PROC_SUCCESS for handled/forwarded messages,
 * CPE_PROC_FAILED for unknown core-service opcodes.
 */
static enum cpe_process_result cpe_process_incoming(
	struct cpe_info *t_info)
{
	enum cpe_process_result rc = CPE_PROC_FAILED;
	struct cmi_hdr *hdr;

	hdr = CMI_GET_HEADER(t_info->tgt->outbox);

	if (CMI_HDR_GET_SERVICE(hdr) ==
	    CMI_CPE_CORE_SERVICE_ID) {
		pr_debug("%s: core service message received\n",
			 __func__);

		switch (CMI_GET_OPCODE(t_info->tgt->outbox)) {
		case CPE_CORE_SVC_CMD_CLK_FREQ_REQUEST:
			cpe_process_clk_change_req(t_info);
			rc = CPE_PROC_SUCCESS;
			break;
		case CMI_MSG_TRANSPORT:
			pr_debug("%s: transport msg received\n",
				 __func__);
			rc = CPE_PROC_SUCCESS;
			break;
		case CPE_CMI_BASIC_RSP_OPCODE:
			pr_debug("%s: received basic rsp\n",
				 __func__);
			rc = CPE_PROC_SUCCESS;
			break;
		default:
			pr_debug("%s: unknown message received\n",
				 __func__);
			break;
		}
	} else {
		/* Service id belongs to a CMI client; notify the client */
		pr_debug("%s: Message received, notifying client\n",
			 __func__);
		cpe_notify_cmi_client(t_info,
			t_info->tgt->outbox, CPE_SVC_SUCCESS);
		rc = CPE_PROC_SUCCESS;
	}

	return rc;
}
1267
1268static enum cpe_process_result cpe_process_kill_thread(
1269 struct cpe_info *t_info,
1270 struct cpe_command_node *command_node)
1271{
1272 struct cpe_svc_notification payload;
1273
1274 cpe_d.cpe_msg_buffer = 0;
1275 payload.result = CPE_SVC_SHUTTING_DOWN;
1276 payload.event = CPE_SVC_OFFLINE;
1277 payload.payload = NULL;
1278 payload.private_data = t_info->client_context;
1279 /*
1280 * Make state as offline before broadcasting
1281 * the message to clients.
1282 */
1283 cpe_change_state(t_info, CPE_STATE_OFFLINE,
1284 CPE_SS_IDLE);
1285 cpe_broadcast_notification(t_info, &payload);
1286
1287 return CPE_PROC_KILLED;
1288}
1289
/*
 * cpe_mt_process_cmd - main-thread command dispatcher for the service
 * @command_node: command to execute against the default handle
 *
 * Validates the command against the current state machine and then
 * executes it.  On any service-level failure the pending message owner
 * is notified, the command is cleaned up and the service returns to
 * IDLE.
 *
 * Return: CPE_PROC_SUCCESS/FAILED/QUEUED/KILLED depending on command.
 */
static enum cpe_process_result cpe_mt_process_cmd(
	struct cpe_command_node *command_node)
{
	struct cpe_info *t_info = cpe_d.cpe_default_handle;
	enum cpe_svc_result cpe_rc = CPE_SVC_SUCCESS;
	enum cpe_process_result rc = CPE_PROC_SUCCESS;
	struct cpe_send_msg *m;
	struct cmi_hdr *hdr;
	u8 service = 0;
	u8 retries = 0;

	if (!t_info || !command_node) {
		pr_err("%s: Invalid handle/command node\n",
			__func__);
		return CPE_PROC_FAILED;
	}

	pr_debug("%s: cmd = %u\n", __func__, command_node->command);

	/* Reject commands that are illegal in the current state */
	cpe_rc = cpe_is_command_valid(t_info, command_node->command);

	if (cpe_rc != CPE_SVC_SUCCESS) {
		pr_err("%s: Invalid command %d, err = %d\n",
			__func__, command_node->command, cpe_rc);
		return CPE_PROC_FAILED;
	}

	switch (command_node->command) {

	case CPE_CMD_BOOT_INITIALIZE:
		rc = cpe_boot_initialize(t_info, &cpe_rc);
		break;

	case CPE_CMD_BOOT_COMPLETE:
		rc = cpe_boot_complete(t_info);
		break;

	case CPE_CMD_SEND_MSG:
		rc = cpe_process_send_msg(t_info, &cpe_rc,
					  command_node);
		break;

	case CPE_CMD_SEND_TRANS_MSG:
		m = (struct cpe_send_msg *)command_node->data;

		/* Poll until the CPE goes inactive (bounded retries) */
		while (retries < CPE_SVC_INACTIVE_STATE_RETRIES_MAX) {
			if (t_info->tgt->tgt_is_active()) {
				++retries;
				/* Wait for CPE to be inactive */
				usleep_range(5000, 5100);
			} else {
				break;
			}
		}

		pr_debug("%s: cpe inactive after %d attempts\n",
			 __func__, retries);

		cpe_change_state(t_info, CPE_STATE_SENDING_MSG,
				CPE_SS_MSG_SEND_INBOX);
		/*
		 * NOTE(review): cpe_send_msg_to_inbox() presumably
		 * returns enum cpe_svc_result which is assigned to rc
		 * (enum cpe_process_result) here - pre-existing type
		 * mixing, confirm it is intentional.
		 */
		rc = cpe_send_msg_to_inbox(t_info, 0, m);
		break;

	case CPE_CMD_SEND_MSG_COMPLETE:
		hdr = CMI_GET_HEADER(t_info->tgt->outbox);
		service = CMI_HDR_GET_SERVICE(hdr);
		pr_debug("%s: msg send success, notifying clients\n",
			 __func__);
		/* Message delivered: clear pending slot, go IDLE */
		cpe_command_cleanup(command_node);
		t_info->pending = NULL;
		cpe_change_state(t_info,
				 CPE_STATE_IDLE, CPE_SS_IDLE);
		cpe_notify_cmi_client(t_info,
			t_info->tgt->outbox, CPE_SVC_SUCCESS);
		break;

	case CPE_CMD_PROC_INCOMING_MSG:
		rc = cpe_process_incoming(t_info);
		break;

	case CPE_CMD_KILL_THREAD:
		rc = cpe_process_kill_thread(t_info, command_node);
		break;

	default:
		pr_err("%s: unhandled cpe cmd = %d\n",
			__func__, command_node->command);
		break;
	}

	/* Common failure path: fail the pending message and go IDLE */
	if (cpe_rc != CPE_SVC_SUCCESS) {
		pr_err("%s: failed to execute command\n", __func__);
		if (t_info->pending) {
			m = (struct cpe_send_msg *)t_info->pending;
			cpe_notify_cmi_client(t_info, m->payload,
					      CPE_SVC_FAILED);
			t_info->pending = NULL;
		}

		cpe_command_cleanup(command_node);
		rc = CPE_PROC_FAILED;
		cpe_change_state(t_info, CPE_STATE_IDLE,
			CPE_SS_IDLE);
	}

	return rc;
}
1397
/*
 * cpe_mt_validate_cmd - state-machine gate for service commands
 * @t_info: CPE service instance
 * @command: command the caller wants to execute
 *
 * Encodes which commands are legal in each service state.
 *
 * Return: CPE_SVC_SUCCESS when allowed, CPE_SVC_BUSY for FTM while the
 * CPE is in use, CPE_SVC_NOT_READY/CPE_SVC_FAILED otherwise.
 */
static enum cpe_svc_result cpe_mt_validate_cmd(
		const struct cpe_info *t_info,
		enum cpe_command command)
{
	enum cpe_svc_result rc = CPE_SVC_SUCCESS;

	if ((t_info == NULL) || t_info->initialized == false) {
		pr_err("%s: cpe service is not ready\n",
			__func__);
		return CPE_SVC_NOT_READY;
	}

	switch (t_info->state) {
	/* Not yet running: only setup/teardown style commands */
	case CPE_STATE_UNINITIALIZED:
	case CPE_STATE_INITIALIZED:
		switch (command) {
		case CPE_CMD_RESET:
		case CPE_CMD_DL_SEGMENT:
		case CPE_CMD_RAMDUMP:
		case CPE_CMD_PROCESS_IRQ:
		case CPE_CMD_KILL_THREAD:
		case CPE_CMD_DEINITIALIZE:
		case CPE_CMD_FTM_TEST:
			rc = CPE_SVC_SUCCESS;
			break;
		default:
			rc = CPE_SVC_NOT_READY;
			break;
		}
		break;

	/* Image download in progress */
	case CPE_STATE_DOWNLOADING:
		switch (command) {
		case CPE_CMD_RESET:
		case CPE_CMD_DL_SEGMENT:
		case CPE_CMD_BOOT:
		case CPE_CMD_FTM_TEST:
			rc = CPE_SVC_SUCCESS;
			break;
		default:
			rc = CPE_SVC_NOT_READY;
			break;
		}
		break;

	/* Boot handshake in progress; FTM must wait */
	case CPE_STATE_BOOTING:
		switch (command) {
		case CPE_CMD_PROCESS_IRQ:
		case CPE_CMD_BOOT_INITIALIZE:
		case CPE_CMD_BOOT_COMPLETE:
		case CPE_CMD_SHUTDOWN:
			rc = CPE_SVC_SUCCESS;
			break;
		case CPE_CMD_FTM_TEST:
			rc = CPE_SVC_BUSY;
			break;
		default:
			rc = CPE_SVC_NOT_READY;
			break;
		}
		break;

	/* Fully up and idle: normal messaging commands allowed */
	case CPE_STATE_IDLE:
		switch (command) {
		case CPE_CMD_SEND_MSG:
		case CPE_CMD_SEND_TRANS_MSG:
		case CPE_CMD_SEND_MSG_COMPLETE:
		case CPE_CMD_PROCESS_IRQ:
		case CPE_CMD_RESET:
		case CPE_CMD_SHUTDOWN:
		case CPE_CMD_KILL_THREAD:
		case CPE_CMD_PROC_INCOMING_MSG:
			rc = CPE_SVC_SUCCESS;
			break;
		case CPE_CMD_FTM_TEST:
			rc = CPE_SVC_BUSY;
			break;
		default:
			rc = CPE_SVC_FAILED;
			break;
		}
		break;

	/* Message in flight: same as IDLE except RESET is not allowed */
	case CPE_STATE_SENDING_MSG:
		switch (command) {
		case CPE_CMD_SEND_MSG:
		case CPE_CMD_SEND_TRANS_MSG:
		case CPE_CMD_SEND_MSG_COMPLETE:
		case CPE_CMD_PROCESS_IRQ:
		case CPE_CMD_SHUTDOWN:
		case CPE_CMD_KILL_THREAD:
		case CPE_CMD_PROC_INCOMING_MSG:
			rc = CPE_SVC_SUCCESS;
			break;
		case CPE_CMD_FTM_TEST:
			rc = CPE_SVC_BUSY;
			break;
		default:
			rc = CPE_SVC_FAILED;
			break;
		}
		break;

	/* Offline: only recovery-style commands */
	case CPE_STATE_OFFLINE:
		switch (command) {
		case CPE_CMD_RESET:
		case CPE_CMD_RAMDUMP:
		case CPE_CMD_KILL_THREAD:
			rc = CPE_SVC_SUCCESS;
			break;
		default:
			rc = CPE_SVC_NOT_READY;
			break;
		}
		break;

	default:
		pr_debug("%s: unhandled state %d\n",
			 __func__, t_info->state);
		break;
	}

	if (rc != CPE_SVC_SUCCESS)
		pr_err("%s: invalid command %d, state = %d\n",
			__func__, command, t_info->state);
	return rc;
}
1525
1526void *cpe_svc_initialize(
1527 void irq_control_callback(u32 enable),
1528 const void *codec_info, void *context)
1529{
1530 struct cpe_info *t_info = NULL;
1531 const struct cpe_svc_hw_cfg *cap = NULL;
1532 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
1533 struct cpe_svc_init_param *init_context =
1534 (struct cpe_svc_init_param *) context;
1535 void *client_context = NULL;
1536
1537 if (cpe_d.cpe_default_handle &&
1538 cpe_d.cpe_default_handle->initialized == true)
1539 return (void *)cpe_d.cpe_default_handle;
1540 cpe_d.cpe_query_freq_plans_cb = NULL;
1541 cpe_d.cpe_change_freq_plan_cb = NULL;
1542
1543 if (context) {
1544 client_context = init_context->context;
1545 switch (init_context->version) {
1546 case CPE_SVC_INIT_PARAM_V1:
1547 cpe_d.cpe_query_freq_plans_cb =
1548 init_context->query_freq_plans_cb;
1549 cpe_d.cpe_change_freq_plan_cb =
1550 init_context->change_freq_plan_cb;
1551 break;
1552 default:
1553 break;
1554 }
1555 }
1556
1557 if (!cpe_d.cpe_default_handle) {
1558 cpe_d.cpe_default_handle = kzalloc(sizeof(struct cpe_info),
1559 GFP_KERNEL);
1560 if (!cpe_d.cpe_default_handle)
1561 goto err_register;
1562
1563 memset(cpe_d.cpe_default_handle, 0,
1564 sizeof(struct cpe_info));
1565 }
1566
1567 t_info = cpe_d.cpe_default_handle;
1568 t_info->client_context = client_context;
1569
1570 INIT_LIST_HEAD(&t_info->client_list);
1571 cpe_d.cdc_priv = client_context;
1572 INIT_WORK(&t_info->clk_plan_work, cpe_clk_plan_work);
1573 init_completion(&t_info->core_svc_cmd_compl);
1574
1575 t_info->tgt = kzalloc(sizeof(struct cpe_svc_tgt_abstraction),
1576 GFP_KERNEL);
1577 if (!t_info->tgt)
1578 goto err_tgt_alloc;
1579 t_info->codec_id =
1580 ((struct cpe_svc_codec_info_v1 *) codec_info)->id;
1581
1582 rc = cpe_svc_tgt_init((struct cpe_svc_codec_info_v1 *)codec_info,
1583 t_info->tgt);
1584
1585 if (rc != CPE_SVC_SUCCESS)
1586 goto err_tgt_init;
1587
1588 cap = t_info->tgt->tgt_get_cpe_info();
1589
1590 memset(t_info->tgt->outbox, 0, cap->outbox_size);
1591 memset(t_info->tgt->inbox, 0, cap->inbox_size);
1592 mutex_init(&t_info->msg_lock);
1593 cpe_d.cpe_irq_control_callback = irq_control_callback;
1594 t_info->cpe_process_command = cpe_mt_process_cmd;
1595 t_info->cpe_cmd_validate = cpe_mt_validate_cmd;
1596 t_info->cpe_start_notification = broadcast_boot_event;
1597 mutex_init(&cpe_d.cpe_api_mutex);
1598 mutex_init(&cpe_d.cpe_svc_lock);
1599 pr_debug("%s: cpe services initialized\n", __func__);
1600 t_info->state = CPE_STATE_INITIALIZED;
1601 t_info->initialized = true;
1602
1603 return t_info;
1604
1605err_tgt_init:
1606 kfree(t_info->tgt);
1607
1608err_tgt_alloc:
1609 kfree(cpe_d.cpe_default_handle);
1610 cpe_d.cpe_default_handle = NULL;
1611
1612err_register:
1613 return NULL;
1614}
1615
1616enum cpe_svc_result cpe_svc_deinitialize(void *cpe_handle)
1617{
1618 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
1619 struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
1620
1621 if (!t_info)
1622 t_info = cpe_d.cpe_default_handle;
1623
1624 rc = cpe_is_command_valid(t_info, CPE_CMD_DEINITIALIZE);
1625
1626 if (rc != CPE_SVC_SUCCESS) {
1627 pr_err("%s: Invalid command %d\n",
1628 __func__, CPE_CMD_DEINITIALIZE);
1629 return rc;
1630 }
1631
1632 if (cpe_d.cpe_default_handle == t_info)
1633 cpe_d.cpe_default_handle = NULL;
1634
1635 t_info->tgt->tgt_deinit(t_info->tgt);
1636 cpe_change_state(t_info, CPE_STATE_UNINITIALIZED,
1637 CPE_SS_IDLE);
1638 mutex_destroy(&t_info->msg_lock);
1639 kfree(t_info->tgt);
1640 kfree(t_info);
1641 mutex_destroy(&cpe_d.cpe_api_mutex);
1642 mutex_destroy(&cpe_d.cpe_svc_lock);
1643
1644 return rc;
1645}
1646
1647void *cpe_svc_register(void *cpe_handle,
1648 void (*notification_callback)
1649 (const struct cpe_svc_notification *parameter),
1650 u32 mask, const char *name)
1651{
1652 void *reg_handle;
1653
1654 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1655 if (!cpe_d.cpe_default_handle) {
1656 cpe_d.cpe_default_handle = kzalloc(sizeof(struct cpe_info),
1657 GFP_KERNEL);
1658 if (!cpe_d.cpe_default_handle) {
1659 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1660 return NULL;
1661 }
1662
1663 memset(cpe_d.cpe_default_handle, 0,
1664 sizeof(struct cpe_info));
1665 }
1666
1667 if (!cpe_handle)
1668 cpe_handle = cpe_d.cpe_default_handle;
1669
1670 reg_handle = cpe_register_generic((struct cpe_info *)cpe_handle,
1671 notification_callback,
1672 NULL,
1673 mask, CPE_NO_SERVICE, name);
1674 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1675
1676 return reg_handle;
1677}
1678
1679enum cpe_svc_result cpe_svc_deregister(void *cpe_handle, void *reg_handle)
1680{
1681 enum cpe_svc_result rc;
1682
1683 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1684 if (!cpe_handle)
1685 cpe_handle = cpe_d.cpe_default_handle;
1686
1687 rc = cpe_deregister_generic((struct cpe_info *)cpe_handle,
1688 reg_handle);
1689 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1690
1691 return rc;
1692}
1693
1694enum cpe_svc_result cpe_svc_download_segment(void *cpe_handle,
1695 const struct cpe_svc_mem_segment *segment)
1696{
1697 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
1698 struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
1699
1700 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1701 if (!t_info)
1702 t_info = cpe_d.cpe_default_handle;
1703
1704 rc = cpe_is_command_valid(t_info, CPE_CMD_DL_SEGMENT);
1705
1706 if (rc != CPE_SVC_SUCCESS) {
1707 pr_err("%s: cmd validation fail, cmd = %d\n",
1708 __func__, CPE_CMD_DL_SEGMENT);
1709 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1710 return rc;
1711 }
1712
1713 cpe_toggle_irq_notification(t_info, false);
1714 t_info->state = CPE_STATE_DOWNLOADING;
1715 t_info->substate = CPE_SS_DL_DOWNLOADING;
1716 rc = t_info->tgt->tgt_write_ram(t_info, segment);
1717 cpe_toggle_irq_notification(t_info, true);
1718 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1719
1720 return rc;
1721}
1722
1723enum cpe_svc_result cpe_svc_boot(void *cpe_handle, int debug_mode)
1724{
1725 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
1726 struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
1727
1728 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1729 if (!t_info)
1730 t_info = cpe_d.cpe_default_handle;
1731
1732 rc = cpe_is_command_valid(t_info, CPE_CMD_BOOT);
1733
1734 if (rc != CPE_SVC_SUCCESS) {
1735 pr_err("%s: cmd validation fail, cmd = %d\n",
1736 __func__, CPE_CMD_BOOT);
1737 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1738 return rc;
1739 }
1740
1741 if (rc == CPE_SVC_SUCCESS) {
1742 t_info->tgt->tgt_boot(debug_mode);
1743 t_info->state = CPE_STATE_BOOTING;
1744 t_info->substate = CPE_SS_BOOT;
1745 pr_debug("%s: cpe service booting\n",
1746 __func__);
1747 }
1748
1749 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1750 return rc;
1751}
1752
1753enum cpe_svc_result cpe_svc_process_irq(void *cpe_handle, u32 cpe_irq)
1754{
1755 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
1756 struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
1757
1758 if (!t_info)
1759 t_info = cpe_d.cpe_default_handle;
1760
1761 cpe_toggle_irq_notification(t_info, false);
1762 cpe_process_irq_int(cpe_irq, t_info);
1763 cpe_toggle_irq_notification(t_info, true);
1764
1765 return rc;
1766}
1767
1768enum cpe_svc_result cpe_svc_route_notification(void *cpe_handle,
1769 enum cpe_svc_module module, enum cpe_svc_route_dest dest)
1770{
1771 struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
1772 enum cpe_svc_result rc = CPE_SVC_NOT_READY;
1773
1774 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1775 if (!t_info)
1776 t_info = cpe_d.cpe_default_handle;
1777
1778 if (t_info->tgt)
1779 rc = t_info->tgt->tgt_route_notification(module, dest);
1780
1781 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1782 return rc;
1783}
1784
/*
 * __cpe_svc_shutdown - take the CPE service offline (lock held by caller)
 * @cpe_handle: service handle (NULL selects the default handle)
 *
 * Drains the worker queue (notifying owners of queued SEND_MSG commands
 * with CPE_SVC_SHUTTING_DOWN), fails any in-flight message, stops the
 * worker thread and finally broadcasts the offline notification through
 * the CPE_CMD_KILL_THREAD command.
 */
static enum cpe_svc_result __cpe_svc_shutdown(void *cpe_handle)
{
	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
	struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
	struct cpe_command_node *n = NULL;
	struct cpe_command_node kill_cmd;

	if (!t_info)
		t_info = cpe_d.cpe_default_handle;

	rc = cpe_is_command_valid(t_info, CPE_CMD_SHUTDOWN);

	if (rc != CPE_SVC_SUCCESS) {
		pr_err("%s: cmd validation fail, cmd = %d\n",
			__func__, CPE_CMD_SHUTDOWN);
		return rc;
	}

	/* Drain queued commands; they can no longer be processed */
	while (!list_empty(&t_info->main_queue)) {
		n = list_first_entry(&t_info->main_queue,
				     struct cpe_command_node, list);

		if (n->command == CPE_CMD_SEND_MSG) {
			cpe_notify_cmi_client(t_info, (u8 *)n->data,
				CPE_SVC_SHUTTING_DOWN);
		}
		/*
		 * Since command cannot be processed,
		 * delete it from the list and perform cleanup
		 */
		list_del(&n->list);
		cpe_command_cleanup(n);
		kfree(n);
	}

	pr_debug("%s: cpe service OFFLINE state\n", __func__);

	t_info->state = CPE_STATE_OFFLINE;
	t_info->substate = CPE_SS_IDLE;

	memset(&kill_cmd, 0, sizeof(kill_cmd));
	kill_cmd.command = CPE_CMD_KILL_THREAD;

	/* Fail the message that was in flight, if any */
	if (t_info->pending) {
		struct cpe_send_msg *m =
			(struct cpe_send_msg *)t_info->pending;
		cpe_notify_cmi_client(t_info, m->payload,
			CPE_SVC_SHUTTING_DOWN);
		kfree(t_info->pending);
		t_info->pending = NULL;
	}

	cpe_cleanup_worker_thread(t_info);
	t_info->cpe_process_command(&kill_cmd);

	return rc;
}
1842
1843enum cpe_svc_result cpe_svc_shutdown(void *cpe_handle)
1844{
1845 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
1846
1847 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1848 rc = __cpe_svc_shutdown(cpe_handle);
1849 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1850 return rc;
1851}
1852
1853enum cpe_svc_result cpe_svc_reset(void *cpe_handle)
1854{
1855 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
1856 struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
1857
1858 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1859 if (!t_info)
1860 t_info = cpe_d.cpe_default_handle;
1861
1862 rc = cpe_is_command_valid(t_info, CPE_CMD_RESET);
1863
1864 if (rc != CPE_SVC_SUCCESS) {
1865 pr_err("%s: cmd validation fail, cmd = %d\n",
1866 __func__, CPE_CMD_RESET);
1867 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1868 return rc;
1869 }
1870
1871 if (t_info && t_info->tgt) {
1872 rc = t_info->tgt->tgt_reset();
1873 pr_debug("%s: cpe services in INITIALIZED state\n",
1874 __func__);
1875 t_info->state = CPE_STATE_INITIALIZED;
1876 t_info->substate = CPE_SS_IDLE;
1877 }
1878 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1879
1880 return rc;
1881}
1882
1883enum cpe_svc_result cpe_svc_ramdump(void *cpe_handle,
1884 struct cpe_svc_mem_segment *buffer)
1885{
1886 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
1887 struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
1888
1889 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1890 if (!t_info)
1891 t_info = cpe_d.cpe_default_handle;
1892
1893 rc = cpe_is_command_valid(t_info, CPE_CMD_RAMDUMP);
1894 if (rc != CPE_SVC_SUCCESS) {
1895 pr_err("%s: cmd validation fail, cmd = %d\n",
1896 __func__, CPE_CMD_RAMDUMP);
1897 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1898 return rc;
1899 }
1900
1901 if (t_info->tgt) {
1902 rc = t_info->tgt->tgt_read_ram(t_info, buffer);
1903 } else {
1904 pr_err("%s: cpe service not ready\n", __func__);
1905 rc = CPE_SVC_NOT_READY;
1906 }
1907 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1908
1909 return rc;
1910}
1911
1912enum cpe_svc_result cpe_svc_set_debug_mode(void *cpe_handle, u32 mode)
1913{
1914 struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
1915 enum cpe_svc_result rc = CPE_SVC_INVALID_HANDLE;
1916
1917 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1918 if (!t_info)
1919 t_info = cpe_d.cpe_default_handle;
1920
1921 if (t_info->tgt)
1922 rc = t_info->tgt->tgt_set_debug_mode(mode);
1923 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1924
1925 return rc;
1926}
1927
1928const struct cpe_svc_hw_cfg *cpe_svc_get_hw_cfg(void *cpe_handle)
1929{
1930 struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
1931
1932 if (!t_info)
1933 t_info = cpe_d.cpe_default_handle;
1934
1935 if (t_info->tgt)
1936 return t_info->tgt->tgt_get_cpe_info();
1937
1938 return NULL;
1939}
1940
1941void *cmi_register(
1942 void notification_callback(
1943 const struct cmi_api_notification *parameter),
1944 u32 service)
1945{
1946 void *reg_handle = NULL;
1947
1948 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1949 reg_handle = cpe_register_generic(cpe_d.cpe_default_handle,
1950 NULL,
1951 notification_callback,
1952 (CPE_SVC_CMI_MSG | CPE_SVC_OFFLINE |
1953 CPE_SVC_ONLINE),
1954 service,
1955 "CMI_CLIENT");
1956 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1957
1958 return reg_handle;
1959}
1960
1961enum cmi_api_result cmi_deregister(void *reg_handle)
1962{
1963 u32 clients = 0;
1964 struct cpe_notif_node *n = NULL;
1965 enum cmi_api_result rc = CMI_API_SUCCESS;
1966 struct cpe_svc_notification payload;
1967
1968 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1969 rc = (enum cmi_api_result) cpe_deregister_generic(
1970 cpe_d.cpe_default_handle, reg_handle);
1971
1972 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
1973 list_for_each_entry(n, &cpe_d.cpe_default_handle->client_list, list) {
1974 if (n->mask & CPE_SVC_CMI_MSG)
1975 clients++;
1976 }
1977 CPE_SVC_REL_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
1978
1979 if (clients == 0) {
1980 payload.event = CPE_SVC_CMI_CLIENTS_DEREG;
1981 payload.payload = NULL;
1982 payload.result = CPE_SVC_SUCCESS;
1983 cpe_broadcast_notification(cpe_d.cpe_default_handle, &payload);
1984 }
1985
1986 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1987 return rc;
1988}
1989
/*
 * cmi_send_msg - queue a CMI message for asynchronous delivery
 * @message: complete CMI message (header followed by payload)
 *
 * Copies the caller's message into a private buffer and hands it to
 * the worker thread via CPE_CMD_SEND_MSG; delivery is asynchronous.
 *
 * Return: CMI_API_SUCCESS (0) on queueing success, or an error value.
 */
enum cmi_api_result cmi_send_msg(void *message)
{
	enum cmi_api_result rc = CMI_API_SUCCESS;
	struct cpe_send_msg *msg = NULL;
	struct cmi_hdr *hdr;

	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
	hdr = CMI_GET_HEADER(message);
	msg = kzalloc(sizeof(struct cpe_send_msg),
		      GFP_ATOMIC);
	if (!msg) {
		CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
		/*
		 * NOTE(review): a cpe_svc_result value is returned
		 * through the cmi_api_result type here - pre-existing
		 * convention, confirm callers expect it.
		 */
		return CPE_SVC_NO_MEMORY;
	}

	/* Out-of-band messages are delivered via shared memory */
	if (CMI_HDR_GET_OBM_FLAG(hdr) == CMI_OBM_FLAG_OUT_BAND)
		msg->isobm = 1;
	else
		msg->isobm = 0;

	msg->size = sizeof(struct cmi_hdr) +
		    CMI_HDR_GET_PAYLOAD_SIZE(hdr);

	msg->payload = kzalloc(msg->size, GFP_ATOMIC);
	if (!msg->payload) {
		kfree(msg);
		CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
		return CPE_SVC_NO_MEMORY;
	}

	/* Private copy: the caller's buffer may be reused immediately */
	msg->address = 0;
	memcpy((void *)msg->payload, message, msg->size);

	rc = (enum cmi_api_result) cpe_send_cmd_to_thread(
		cpe_d.cpe_default_handle,
		CPE_CMD_SEND_MSG,
		(void *)msg, false);

	if (rc != 0) {
		pr_err("%s: Failed to queue message\n", __func__);
		kfree(msg->payload);
		kfree(msg);
	}

	CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
	return rc;
}
2037
2038enum cpe_svc_result cpe_svc_ftm_test(void *cpe_handle, u32 *status)
2039{
2040 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2041 struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
2042 struct cpe_svc_mem_segment backup_seg;
2043 struct cpe_svc_mem_segment waiti_seg;
2044 u8 *backup_data = NULL;
2045
2046 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
2047 if (!t_info)
2048 t_info = cpe_d.cpe_default_handle;
2049
2050 rc = cpe_is_command_valid(t_info, CPE_CMD_FTM_TEST);
2051 if (rc != CPE_SVC_SUCCESS) {
2052 pr_err("%s: cmd validation fail, cmd = %d\n",
2053 __func__, CPE_CMD_FTM_TEST);
2054 goto fail_cmd;
2055 }
2056
2057 if (t_info && t_info->tgt) {
2058 backup_data = kzalloc(
2059 t_info->tgt->tgt_waiti_info->tgt_waiti_size,
2060 GFP_KERNEL);
2061
2062 /* CPE reset */
2063 rc = t_info->tgt->tgt_reset();
2064 if (rc != CPE_SVC_SUCCESS) {
2065 pr_err("%s: CPE reset fail! err = %d\n",
2066 __func__, rc);
2067 goto err_return;
2068 }
2069
2070 /* Back up the 4 byte IRAM data first */
2071 backup_seg.type = CPE_SVC_INSTRUCTION_MEM;
2072 backup_seg.cpe_addr =
2073 t_info->tgt->tgt_get_cpe_info()->IRAM_offset;
2074 backup_seg.size = t_info->tgt->tgt_waiti_info->tgt_waiti_size;
2075 backup_seg.data = backup_data;
2076
2077 pr_debug("%s: Backing up IRAM data from CPE\n",
2078 __func__);
2079
2080 rc = t_info->tgt->tgt_read_ram(t_info, &backup_seg);
2081 if (rc != CPE_SVC_SUCCESS) {
2082 pr_err("%s: Fail to backup CPE IRAM data, err = %d\n",
2083 __func__, rc);
2084 goto err_return;
2085 }
2086
2087 pr_debug("%s: Complete backing up IRAM data from CPE\n",
2088 __func__);
2089
2090 /* Write the WAITI instruction data */
2091 waiti_seg.type = CPE_SVC_INSTRUCTION_MEM;
2092 waiti_seg.cpe_addr =
2093 t_info->tgt->tgt_get_cpe_info()->IRAM_offset;
2094 waiti_seg.size = t_info->tgt->tgt_waiti_info->tgt_waiti_size;
2095 waiti_seg.data = t_info->tgt->tgt_waiti_info->tgt_waiti_data;
2096
2097 rc = t_info->tgt->tgt_write_ram(t_info, &waiti_seg);
2098 if (rc != CPE_SVC_SUCCESS) {
2099 pr_err("%s: Fail to write the WAITI data, err = %d\n",
2100 __func__, rc);
2101 goto restore_iram;
2102 }
2103
2104 /* Boot up cpe to execute the WAITI instructions */
2105 rc = t_info->tgt->tgt_boot(1);
2106 if (rc != CPE_SVC_SUCCESS) {
2107 pr_err("%s: Fail to boot CPE, err = %d\n",
2108 __func__, rc);
2109 goto reset;
2110 }
2111
2112 /*
2113 * 1ms delay is suggested by the hw team to
2114 * wait for cpe to boot up.
2115 */
2116 usleep_range(1000, 1100);
2117
2118 /* Check if the cpe init is done after executing the WAITI */
2119 *status = t_info->tgt->tgt_cpar_init_done();
2120
2121reset:
2122 /* Set the cpe back to reset state */
2123 rc = t_info->tgt->tgt_reset();
2124 if (rc != CPE_SVC_SUCCESS) {
2125 pr_err("%s: CPE reset fail! err = %d\n",
2126 __func__, rc);
2127 goto restore_iram;
2128 }
2129
2130restore_iram:
2131 /* Restore the IRAM 4 bytes data */
2132 rc = t_info->tgt->tgt_write_ram(t_info, &backup_seg);
2133 if (rc != CPE_SVC_SUCCESS) {
2134 pr_err("%s: Fail to restore the IRAM data, err = %d\n",
2135 __func__, rc);
2136 goto err_return;
2137 }
2138 }
2139
2140err_return:
2141 kfree(backup_data);
2142fail_cmd:
2143 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
2144 return rc;
2145}
2146
/* TomTom target stub: always reports boot success. */
static enum cpe_svc_result cpe_tgt_tomtom_boot(int debug_mode)
{
	return CPE_SVC_SUCCESS;
}
2151
/* TomTom target stub: always reports CPAR init not done (0). */
static u32 cpe_tgt_tomtom_is_cpar_init_done(void)
{
	return 0;
}
2156
/* TomTom target stub: always reports the CPE as inactive (0). */
static u32 cpe_tgt_tomtom_is_active(void)
{
	return 0;
}
2161
/* TomTom target stub: reset always reports success. */
static enum cpe_svc_result cpe_tgt_tomtom_reset(void)
{
	return CPE_SVC_SUCCESS;
}
2166
/* TomTom target stub: voice-TX (LAB) toggle always reports success. */
enum cpe_svc_result cpe_tgt_tomtom_voicetx(bool enable)
{
	return CPE_SVC_SUCCESS;
}
2171
2172enum cpe_svc_result cpe_svc_toggle_lab(void *cpe_handle, bool enable)
2173{
2174
2175 struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
2176
2177 if (!t_info)
2178 t_info = cpe_d.cpe_default_handle;
2179
2180 if (t_info->tgt)
2181 return t_info->tgt->tgt_voice_tx_lab(enable);
2182 else
2183 return CPE_SVC_INVALID_HANDLE;
2184}
2185
/* TomTom target stub: mailbox read always reports success. */
static enum cpe_svc_result cpe_tgt_tomtom_read_mailbox(u8 *buffer,
	size_t size)
{
	return CPE_SVC_SUCCESS;
}
2191
/* TomTom target stub: mailbox write always reports success. */
static enum cpe_svc_result cpe_tgt_tomtom_write_mailbox(u8 *buffer,
	size_t size)
{
	return CPE_SVC_SUCCESS;
}
2197
/* TomTom target stub: RAM read always reports success. */
static enum cpe_svc_result cpe_tgt_tomtom_read_RAM(struct cpe_info *t_info,
	struct cpe_svc_mem_segment *mem_seg)
{
	return CPE_SVC_SUCCESS;
}
2203
/* TomTom target stub: RAM write always reports success. */
static enum cpe_svc_result cpe_tgt_tomtom_write_RAM(struct cpe_info *t_info,
	const struct cpe_svc_mem_segment *mem_seg)
{
	return CPE_SVC_SUCCESS;
}
2209
/* TomTom target stub: notification routing always reports success. */
static enum cpe_svc_result cpe_tgt_tomtom_route_notification(
	enum cpe_svc_module module,
	enum cpe_svc_route_dest dest)
{
	return CPE_SVC_SUCCESS;
}
2216
/* TomTom target stub: debug mode setting always reports success. */
static enum cpe_svc_result cpe_tgt_tomtom_set_debug_mode(u32 enable)
{
	return CPE_SVC_SUCCESS;
}
2221
/* Return the static TomTom CPE hardware capability table. */
static const struct cpe_svc_hw_cfg *cpe_tgt_tomtom_get_cpe_info(void)
{
	return &cpe_svc_tomtom_info;
}
2226
2227static enum cpe_svc_result cpe_tgt_tomtom_deinit(
2228 struct cpe_svc_tgt_abstraction *param)
2229{
2230 kfree(param->inbox);
2231 param->inbox = NULL;
2232 kfree(param->outbox);
2233 param->outbox = NULL;
2234 memset(param, 0, sizeof(struct cpe_svc_tgt_abstraction));
2235 return CPE_SVC_SUCCESS;
2236}
2237
/*
 * WAITI instruction bytes written into TomTom IRAM during the FTM test
 * to park the CPE (see cpe_svc_ftm_test).
 */
static u8 cpe_tgt_tomtom_waiti_data[] = {0x00, 0x70, 0x00, 0x00};

/* Size/data descriptor exported through tgt_waiti_info */
static struct cpe_tgt_waiti_info cpe_tgt_tomtom_waiti_info = {
	.tgt_waiti_size = ARRAY_SIZE(cpe_tgt_tomtom_waiti_data),
	.tgt_waiti_data = cpe_tgt_tomtom_waiti_data,
};
2244
2245static enum cpe_svc_result cpe_tgt_tomtom_init(
2246 struct cpe_svc_codec_info_v1 *codec_info,
2247 struct cpe_svc_tgt_abstraction *param)
2248{
2249 if (!codec_info)
2250 return CPE_SVC_INVALID_HANDLE;
2251 if (!param)
2252 return CPE_SVC_INVALID_HANDLE;
2253
2254 if (codec_info->id == CPE_SVC_CODEC_TOMTOM) {
2255 param->tgt_boot = cpe_tgt_tomtom_boot;
2256 param->tgt_cpar_init_done = cpe_tgt_tomtom_is_cpar_init_done;
2257 param->tgt_is_active = cpe_tgt_tomtom_is_active;
2258 param->tgt_reset = cpe_tgt_tomtom_reset;
2259 param->tgt_read_mailbox = cpe_tgt_tomtom_read_mailbox;
2260 param->tgt_write_mailbox = cpe_tgt_tomtom_write_mailbox;
2261 param->tgt_read_ram = cpe_tgt_tomtom_read_RAM;
2262 param->tgt_write_ram = cpe_tgt_tomtom_write_RAM;
2263 param->tgt_route_notification =
2264 cpe_tgt_tomtom_route_notification;
2265 param->tgt_set_debug_mode = cpe_tgt_tomtom_set_debug_mode;
2266 param->tgt_get_cpe_info = cpe_tgt_tomtom_get_cpe_info;
2267 param->tgt_deinit = cpe_tgt_tomtom_deinit;
2268 param->tgt_voice_tx_lab = cpe_tgt_tomtom_voicetx;
2269 param->tgt_waiti_info = &cpe_tgt_tomtom_waiti_info;
2270
2271 param->inbox = kzalloc(TOMTOM_A_SVASS_SPE_INBOX_SIZE,
2272 GFP_KERNEL);
2273 if (!param->inbox)
2274 return CPE_SVC_NO_MEMORY;
2275
2276 param->outbox = kzalloc(TOMTOM_A_SVASS_SPE_OUTBOX_SIZE,
2277 GFP_KERNEL);
2278 if (!param->outbox) {
2279 kfree(param->inbox);
2280 return CPE_SVC_NO_MEMORY;
2281 }
2282 }
2283
2284 return CPE_SVC_SUCCESS;
2285}
2286
/*
 * cpe_tgt_wcd9335_boot - bring up the WCD9335 CPE subsystem.
 * @debug_mode: non-zero skips arming the watchdog so a debugger can halt
 *              the CPE without triggering a WDOG reset.
 *
 * The register writes below are a hardware bring-up sequence; their order
 * matters. Individual access errors are OR-accumulated and collapsed into
 * CPE_SVC_FAILED.
 */
static enum cpe_svc_result cpe_tgt_wcd9335_boot(int debug_mode)
{
	enum cpe_svc_result rc = CPE_SVC_SUCCESS;

	/* Arm the CPE watchdog unless booting for debug */
	if (!debug_mode)
		rc |= cpe_update_bits(
			WCD9335_CPE_SS_WDOG_CFG,
			0x3f, 0x31);
	else
		pr_info("%s: CPE in debug mode, WDOG disabled\n",
			__func__);

	/* NOTE(review): 19 is presumed to be the MAD buffer-ready interrupt
	 * period; bit meanings of CPAR_CTL (0x04/0x02/0x01) presumed to be
	 * reset/clock/enable -- confirm against the WCD9335 register spec.
	 */
	rc |= cpe_register_write(WCD9335_CPE_SS_CPARMAD_BUFRDY_INT_PERIOD, 19);
	rc |= cpe_update_bits(WCD9335_CPE_SS_CPAR_CTL, 0x04, 0x00);
	rc |= cpe_update_bits(WCD9335_CPE_SS_CPAR_CTL, 0x02, 0x02);
	rc |= cpe_update_bits(WCD9335_CPE_SS_CPAR_CTL, 0x01, 0x01);

	if (unlikely(rc)) {
		pr_err("%s: Failed to boot, err = %d\n",
			__func__, rc);
		rc = CPE_SVC_FAILED;
	}

	return rc;
}
2312
2313static u32 cpe_tgt_wcd9335_is_cpar_init_done(void)
2314{
2315 u8 temp = 0;
2316
2317 cpe_register_read(WCD9335_CPE_SS_STATUS, &temp);
2318 return temp & 0x1;
2319}
2320
2321static u32 cpe_tgt_wcd9335_is_active(void)
2322{
2323 u8 temp = 0;
2324
2325 cpe_register_read(WCD9335_CPE_SS_STATUS, &temp);
2326 return temp & 0x4;
2327}
2328
/*
 * cpe_tgt_wcd9335_reset - put the WCD9335 CPE back into a held state so it
 * can be re-booted.
 *
 * Clears the CPAR config enable bit (presumed), releases the IRAM/DRAM
 * power-domain shutdowns, then sets CPAR_CTL bit 0x04 (presumed reset
 * assert). Write order is a hardware requirement; errors are
 * OR-accumulated and collapsed into CPE_SVC_FAILED.
 */
static enum cpe_svc_result cpe_tgt_wcd9335_reset(void)
{
	enum cpe_svc_result rc = CPE_SVC_SUCCESS;

	rc |= cpe_update_bits(WCD9335_CPE_SS_CPAR_CFG, 0x01, 0x00);

	rc |= cpe_register_write(
		WCD9335_CODEC_RPM_PWR_CPE_IRAM_SHUTDOWN, 0x00);
	rc |= cpe_register_write(
		WCD9335_CODEC_RPM_PWR_CPE_DRAM1_SHUTDOWN, 0x00);
	rc |= cpe_register_write(
		WCD9335_CODEC_RPM_PWR_CPE_DRAM0_SHUTDOWN_1, 0x00);
	rc |= cpe_register_write(
		WCD9335_CODEC_RPM_PWR_CPE_DRAM0_SHUTDOWN_2, 0x00);

	rc |= cpe_update_bits(WCD9335_CPE_SS_CPAR_CTL, 0x04, 0x04);

	if (unlikely(rc)) {
		pr_err("%s: failed to reset cpe, err = %d\n",
			__func__, rc);
		rc = CPE_SVC_FAILED;
	}

	return rc;
}
2354
2355static enum cpe_svc_result cpe_tgt_wcd9335_read_mailbox(u8 *buffer,
2356 size_t size)
2357{
2358 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2359 u32 cnt = 0;
2360
2361 pr_debug("%s: size=%zd\n", __func__, size);
2362
2363 if (size > WCD9335_CPE_SS_SPE_OUTBOX_SIZE)
2364 size = WCD9335_CPE_SS_SPE_OUTBOX_SIZE;
2365
2366 for (cnt = 0; (cnt < size) && (rc == CPE_SVC_SUCCESS); cnt++)
2367 rc = cpe_register_read(WCD9335_CPE_SS_SPE_OUTBOX1(cnt),
2368 &buffer[cnt]);
2369
2370 rc = cpe_register_write(WCD9335_CPE_SS_OUTBOX1_ACK, 0x01);
2371
2372 if (unlikely(rc)) {
2373 pr_err("%s: failed to ACK outbox, err = %d\n",
2374 __func__, rc);
2375 rc = CPE_SVC_FAILED;
2376 }
2377
2378 return rc;
2379}
2380
2381static enum cpe_svc_result cpe_tgt_wcd9335_write_mailbox(u8 *buffer,
2382 size_t size)
2383{
2384 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2385 u32 cnt = 0;
2386
2387 pr_debug("%s: size = %zd\n", __func__, size);
2388 if (size > WCD9335_CPE_SS_SPE_INBOX_SIZE)
2389 size = WCD9335_CPE_SS_SPE_INBOX_SIZE;
2390 for (cnt = 0; (cnt < size) && (rc == CPE_SVC_SUCCESS); cnt++) {
2391 rc |= cpe_register_write(WCD9335_CPE_SS_SPE_INBOX1(cnt),
2392 buffer[cnt]);
2393 }
2394
2395 if (unlikely(rc)) {
2396 pr_err("%s: Error %d writing mailbox registers\n",
2397 __func__, rc);
2398 return rc;
2399 }
2400
2401 rc = cpe_register_write(WCD9335_CPE_SS_INBOX1_TRG, 1);
2402 return rc;
2403}
2404
2405static enum cpe_svc_result cpe_wcd9335_get_mem_addr(struct cpe_info *t_info,
2406 const struct cpe_svc_mem_segment *mem_seg,
2407 u32 *addr, u8 *mem)
2408{
2409 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2410 u32 offset, mem_sz, address;
2411 u8 mem_type;
2412
2413 switch (mem_seg->type) {
2414 case CPE_SVC_DATA_MEM:
2415 mem_type = MEM_ACCESS_DRAM_VAL;
2416 offset = WCD9335_CPE_SS_SPE_DRAM_OFFSET;
2417 mem_sz = WCD9335_CPE_SS_SPE_DRAM_SIZE;
2418 break;
2419
2420 case CPE_SVC_INSTRUCTION_MEM:
2421 mem_type = MEM_ACCESS_IRAM_VAL;
2422 offset = WCD9335_CPE_SS_SPE_IRAM_OFFSET;
2423 mem_sz = WCD9335_CPE_SS_SPE_IRAM_SIZE;
2424 break;
2425
2426 default:
2427 pr_err("%s: Invalid mem type = %u\n",
2428 __func__, mem_seg->type);
2429 return CPE_SVC_INVALID_HANDLE;
2430 }
2431
2432 if (mem_seg->cpe_addr < offset) {
2433 pr_err("%s: Invalid addr %x for mem type %u\n",
2434 __func__, mem_seg->cpe_addr, mem_type);
2435 return CPE_SVC_INVALID_HANDLE;
2436 }
2437
2438 address = mem_seg->cpe_addr - offset;
2439 if (address + mem_seg->size > mem_sz) {
2440 pr_err("%s: wrong size %zu, start address %x, mem_type %u\n",
2441 __func__, mem_seg->size, address, mem_type);
2442 return CPE_SVC_INVALID_HANDLE;
2443 }
2444
2445 (*addr) = address;
2446 (*mem) = mem_type;
2447
2448 return rc;
2449}
2450
2451static enum cpe_svc_result cpe_tgt_wcd9335_read_RAM(struct cpe_info *t_info,
2452 struct cpe_svc_mem_segment *mem_seg)
2453{
2454 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2455 u8 temp = 0;
2456 u32 cnt = 0;
2457 u8 mem = 0x0;
2458 u32 addr = 0;
2459 u32 lastaddr = 0;
2460 u32 ptr_update = true;
2461 bool autoinc;
2462
2463 if (!mem_seg) {
2464 pr_err("%s: Invalid buffer\n", __func__);
2465 return CPE_SVC_INVALID_HANDLE;
2466 }
2467
2468 rc = cpe_wcd9335_get_mem_addr(t_info, mem_seg, &addr, &mem);
2469
2470 if (rc != CPE_SVC_SUCCESS) {
2471 pr_err("%s: Cannot obtain address, mem_type %u\n",
2472 __func__, mem_seg->type);
2473 return rc;
2474 }
2475
2476 rc |= cpe_register_write(WCD9335_CPE_SS_MEM_CTRL, 0);
2477 autoinc = cpe_register_read_autoinc_supported();
2478
2479 if (autoinc)
2480 temp = 0x18;
2481 else
2482 temp = 0x10;
2483
2484 temp |= mem;
2485
2486 lastaddr = ~addr;
2487 do {
2488 if (!autoinc || (ptr_update)) {
2489 /* write LSB only if modified */
2490 if ((lastaddr & 0xFF) != (addr & 0xFF))
2491 rc |= cpe_register_write(
2492 WCD9335_CPE_SS_MEM_PTR_0,
2493 (addr & 0xFF));
2494 /* write middle byte only if modified */
2495 if (((lastaddr >> 8) & 0xFF) != ((addr >> 8) & 0xFF))
2496 rc |= cpe_register_write(
2497 WCD9335_CPE_SS_MEM_PTR_1,
2498 ((addr>>8) & 0xFF));
2499 /* write MSB only if modified */
2500 if (((lastaddr >> 16) & 0xFF) != ((addr >> 16) & 0xFF))
2501 rc |= cpe_register_write(
2502 WCD9335_CPE_SS_MEM_PTR_2,
2503 ((addr>>16) & 0xFF));
2504
2505 rc |= cpe_register_write(WCD9335_CPE_SS_MEM_CTRL, temp);
2506 lastaddr = addr;
2507 addr++;
2508 ptr_update = false;
2509 }
2510
2511 rc |= cpe_register_read(WCD9335_CPE_SS_MEM_BANK_0,
2512 &mem_seg->data[cnt]);
2513
2514 if (!autoinc)
2515 rc |= cpe_register_write(WCD9335_CPE_SS_MEM_CTRL, 0);
2516 } while ((++cnt < mem_seg->size) ||
2517 (rc != CPE_SVC_SUCCESS));
2518
2519 rc |= cpe_register_write(WCD9335_CPE_SS_MEM_CTRL, 0);
2520
2521 if (rc)
2522 pr_err("%s: Failed to read registers, err = %d\n",
2523 __func__, rc);
2524
2525 return rc;
2526}
2527
2528static enum cpe_svc_result cpe_tgt_wcd9335_write_RAM(struct cpe_info *t_info,
2529 const struct cpe_svc_mem_segment *mem_seg)
2530{
2531 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2532 u8 mem_reg_val = 0;
2533 u8 mem = MEM_ACCESS_NONE_VAL;
2534 u32 addr = 0;
2535 u8 *temp_ptr = NULL;
2536 u32 temp_size = 0;
2537 bool autoinc;
2538
2539 if (!mem_seg) {
2540 pr_err("%s: Invalid mem segment\n",
2541 __func__);
2542 return CPE_SVC_INVALID_HANDLE;
2543 }
2544
2545 rc = cpe_wcd9335_get_mem_addr(t_info, mem_seg, &addr, &mem);
2546
2547 if (rc != CPE_SVC_SUCCESS) {
2548 pr_err("%s: Cannot obtain address, mem_type %u\n",
2549 __func__, mem_seg->type);
2550 return rc;
2551 }
2552
2553 autoinc = cpe_register_read_autoinc_supported();
2554 if (autoinc)
2555 mem_reg_val = 0x18;
2556 else
2557 mem_reg_val = 0x10;
2558
2559 mem_reg_val |= mem;
2560
2561 rc = cpe_update_bits(WCD9335_CPE_SS_MEM_CTRL,
2562 0x0F, mem_reg_val);
2563
2564 rc = cpe_register_write(WCD9335_CPE_SS_MEM_PTR_0,
2565 (addr & 0xFF));
2566 rc = cpe_register_write(WCD9335_CPE_SS_MEM_PTR_1,
2567 ((addr >> 8) & 0xFF));
2568
2569 rc = cpe_register_write(WCD9335_CPE_SS_MEM_PTR_2,
2570 ((addr >> 16) & 0xFF));
2571
2572 temp_size = 0;
2573 temp_ptr = mem_seg->data;
2574
2575 while (temp_size <= mem_seg->size) {
2576 u32 to_write = (mem_seg->size >= temp_size+CHUNK_SIZE)
2577 ? CHUNK_SIZE : (mem_seg->size - temp_size);
2578
2579 if (t_info->state == CPE_STATE_OFFLINE) {
2580 pr_err("%s: CPE is offline\n", __func__);
2581 return CPE_SVC_FAILED;
2582 }
2583
2584 cpe_register_write_repeat(WCD9335_CPE_SS_MEM_BANK_0,
2585 temp_ptr, to_write);
2586 temp_size += CHUNK_SIZE;
2587 temp_ptr += CHUNK_SIZE;
2588 }
2589
2590 rc = cpe_register_write(WCD9335_CPE_SS_MEM_CTRL, 0);
2591
2592 if (rc)
2593 pr_err("%s: Failed to write registers, err = %d\n",
2594 __func__, rc);
2595 return rc;
2596}
2597
2598static enum cpe_svc_result cpe_tgt_wcd9335_route_notification(
2599 enum cpe_svc_module module,
2600 enum cpe_svc_route_dest dest)
2601{
2602 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2603
2604 pr_debug("%s: Module = %d, Destination = %d\n",
2605 __func__, module, dest);
2606
2607 switch (module) {
2608 case CPE_SVC_LISTEN_PROC:
2609 switch (dest) {
2610 case CPE_SVC_EXTERNAL:
2611 rc = cpe_update_bits(WCD9335_CPE_SS_CFG, 0x01, 0x01);
2612 break;
2613 case CPE_SVC_INTERNAL:
2614 rc = cpe_update_bits(WCD9335_CPE_SS_CFG, 0x01, 0x00);
2615 break;
2616 default:
2617 pr_err("%s: Invalid destination %d\n",
2618 __func__, dest);
2619 return CPE_SVC_FAILED;
2620 }
2621 break;
2622 default:
2623 pr_err("%s: Invalid module %d\n",
2624 __func__, module);
2625 rc = CPE_SVC_FAILED;
2626 break;
2627 }
2628 return rc;
2629}
2630
2631static enum cpe_svc_result cpe_tgt_wcd9335_set_debug_mode(u32 enable)
2632{
2633 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2634
2635 pr_debug("%s: enable = %s\n", __func__,
2636 (enable) ? "true" : "false");
2637
2638 return rc;
2639}
2640
/* Return the static CPE hardware-configuration descriptor for WCD9335. */
static const struct cpe_svc_hw_cfg *cpe_tgt_wcd9335_get_cpe_info(void)
{
	return &cpe_svc_wcd9335_info;
}
2645
2646static enum cpe_svc_result
2647cpe_tgt_wcd9335_deinit(struct cpe_svc_tgt_abstraction *param)
2648{
2649 kfree(param->inbox);
2650 param->inbox = NULL;
2651 kfree(param->outbox);
2652 param->outbox = NULL;
2653 memset(param, 0, sizeof(struct cpe_svc_tgt_abstraction));
2654
2655 return CPE_SVC_SUCCESS;
2656}
2657
2658static enum cpe_svc_result
2659 cpe_tgt_wcd9335_voicetx(bool enable)
2660{
2661 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2662 u8 val = 0;
2663
2664 pr_debug("%s: enable = %u\n", __func__, enable);
2665 if (enable)
2666 val = 0x02;
2667 else
2668 val = 0x00;
2669
2670 rc = cpe_update_bits(WCD9335_CPE_SS_CFG, 0x02, val);
2671 val = 0;
2672 cpe_register_read(WCD9335_CPE_SS_CFG, &val);
2673
2674 return rc;
2675}
2676
/*
 * Byte pattern used when scanning for the CPE WAITI (wait-for-interrupt)
 * state on WCD9335 -- NOTE(review): exact on-chip semantics presumed from
 * the field names; confirm against the CPE firmware interface spec.
 */
static u8 cpe_tgt_wcd9335_waiti_data[] = {0x00, 0x70, 0x00, 0x00};

static struct cpe_tgt_waiti_info cpe_tgt_wcd9335_waiti_info = {
	.tgt_waiti_size = ARRAY_SIZE(cpe_tgt_wcd9335_waiti_data),
	.tgt_waiti_data = cpe_tgt_wcd9335_waiti_data,
};
2683
2684static enum cpe_svc_result cpe_tgt_wcd9335_init(
2685 struct cpe_svc_codec_info_v1 *codec_info,
2686 struct cpe_svc_tgt_abstraction *param)
2687{
2688 if (!codec_info)
2689 return CPE_SVC_INVALID_HANDLE;
2690 if (!param)
2691 return CPE_SVC_INVALID_HANDLE;
2692
2693 if (codec_info->id == CPE_SVC_CODEC_WCD9335) {
2694 param->tgt_boot = cpe_tgt_wcd9335_boot;
2695 param->tgt_cpar_init_done = cpe_tgt_wcd9335_is_cpar_init_done;
2696 param->tgt_is_active = cpe_tgt_wcd9335_is_active;
2697 param->tgt_reset = cpe_tgt_wcd9335_reset;
2698 param->tgt_read_mailbox = cpe_tgt_wcd9335_read_mailbox;
2699 param->tgt_write_mailbox = cpe_tgt_wcd9335_write_mailbox;
2700 param->tgt_read_ram = cpe_tgt_wcd9335_read_RAM;
2701 param->tgt_write_ram = cpe_tgt_wcd9335_write_RAM;
2702 param->tgt_route_notification =
2703 cpe_tgt_wcd9335_route_notification;
2704 param->tgt_set_debug_mode = cpe_tgt_wcd9335_set_debug_mode;
2705 param->tgt_get_cpe_info = cpe_tgt_wcd9335_get_cpe_info;
2706 param->tgt_deinit = cpe_tgt_wcd9335_deinit;
2707 param->tgt_voice_tx_lab = cpe_tgt_wcd9335_voicetx;
2708 param->tgt_waiti_info = &cpe_tgt_wcd9335_waiti_info;
2709
2710 param->inbox = kzalloc(WCD9335_CPE_SS_SPE_INBOX_SIZE,
2711 GFP_KERNEL);
2712 if (!param->inbox)
2713 return CPE_SVC_NO_MEMORY;
2714
2715 param->outbox = kzalloc(WCD9335_CPE_SS_SPE_OUTBOX_SIZE,
2716 GFP_KERNEL);
2717 if (!param->outbox) {
2718 kfree(param->inbox);
2719 return CPE_SVC_NO_MEMORY;
2720 }
2721 }
2722
2723 return CPE_SVC_SUCCESS;
2724}
2725
/* Kernel module metadata */
MODULE_DESCRIPTION("WCD CPE Services");
MODULE_LICENSE("GPL v2");