/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
12
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/mfd/wcd9xxx/core.h>
#include <sound/cpe_cmi.h>
#include <sound/soc.h>
#include <linux/mfd/wcd9xxx/wcd9330_registers.h>
#include <linux/mfd/wcd9335/registers.h>
#include "wcd_cpe_services.h"
#include "wcd_cmi_api.h"
27
/* Size of the shared-memory message buffer requested from the CPE core */
#define CPE_MSG_BUFFER_SIZE 132
#define CPE_NO_SERVICE 0

#define CMI_DRIVER_SUPPORTED_VERSION 0
#define CMI_API_SUCCESS 0
#define CMI_MSG_TRANSPORT (0x0002)
#define CPE_SVC_INACTIVE_STATE_RETRIES_MAX 10

/* TomTom (WCD9330) CPE memory map and mailbox geometry */
#define TOMTOM_A_SVASS_SPE_DRAM_OFFSET				0x50000
#define TOMTOM_A_SVASS_SPE_DRAM_SIZE				0x30000
#define TOMTOM_A_SVASS_SPE_IRAM_OFFSET				0x80000
#define TOMTOM_A_SVASS_SPE_IRAM_SIZE				0xC000
#define TOMTOM_A_SVASS_SPE_INBOX_SIZE				12
#define TOMTOM_A_SVASS_SPE_OUTBOX_SIZE				12

/* Register values for CPE memory access / listen control */
#define MEM_ACCESS_NONE_VAL			0x0
#define MEM_ACCESS_IRAM_VAL			0x1
#define MEM_ACCESS_DRAM_VAL			0x2
#define LISTEN_CTL_SPE_VAL			0x0
#define LISTEN_CTL_MSM_VAL			0x1

/* Byte N of the TomTom inbox/outbox register banks */
#define TOMTOM_A_SVASS_SPE_INBOX(N)	(TOMTOM_A_SVASS_SPE_INBOX_0 + (N))
#define TOMTOM_A_SVASS_SPE_OUTBOX(N)	(TOMTOM_A_SVASS_SPE_OUTBOX_0 + (N))

/* WCD9335 CPE memory map and mailbox geometry */
#define WCD9335_CPE_SS_SPE_DRAM_OFFSET		0x48000
#define WCD9335_CPE_SS_SPE_DRAM_SIZE		0x34000
#define WCD9335_CPE_SS_SPE_IRAM_OFFSET		0x80000
#define WCD9335_CPE_SS_SPE_IRAM_SIZE		0x20000

#define WCD9335_CPE_SS_SPE_INBOX_SIZE		16
#define WCD9335_CPE_SS_SPE_OUTBOX_SIZE		16
#define WCD9335_CPE_SS_SPE_MEM_BANK_SIZ		16

/* Byte N of the WCD9335 inbox/outbox/memory-bank register banks */
#define WCD9335_CPE_SS_SPE_INBOX1(N)	(WCD9335_CPE_SS_INBOX1_0 + (N))
#define WCD9335_CPE_SS_SPE_OUTBOX1(N)	(WCD9335_CPE_SS_OUTBOX1_0 + (N))
#define WCD9335_CPE_SS_MEM_BANK(N)	(WCD9335_CPE_SS_MEM_BANK_0 + (N))

#define CHUNK_SIZE 16
66
/*
 * Debug-logging wrappers around mutex_lock/mutex_unlock.
 * Wrapped in do { } while (0) so they expand safely as a single
 * statement (the old bare-brace form broke `if (x) GRAB(); else ...`).
 */
#define CPE_SVC_GRAB_LOCK(lock, name)		\
do {						\
	pr_debug("%s: %s lock acquire\n",	\
		 __func__, name);		\
	mutex_lock(lock);			\
} while (0)

#define CPE_SVC_REL_LOCK(lock, name)		\
do {						\
	pr_debug("%s: %s lock release\n",	\
		 __func__, name);		\
	mutex_unlock(lock);			\
} while (0)
80
/*
 * Hardware description for the TomTom (WCD9330) CPE.
 * Positional initializer; order must match struct cpe_svc_hw_cfg
 * (declared in wcd_cpe_services.h): DRAM size/offset, IRAM size/offset,
 * inbox size, outbox size — inferred from the macro names, TODO confirm
 * against the header.
 */
static const struct cpe_svc_hw_cfg cpe_svc_tomtom_info = {
	TOMTOM_A_SVASS_SPE_DRAM_SIZE,
	TOMTOM_A_SVASS_SPE_DRAM_OFFSET,
	TOMTOM_A_SVASS_SPE_IRAM_SIZE,
	TOMTOM_A_SVASS_SPE_IRAM_OFFSET,
	TOMTOM_A_SVASS_SPE_INBOX_SIZE,
	TOMTOM_A_SVASS_SPE_OUTBOX_SIZE
};
89
/*
 * Hardware description for the WCD9335 CPE; same field order as the
 * TomTom table above (positional init against struct cpe_svc_hw_cfg
 * from wcd_cpe_services.h).
 */
static const struct cpe_svc_hw_cfg cpe_svc_wcd9335_info = {
	WCD9335_CPE_SS_SPE_DRAM_SIZE,
	WCD9335_CPE_SS_SPE_DRAM_OFFSET,
	WCD9335_CPE_SS_SPE_IRAM_SIZE,
	WCD9335_CPE_SS_SPE_IRAM_OFFSET,
	WCD9335_CPE_SS_SPE_INBOX_SIZE,
	WCD9335_CPE_SS_SPE_OUTBOX_SIZE
};
98
/* Top-level state of the CPE service state machine */
enum cpe_state {
	CPE_STATE_UNINITIALIZED = 0,
	CPE_STATE_INITIALIZED,
	CPE_STATE_IDLE,		/* booted; ready to accept messages */
	CPE_STATE_DOWNLOADING,	/* firmware segment download in progress */
	CPE_STATE_BOOTING,
	CPE_STATE_SENDING_MSG,	/* a CMI message is in flight (see pending) */
	CPE_STATE_OFFLINE,
	CPE_STATE_BUFFERING,
	CPE_STATE_BUFFERING_CANCELLED
};
110
/* Sub-state qualifying cpe_state (e.g. which phase of boot/msg send) */
enum cpe_substate {
	CPE_SS_IDLE = 0,
	CPE_SS_MSG_REQUEST_ACCESS,	/* waiting for DRAM access grant */
	CPE_SS_MSG_SEND_INBOX,		/* message written to inbox */
	CPE_SS_MSG_SENT,
	CPE_SS_DL_DOWNLOADING,
	CPE_SS_DL_COMPLETED,
	CPE_SS_BOOT,
	CPE_SS_BOOT_INIT,
	CPE_SS_ONLINE
};
122
/* Commands queued to the CPE worker thread (see cpe_send_cmd_to_thread) */
enum cpe_command {
	CPE_CMD_KILL_THREAD = 0,
	CPE_CMD_BOOT,
	CPE_CMD_BOOT_INITIALIZE,
	CPE_CMD_BOOT_COMPLETE,
	CPE_CMD_SEND_MSG,
	CPE_CMD_SEND_TRANS_MSG,
	CPE_CMD_SEND_MSG_COMPLETE,
	CPE_CMD_PROCESS_IRQ,
	CPE_CMD_RAMDUMP,
	CPE_CMD_DL_SEGMENT,
	CPE_CMD_SHUTDOWN,
	CPE_CMD_RESET,
	CPE_CMD_DEINITIALIZE,
	CPE_CMD_READ,
	CPE_CMD_ENABLE_LAB,
	CPE_CMD_DISABLE_LAB,
	CPE_CMD_SWAP_BUFFER,
	CPE_LAB_CFG_SB,
	CPE_CMD_CANCEL_MEMACCESS,
	CPE_CMD_PROC_INCOMING_MSG,
	CPE_CMD_FTM_TEST,
};
146
/*
 * Result of processing one queued command; anything other than
 * CPE_PROC_SUCCESS stops the queue drain in cpe_cmd_received().
 */
enum cpe_process_result {
	CPE_PROC_SUCCESS = 0,
	CPE_PROC_FAILED,
	CPE_PROC_KILLED,
	CPE_PROC_QUEUED,	/* command re-queued; node is NOT freed */
};
153
/* One entry in the worker-thread command queue (cpe_info.main_queue) */
struct cpe_command_node {
	enum cpe_command command;
	enum cpe_svc_result result;
	void *data;		/* command payload; freed by cpe_command_cleanup */
	struct list_head list;
};
160
/* Per-CPE-instance context: worker thread, state machine, clients */
struct cpe_info {
	/* FIFO of cpe_command_node entries; guarded by msg_lock */
	struct list_head main_queue;
	/* Signaled whenever a command is queued; worker waits on it */
	struct completion cmd_complete;
	/* Completed by the worker thread as it exits */
	struct completion thread_comp;
	void *thread_handler;	/* kthread handle from kthread_run() */
	bool stop_thread;	/* set under msg_lock to stop the worker */
	struct mutex msg_lock;	/* protects main_queue and worker hand-off */
	enum cpe_state state;
	enum cpe_substate substate;
	/* Registered notification clients; guarded by cpe_d.cpe_svc_lock */
	struct list_head client_list;
	/* Codec-specific state-machine hooks */
	enum cpe_process_result (*cpe_process_command)
			(struct cpe_command_node *command_node);
	enum cpe_svc_result (*cpe_cmd_validate)
				(const struct cpe_info *i,
				 enum cpe_command command);
	enum cpe_svc_result (*cpe_start_notification)
			(struct cpe_info *i);
	u32 initialized;
	struct cpe_svc_tgt_abstraction *tgt;	/* codec target ops table */
	void *pending;		/* in-flight struct cpe_send_msg, if any */
	void *data;
	void *client_context;
	u32 codec_id;
	struct work_struct clk_plan_work;	/* runs cpe_clk_plan_work() */
	/* Completed when the core-service basic response arrives */
	struct completion core_svc_cmd_compl;
};
187
/*
 * Target "wait-for-idle" byte sequence; presumably polled/written during
 * target idle handling — usage not visible in this chunk, verify in the
 * target implementations.
 */
struct cpe_tgt_waiti_info {
	u8 tgt_waiti_size;	/* number of bytes in tgt_waiti_data */
	u8 *tgt_waiti_data;
};
192
/*
 * Ops table abstracting codec-specific CPE access (TomTom vs WCD9335).
 * Populated by cpe_tgt_tomtom_init()/cpe_tgt_wcd9335_init().
 */
struct cpe_svc_tgt_abstraction {
	enum cpe_svc_result (*tgt_boot)(int debug_mode);

	u32 (*tgt_cpar_init_done)(void);

	u32 (*tgt_is_active)(void);

	enum cpe_svc_result (*tgt_reset)(void);

	enum cpe_svc_result (*tgt_stop)(void);

	/* Read/write the hardware mailbox registers */
	enum cpe_svc_result (*tgt_read_mailbox)
				(u8 *buffer, size_t size);

	enum cpe_svc_result (*tgt_write_mailbox)
				(u8 *buffer, size_t size);

	/* Read/write CPE IRAM/DRAM segments */
	enum cpe_svc_result (*tgt_read_ram)
				(struct cpe_info *c,
				 struct cpe_svc_mem_segment *data);

	enum cpe_svc_result (*tgt_write_ram)
				(struct cpe_info *c,
				 const struct cpe_svc_mem_segment *data);

	enum cpe_svc_result (*tgt_route_notification)
				(enum cpe_svc_module module,
				 enum cpe_svc_route_dest dest);

	enum cpe_svc_result (*tgt_set_debug_mode)(u32 enable);
	const struct cpe_svc_hw_cfg *(*tgt_get_cpe_info)(void);
	enum cpe_svc_result (*tgt_deinit)
				(struct cpe_svc_tgt_abstraction *param);
	enum cpe_svc_result (*tgt_voice_tx_lab)
				(bool);
	u8 *inbox;	/* staging buffers sized per tgt_get_cpe_info() */
	u8 *outbox;
	struct cpe_tgt_waiti_info *tgt_waiti_info;
};
232
233static enum cpe_svc_result cpe_tgt_tomtom_init(
234 struct cpe_svc_codec_info_v1 *codec_info,
235 struct cpe_svc_tgt_abstraction *param);
236
237static enum cpe_svc_result cpe_tgt_wcd9335_init(
238 struct cpe_svc_codec_info_v1 *codec_info,
239 struct cpe_svc_tgt_abstraction *param);
240
/* A CMI message queued for delivery to the CPE */
struct cpe_send_msg {
	u8 *payload;
	u32 isobm;	/* non-zero: payload carries an out-of-band buffer */
	u32 address;	/* CPE DRAM address for large messages; 0 = inbox */
	size_t size;
};
247
/* Handle for a CPE read session; usage not visible in this chunk */
struct cpe_read_handle {
	void *registration;
	struct cpe_info t_info;
	struct list_head buffers;
	void *config;
};
254
/* Pair of client callback types: service events and CMI messages */
struct generic_notification {
	void (*notification)
		(const struct cpe_svc_notification *parameter);
	void (*cmi_notification)
		(const struct cmi_api_notification *parameter);
};
261
/* One registered notification client (entry in cpe_info.client_list) */
struct cpe_notif_node {
	struct generic_notification notif;
	u32 mask;	/* bitmask of CPE_SVC_* events the client wants */
	u32 service;	/* CMI service id the client is bound to */
	const struct cpe_info *context;
	const char *name;
	u32 disabled;	/* suppresses the plain notification callback */
	struct list_head list;
};
271
/* File-global driver state (single instance: cpe_d below) */
struct cpe_priv {
	struct cpe_info *cpe_default_handle;
	/* Codec-provided callbacks */
	void (*cpe_irq_control_callback)(u32 enable);
	void (*cpe_query_freq_plans_cb)
		(void *cdc_priv,
		 struct cpe_svc_cfg_clk_plan *clk_freq);
	void (*cpe_change_freq_plan_cb)(void *cdc_priv,
			u32 clk_freq);
	u32 cpe_msg_buffer;	/* CPE-side address of shared msg buffer */
	void *cpe_cmi_handle;	/* registration handle from cmi_register() */
	struct mutex cpe_api_mutex;	/* serializes public API / IRQ work */
	struct mutex cpe_svc_lock;	/* guards client_list */
	struct cpe_svc_boot_event cpe_debug_vector;
	void *cdc_priv;		/* codec handle passed to snd_soc_* calls */
};
287
288static struct cpe_priv cpe_d;
289
290static enum cpe_svc_result __cpe_svc_shutdown(void *cpe_handle);
291
292static enum cpe_svc_result cpe_is_command_valid(
293 const struct cpe_info *t_info,
294 enum cpe_command command);
295
296static int cpe_register_read(u32 reg, u8 *val)
297{
298 *(val) = snd_soc_read(cpe_d.cdc_priv, reg);
299 return 0;
300}
301
302static enum cpe_svc_result cpe_update_bits(u32 reg,
303 u32 mask, u32 value)
304{
305 int ret = 0;
306
307 ret = snd_soc_update_bits(cpe_d.cdc_priv, reg,
308 mask, value);
309 if (ret < 0)
310 return CPE_SVC_FAILED;
311
312 return CPE_SVC_SUCCESS;
313}
314
315static int cpe_register_write(u32 reg, u32 val)
316{
317 int ret = 0;
318
319 if (reg != TOMTOM_A_SVASS_MEM_BANK &&
320 reg != WCD9335_CPE_SS_MEM_BANK_0)
321 pr_debug("%s: reg = 0x%x, value = 0x%x\n",
322 __func__, reg, val);
323
324 ret = snd_soc_write(cpe_d.cdc_priv, reg, val);
325 if (ret < 0)
326 return CPE_SVC_FAILED;
327
328 return CPE_SVC_SUCCESS;
329}
330
331static int cpe_register_write_repeat(u32 reg, u8 *ptr, u32 to_write)
332{
333 struct snd_soc_codec *codec = cpe_d.cdc_priv;
334 struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
335 int ret = 0;
336
337 ret = wcd9xxx_slim_write_repeat(wcd9xxx, reg, to_write, ptr);
338 if (ret != 0)
339 pr_err("%s: slim_write_repeat failed\n", __func__);
340
341 if (ret < 0)
342 return CPE_SVC_FAILED;
343
344 return CPE_SVC_SUCCESS;
345}
346
/* Auto-increment register reads are always available on these codecs. */
static bool cpe_register_read_autoinc_supported(void)
{
	return true;
}
351
352
/*
 * Drain the worker command queue, dispatching each node to the
 * codec-specific cpe_process_command hook.
 * Called under msgq locked context (caller holds msg_lock).
 */
static void cpe_cmd_received(struct cpe_info *t_info)
{
	struct cpe_command_node *node = NULL;
	enum cpe_process_result proc_rc = CPE_PROC_SUCCESS;

	if (!t_info) {
		pr_err("%s: Invalid thread info\n",
			__func__);
		return;
	}

	/* Stop draining as soon as one command does not fully complete */
	while (!list_empty(&t_info->main_queue)) {
		if (proc_rc != CPE_PROC_SUCCESS)
			break;
		node = list_first_entry(&t_info->main_queue,
					struct cpe_command_node, list);
		if (!node)
			break;
		list_del(&node->list);
		proc_rc = t_info->cpe_process_command(node);
		pr_debug("%s: process command return %d\n",
			 __func__, proc_rc);

		switch (proc_rc) {
		case CPE_PROC_SUCCESS:
			kfree(node);
			break;
		case CPE_PROC_FAILED:
			kfree(node);
			pr_err("%s: cmd failed\n", __func__);
			break;
		case CPE_PROC_KILLED:
			/*
			 * Node deliberately not freed here — presumably
			 * owned elsewhere after a kill; verify against
			 * the process-command implementation.
			 */
			break;
		default:
			/*
			 * e.g. CPE_PROC_QUEUED: put the node back at the
			 * head; the loop then exits on the next iteration
			 * because proc_rc != CPE_PROC_SUCCESS.
			 */
			list_add(&node->list, &(t_info->main_queue));

		}
	}
}
393
394static int cpe_worker_thread(void *context)
395{
396 struct cpe_info *t_info = (struct cpe_info *)context;
397
398 /*
399 * Thread will run until requested to stop explicitly
400 * by setting the t_info->stop_thread flag
401 */
402 while (1) {
403 /* Wait for command to be processed */
404 wait_for_completion(&t_info->cmd_complete);
405
406 CPE_SVC_GRAB_LOCK(&t_info->msg_lock, "msg_lock");
407 cpe_cmd_received(t_info);
408 reinit_completion(&t_info->cmd_complete);
409 /* Check if thread needs to be stopped */
410 if (t_info->stop_thread)
411 goto unlock_and_exit;
412 CPE_SVC_REL_LOCK(&t_info->msg_lock, "msg_lock");
413 };
414
415unlock_and_exit:
416 pr_debug("%s: thread stopped\n", __func__);
417 CPE_SVC_REL_LOCK(&t_info->msg_lock, "msg_lock");
418 complete_and_exit(&t_info->thread_comp, 0);
419}
420
421static void cpe_create_worker_thread(struct cpe_info *t_info)
422{
423 INIT_LIST_HEAD(&t_info->main_queue);
424 init_completion(&t_info->cmd_complete);
425 init_completion(&t_info->thread_comp);
426 t_info->stop_thread = false;
427 t_info->thread_handler = kthread_run(cpe_worker_thread,
428 (void *)t_info, "cpe-worker-thread");
429 pr_debug("%s: Created new worker thread\n",
430 __func__);
431}
432
433static void cpe_cleanup_worker_thread(struct cpe_info *t_info)
434{
435 if (!t_info->thread_handler) {
436 pr_err("%s: thread not created\n", __func__);
437 return;
438 }
439
440 /*
441 * Wake up the command handler in case
442 * it is waiting for an command to be processed.
443 */
444 CPE_SVC_GRAB_LOCK(&t_info->msg_lock, "msg_lock");
445 t_info->stop_thread = true;
446 complete(&t_info->cmd_complete);
447 CPE_SVC_REL_LOCK(&t_info->msg_lock, "msg_lock");
448
449 /* Wait for the thread to exit */
450 wait_for_completion(&t_info->thread_comp);
451 t_info->thread_handler = NULL;
452
453 pr_debug("%s: Thread cleaned up successfully\n",
454 __func__);
455}
456
457static enum cpe_svc_result
458cpe_send_cmd_to_thread(struct cpe_info *t_info,
459 enum cpe_command command, void *data,
460 bool high_prio)
461{
462 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
463 struct cpe_command_node *cmd = NULL;
464
465 rc = cpe_is_command_valid(t_info, command);
466 if (rc != CPE_SVC_SUCCESS) {
467 pr_err("%s: Invalid command %d\n",
468 __func__, command);
469 return rc;
470 }
471
472 cmd = kzalloc(sizeof(struct cpe_command_node),
473 GFP_ATOMIC);
474 if (!cmd)
475 return CPE_SVC_NO_MEMORY;
476
477 cmd->command = command;
478 cmd->data = data;
479
480 CPE_SVC_GRAB_LOCK(&t_info->msg_lock, "msg_lock");
481 if (high_prio)
482 list_add(&(cmd->list),
483 &(t_info->main_queue));
484 else
485 list_add_tail(&(cmd->list),
486 &(t_info->main_queue));
487 complete(&t_info->cmd_complete);
488 CPE_SVC_REL_LOCK(&t_info->msg_lock, "msg_lock");
489
490 return rc;
491}
492
493static enum cpe_svc_result cpe_change_state(
494 struct cpe_info *t_info,
495 enum cpe_state state, enum cpe_substate ss)
496{
497 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
498
499 if (!t_info)
500 t_info = cpe_d.cpe_default_handle;
501
502 t_info->state = state;
503 t_info->substate = ss;
504
505 pr_debug("%s: current state: %d,%d, new_state: %d,%d\n",
506 __func__, t_info->state, t_info->substate,
507 state, ss);
508
509 return rc;
510}
511
512static enum cpe_svc_result
513cpe_is_command_valid(const struct cpe_info *t_info,
514 enum cpe_command command)
515{
516 enum cpe_svc_result rc = CPE_SVC_INVALID_HANDLE;
517
518 if (t_info && t_info->cpe_cmd_validate)
519 rc = t_info->cpe_cmd_validate(t_info, command);
520 else
521 pr_err("%s: invalid handle or callback\n",
522 __func__);
523 return rc;
524}
525
526static void cpe_notify_client(struct cpe_notif_node *client,
527 struct cpe_svc_notification *payload)
528{
529 if (!client || !payload) {
530 pr_err("%s: invalid client or payload\n",
531 __func__);
532 return;
533 }
534
535 if (!(client->mask & payload->event)) {
536 pr_debug("%s: client mask 0x%x not registered for event 0x%x\n",
537 __func__, client->mask, payload->event);
538 return;
539 }
540
541 if (client->notif.notification && !client->disabled)
542 client->notif.notification(payload);
543
544 if ((client->mask & CPE_SVC_CMI_MSG) &&
545 client->notif.cmi_notification)
546 client->notif.cmi_notification(
547 (const struct cmi_api_notification *)payload);
548}
549
550static void cpe_broadcast_notification(const struct cpe_info *t_info,
551 struct cpe_svc_notification *payload)
552{
553 struct cpe_notif_node *n = NULL;
554
555 if (!t_info || !payload) {
556 pr_err("%s: invalid handle\n", __func__);
557 return;
558 }
559
560 pr_debug("%s: notify clients, event = %d\n",
561 __func__, payload->event);
562 payload->private_data = cpe_d.cdc_priv;
563
564 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
565 list_for_each_entry(n, &t_info->client_list, list) {
566 if (!(n->mask & CPE_SVC_CMI_MSG))
567 cpe_notify_client(n, payload);
568 }
569 CPE_SVC_REL_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
570}
571
572static void *cpe_register_generic(struct cpe_info *t_info,
573 void notification_callback(
574 const struct cpe_svc_notification *parameter),
575 void cmi_callback(
576 const struct cmi_api_notification *parameter),
577 u32 mask, u32 service, const char *name)
578{
579 struct cpe_notif_node *n = NULL;
580
581 n = kzalloc(sizeof(struct cpe_notif_node),
582 GFP_KERNEL);
583 if (!n)
584 return NULL;
585 n->mask = mask;
586 n->service = service;
587 n->notif.notification = notification_callback;
588 n->notif.cmi_notification = cmi_callback;
589 n->context = t_info;
590 n->disabled = false;
591 n->name = name;
592
593 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
594 /* Make sure CPE core service is first */
595 if (service == CMI_CPE_CORE_SERVICE_ID)
596 list_add(&n->list, &t_info->client_list);
597 else
598 list_add_tail(&n->list, &t_info->client_list);
599 CPE_SVC_REL_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
600
601 return n;
602}
603
604static enum cpe_svc_result cpe_deregister_generic(struct cpe_info *t_info,
605 void *reg_handle)
606{
607 struct cpe_notif_node *n = (struct cpe_notif_node *)reg_handle;
608
609 if (!t_info || !reg_handle) {
610 pr_err("%s: invalid handle\n", __func__);
611 return CPE_SVC_INVALID_HANDLE;
612 }
613
614 list_del(&(n->list));
615 kfree(reg_handle);
616
617 return CPE_SVC_SUCCESS;
618}
619
620static enum cpe_svc_result cpe_svc_tgt_init(struct cpe_svc_codec_info_v1 *i,
621 struct cpe_svc_tgt_abstraction *abs)
622{
623 if (!i || !abs) {
624 pr_err("%s: Incorrect information provided\n",
625 __func__);
626 return CPE_SVC_FAILED;
627 }
628
629 switch (i->id) {
630 case CPE_SVC_CODEC_TOMTOM:
631 return cpe_tgt_tomtom_init(i, abs);
632 case CPE_SVC_CODEC_WCD9335:
633 return cpe_tgt_wcd9335_init(i, abs);
634 default:
635 pr_err("%s: Codec type %d not supported\n",
636 __func__, i->id);
637 return CPE_SVC_FAILED;
638 }
639
640 return CPE_SVC_SUCCESS;
641}
642
643static void cpe_notify_cmi_client(struct cpe_info *t_info, u8 *payload,
644 enum cpe_svc_result result)
645{
646 struct cpe_notif_node *n = NULL;
647 struct cmi_api_notification notif;
648 struct cmi_hdr *hdr;
649 u8 service = 0;
650
651 if (!t_info || !payload) {
652 pr_err("%s: invalid payload/handle\n",
653 __func__);
654 return;
655 }
656
657 hdr = CMI_GET_HEADER(payload);
658 service = CMI_HDR_GET_SERVICE(hdr);
659
660 notif.event = CPE_SVC_CMI_MSG;
661 notif.result = result;
662 notif.message = payload;
663
664 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
665 list_for_each_entry(n, &t_info->client_list, list) {
666
667 if ((n->mask & CPE_SVC_CMI_MSG) &&
668 n->service == service &&
669 n->notif.cmi_notification) {
670 n->notif.cmi_notification(&notif);
671 break;
672 }
673 }
674 CPE_SVC_REL_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
675}
676
677static void cpe_toggle_irq_notification(struct cpe_info *t_info, u32 value)
678{
679 if (cpe_d.cpe_irq_control_callback)
680 cpe_d.cpe_irq_control_callback(value);
681}
682
683static void cpe_command_cleanup(struct cpe_command_node *command_node)
684{
685 switch (command_node->command) {
686 case CPE_CMD_SEND_MSG:
687 case CPE_CMD_SEND_TRANS_MSG:
688 case CPE_CMD_SEND_MSG_COMPLETE:
689 case CPE_CMD_SHUTDOWN:
690 case CPE_CMD_READ:
691 kfree(command_node->data);
692 command_node->data = NULL;
693 break;
694 default:
695 pr_err("%s: unhandled command\n",
696 __func__);
697 break;
698 }
699}
700
/*
 * Build a CMI message in the target's inbox staging buffer and write it
 * to the hardware mailbox.
 *
 * opcode selects a canned core-service message (mem alloc, DRAM access
 * request, basic response); any other opcode sends the caller's `msg`:
 * either staged through CPE DRAM (msg->address != 0, with an optional
 * out-of-band buffer written first) or copied inline into the inbox.
 * `msg` may be NULL for the canned opcodes.
 */
static enum cpe_svc_result cpe_send_msg_to_inbox(
		struct cpe_info *t_info, u32 opcode,
		struct cpe_send_msg *msg)
{
	size_t bytes = 0;
	size_t inbox_size =
		t_info->tgt->tgt_get_cpe_info()->inbox_size;
	struct cmi_hdr *hdr;
	enum cpe_svc_result rc = CPE_SVC_SUCCESS;

	/* Common header: core service, in-band, driver-supported version */
	memset(t_info->tgt->inbox, 0, inbox_size);
	hdr = CMI_GET_HEADER(t_info->tgt->inbox);
	CMI_HDR_SET_SESSION(hdr, 1);
	CMI_HDR_SET_SERVICE(hdr, CMI_CPE_CORE_SERVICE_ID);
	CMI_HDR_SET_VERSION(hdr, CMI_DRIVER_SUPPORTED_VERSION);
	CMI_HDR_SET_OBM(hdr, CMI_OBM_FLAG_IN_BAND);

	switch (opcode) {
	case CPE_CORE_SVC_CMD_SHARED_MEM_ALLOC: {
		struct cmi_core_svc_cmd_shared_mem_alloc *m;

		CMI_HDR_SET_OPCODE(hdr,
			CPE_CORE_SVC_CMD_SHARED_MEM_ALLOC);
		CMI_HDR_SET_PAYLOAD_SIZE(hdr,
			sizeof(struct cmi_core_svc_cmd_shared_mem_alloc));
		m = (struct cmi_core_svc_cmd_shared_mem_alloc *)
			CMI_GET_PAYLOAD(t_info->tgt->inbox);
		m->size = CPE_MSG_BUFFER_SIZE;
		pr_debug("send shared mem alloc msg to cpe inbox\n");
		}
		break;
	case CPE_CORE_SVC_CMD_DRAM_ACCESS_REQ:
		/* Zero-payload request for DRAM write access */
		CMI_HDR_SET_OPCODE(hdr,
			CPE_CORE_SVC_CMD_DRAM_ACCESS_REQ);
		CMI_HDR_SET_PAYLOAD_SIZE(hdr, 0);
		pr_debug("%s: Creating DRAM acces request msg\n",
			 __func__);
		break;

	case CPE_CMI_BASIC_RSP_OPCODE: {
		struct cmi_basic_rsp_result *rsp;

		CMI_HDR_SET_OPCODE(hdr,
			CPE_CMI_BASIC_RSP_OPCODE);
		CMI_HDR_SET_PAYLOAD_SIZE(hdr,
			sizeof(struct cmi_basic_rsp_result));
		rsp = (struct cmi_basic_rsp_result *)
			CMI_GET_PAYLOAD(t_info->tgt->inbox);
		rsp->status = 0;
		pr_debug("%s: send basic response\n", __func__);
		}
		break;

	default:
		if (msg->address != 0) {
			struct cmi_msg_transport *m = NULL;
			struct cpe_svc_mem_segment mem_seg;

			mem_seg.type = CPE_SVC_DATA_MEM;
			if (msg->isobm) {
				/* Stage the out-of-band buffer first */
				struct cmi_obm *obm = (struct cmi_obm *)
					CMI_GET_PAYLOAD(msg->payload);
				mem_seg.cpe_addr = obm->mem_handle;
				mem_seg.data = (u8 *)obm->data_ptr.kvaddr;
				mem_seg.size = obm->size;
				t_info->tgt->tgt_write_ram(t_info, &mem_seg);
			}

			/* Copy the message body into CPE DRAM ... */
			mem_seg.cpe_addr = msg->address;
			mem_seg.data = msg->payload;
			mem_seg.size = msg->size;
			t_info->tgt->tgt_write_ram(t_info, &mem_seg);

			/* ... and send a transport descriptor via inbox */
			hdr = CMI_GET_HEADER(t_info->tgt->inbox);
			CMI_HDR_SET_OPCODE(hdr, CMI_MSG_TRANSPORT);
			m = (struct cmi_msg_transport *)
				CMI_GET_PAYLOAD(t_info->tgt->inbox);
			m->addr = msg->address;
			m->size = msg->size;
			CMI_HDR_SET_PAYLOAD_SIZE(hdr,
				sizeof(struct cmi_msg_transport));
		} else {
			/* Small message: send fully in-band */
			memcpy(t_info->tgt->inbox, msg->payload,
			       msg->size);
		}

		break;
	}

	pr_debug("%s: sending message to cpe inbox\n",
		 __func__);
	bytes = sizeof(struct cmi_hdr);
	hdr = CMI_GET_HEADER(t_info->tgt->inbox);
	bytes += CMI_HDR_GET_PAYLOAD_SIZE(hdr);
	rc = t_info->tgt->tgt_write_mailbox(t_info->tgt->inbox, bytes);

	return rc;
}
800
801static bool cpe_is_cmd_clk_req(void *cmd)
802{
803 struct cmi_hdr *hdr;
804
805 hdr = CMI_GET_HEADER(cmd);
806
807 if ((CMI_HDR_GET_SERVICE(hdr) ==
808 CMI_CPE_CORE_SERVICE_ID)) {
809 if (CMI_GET_OPCODE(cmd) ==
810 CPE_CORE_SVC_CMD_CLK_FREQ_REQUEST)
811 return true;
812 }
813
814 return false;
815}
816
817static enum cpe_svc_result cpe_process_clk_change_req(
818 struct cpe_info *t_info)
819{
820 struct cmi_core_svc_cmd_clk_freq_request *req;
821
822 req = (struct cmi_core_svc_cmd_clk_freq_request *)
823 CMI_GET_PAYLOAD(t_info->tgt->outbox);
824
825 if (!cpe_d.cpe_change_freq_plan_cb) {
826 pr_err("%s: No support for clk freq change\n",
827 __func__);
828 return CPE_SVC_FAILED;
829 }
830
831 cpe_d.cpe_change_freq_plan_cb(cpe_d.cdc_priv,
832 req->clk_freq);
833
834 /*send a basic response*/
835 cpe_send_msg_to_inbox(t_info,
836 CPE_CMI_BASIC_RSP_OPCODE, NULL);
837
838 return CPE_SVC_SUCCESS;
839}
840
841static void cpe_process_irq_int(u32 irq,
842 struct cpe_info *t_info)
843{
844 struct cpe_command_node temp_node;
845 struct cpe_send_msg *m;
846 u8 size = 0;
847 bool err_irq = false;
848 struct cmi_hdr *hdr;
849
850 pr_debug("%s: irq = %u\n", __func__, irq);
851
852 if (!t_info) {
853 pr_err("%s: Invalid handle\n",
854 __func__);
855 return;
856 }
857
858 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
859 switch (irq) {
860 case CPE_IRQ_OUTBOX_IRQ:
861 size = t_info->tgt->tgt_get_cpe_info()->outbox_size;
862 t_info->tgt->tgt_read_mailbox(t_info->tgt->outbox, size);
863 break;
864
865 case CPE_IRQ_MEM_ACCESS_ERROR:
866 err_irq = true;
867 cpe_change_state(t_info, CPE_STATE_OFFLINE, CPE_SS_IDLE);
868 break;
869
870 case CPE_IRQ_WDOG_BITE:
871 case CPE_IRQ_RCO_WDOG_INT:
872 err_irq = true;
873 __cpe_svc_shutdown(t_info);
874 break;
875
876 case CPE_IRQ_FLL_LOCK_LOST:
877 default:
878 err_irq = true;
879 break;
880 }
881
882 if (err_irq) {
883 pr_err("%s: CPE error IRQ %u occurred\n",
884 __func__, irq);
885 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
886 return;
887 }
888
889 switch (t_info->state) {
890 case CPE_STATE_BOOTING:
891
892 switch (t_info->substate) {
893 case CPE_SS_BOOT:
894 temp_node.command = CPE_CMD_BOOT_INITIALIZE;
895 temp_node.result = CPE_SVC_SUCCESS;
896 t_info->substate = CPE_SS_BOOT_INIT;
897 t_info->cpe_process_command(&temp_node);
898 break;
899
900 case CPE_SS_BOOT_INIT:
901 temp_node.command = CPE_CMD_BOOT_COMPLETE;
902 temp_node.result = CPE_SVC_SUCCESS;
903 t_info->substate = CPE_SS_ONLINE;
904 t_info->cpe_process_command(&temp_node);
905 break;
906
907 default:
908 pr_debug("%s: unhandled substate %d for state %d\n",
909 __func__, t_info->state, t_info->substate);
910 break;
911 }
912 break;
913
914 case CPE_STATE_SENDING_MSG:
915 hdr = CMI_GET_HEADER(t_info->tgt->outbox);
916 if (CMI_GET_OPCODE(t_info->tgt->outbox) ==
917 CPE_LSM_SESSION_EVENT_DETECTION_STATUS_V2) {
918 pr_debug("%s: session_id: %u, state: %d,%d, event received\n",
919 __func__, CMI_HDR_GET_SESSION_ID(hdr),
920 t_info->state, t_info->substate);
921 temp_node.command = CPE_CMD_PROC_INCOMING_MSG;
922 temp_node.data = NULL;
923 t_info->cpe_process_command(&temp_node);
924 break;
925 }
926
927 m = (struct cpe_send_msg *)t_info->pending;
928
929 switch (t_info->substate) {
930 case CPE_SS_MSG_REQUEST_ACCESS:
931 cpe_send_cmd_to_thread(t_info,
932 CPE_CMD_SEND_TRANS_MSG, m, true);
933 break;
934
935 case CPE_SS_MSG_SEND_INBOX:
936 if (cpe_is_cmd_clk_req(t_info->tgt->outbox))
937 cpe_process_clk_change_req(t_info);
938 else
939 cpe_send_cmd_to_thread(t_info,
940 CPE_CMD_SEND_MSG_COMPLETE, m, true);
941 break;
942
943 default:
944 pr_debug("%s: unhandled substate %d for state %d\n",
945 __func__, t_info->state, t_info->substate);
946 break;
947 }
948 break;
949
950 case CPE_STATE_IDLE:
951 pr_debug("%s: Message received, notifying client\n",
952 __func__);
953 temp_node.command = CPE_CMD_PROC_INCOMING_MSG;
954 temp_node.data = NULL;
955 t_info->cpe_process_command(&temp_node);
956 break;
957
958 default:
959 pr_debug("%s: unhandled state %d\n",
960 __func__, t_info->state);
961 break;
962 }
963
964 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
965}
966
967
968static void broacast_boot_failed(void)
969{
970 struct cpe_info *t_info = cpe_d.cpe_default_handle;
971 struct cpe_svc_notification payload;
972
973 payload.event = CPE_SVC_BOOT_FAILED;
974 payload.result = CPE_SVC_FAILED;
975 payload.payload = NULL;
976 if (t_info)
977 payload.private_data =
978 t_info->client_context;
979 cpe_broadcast_notification(t_info, &payload);
980}
981
982static enum cpe_svc_result broadcast_boot_event(
983 struct cpe_info *t_info)
984{
985 struct cpe_svc_notification payload;
986
987 payload.event = CPE_SVC_ONLINE;
988 payload.result = CPE_SVC_SUCCESS;
989 payload.payload = NULL;
990 if (t_info)
991 payload.private_data =
992 t_info->client_context;
993 cpe_broadcast_notification(t_info, &payload);
994
995 return CPE_SVC_SUCCESS;
996}
997
998static enum cpe_process_result cpe_boot_initialize(struct cpe_info *t_info,
999 enum cpe_svc_result *cpe_rc)
1000{
1001 enum cpe_process_result rc = CPE_SVC_FAILED;
1002 struct cpe_svc_notification payload;
1003 struct cmi_core_svc_event_system_boot *p = NULL;
1004
1005 if (CMI_GET_OPCODE(t_info->tgt->outbox) !=
1006 CPE_CORE_SVC_EVENT_SYSTEM_BOOT) {
1007 broacast_boot_failed();
1008 return rc;
1009 }
1010
1011 p = (struct cmi_core_svc_event_system_boot *)
1012 CMI_GET_PAYLOAD(t_info->tgt->outbox);
1013 if (p->status != CPE_BOOT_SUCCESS) {
1014 pr_err("%s: cpe boot failed, status = %d\n",
1015 __func__, p->status);
1016 broacast_boot_failed();
1017 return rc;
1018 }
1019
1020 /* boot was successful */
1021 if (p->version ==
1022 CPE_CORE_VERSION_SYSTEM_BOOT_EVENT) {
1023 cpe_d.cpe_debug_vector.debug_address =
1024 p->sfr_buff_address;
1025 cpe_d.cpe_debug_vector.debug_buffer_size =
1026 p->sfr_buff_size;
1027 cpe_d.cpe_debug_vector.status = p->status;
1028 payload.event = CPE_SVC_BOOT;
1029 payload.result = CPE_SVC_SUCCESS;
1030 payload.payload = (void *)&cpe_d.cpe_debug_vector;
1031 payload.private_data = t_info->client_context;
1032 cpe_broadcast_notification(t_info, &payload);
1033 }
1034 cpe_change_state(t_info, CPE_STATE_BOOTING,
1035 CPE_SS_BOOT_INIT);
1036 (*cpe_rc) = cpe_send_msg_to_inbox(t_info,
1037 CPE_CORE_SVC_CMD_SHARED_MEM_ALLOC, NULL);
1038 rc = CPE_PROC_SUCCESS;
1039 return rc;
1040}
1041
1042static void cpe_svc_core_cmi_handler(
1043 const struct cmi_api_notification *parameter)
1044{
1045 struct cmi_hdr *hdr;
1046
1047 if (!parameter)
1048 return;
1049
1050 pr_debug("%s: event = %d\n",
1051 __func__, parameter->event);
1052
1053 if (parameter->event != CMI_API_MSG)
1054 return;
1055
1056 hdr = (struct cmi_hdr *) parameter->message;
1057
1058 if (hdr->opcode == CPE_CMI_BASIC_RSP_OPCODE) {
1059 struct cmi_basic_rsp_result *result;
1060
1061 result = (struct cmi_basic_rsp_result *)
1062 ((u8 *)parameter->message) + (sizeof(*hdr));
1063 if (result->status)
1064 pr_err("%s: error response, error code = %u\n",
1065 __func__, result->status);
1066 complete(&cpe_d.cpe_default_handle->core_svc_cmd_compl);
1067 }
1068}
1069
1070static void cpe_clk_plan_work(struct work_struct *work)
1071{
1072 struct cpe_info *t_info = NULL;
1073 size_t size = 0;
1074 struct cpe_svc_cfg_clk_plan plan;
1075 u8 *cmi_msg;
1076 struct cmi_hdr *hdr;
1077 int rc;
1078
1079 t_info = container_of(work, struct cpe_info, clk_plan_work);
1080 if (!t_info) {
1081 pr_err("%s: Invalid handle for cpe_info\n",
1082 __func__);
1083 return;
1084 }
1085
1086 /* Register the core service */
1087 cpe_d.cpe_cmi_handle = cmi_register(
1088 cpe_svc_core_cmi_handler,
1089 CMI_CPE_CORE_SERVICE_ID);
1090
1091 /* send the clk plan command */
1092 if (!cpe_d.cpe_query_freq_plans_cb) {
1093 pr_err("%s: No support for querying clk plans\n",
1094 __func__);
1095 return;
1096 }
1097
1098 cpe_d.cpe_query_freq_plans_cb(cpe_d.cdc_priv, &plan);
1099 size = sizeof(plan.current_clk_feq) +
1100 sizeof(plan.num_clk_freqs);
1101 size += plan.num_clk_freqs *
1102 sizeof(plan.clk_freqs[0]);
1103 cmi_msg = kzalloc(size + sizeof(struct cmi_hdr),
1104 GFP_KERNEL);
1105 if (!cmi_msg)
1106 return;
1107
1108 hdr = (struct cmi_hdr *) cmi_msg;
1109 CMI_HDR_SET_OPCODE(hdr,
1110 CPE_CORE_SVC_CMD_CFG_CLK_PLAN);
1111 CMI_HDR_SET_SERVICE(hdr, CMI_CPE_CORE_SERVICE_ID);
1112 CMI_HDR_SET_SESSION(hdr, 1);
1113 CMI_HDR_SET_VERSION(hdr, CMI_DRIVER_SUPPORTED_VERSION);
1114 CMI_HDR_SET_PAYLOAD_SIZE(hdr, size);
1115 memcpy(CMI_GET_PAYLOAD(cmi_msg), &plan,
1116 size);
1117 cmi_send_msg(cmi_msg);
1118
1119 /* Wait for clk plan command to complete */
1120 rc = wait_for_completion_timeout(&t_info->core_svc_cmd_compl,
1121 (10 * HZ));
1122 if (!rc) {
1123 pr_err("%s: clk plan cmd timed out\n",
1124 __func__);
1125 goto cmd_fail;
1126 }
1127
1128 /* clk plan cmd is successful, send start notification */
1129 if (t_info->cpe_start_notification)
1130 t_info->cpe_start_notification(t_info);
1131 else
1132 pr_err("%s: no start notification\n",
1133 __func__);
1134
1135cmd_fail:
1136 kfree(cmi_msg);
1137 cmi_deregister(cpe_d.cpe_cmi_handle);
1138}
1139
1140static enum cpe_process_result cpe_boot_complete(
1141 struct cpe_info *t_info)
1142{
1143 struct cmi_core_svc_cmdrsp_shared_mem_alloc *p = NULL;
1144
1145 if (CMI_GET_OPCODE(t_info->tgt->outbox) !=
1146 CPE_CORE_SVC_CMDRSP_SHARED_MEM_ALLOC) {
1147 broacast_boot_failed();
1148 return CPE_PROC_FAILED;
1149 }
1150
1151 p = (struct cmi_core_svc_cmdrsp_shared_mem_alloc *)
1152 CMI_GET_PAYLOAD(t_info->tgt->outbox);
1153 cpe_d.cpe_msg_buffer = p->addr;
1154
1155 if (cpe_d.cpe_msg_buffer == 0) {
1156 pr_err("%s: Invalid cpe buffer for message\n",
1157 __func__);
1158 broacast_boot_failed();
1159 return CPE_PROC_FAILED;
1160 }
1161
1162 cpe_change_state(t_info, CPE_STATE_IDLE, CPE_SS_IDLE);
1163 cpe_create_worker_thread(t_info);
1164
1165 if (t_info->codec_id != CPE_SVC_CODEC_TOMTOM) {
1166 schedule_work(&t_info->clk_plan_work);
1167 } else {
1168 if (t_info->cpe_start_notification)
1169 t_info->cpe_start_notification(t_info);
1170 else
1171 pr_err("%s: no start notification\n",
1172 __func__);
1173 }
1174
1175 pr_debug("%s: boot complete\n", __func__);
1176 return CPE_SVC_SUCCESS;
1177}
1178
1179static enum cpe_process_result cpe_process_send_msg(
1180 struct cpe_info *t_info,
1181 enum cpe_svc_result *cpe_rc,
1182 struct cpe_command_node *command_node)
1183{
1184 enum cpe_process_result rc = CPE_PROC_SUCCESS;
1185 struct cpe_send_msg *m =
1186 (struct cpe_send_msg *)command_node->data;
1187 u32 size = m->size;
1188
1189 if (t_info->pending) {
1190 pr_debug("%s: message queued\n", __func__);
1191 *cpe_rc = CPE_SVC_SUCCESS;
1192 return CPE_PROC_QUEUED;
1193 }
1194
1195 pr_debug("%s: Send CMI message, size = %u\n",
1196 __func__, size);
1197
1198 if (size <= t_info->tgt->tgt_get_cpe_info()->inbox_size) {
1199 pr_debug("%s: Msg fits mailbox, size %u\n",
1200 __func__, size);
1201 cpe_change_state(t_info, CPE_STATE_SENDING_MSG,
1202 CPE_SS_MSG_SEND_INBOX);
1203 t_info->pending = m;
1204 *cpe_rc = cpe_send_msg_to_inbox(t_info, 0, m);
1205 } else if (size < CPE_MSG_BUFFER_SIZE) {
1206 m->address = cpe_d.cpe_msg_buffer;
1207 pr_debug("%s: Message req CMI mem access\n",
1208 __func__);
1209 t_info->pending = m;
1210 cpe_change_state(t_info, CPE_STATE_SENDING_MSG,
1211 CPE_SS_MSG_REQUEST_ACCESS);
1212 *cpe_rc = cpe_send_msg_to_inbox(t_info,
1213 CPE_CORE_SVC_CMD_DRAM_ACCESS_REQ, m);
1214 } else {
1215 pr_debug("%s: Invalid msg size %u\n",
1216 __func__, size);
1217 cpe_command_cleanup(command_node);
1218 rc = CPE_PROC_FAILED;
1219 cpe_change_state(t_info, CPE_STATE_IDLE,
1220 CPE_SS_IDLE);
1221 }
1222
1223 return rc;
1224}
1225
1226static enum cpe_process_result cpe_process_incoming(
1227 struct cpe_info *t_info)
1228{
1229 enum cpe_process_result rc = CPE_PROC_FAILED;
1230 struct cmi_hdr *hdr;
1231
1232 hdr = CMI_GET_HEADER(t_info->tgt->outbox);
1233
1234 if (CMI_HDR_GET_SERVICE(hdr) ==
1235 CMI_CPE_CORE_SERVICE_ID) {
1236 pr_debug("%s: core service message received\n",
1237 __func__);
1238
1239 switch (CMI_GET_OPCODE(t_info->tgt->outbox)) {
1240 case CPE_CORE_SVC_CMD_CLK_FREQ_REQUEST:
1241 cpe_process_clk_change_req(t_info);
1242 rc = CPE_PROC_SUCCESS;
1243 break;
1244 case CMI_MSG_TRANSPORT:
1245 pr_debug("%s: transport msg received\n",
1246 __func__);
1247 rc = CPE_PROC_SUCCESS;
1248 break;
1249 case CPE_CMI_BASIC_RSP_OPCODE:
1250 pr_debug("%s: received basic rsp\n",
1251 __func__);
1252 rc = CPE_PROC_SUCCESS;
1253 break;
1254 default:
1255 pr_debug("%s: unknown message received\n",
1256 __func__);
1257 break;
1258 }
1259 } else {
1260 /* if service id if for a CMI client, notify client */
1261 pr_debug("%s: Message received, notifying client\n",
1262 __func__);
1263 cpe_notify_cmi_client(t_info,
1264 t_info->tgt->outbox, CPE_SVC_SUCCESS);
1265 rc = CPE_PROC_SUCCESS;
1266 }
1267
1268 return rc;
1269}
1270
1271static enum cpe_process_result cpe_process_kill_thread(
1272 struct cpe_info *t_info,
1273 struct cpe_command_node *command_node)
1274{
1275 struct cpe_svc_notification payload;
1276
1277 cpe_d.cpe_msg_buffer = 0;
1278 payload.result = CPE_SVC_SHUTTING_DOWN;
1279 payload.event = CPE_SVC_OFFLINE;
1280 payload.payload = NULL;
1281 payload.private_data = t_info->client_context;
1282 /*
1283 * Make state as offline before broadcasting
1284 * the message to clients.
1285 */
1286 cpe_change_state(t_info, CPE_STATE_OFFLINE,
1287 CPE_SS_IDLE);
1288 cpe_broadcast_notification(t_info, &payload);
1289
1290 return CPE_PROC_KILLED;
1291}
1292
/*
 * cpe_mt_process_cmd - worker-thread dispatcher for queued CPE commands.
 *
 * Validates the command against the current state machine state, then
 * executes it: boot phases, message send (inbox or DRAM staged),
 * send-complete notification, incoming message processing and thread
 * kill. On any command failure the pending message's client is
 * notified with CPE_SVC_FAILED and the state machine is forced back
 * to IDLE.
 *
 * Returns a cpe_process_result; CPE_PROC_KILLED tells the worker
 * thread to exit.
 */
static enum cpe_process_result cpe_mt_process_cmd(
		struct cpe_command_node *command_node)
{
	struct cpe_info *t_info = cpe_d.cpe_default_handle;
	enum cpe_svc_result cpe_rc = CPE_SVC_SUCCESS;
	enum cpe_process_result rc = CPE_PROC_SUCCESS;
	struct cpe_send_msg *m;
	struct cmi_hdr *hdr;
	u8 service = 0;
	u8 retries = 0;

	if (!t_info || !command_node) {
		pr_err("%s: Invalid handle/command node\n",
			__func__);
		return CPE_PROC_FAILED;
	}

	pr_debug("%s: cmd = %u\n", __func__, command_node->command);

	/* Reject commands that are illegal in the current state */
	cpe_rc = cpe_is_command_valid(t_info, command_node->command);

	if (cpe_rc != CPE_SVC_SUCCESS) {
		pr_err("%s: Invalid command %d, err = %d\n",
			__func__, command_node->command, cpe_rc);
		return CPE_PROC_FAILED;
	}

	switch (command_node->command) {

	case CPE_CMD_BOOT_INITIALIZE:
		rc = cpe_boot_initialize(t_info, &cpe_rc);
		break;

	case CPE_CMD_BOOT_COMPLETE:
		rc = cpe_boot_complete(t_info);
		break;

	case CPE_CMD_SEND_MSG:
		rc = cpe_process_send_msg(t_info, &cpe_rc,
					  command_node);
		break;

	case CPE_CMD_SEND_TRANS_MSG:
		/*
		 * Transport messages must go out while the CPE core is
		 * inactive; poll (bounded) until it goes quiet, then
		 * write directly to the inbox.
		 */
		m = (struct cpe_send_msg *)command_node->data;

		while (retries < CPE_SVC_INACTIVE_STATE_RETRIES_MAX) {
			if (t_info->tgt->tgt_is_active()) {
				++retries;
				/* Wait for CPE to be inactive */
				usleep_range(5000, 5100);
			} else {
				break;
			}
		}

		pr_debug("%s: cpe inactive after %d attempts\n",
			 __func__, retries);

		cpe_change_state(t_info, CPE_STATE_SENDING_MSG,
				CPE_SS_MSG_SEND_INBOX);
		rc = cpe_send_msg_to_inbox(t_info, 0, m);
		break;

	case CPE_CMD_SEND_MSG_COMPLETE:
		/* Previous send finished; clear pending state and notify */
		hdr = CMI_GET_HEADER(t_info->tgt->outbox);
		service = CMI_HDR_GET_SERVICE(hdr);
		pr_debug("%s: msg send success, notifying clients\n",
			 __func__);
		cpe_command_cleanup(command_node);
		t_info->pending = NULL;
		cpe_change_state(t_info,
				 CPE_STATE_IDLE, CPE_SS_IDLE);
		cpe_notify_cmi_client(t_info,
			t_info->tgt->outbox, CPE_SVC_SUCCESS);
		break;

	case CPE_CMD_PROC_INCOMING_MSG:
		rc = cpe_process_incoming(t_info);
		break;

	case CPE_CMD_KILL_THREAD:
		rc = cpe_process_kill_thread(t_info, command_node);
		break;

	default:
		pr_err("%s: unhandled cpe cmd = %d\n",
			__func__, command_node->command);
		break;
	}

	if (cpe_rc != CPE_SVC_SUCCESS) {
		/* Command failed: fail the pending client and go idle */
		pr_err("%s: failed to execute command\n", __func__);
		if (t_info->pending) {
			m = (struct cpe_send_msg *)t_info->pending;
			cpe_notify_cmi_client(t_info, m->payload,
					      CPE_SVC_FAILED);
			t_info->pending = NULL;
		}

		cpe_command_cleanup(command_node);
		rc = CPE_PROC_FAILED;
		cpe_change_state(t_info, CPE_STATE_IDLE,
				 CPE_SS_IDLE);
	}

	return rc;
}
1400
1401static enum cpe_svc_result cpe_mt_validate_cmd(
1402 const struct cpe_info *t_info,
1403 enum cpe_command command)
1404{
1405 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
1406
1407 if ((t_info == NULL) || t_info->initialized == false) {
1408 pr_err("%s: cpe service is not ready\n",
1409 __func__);
1410 return CPE_SVC_NOT_READY;
1411 }
1412
1413 switch (t_info->state) {
1414 case CPE_STATE_UNINITIALIZED:
1415 case CPE_STATE_INITIALIZED:
1416 switch (command) {
1417 case CPE_CMD_RESET:
1418 case CPE_CMD_DL_SEGMENT:
1419 case CPE_CMD_RAMDUMP:
1420 case CPE_CMD_PROCESS_IRQ:
1421 case CPE_CMD_KILL_THREAD:
1422 case CPE_CMD_DEINITIALIZE:
1423 case CPE_CMD_FTM_TEST:
1424 rc = CPE_SVC_SUCCESS;
1425 break;
1426 default:
1427 rc = CPE_SVC_NOT_READY;
1428 break;
1429 }
1430 break;
1431
1432 case CPE_STATE_DOWNLOADING:
1433 switch (command) {
1434 case CPE_CMD_RESET:
1435 case CPE_CMD_DL_SEGMENT:
1436 case CPE_CMD_BOOT:
1437 case CPE_CMD_FTM_TEST:
1438 rc = CPE_SVC_SUCCESS;
1439 break;
1440 default:
1441 rc = CPE_SVC_NOT_READY;
1442 break;
1443 }
1444 break;
1445
1446 case CPE_STATE_BOOTING:
1447 switch (command) {
1448 case CPE_CMD_PROCESS_IRQ:
1449 case CPE_CMD_BOOT_INITIALIZE:
1450 case CPE_CMD_BOOT_COMPLETE:
1451 case CPE_CMD_SHUTDOWN:
1452 rc = CPE_SVC_SUCCESS;
1453 break;
1454 case CPE_CMD_FTM_TEST:
1455 rc = CPE_SVC_BUSY;
1456 break;
1457 default:
1458 rc = CPE_SVC_NOT_READY;
1459 break;
1460 }
1461 break;
1462
1463 case CPE_STATE_IDLE:
1464 switch (command) {
1465 case CPE_CMD_SEND_MSG:
1466 case CPE_CMD_SEND_TRANS_MSG:
1467 case CPE_CMD_SEND_MSG_COMPLETE:
1468 case CPE_CMD_PROCESS_IRQ:
1469 case CPE_CMD_RESET:
1470 case CPE_CMD_SHUTDOWN:
1471 case CPE_CMD_KILL_THREAD:
1472 case CPE_CMD_PROC_INCOMING_MSG:
1473 rc = CPE_SVC_SUCCESS;
1474 break;
1475 case CPE_CMD_FTM_TEST:
1476 rc = CPE_SVC_BUSY;
1477 break;
1478 default:
1479 rc = CPE_SVC_FAILED;
1480 break;
1481 }
1482 break;
1483
1484 case CPE_STATE_SENDING_MSG:
1485 switch (command) {
1486 case CPE_CMD_SEND_MSG:
1487 case CPE_CMD_SEND_TRANS_MSG:
1488 case CPE_CMD_SEND_MSG_COMPLETE:
1489 case CPE_CMD_PROCESS_IRQ:
1490 case CPE_CMD_SHUTDOWN:
1491 case CPE_CMD_KILL_THREAD:
1492 case CPE_CMD_PROC_INCOMING_MSG:
1493 rc = CPE_SVC_SUCCESS;
1494 break;
1495 case CPE_CMD_FTM_TEST:
1496 rc = CPE_SVC_BUSY;
1497 break;
1498 default:
1499 rc = CPE_SVC_FAILED;
1500 break;
1501 }
1502 break;
1503
1504 case CPE_STATE_OFFLINE:
1505 switch (command) {
1506 case CPE_CMD_RESET:
1507 case CPE_CMD_RAMDUMP:
1508 case CPE_CMD_KILL_THREAD:
1509 rc = CPE_SVC_SUCCESS;
1510 break;
1511 default:
1512 rc = CPE_SVC_NOT_READY;
1513 break;
1514 }
1515 break;
1516
1517 default:
1518 pr_debug("%s: unhandled state %d\n",
1519 __func__, t_info->state);
1520 break;
1521 }
1522
1523 if (rc != CPE_SVC_SUCCESS)
1524 pr_err("%s: invalid command %d, state = %d\n",
1525 __func__, command, t_info->state);
1526 return rc;
1527}
1528
1529void *cpe_svc_initialize(
1530 void irq_control_callback(u32 enable),
1531 const void *codec_info, void *context)
1532{
1533 struct cpe_info *t_info = NULL;
1534 const struct cpe_svc_hw_cfg *cap = NULL;
1535 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
1536 struct cpe_svc_init_param *init_context =
1537 (struct cpe_svc_init_param *) context;
1538 void *client_context = NULL;
1539
1540 if (cpe_d.cpe_default_handle &&
1541 cpe_d.cpe_default_handle->initialized == true)
1542 return (void *)cpe_d.cpe_default_handle;
1543 cpe_d.cpe_query_freq_plans_cb = NULL;
1544 cpe_d.cpe_change_freq_plan_cb = NULL;
1545
1546 if (context) {
1547 client_context = init_context->context;
1548 switch (init_context->version) {
1549 case CPE_SVC_INIT_PARAM_V1:
1550 cpe_d.cpe_query_freq_plans_cb =
1551 init_context->query_freq_plans_cb;
1552 cpe_d.cpe_change_freq_plan_cb =
1553 init_context->change_freq_plan_cb;
1554 break;
1555 default:
1556 break;
1557 }
1558 }
1559
1560 if (!cpe_d.cpe_default_handle) {
1561 cpe_d.cpe_default_handle = kzalloc(sizeof(struct cpe_info),
1562 GFP_KERNEL);
1563 if (!cpe_d.cpe_default_handle)
1564 goto err_register;
1565
1566 memset(cpe_d.cpe_default_handle, 0,
1567 sizeof(struct cpe_info));
1568 }
1569
1570 t_info = cpe_d.cpe_default_handle;
1571 t_info->client_context = client_context;
1572
1573 INIT_LIST_HEAD(&t_info->client_list);
1574 cpe_d.cdc_priv = client_context;
1575 INIT_WORK(&t_info->clk_plan_work, cpe_clk_plan_work);
1576 init_completion(&t_info->core_svc_cmd_compl);
1577
1578 t_info->tgt = kzalloc(sizeof(struct cpe_svc_tgt_abstraction),
1579 GFP_KERNEL);
1580 if (!t_info->tgt)
1581 goto err_tgt_alloc;
1582 t_info->codec_id =
1583 ((struct cpe_svc_codec_info_v1 *) codec_info)->id;
1584
1585 rc = cpe_svc_tgt_init((struct cpe_svc_codec_info_v1 *)codec_info,
1586 t_info->tgt);
1587
1588 if (rc != CPE_SVC_SUCCESS)
1589 goto err_tgt_init;
1590
1591 cap = t_info->tgt->tgt_get_cpe_info();
1592
1593 memset(t_info->tgt->outbox, 0, cap->outbox_size);
1594 memset(t_info->tgt->inbox, 0, cap->inbox_size);
1595 mutex_init(&t_info->msg_lock);
1596 cpe_d.cpe_irq_control_callback = irq_control_callback;
1597 t_info->cpe_process_command = cpe_mt_process_cmd;
1598 t_info->cpe_cmd_validate = cpe_mt_validate_cmd;
1599 t_info->cpe_start_notification = broadcast_boot_event;
1600 mutex_init(&cpe_d.cpe_api_mutex);
1601 mutex_init(&cpe_d.cpe_svc_lock);
1602 pr_debug("%s: cpe services initialized\n", __func__);
1603 t_info->state = CPE_STATE_INITIALIZED;
1604 t_info->initialized = true;
1605
1606 return t_info;
1607
1608err_tgt_init:
1609 kfree(t_info->tgt);
1610
1611err_tgt_alloc:
1612 kfree(cpe_d.cpe_default_handle);
1613 cpe_d.cpe_default_handle = NULL;
1614
1615err_register:
1616 return NULL;
1617}
1618
1619enum cpe_svc_result cpe_svc_deinitialize(void *cpe_handle)
1620{
1621 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
1622 struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
1623
1624 if (!t_info)
1625 t_info = cpe_d.cpe_default_handle;
1626
1627 rc = cpe_is_command_valid(t_info, CPE_CMD_DEINITIALIZE);
1628
1629 if (rc != CPE_SVC_SUCCESS) {
1630 pr_err("%s: Invalid command %d\n",
1631 __func__, CPE_CMD_DEINITIALIZE);
1632 return rc;
1633 }
1634
1635 if (cpe_d.cpe_default_handle == t_info)
1636 cpe_d.cpe_default_handle = NULL;
1637
1638 t_info->tgt->tgt_deinit(t_info->tgt);
1639 cpe_change_state(t_info, CPE_STATE_UNINITIALIZED,
1640 CPE_SS_IDLE);
1641 mutex_destroy(&t_info->msg_lock);
1642 kfree(t_info->tgt);
1643 kfree(t_info);
1644 mutex_destroy(&cpe_d.cpe_api_mutex);
1645 mutex_destroy(&cpe_d.cpe_svc_lock);
1646
1647 return rc;
1648}
1649
1650void *cpe_svc_register(void *cpe_handle,
1651 void (*notification_callback)
1652 (const struct cpe_svc_notification *parameter),
1653 u32 mask, const char *name)
1654{
1655 void *reg_handle;
1656
1657 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1658 if (!cpe_d.cpe_default_handle) {
1659 cpe_d.cpe_default_handle = kzalloc(sizeof(struct cpe_info),
1660 GFP_KERNEL);
1661 if (!cpe_d.cpe_default_handle) {
1662 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1663 return NULL;
1664 }
1665
1666 memset(cpe_d.cpe_default_handle, 0,
1667 sizeof(struct cpe_info));
1668 }
1669
1670 if (!cpe_handle)
1671 cpe_handle = cpe_d.cpe_default_handle;
1672
1673 reg_handle = cpe_register_generic((struct cpe_info *)cpe_handle,
1674 notification_callback,
1675 NULL,
1676 mask, CPE_NO_SERVICE, name);
1677 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1678
1679 return reg_handle;
1680}
1681
1682enum cpe_svc_result cpe_svc_deregister(void *cpe_handle, void *reg_handle)
1683{
1684 enum cpe_svc_result rc;
1685
1686 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1687 if (!cpe_handle)
1688 cpe_handle = cpe_d.cpe_default_handle;
1689
1690 rc = cpe_deregister_generic((struct cpe_info *)cpe_handle,
1691 reg_handle);
1692 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1693
1694 return rc;
1695}
1696
1697enum cpe_svc_result cpe_svc_download_segment(void *cpe_handle,
1698 const struct cpe_svc_mem_segment *segment)
1699{
1700 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
1701 struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
1702
1703 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1704 if (!t_info)
1705 t_info = cpe_d.cpe_default_handle;
1706
1707 rc = cpe_is_command_valid(t_info, CPE_CMD_DL_SEGMENT);
1708
1709 if (rc != CPE_SVC_SUCCESS) {
1710 pr_err("%s: cmd validation fail, cmd = %d\n",
1711 __func__, CPE_CMD_DL_SEGMENT);
1712 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1713 return rc;
1714 }
1715
1716 cpe_toggle_irq_notification(t_info, false);
1717 t_info->state = CPE_STATE_DOWNLOADING;
1718 t_info->substate = CPE_SS_DL_DOWNLOADING;
1719 rc = t_info->tgt->tgt_write_ram(t_info, segment);
1720 cpe_toggle_irq_notification(t_info, true);
1721 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1722
1723 return rc;
1724}
1725
1726enum cpe_svc_result cpe_svc_boot(void *cpe_handle, int debug_mode)
1727{
1728 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
1729 struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
1730
1731 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1732 if (!t_info)
1733 t_info = cpe_d.cpe_default_handle;
1734
1735 rc = cpe_is_command_valid(t_info, CPE_CMD_BOOT);
1736
1737 if (rc != CPE_SVC_SUCCESS) {
1738 pr_err("%s: cmd validation fail, cmd = %d\n",
1739 __func__, CPE_CMD_BOOT);
1740 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1741 return rc;
1742 }
1743
1744 if (rc == CPE_SVC_SUCCESS) {
1745 t_info->tgt->tgt_boot(debug_mode);
1746 t_info->state = CPE_STATE_BOOTING;
1747 t_info->substate = CPE_SS_BOOT;
1748 pr_debug("%s: cpe service booting\n",
1749 __func__);
1750 }
1751
1752 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1753 return rc;
1754}
1755
1756enum cpe_svc_result cpe_svc_process_irq(void *cpe_handle, u32 cpe_irq)
1757{
1758 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
1759 struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
1760
1761 if (!t_info)
1762 t_info = cpe_d.cpe_default_handle;
1763
1764 cpe_toggle_irq_notification(t_info, false);
1765 cpe_process_irq_int(cpe_irq, t_info);
1766 cpe_toggle_irq_notification(t_info, true);
1767
1768 return rc;
1769}
1770
1771enum cpe_svc_result cpe_svc_route_notification(void *cpe_handle,
1772 enum cpe_svc_module module, enum cpe_svc_route_dest dest)
1773{
1774 struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
1775 enum cpe_svc_result rc = CPE_SVC_NOT_READY;
1776
1777 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1778 if (!t_info)
1779 t_info = cpe_d.cpe_default_handle;
1780
1781 if (t_info->tgt)
1782 rc = t_info->tgt->tgt_route_notification(module, dest);
1783
1784 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1785 return rc;
1786}
1787
1788static enum cpe_svc_result __cpe_svc_shutdown(void *cpe_handle)
1789{
1790 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
1791 struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
1792 struct cpe_command_node *n = NULL;
1793 struct cpe_command_node kill_cmd;
1794
1795 if (!t_info)
1796 t_info = cpe_d.cpe_default_handle;
1797
1798 rc = cpe_is_command_valid(t_info, CPE_CMD_SHUTDOWN);
1799
1800 if (rc != CPE_SVC_SUCCESS) {
1801 pr_err("%s: cmd validation fail, cmd = %d\n",
1802 __func__, CPE_CMD_SHUTDOWN);
1803 return rc;
1804 }
1805
1806 while (!list_empty(&t_info->main_queue)) {
1807 n = list_first_entry(&t_info->main_queue,
1808 struct cpe_command_node, list);
1809
1810 if (n->command == CPE_CMD_SEND_MSG) {
1811 cpe_notify_cmi_client(t_info, (u8 *)n->data,
1812 CPE_SVC_SHUTTING_DOWN);
1813 }
1814 /*
1815 * Since command cannot be processed,
1816 * delete it from the list and perform cleanup
1817 */
1818 list_del(&n->list);
1819 cpe_command_cleanup(n);
1820 kfree(n);
1821 }
1822
1823 pr_debug("%s: cpe service OFFLINE state\n", __func__);
1824
1825 t_info->state = CPE_STATE_OFFLINE;
1826 t_info->substate = CPE_SS_IDLE;
1827
1828 memset(&kill_cmd, 0, sizeof(kill_cmd));
1829 kill_cmd.command = CPE_CMD_KILL_THREAD;
1830
1831 if (t_info->pending) {
1832 struct cpe_send_msg *m =
1833 (struct cpe_send_msg *)t_info->pending;
1834 cpe_notify_cmi_client(t_info, m->payload,
1835 CPE_SVC_SHUTTING_DOWN);
1836 kfree(t_info->pending);
1837 t_info->pending = NULL;
1838 }
1839
1840 cpe_cleanup_worker_thread(t_info);
1841 t_info->cpe_process_command(&kill_cmd);
1842
1843 return rc;
1844}
1845
1846enum cpe_svc_result cpe_svc_shutdown(void *cpe_handle)
1847{
1848 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
1849
1850 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1851 rc = __cpe_svc_shutdown(cpe_handle);
1852 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1853 return rc;
1854}
1855
1856enum cpe_svc_result cpe_svc_reset(void *cpe_handle)
1857{
1858 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
1859 struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
1860
1861 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1862 if (!t_info)
1863 t_info = cpe_d.cpe_default_handle;
1864
1865 rc = cpe_is_command_valid(t_info, CPE_CMD_RESET);
1866
1867 if (rc != CPE_SVC_SUCCESS) {
1868 pr_err("%s: cmd validation fail, cmd = %d\n",
1869 __func__, CPE_CMD_RESET);
1870 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1871 return rc;
1872 }
1873
1874 if (t_info && t_info->tgt) {
1875 rc = t_info->tgt->tgt_reset();
1876 pr_debug("%s: cpe services in INITIALIZED state\n",
1877 __func__);
1878 t_info->state = CPE_STATE_INITIALIZED;
1879 t_info->substate = CPE_SS_IDLE;
1880 }
1881 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1882
1883 return rc;
1884}
1885
1886enum cpe_svc_result cpe_svc_ramdump(void *cpe_handle,
1887 struct cpe_svc_mem_segment *buffer)
1888{
1889 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
1890 struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
1891
1892 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1893 if (!t_info)
1894 t_info = cpe_d.cpe_default_handle;
1895
1896 rc = cpe_is_command_valid(t_info, CPE_CMD_RAMDUMP);
1897 if (rc != CPE_SVC_SUCCESS) {
1898 pr_err("%s: cmd validation fail, cmd = %d\n",
1899 __func__, CPE_CMD_RAMDUMP);
1900 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1901 return rc;
1902 }
1903
1904 if (t_info->tgt) {
1905 rc = t_info->tgt->tgt_read_ram(t_info, buffer);
1906 } else {
1907 pr_err("%s: cpe service not ready\n", __func__);
1908 rc = CPE_SVC_NOT_READY;
1909 }
1910 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1911
1912 return rc;
1913}
1914
1915enum cpe_svc_result cpe_svc_set_debug_mode(void *cpe_handle, u32 mode)
1916{
1917 struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
1918 enum cpe_svc_result rc = CPE_SVC_INVALID_HANDLE;
1919
1920 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1921 if (!t_info)
1922 t_info = cpe_d.cpe_default_handle;
1923
1924 if (t_info->tgt)
1925 rc = t_info->tgt->tgt_set_debug_mode(mode);
1926 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1927
1928 return rc;
1929}
1930
1931const struct cpe_svc_hw_cfg *cpe_svc_get_hw_cfg(void *cpe_handle)
1932{
1933 struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
1934
1935 if (!t_info)
1936 t_info = cpe_d.cpe_default_handle;
1937
1938 if (t_info->tgt)
1939 return t_info->tgt->tgt_get_cpe_info();
1940
1941 return NULL;
1942}
1943
1944void *cmi_register(
1945 void notification_callback(
1946 const struct cmi_api_notification *parameter),
1947 u32 service)
1948{
1949 void *reg_handle = NULL;
1950
1951 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1952 reg_handle = cpe_register_generic(cpe_d.cpe_default_handle,
1953 NULL,
1954 notification_callback,
1955 (CPE_SVC_CMI_MSG | CPE_SVC_OFFLINE |
1956 CPE_SVC_ONLINE),
1957 service,
1958 "CMI_CLIENT");
1959 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1960
1961 return reg_handle;
1962}
1963
1964enum cmi_api_result cmi_deregister(void *reg_handle)
1965{
1966 u32 clients = 0;
1967 struct cpe_notif_node *n = NULL;
1968 enum cmi_api_result rc = CMI_API_SUCCESS;
1969 struct cpe_svc_notification payload;
1970
1971 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1972 rc = (enum cmi_api_result) cpe_deregister_generic(
1973 cpe_d.cpe_default_handle, reg_handle);
1974
1975 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
1976 list_for_each_entry(n, &cpe_d.cpe_default_handle->client_list, list) {
1977 if (n->mask & CPE_SVC_CMI_MSG)
1978 clients++;
1979 }
1980 CPE_SVC_REL_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
1981
1982 if (clients == 0) {
1983 payload.event = CPE_SVC_CMI_CLIENTS_DEREG;
1984 payload.payload = NULL;
1985 payload.result = CPE_SVC_SUCCESS;
1986 cpe_broadcast_notification(cpe_d.cpe_default_handle, &payload);
1987 }
1988
1989 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
1990 return rc;
1991}
1992
1993enum cmi_api_result cmi_send_msg(void *message)
1994{
1995 enum cmi_api_result rc = CMI_API_SUCCESS;
1996 struct cpe_send_msg *msg = NULL;
1997 struct cmi_hdr *hdr;
1998
1999 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
2000 hdr = CMI_GET_HEADER(message);
2001 msg = kzalloc(sizeof(struct cpe_send_msg),
2002 GFP_ATOMIC);
2003 if (!msg) {
2004 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
2005 return CPE_SVC_NO_MEMORY;
2006 }
2007
2008 if (CMI_HDR_GET_OBM_FLAG(hdr) == CMI_OBM_FLAG_OUT_BAND)
2009 msg->isobm = 1;
2010 else
2011 msg->isobm = 0;
2012
2013 msg->size = sizeof(struct cmi_hdr) +
2014 CMI_HDR_GET_PAYLOAD_SIZE(hdr);
2015
2016 msg->payload = kzalloc(msg->size, GFP_ATOMIC);
2017 if (!msg->payload) {
2018 kfree(msg);
2019 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
2020 return CPE_SVC_NO_MEMORY;
2021 }
2022
2023 msg->address = 0;
2024 memcpy((void *)msg->payload, message, msg->size);
2025
2026 rc = (enum cmi_api_result) cpe_send_cmd_to_thread(
2027 cpe_d.cpe_default_handle,
2028 CPE_CMD_SEND_MSG,
2029 (void *)msg, false);
2030
2031 if (rc != 0) {
2032 pr_err("%s: Failed to queue message\n", __func__);
2033 kfree(msg->payload);
2034 kfree(msg);
2035 }
2036
2037 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
2038 return rc;
2039}
2040
2041enum cpe_svc_result cpe_svc_ftm_test(void *cpe_handle, u32 *status)
2042{
2043 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2044 struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
2045 struct cpe_svc_mem_segment backup_seg;
2046 struct cpe_svc_mem_segment waiti_seg;
2047 u8 *backup_data = NULL;
2048
2049 CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
2050 if (!t_info)
2051 t_info = cpe_d.cpe_default_handle;
2052
2053 rc = cpe_is_command_valid(t_info, CPE_CMD_FTM_TEST);
2054 if (rc != CPE_SVC_SUCCESS) {
2055 pr_err("%s: cmd validation fail, cmd = %d\n",
2056 __func__, CPE_CMD_FTM_TEST);
2057 goto fail_cmd;
2058 }
2059
2060 if (t_info && t_info->tgt) {
2061 backup_data = kzalloc(
2062 t_info->tgt->tgt_waiti_info->tgt_waiti_size,
2063 GFP_KERNEL);
2064
2065 /* CPE reset */
2066 rc = t_info->tgt->tgt_reset();
2067 if (rc != CPE_SVC_SUCCESS) {
2068 pr_err("%s: CPE reset fail! err = %d\n",
2069 __func__, rc);
2070 goto err_return;
2071 }
2072
2073 /* Back up the 4 byte IRAM data first */
2074 backup_seg.type = CPE_SVC_INSTRUCTION_MEM;
2075 backup_seg.cpe_addr =
2076 t_info->tgt->tgt_get_cpe_info()->IRAM_offset;
2077 backup_seg.size = t_info->tgt->tgt_waiti_info->tgt_waiti_size;
2078 backup_seg.data = backup_data;
2079
2080 pr_debug("%s: Backing up IRAM data from CPE\n",
2081 __func__);
2082
2083 rc = t_info->tgt->tgt_read_ram(t_info, &backup_seg);
2084 if (rc != CPE_SVC_SUCCESS) {
2085 pr_err("%s: Fail to backup CPE IRAM data, err = %d\n",
2086 __func__, rc);
2087 goto err_return;
2088 }
2089
2090 pr_debug("%s: Complete backing up IRAM data from CPE\n",
2091 __func__);
2092
2093 /* Write the WAITI instruction data */
2094 waiti_seg.type = CPE_SVC_INSTRUCTION_MEM;
2095 waiti_seg.cpe_addr =
2096 t_info->tgt->tgt_get_cpe_info()->IRAM_offset;
2097 waiti_seg.size = t_info->tgt->tgt_waiti_info->tgt_waiti_size;
2098 waiti_seg.data = t_info->tgt->tgt_waiti_info->tgt_waiti_data;
2099
2100 rc = t_info->tgt->tgt_write_ram(t_info, &waiti_seg);
2101 if (rc != CPE_SVC_SUCCESS) {
2102 pr_err("%s: Fail to write the WAITI data, err = %d\n",
2103 __func__, rc);
2104 goto restore_iram;
2105 }
2106
2107 /* Boot up cpe to execute the WAITI instructions */
2108 rc = t_info->tgt->tgt_boot(1);
2109 if (rc != CPE_SVC_SUCCESS) {
2110 pr_err("%s: Fail to boot CPE, err = %d\n",
2111 __func__, rc);
2112 goto reset;
2113 }
2114
2115 /*
2116 * 1ms delay is suggested by the hw team to
2117 * wait for cpe to boot up.
2118 */
2119 usleep_range(1000, 1100);
2120
2121 /* Check if the cpe init is done after executing the WAITI */
2122 *status = t_info->tgt->tgt_cpar_init_done();
2123
2124reset:
2125 /* Set the cpe back to reset state */
2126 rc = t_info->tgt->tgt_reset();
2127 if (rc != CPE_SVC_SUCCESS) {
2128 pr_err("%s: CPE reset fail! err = %d\n",
2129 __func__, rc);
2130 goto restore_iram;
2131 }
2132
2133restore_iram:
2134 /* Restore the IRAM 4 bytes data */
2135 rc = t_info->tgt->tgt_write_ram(t_info, &backup_seg);
2136 if (rc != CPE_SVC_SUCCESS) {
2137 pr_err("%s: Fail to restore the IRAM data, err = %d\n",
2138 __func__, rc);
2139 goto err_return;
2140 }
2141 }
2142
2143err_return:
2144 kfree(backup_data);
2145fail_cmd:
2146 CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
2147 return rc;
2148}
2149
2150static enum cpe_svc_result cpe_tgt_tomtom_boot(int debug_mode)
2151{
2152 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2153
2154 if (!debug_mode)
2155 rc = cpe_update_bits(TOMTOM_A_SVASS_CPAR_WDOG_CFG,
2156 0x3F, 0x31);
2157 else
2158 pr_info("%s: CPE in debug mode, WDOG disabled\n",
2159 __func__);
2160
2161 rc = cpe_update_bits(TOMTOM_A_SVASS_CLKRST_CTL,
2162 0x02, 0x00);
2163 rc = cpe_update_bits(TOMTOM_A_SVASS_CLKRST_CTL,
2164 0x0C, 0x04);
2165 rc = cpe_update_bits(TOMTOM_A_SVASS_CPAR_CFG,
2166 0x01, 0x01);
2167
2168 return rc;
2169}
2170
2171static u32 cpe_tgt_tomtom_is_cpar_init_done(void)
2172{
2173 u8 status = 0;
2174
2175 cpe_register_read(TOMTOM_A_SVASS_STATUS, &status);
2176 return status & 0x01;
2177}
2178
2179static u32 cpe_tgt_tomtom_is_active(void)
2180{
2181 u8 status = 0;
2182
2183 cpe_register_read(TOMTOM_A_SVASS_STATUS, &status);
2184 return status & 0x04;
2185}
2186
2187static enum cpe_svc_result cpe_tgt_tomtom_reset(void)
2188{
2189 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2190
2191 rc = cpe_update_bits(TOMTOM_A_SVASS_CPAR_WDOG_CFG,
2192 0x30, 0x00);
2193
2194 rc = cpe_update_bits(TOMTOM_A_SVASS_CPAR_CFG,
2195 0x01, 0x00);
2196 rc = cpe_update_bits(TOMTOM_A_MEM_LEAKAGE_CTL,
2197 0x07, 0x03);
2198 rc = cpe_update_bits(TOMTOM_A_SVASS_CLKRST_CTL,
2199 0x08, 0x08);
2200 rc = cpe_update_bits(TOMTOM_A_SVASS_CLKRST_CTL,
2201 0x02, 0x02);
2202 return rc;
2203}
2204
2205enum cpe_svc_result cpe_tgt_tomtom_voicetx(bool enable)
2206{
2207 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2208 u8 val = 0;
2209
2210 if (enable)
2211 val = 0x02;
2212 else
2213 val = 0x00;
2214 rc = cpe_update_bits(TOMTOM_A_SVASS_CFG,
2215 0x02, val);
2216 val = 0;
2217 cpe_register_read(TOMTOM_A_SVASS_CFG, &val);
2218 return rc;
2219}
2220
2221enum cpe_svc_result cpe_svc_toggle_lab(void *cpe_handle, bool enable)
2222{
2223
2224 struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
2225
2226 if (!t_info)
2227 t_info = cpe_d.cpe_default_handle;
2228
2229 if (t_info->tgt)
2230 return t_info->tgt->tgt_voice_tx_lab(enable);
2231 else
2232 return CPE_SVC_INVALID_HANDLE;
2233}
2234
2235static enum cpe_svc_result cpe_tgt_tomtom_read_mailbox(u8 *buffer,
2236 size_t size)
2237{
2238 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2239 u32 cnt = 0;
2240
2241 if (size >= TOMTOM_A_SVASS_SPE_OUTBOX_SIZE)
2242 size = TOMTOM_A_SVASS_SPE_OUTBOX_SIZE - 1;
2243 for (cnt = 0; (cnt < size) && (rc == CPE_SVC_SUCCESS); cnt++) {
2244 rc = cpe_register_read(TOMTOM_A_SVASS_SPE_OUTBOX(cnt),
2245 &(buffer[cnt]));
2246 }
2247 return rc;
2248}
2249
2250static enum cpe_svc_result cpe_tgt_tomtom_write_mailbox(u8 *buffer,
2251 size_t size)
2252{
2253 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2254 u32 cnt = 0;
2255
2256 if (size >= TOMTOM_A_SVASS_SPE_INBOX_SIZE)
2257 size = TOMTOM_A_SVASS_SPE_INBOX_SIZE - 1;
2258 for (cnt = 0; (cnt < size) && (rc == CPE_SVC_SUCCESS); cnt++) {
2259 rc = cpe_register_write(TOMTOM_A_SVASS_SPE_INBOX(cnt),
2260 buffer[cnt]);
2261 }
2262
2263 if (rc == CPE_SVC_SUCCESS)
2264 rc = cpe_register_write(TOMTOM_A_SVASS_SPE_INBOX_TRG, 1);
2265
2266 return rc;
2267}
2268
2269static enum cpe_svc_result cpe_get_mem_addr(struct cpe_info *t_info,
2270 const struct cpe_svc_mem_segment *mem_seg,
2271 u32 *addr, u8 *mem)
2272{
2273 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2274 u32 offset, mem_sz, address;
2275 u8 mem_type;
2276
2277 switch (mem_seg->type) {
2278
2279 case CPE_SVC_DATA_MEM:
2280 mem_type = MEM_ACCESS_DRAM_VAL;
2281 offset = TOMTOM_A_SVASS_SPE_DRAM_OFFSET;
2282 mem_sz = TOMTOM_A_SVASS_SPE_DRAM_SIZE;
2283 break;
2284
2285 case CPE_SVC_INSTRUCTION_MEM:
2286 mem_type = MEM_ACCESS_IRAM_VAL;
2287 offset = TOMTOM_A_SVASS_SPE_IRAM_OFFSET;
2288 mem_sz = TOMTOM_A_SVASS_SPE_IRAM_SIZE;
2289 break;
2290
2291 default:
2292 pr_err("%s: Invalid mem type = %u\n",
2293 __func__, mem_seg->type);
2294 return CPE_SVC_INVALID_HANDLE;
2295 }
2296
2297 if (mem_seg->cpe_addr < offset) {
2298 pr_err("%s: Invalid addr %x for mem type %u\n",
2299 __func__, mem_seg->cpe_addr, mem_type);
2300 return CPE_SVC_INVALID_HANDLE;
2301 }
2302
2303 address = mem_seg->cpe_addr - offset;
2304 if (address + mem_seg->size > mem_sz) {
2305 pr_err("%s: wrong size %zu, start address %x, mem_type %u\n",
2306 __func__, mem_seg->size, address, mem_type);
2307 return CPE_SVC_INVALID_HANDLE;
2308 }
2309
2310 (*addr) = address;
2311 (*mem) = mem_type;
2312
2313 return rc;
2314}
2315
/*
 * cpe_tgt_tomtom_read_RAM - read a memory segment out of CPE RAM
 * through the indirect MEM_PTR/MEM_BANK register window.
 *
 * The segment's CPE address is translated to a bank-relative offset,
 * the 24-bit pointer registers are programmed, and bytes are pulled
 * one at a time from the MEM_BANK data register. When the codec
 * supports register auto-increment the pointer is programmed once;
 * otherwise the access window is re-armed for every byte. The exact
 * register write order (PTR0/1/2 then MEM_CTL) follows the hardware
 * access protocol and must not be reordered.
 *
 * Returns the result of the last register access.
 */
static enum cpe_svc_result cpe_tgt_tomtom_read_RAM(struct cpe_info *t_info,
		struct cpe_svc_mem_segment *mem_seg)
{
	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
	u8 mem_reg_val = 0;
	u32 cnt = 0;
	bool autoinc;
	u8 mem = MEM_ACCESS_NONE_VAL;
	u32 addr = 0;
	u32 ptr_update = true;

	if (!mem_seg) {
		pr_err("%s: Invalid mem segment\n",
			__func__);
		return CPE_SVC_INVALID_HANDLE;
	}

	/* Translate the CPE address to a bank offset + bank selector */
	rc = cpe_get_mem_addr(t_info, mem_seg, &addr, &mem);

	if (rc != CPE_SVC_SUCCESS) {
		pr_err("%s: Cannot obtain address, mem_type %u\n",
			__func__, mem_seg->type);
		return rc;
	}

	/* Clear any previous access mode before configuring a new one */
	rc = cpe_register_write(TOMTOM_A_SVASS_MEM_CTL, 0);
	autoinc = cpe_register_read_autoinc_supported();
	if (autoinc)
		mem_reg_val |= 0x04;

	/* 0x08 enables the memory access window; low bits select bank */
	mem_reg_val |= 0x08;
	mem_reg_val |= mem;

	do {
		if (!autoinc || ptr_update) {
			/* Program the 24-bit pointer, low byte first */
			rc = cpe_register_write(TOMTOM_A_SVASS_MEM_PTR0,
				(addr & 0xFF));
			rc = cpe_register_write(TOMTOM_A_SVASS_MEM_PTR1,
				((addr >> 8) & 0xFF));
			rc = cpe_register_write(TOMTOM_A_SVASS_MEM_PTR2,
				((addr >> 16) & 0xFF));

			rc = cpe_register_write(TOMTOM_A_SVASS_MEM_CTL,
						mem_reg_val);

			ptr_update = false;
		}
		rc = cpe_register_read(TOMTOM_A_SVASS_MEM_BANK,
				       &mem_seg->data[cnt]);

		/* Without auto-increment, re-arm the window per byte */
		if (!autoinc)
			rc = cpe_register_write(TOMTOM_A_SVASS_MEM_CTL, 0);
	} while (++cnt < mem_seg->size);

	/* Close the memory access window */
	rc = cpe_register_write(TOMTOM_A_SVASS_MEM_CTL, 0);

	return rc;
}
2374
2375static enum cpe_svc_result cpe_tgt_tomtom_write_RAM(struct cpe_info *t_info,
2376 const struct cpe_svc_mem_segment *mem_seg)
2377{
2378 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2379 u8 mem_reg_val = 0;
2380 u8 mem = MEM_ACCESS_NONE_VAL;
2381 u32 addr = 0;
2382 u8 *temp_ptr = NULL;
2383 u32 temp_size = 0;
2384 bool autoinc;
2385
2386 if (!mem_seg) {
2387 pr_err("%s: Invalid mem segment\n",
2388 __func__);
2389 return CPE_SVC_INVALID_HANDLE;
2390 }
2391
2392 rc = cpe_get_mem_addr(t_info, mem_seg, &addr, &mem);
2393
2394 if (rc != CPE_SVC_SUCCESS) {
2395 pr_err("%s: Cannot obtain address, mem_type %u\n",
2396 __func__, mem_seg->type);
2397 return rc;
2398 }
2399
2400 autoinc = cpe_register_read_autoinc_supported();
2401 if (autoinc)
2402 mem_reg_val |= 0x04;
2403 mem_reg_val |= mem;
2404
2405 rc = cpe_update_bits(TOMTOM_A_SVASS_MEM_CTL,
2406 0x0F, mem_reg_val);
2407
2408 rc = cpe_register_write(TOMTOM_A_SVASS_MEM_PTR0,
2409 (addr & 0xFF));
2410 rc = cpe_register_write(TOMTOM_A_SVASS_MEM_PTR1,
2411 ((addr >> 8) & 0xFF));
2412
2413 rc = cpe_register_write(TOMTOM_A_SVASS_MEM_PTR2,
2414 ((addr >> 16) & 0xFF));
2415
2416 temp_size = 0;
2417 temp_ptr = mem_seg->data;
2418
2419 while (temp_size <= mem_seg->size) {
2420 u32 to_write = (mem_seg->size >= temp_size+CHUNK_SIZE)
2421 ? CHUNK_SIZE : (mem_seg->size-temp_size);
2422
2423 if (t_info->state == CPE_STATE_OFFLINE) {
2424 pr_err("%s: CPE is offline\n", __func__);
2425 return CPE_SVC_FAILED;
2426 }
2427
2428 cpe_register_write_repeat(TOMTOM_A_SVASS_MEM_BANK,
2429 temp_ptr, to_write);
2430 temp_size += CHUNK_SIZE;
2431 temp_ptr += CHUNK_SIZE;
2432 }
2433
2434 rc = cpe_register_write(TOMTOM_A_SVASS_MEM_CTL, 0);
2435 return rc;
2436}
2437
2438static enum cpe_svc_result cpe_tgt_tomtom_route_notification(
2439 enum cpe_svc_module module,
2440 enum cpe_svc_route_dest dest)
2441{
2442 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2443 u8 ctl_reg_val = 0;
2444
2445 switch (module) {
2446 case CPE_SVC_LISTEN_PROC:
2447 switch (dest) {
2448 case CPE_SVC_EXTERNAL:
2449 ctl_reg_val = LISTEN_CTL_MSM_VAL;
2450 break;
2451 case CPE_SVC_INTERNAL:
2452 ctl_reg_val = LISTEN_CTL_SPE_VAL;
2453 break;
2454 default:
2455 pr_err("%s: Invalid dest %d\n",
2456 __func__, dest);
2457 return CPE_SVC_FAILED;
2458 }
2459
2460 rc = cpe_update_bits(TOMTOM_A_SVASS_CFG,
2461 0x01, ctl_reg_val);
2462 break;
2463 default:
2464 pr_err("%s: Invalid module %d\n",
2465 __func__, module);
2466 rc = CPE_SVC_FAILED;
2467 break;
2468 }
2469
2470 return rc;
2471}
2472
2473static enum cpe_svc_result cpe_tgt_tomtom_set_debug_mode(u32 enable)
2474{
2475 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2476 u8 dbg_reg_val = 0x00;
2477
2478 if (enable)
2479 dbg_reg_val = 0x08;
2480 rc = cpe_update_bits(TOMTOM_A_SVASS_DEBUG,
2481 0x08, dbg_reg_val);
2482 return rc;
2483}
2484
2485static const struct cpe_svc_hw_cfg *cpe_tgt_tomtom_get_cpe_info(void)
2486{
2487 return &cpe_svc_tomtom_info;
2488}
2489
2490static enum cpe_svc_result cpe_tgt_tomtom_deinit(
2491 struct cpe_svc_tgt_abstraction *param)
2492{
2493 kfree(param->inbox);
2494 param->inbox = NULL;
2495 kfree(param->outbox);
2496 param->outbox = NULL;
2497 memset(param, 0, sizeof(struct cpe_svc_tgt_abstraction));
2498 return CPE_SVC_SUCCESS;
2499}
2500
/*
 * WAITI patch bytes for the TomTom target.
 * NOTE(review): presumably an Xtensa WAITI (wait-for-interrupt)
 * instruction image written to CPE memory — confirm against firmware docs.
 */
static u8 cpe_tgt_tomtom_waiti_data[] = {0x00, 0x70, 0x00, 0x00};

/* Exposed to the generic CPE layer via param->tgt_waiti_info in init */
static struct cpe_tgt_waiti_info cpe_tgt_tomtom_waiti_info = {
	.tgt_waiti_size = ARRAY_SIZE(cpe_tgt_tomtom_waiti_data),
	.tgt_waiti_data = cpe_tgt_tomtom_waiti_data,
};
2507
2508static enum cpe_svc_result cpe_tgt_tomtom_init(
2509 struct cpe_svc_codec_info_v1 *codec_info,
2510 struct cpe_svc_tgt_abstraction *param)
2511{
2512 if (!codec_info)
2513 return CPE_SVC_INVALID_HANDLE;
2514 if (!param)
2515 return CPE_SVC_INVALID_HANDLE;
2516
2517 if (codec_info->id == CPE_SVC_CODEC_TOMTOM) {
2518 param->tgt_boot = cpe_tgt_tomtom_boot;
2519 param->tgt_cpar_init_done = cpe_tgt_tomtom_is_cpar_init_done;
2520 param->tgt_is_active = cpe_tgt_tomtom_is_active;
2521 param->tgt_reset = cpe_tgt_tomtom_reset;
2522 param->tgt_read_mailbox = cpe_tgt_tomtom_read_mailbox;
2523 param->tgt_write_mailbox = cpe_tgt_tomtom_write_mailbox;
2524 param->tgt_read_ram = cpe_tgt_tomtom_read_RAM;
2525 param->tgt_write_ram = cpe_tgt_tomtom_write_RAM;
2526 param->tgt_route_notification =
2527 cpe_tgt_tomtom_route_notification;
2528 param->tgt_set_debug_mode = cpe_tgt_tomtom_set_debug_mode;
2529 param->tgt_get_cpe_info = cpe_tgt_tomtom_get_cpe_info;
2530 param->tgt_deinit = cpe_tgt_tomtom_deinit;
2531 param->tgt_voice_tx_lab = cpe_tgt_tomtom_voicetx;
2532 param->tgt_waiti_info = &cpe_tgt_tomtom_waiti_info;
2533
2534 param->inbox = kzalloc(TOMTOM_A_SVASS_SPE_INBOX_SIZE,
2535 GFP_KERNEL);
2536 if (!param->inbox)
2537 return CPE_SVC_NO_MEMORY;
2538
2539 param->outbox = kzalloc(TOMTOM_A_SVASS_SPE_OUTBOX_SIZE,
2540 GFP_KERNEL);
2541 if (!param->outbox) {
2542 kfree(param->inbox);
2543 return CPE_SVC_NO_MEMORY;
2544 }
2545 }
2546
2547 return CPE_SVC_SUCCESS;
2548}
2549
/*
 * Boot the WCD9335 CPE: arm the watchdog (unless in debug mode) and
 * bring CPAR out of reset.  The register order below is the boot
 * sequence; do not reorder.
 *
 * Any intermediate failure is OR-accumulated and collapsed into
 * CPE_SVC_FAILED at the end.
 */
static enum cpe_svc_result cpe_tgt_wcd9335_boot(int debug_mode)
{
	enum cpe_svc_result rc = CPE_SVC_SUCCESS;

	/* In debug mode the watchdog stays disabled so JTAG halts survive */
	if (!debug_mode)
		rc |= cpe_update_bits(
			WCD9335_CPE_SS_WDOG_CFG,
			0x3f, 0x31);
	else
		pr_info("%s: CPE in debug mode, WDOG disabled\n",
			__func__);

	/* NOTE(review): 19 = MAD buffer-ready interrupt period — confirm units */
	rc |= cpe_register_write(WCD9335_CPE_SS_CPARMAD_BUFRDY_INT_PERIOD, 19);
	rc |= cpe_update_bits(WCD9335_CPE_SS_CPAR_CTL, 0x04, 0x00);
	rc |= cpe_update_bits(WCD9335_CPE_SS_CPAR_CTL, 0x02, 0x02);
	rc |= cpe_update_bits(WCD9335_CPE_SS_CPAR_CTL, 0x01, 0x01);

	if (unlikely(rc)) {
		pr_err("%s: Failed to boot, err = %d\n",
			__func__, rc);
		rc = CPE_SVC_FAILED;
	}

	return rc;
}
2575
2576static u32 cpe_tgt_wcd9335_is_cpar_init_done(void)
2577{
2578 u8 temp = 0;
2579
2580 cpe_register_read(WCD9335_CPE_SS_STATUS, &temp);
2581 return temp & 0x1;
2582}
2583
2584static u32 cpe_tgt_wcd9335_is_active(void)
2585{
2586 u8 temp = 0;
2587
2588 cpe_register_read(WCD9335_CPE_SS_STATUS, &temp);
2589 return temp & 0x4;
2590}
2591
2592static enum cpe_svc_result cpe_tgt_wcd9335_reset(void)
2593{
2594 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2595
2596 rc |= cpe_update_bits(WCD9335_CPE_SS_CPAR_CFG, 0x01, 0x00);
2597
2598 rc |= cpe_register_write(
2599 WCD9335_CODEC_RPM_PWR_CPE_IRAM_SHUTDOWN, 0x00);
2600 rc |= cpe_register_write(
2601 WCD9335_CODEC_RPM_PWR_CPE_DRAM1_SHUTDOWN, 0x00);
2602 rc |= cpe_register_write(
2603 WCD9335_CODEC_RPM_PWR_CPE_DRAM0_SHUTDOWN_1, 0x00);
2604 rc |= cpe_register_write(
2605 WCD9335_CODEC_RPM_PWR_CPE_DRAM0_SHUTDOWN_2, 0x00);
2606
2607 rc |= cpe_update_bits(WCD9335_CPE_SS_CPAR_CTL, 0x04, 0x04);
2608
2609 if (unlikely(rc)) {
2610 pr_err("%s: failed to reset cpe, err = %d\n",
2611 __func__, rc);
2612 rc = CPE_SVC_FAILED;
2613 }
2614
2615 return rc;
2616}
2617
2618static enum cpe_svc_result cpe_tgt_wcd9335_read_mailbox(u8 *buffer,
2619 size_t size)
2620{
2621 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2622 u32 cnt = 0;
2623
2624 pr_debug("%s: size=%zd\n", __func__, size);
2625
2626 if (size > WCD9335_CPE_SS_SPE_OUTBOX_SIZE)
2627 size = WCD9335_CPE_SS_SPE_OUTBOX_SIZE;
2628
2629 for (cnt = 0; (cnt < size) && (rc == CPE_SVC_SUCCESS); cnt++)
2630 rc = cpe_register_read(WCD9335_CPE_SS_SPE_OUTBOX1(cnt),
2631 &buffer[cnt]);
2632
2633 rc = cpe_register_write(WCD9335_CPE_SS_OUTBOX1_ACK, 0x01);
2634
2635 if (unlikely(rc)) {
2636 pr_err("%s: failed to ACK outbox, err = %d\n",
2637 __func__, rc);
2638 rc = CPE_SVC_FAILED;
2639 }
2640
2641 return rc;
2642}
2643
2644static enum cpe_svc_result cpe_tgt_wcd9335_write_mailbox(u8 *buffer,
2645 size_t size)
2646{
2647 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2648 u32 cnt = 0;
2649
2650 pr_debug("%s: size = %zd\n", __func__, size);
2651 if (size > WCD9335_CPE_SS_SPE_INBOX_SIZE)
2652 size = WCD9335_CPE_SS_SPE_INBOX_SIZE;
2653 for (cnt = 0; (cnt < size) && (rc == CPE_SVC_SUCCESS); cnt++) {
2654 rc |= cpe_register_write(WCD9335_CPE_SS_SPE_INBOX1(cnt),
2655 buffer[cnt]);
2656 }
2657
2658 if (unlikely(rc)) {
2659 pr_err("%s: Error %d writing mailbox registers\n",
2660 __func__, rc);
2661 return rc;
2662 }
2663
2664 rc = cpe_register_write(WCD9335_CPE_SS_INBOX1_TRG, 1);
2665 return rc;
2666}
2667
2668static enum cpe_svc_result cpe_wcd9335_get_mem_addr(struct cpe_info *t_info,
2669 const struct cpe_svc_mem_segment *mem_seg,
2670 u32 *addr, u8 *mem)
2671{
2672 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2673 u32 offset, mem_sz, address;
2674 u8 mem_type;
2675
2676 switch (mem_seg->type) {
2677 case CPE_SVC_DATA_MEM:
2678 mem_type = MEM_ACCESS_DRAM_VAL;
2679 offset = WCD9335_CPE_SS_SPE_DRAM_OFFSET;
2680 mem_sz = WCD9335_CPE_SS_SPE_DRAM_SIZE;
2681 break;
2682
2683 case CPE_SVC_INSTRUCTION_MEM:
2684 mem_type = MEM_ACCESS_IRAM_VAL;
2685 offset = WCD9335_CPE_SS_SPE_IRAM_OFFSET;
2686 mem_sz = WCD9335_CPE_SS_SPE_IRAM_SIZE;
2687 break;
2688
2689 default:
2690 pr_err("%s: Invalid mem type = %u\n",
2691 __func__, mem_seg->type);
2692 return CPE_SVC_INVALID_HANDLE;
2693 }
2694
2695 if (mem_seg->cpe_addr < offset) {
2696 pr_err("%s: Invalid addr %x for mem type %u\n",
2697 __func__, mem_seg->cpe_addr, mem_type);
2698 return CPE_SVC_INVALID_HANDLE;
2699 }
2700
2701 address = mem_seg->cpe_addr - offset;
2702 if (address + mem_seg->size > mem_sz) {
2703 pr_err("%s: wrong size %zu, start address %x, mem_type %u\n",
2704 __func__, mem_seg->size, address, mem_type);
2705 return CPE_SVC_INVALID_HANDLE;
2706 }
2707
2708 (*addr) = address;
2709 (*mem) = mem_type;
2710
2711 return rc;
2712}
2713
2714static enum cpe_svc_result cpe_tgt_wcd9335_read_RAM(struct cpe_info *t_info,
2715 struct cpe_svc_mem_segment *mem_seg)
2716{
2717 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2718 u8 temp = 0;
2719 u32 cnt = 0;
2720 u8 mem = 0x0;
2721 u32 addr = 0;
2722 u32 lastaddr = 0;
2723 u32 ptr_update = true;
2724 bool autoinc;
2725
2726 if (!mem_seg) {
2727 pr_err("%s: Invalid buffer\n", __func__);
2728 return CPE_SVC_INVALID_HANDLE;
2729 }
2730
2731 rc = cpe_wcd9335_get_mem_addr(t_info, mem_seg, &addr, &mem);
2732
2733 if (rc != CPE_SVC_SUCCESS) {
2734 pr_err("%s: Cannot obtain address, mem_type %u\n",
2735 __func__, mem_seg->type);
2736 return rc;
2737 }
2738
2739 rc |= cpe_register_write(WCD9335_CPE_SS_MEM_CTRL, 0);
2740 autoinc = cpe_register_read_autoinc_supported();
2741
2742 if (autoinc)
2743 temp = 0x18;
2744 else
2745 temp = 0x10;
2746
2747 temp |= mem;
2748
2749 lastaddr = ~addr;
2750 do {
2751 if (!autoinc || (ptr_update)) {
2752 /* write LSB only if modified */
2753 if ((lastaddr & 0xFF) != (addr & 0xFF))
2754 rc |= cpe_register_write(
2755 WCD9335_CPE_SS_MEM_PTR_0,
2756 (addr & 0xFF));
2757 /* write middle byte only if modified */
2758 if (((lastaddr >> 8) & 0xFF) != ((addr >> 8) & 0xFF))
2759 rc |= cpe_register_write(
2760 WCD9335_CPE_SS_MEM_PTR_1,
2761 ((addr>>8) & 0xFF));
2762 /* write MSB only if modified */
2763 if (((lastaddr >> 16) & 0xFF) != ((addr >> 16) & 0xFF))
2764 rc |= cpe_register_write(
2765 WCD9335_CPE_SS_MEM_PTR_2,
2766 ((addr>>16) & 0xFF));
2767
2768 rc |= cpe_register_write(WCD9335_CPE_SS_MEM_CTRL, temp);
2769 lastaddr = addr;
2770 addr++;
2771 ptr_update = false;
2772 }
2773
2774 rc |= cpe_register_read(WCD9335_CPE_SS_MEM_BANK_0,
2775 &mem_seg->data[cnt]);
2776
2777 if (!autoinc)
2778 rc |= cpe_register_write(WCD9335_CPE_SS_MEM_CTRL, 0);
2779 } while ((++cnt < mem_seg->size) ||
2780 (rc != CPE_SVC_SUCCESS));
2781
2782 rc |= cpe_register_write(WCD9335_CPE_SS_MEM_CTRL, 0);
2783
2784 if (rc)
2785 pr_err("%s: Failed to read registers, err = %d\n",
2786 __func__, rc);
2787
2788 return rc;
2789}
2790
2791static enum cpe_svc_result cpe_tgt_wcd9335_write_RAM(struct cpe_info *t_info,
2792 const struct cpe_svc_mem_segment *mem_seg)
2793{
2794 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2795 u8 mem_reg_val = 0;
2796 u8 mem = MEM_ACCESS_NONE_VAL;
2797 u32 addr = 0;
2798 u8 *temp_ptr = NULL;
2799 u32 temp_size = 0;
2800 bool autoinc;
2801
2802 if (!mem_seg) {
2803 pr_err("%s: Invalid mem segment\n",
2804 __func__);
2805 return CPE_SVC_INVALID_HANDLE;
2806 }
2807
2808 rc = cpe_wcd9335_get_mem_addr(t_info, mem_seg, &addr, &mem);
2809
2810 if (rc != CPE_SVC_SUCCESS) {
2811 pr_err("%s: Cannot obtain address, mem_type %u\n",
2812 __func__, mem_seg->type);
2813 return rc;
2814 }
2815
2816 autoinc = cpe_register_read_autoinc_supported();
2817 if (autoinc)
2818 mem_reg_val = 0x18;
2819 else
2820 mem_reg_val = 0x10;
2821
2822 mem_reg_val |= mem;
2823
2824 rc = cpe_update_bits(WCD9335_CPE_SS_MEM_CTRL,
2825 0x0F, mem_reg_val);
2826
2827 rc = cpe_register_write(WCD9335_CPE_SS_MEM_PTR_0,
2828 (addr & 0xFF));
2829 rc = cpe_register_write(WCD9335_CPE_SS_MEM_PTR_1,
2830 ((addr >> 8) & 0xFF));
2831
2832 rc = cpe_register_write(WCD9335_CPE_SS_MEM_PTR_2,
2833 ((addr >> 16) & 0xFF));
2834
2835 temp_size = 0;
2836 temp_ptr = mem_seg->data;
2837
2838 while (temp_size <= mem_seg->size) {
2839 u32 to_write = (mem_seg->size >= temp_size+CHUNK_SIZE)
2840 ? CHUNK_SIZE : (mem_seg->size - temp_size);
2841
2842 if (t_info->state == CPE_STATE_OFFLINE) {
2843 pr_err("%s: CPE is offline\n", __func__);
2844 return CPE_SVC_FAILED;
2845 }
2846
2847 cpe_register_write_repeat(WCD9335_CPE_SS_MEM_BANK_0,
2848 temp_ptr, to_write);
2849 temp_size += CHUNK_SIZE;
2850 temp_ptr += CHUNK_SIZE;
2851 }
2852
2853 rc = cpe_register_write(WCD9335_CPE_SS_MEM_CTRL, 0);
2854
2855 if (rc)
2856 pr_err("%s: Failed to write registers, err = %d\n",
2857 __func__, rc);
2858 return rc;
2859}
2860
2861static enum cpe_svc_result cpe_tgt_wcd9335_route_notification(
2862 enum cpe_svc_module module,
2863 enum cpe_svc_route_dest dest)
2864{
2865 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2866
2867 pr_debug("%s: Module = %d, Destination = %d\n",
2868 __func__, module, dest);
2869
2870 switch (module) {
2871 case CPE_SVC_LISTEN_PROC:
2872 switch (dest) {
2873 case CPE_SVC_EXTERNAL:
2874 rc = cpe_update_bits(WCD9335_CPE_SS_CFG, 0x01, 0x01);
2875 break;
2876 case CPE_SVC_INTERNAL:
2877 rc = cpe_update_bits(WCD9335_CPE_SS_CFG, 0x01, 0x00);
2878 break;
2879 default:
2880 pr_err("%s: Invalid destination %d\n",
2881 __func__, dest);
2882 return CPE_SVC_FAILED;
2883 }
2884 break;
2885 default:
2886 pr_err("%s: Invalid module %d\n",
2887 __func__, module);
2888 rc = CPE_SVC_FAILED;
2889 break;
2890 }
2891 return rc;
2892}
2893
2894static enum cpe_svc_result cpe_tgt_wcd9335_set_debug_mode(u32 enable)
2895{
2896 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2897
2898 pr_debug("%s: enable = %s\n", __func__,
2899 (enable) ? "true" : "false");
2900
2901 return rc;
2902}
2903
2904static const struct cpe_svc_hw_cfg *cpe_tgt_wcd9335_get_cpe_info(void)
2905{
2906 return &cpe_svc_wcd9335_info;
2907}
2908
2909static enum cpe_svc_result
2910cpe_tgt_wcd9335_deinit(struct cpe_svc_tgt_abstraction *param)
2911{
2912 kfree(param->inbox);
2913 param->inbox = NULL;
2914 kfree(param->outbox);
2915 param->outbox = NULL;
2916 memset(param, 0, sizeof(struct cpe_svc_tgt_abstraction));
2917
2918 return CPE_SVC_SUCCESS;
2919}
2920
2921static enum cpe_svc_result
2922 cpe_tgt_wcd9335_voicetx(bool enable)
2923{
2924 enum cpe_svc_result rc = CPE_SVC_SUCCESS;
2925 u8 val = 0;
2926
2927 pr_debug("%s: enable = %u\n", __func__, enable);
2928 if (enable)
2929 val = 0x02;
2930 else
2931 val = 0x00;
2932
2933 rc = cpe_update_bits(WCD9335_CPE_SS_CFG, 0x02, val);
2934 val = 0;
2935 cpe_register_read(WCD9335_CPE_SS_CFG, &val);
2936
2937 return rc;
2938}
2939
/*
 * WAITI patch bytes for the WCD9335 target.
 * NOTE(review): presumably an Xtensa WAITI (wait-for-interrupt)
 * instruction image written to CPE memory — confirm against firmware docs.
 */
static u8 cpe_tgt_wcd9335_waiti_data[] = {0x00, 0x70, 0x00, 0x00};

/* Exposed to the generic CPE layer via param->tgt_waiti_info in init */
static struct cpe_tgt_waiti_info cpe_tgt_wcd9335_waiti_info = {
	.tgt_waiti_size = ARRAY_SIZE(cpe_tgt_wcd9335_waiti_data),
	.tgt_waiti_data = cpe_tgt_wcd9335_waiti_data,
};
2946
2947static enum cpe_svc_result cpe_tgt_wcd9335_init(
2948 struct cpe_svc_codec_info_v1 *codec_info,
2949 struct cpe_svc_tgt_abstraction *param)
2950{
2951 if (!codec_info)
2952 return CPE_SVC_INVALID_HANDLE;
2953 if (!param)
2954 return CPE_SVC_INVALID_HANDLE;
2955
2956 if (codec_info->id == CPE_SVC_CODEC_WCD9335) {
2957 param->tgt_boot = cpe_tgt_wcd9335_boot;
2958 param->tgt_cpar_init_done = cpe_tgt_wcd9335_is_cpar_init_done;
2959 param->tgt_is_active = cpe_tgt_wcd9335_is_active;
2960 param->tgt_reset = cpe_tgt_wcd9335_reset;
2961 param->tgt_read_mailbox = cpe_tgt_wcd9335_read_mailbox;
2962 param->tgt_write_mailbox = cpe_tgt_wcd9335_write_mailbox;
2963 param->tgt_read_ram = cpe_tgt_wcd9335_read_RAM;
2964 param->tgt_write_ram = cpe_tgt_wcd9335_write_RAM;
2965 param->tgt_route_notification =
2966 cpe_tgt_wcd9335_route_notification;
2967 param->tgt_set_debug_mode = cpe_tgt_wcd9335_set_debug_mode;
2968 param->tgt_get_cpe_info = cpe_tgt_wcd9335_get_cpe_info;
2969 param->tgt_deinit = cpe_tgt_wcd9335_deinit;
2970 param->tgt_voice_tx_lab = cpe_tgt_wcd9335_voicetx;
2971 param->tgt_waiti_info = &cpe_tgt_wcd9335_waiti_info;
2972
2973 param->inbox = kzalloc(WCD9335_CPE_SS_SPE_INBOX_SIZE,
2974 GFP_KERNEL);
2975 if (!param->inbox)
2976 return CPE_SVC_NO_MEMORY;
2977
2978 param->outbox = kzalloc(WCD9335_CPE_SS_SPE_OUTBOX_SIZE,
2979 GFP_KERNEL);
2980 if (!param->outbox) {
2981 kfree(param->inbox);
2982 return CPE_SVC_NO_MEMORY;
2983 }
2984 }
2985
2986 return CPE_SVC_SUCCESS;
2987}
2988
2989MODULE_DESCRIPTION("WCD CPE Services");
2990MODULE_LICENSE("GPL v2");