/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <dsp/msm_audio_ion.h>
#include <dsp/apr_audio-v2.h>
#include <ipc/apr_us.h>
#include "q6usm.h"

#define ADSP_MEMORY_MAP_SHMEM8_4K_POOL 3

#define MEM_4K_OFFSET 4095
#define MEM_4K_MASK 0xfffff000

#define USM_SESSION_MAX 0x02 /* aDSP:USM limit */

#define READDONE_IDX_STATUS 0

#define WRITEDONE_IDX_STATUS 0

/* Standard timeout in the asynchronous ops */
#define Q6USM_TIMEOUT_JIFFIES (1*HZ) /* 1 sec */

static DEFINE_MUTEX(session_lock);

static struct us_client *session[USM_SESSION_MAX];
static int32_t q6usm_mmapcallback(struct apr_client_data *data, void *priv);
static int32_t q6usm_callback(struct apr_client_data *data, void *priv);
static void q6usm_add_hdr(struct us_client *usc, struct apr_hdr *hdr,
                uint32_t pkt_size, bool cmd_flg);

struct usm_mmap {
        atomic_t ref_cnt;
        atomic_t cmd_state;
        wait_queue_head_t cmd_wait;
        void *apr;
        int mem_handle;
};

static struct usm_mmap this_mmap;

static void q6usm_add_mmaphdr(struct apr_hdr *hdr,
                uint32_t pkt_size, bool cmd_flg, u32 token)
{
        hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
                        APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
        hdr->src_port = 0;
        hdr->dest_port = 0;
        if (cmd_flg) {
                hdr->token = token;
                atomic_set(&this_mmap.cmd_state, 1);
        }
        hdr->pkt_size = pkt_size;
}

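/*
 * Map a shared-memory region to the aDSP over the common (session-independent)
 * USM port and wait for the acknowledgement; the returned map handle is
 * stored through mem_handle.
 */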
static int q6usm_memory_map(phys_addr_t buf_add, int dir, uint32_t bufsz,
                uint32_t bufcnt, uint32_t session, uint32_t *mem_handle)
{
        struct usm_cmd_memory_map_region mem_region_map;
        int rc = 0;

        if (this_mmap.apr == NULL) {
                pr_err("%s: APR handle NULL\n", __func__);
                return -EINVAL;
        }

        q6usm_add_mmaphdr(&mem_region_map.hdr,
                        sizeof(struct usm_cmd_memory_map_region), true,
                        ((session << 8) | dir));

        mem_region_map.hdr.opcode = USM_CMD_SHARED_MEM_MAP_REGION;
        mem_region_map.mempool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;

        mem_region_map.num_regions = 1;
        mem_region_map.flags = 0;

        mem_region_map.shm_addr_lsw = lower_32_bits(buf_add);
        mem_region_map.shm_addr_msw =
                        msm_audio_populate_upper_32_bits(buf_add);
        mem_region_map.mem_size_bytes = bufsz * bufcnt;

        rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_region_map);
        if (rc < 0) {
                pr_err("%s: mem_map op[0x%x]rc[%d]\n",
                        __func__, mem_region_map.hdr.opcode, rc);
                rc = -EINVAL;
                goto fail_cmd;
        }

        rc = wait_event_timeout(this_mmap.cmd_wait,
                        (atomic_read(&this_mmap.cmd_state) == 0),
                        Q6USM_TIMEOUT_JIFFIES);
        if (!rc) {
                rc = -ETIME;
                pr_err("%s: timeout. waited for memory_map\n", __func__);
        } else {
                *mem_handle = this_mmap.mem_handle;
                rc = 0;
        }
fail_cmd:
        return rc;
}

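/*
 * Unmap a previously mapped shared-memory region on the common USM port
 * and wait for the aDSP response.
 */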
int q6usm_memory_unmap(phys_addr_t buf_add, int dir, uint32_t session,
                uint32_t mem_handle)
{
        struct usm_cmd_memory_unmap_region mem_unmap;
        int rc = 0;

        if (this_mmap.apr == NULL) {
                pr_err("%s: APR handle NULL\n", __func__);
                return -EINVAL;
        }

        q6usm_add_mmaphdr(&mem_unmap.hdr,
                        sizeof(struct usm_cmd_memory_unmap_region), true,
                        ((session << 8) | dir));
        mem_unmap.hdr.opcode = USM_CMD_SHARED_MEM_UNMAP_REGION;
        mem_unmap.mem_map_handle = mem_handle;

        rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_unmap);
        if (rc < 0) {
                pr_err("%s: mem_unmap op[0x%x] rc[%d]\n",
                        __func__, mem_unmap.hdr.opcode, rc);
                goto fail_cmd;
        }

        rc = wait_event_timeout(this_mmap.cmd_wait,
                        (atomic_read(&this_mmap.cmd_state) == 0),
                        Q6USM_TIMEOUT_JIFFIES);
        if (!rc) {
                rc = -ETIME;
                pr_err("%s: timeout. waited for memory_unmap\n", __func__);
        } else
                rc = 0;
fail_cmd:
        return rc;
}

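/*
 * Reserve a free slot in the session[] table under session_lock; the
 * returned id is the slot index plus one, since session id 0 is reserved.
 */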
static int q6usm_session_alloc(struct us_client *usc)
{
        int ind = 0;

        mutex_lock(&session_lock);
        for (ind = 0; ind < USM_SESSION_MAX; ++ind) {
                if (!session[ind]) {
                        session[ind] = usc;
                        mutex_unlock(&session_lock);
                        ++ind; /* session id: 0 reserved */
                        pr_debug("%s: session[%d] was allocated\n",
                                __func__, ind);
                        return ind;
                }
        }
        mutex_unlock(&session_lock);
        return -ENOMEM;
}

static void q6usm_session_free(struct us_client *usc)
{
        /* Session index was incremented during allocation */
        uint16_t ind = (uint16_t)usc->session - 1;

        pr_debug("%s: to free session[%d]\n", __func__, ind);
        if (ind < USM_SESSION_MAX) {
                mutex_lock(&session_lock);
                session[ind] = NULL;
                mutex_unlock(&session_lock);
        }
}

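/*
 * Release the data buffer of the given direction: unmap it from the aDSP,
 * free the ION allocation and reset the port bookkeeping.
 */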
static int q6usm_us_client_buf_free(unsigned int dir,
                struct us_client *usc)
{
        struct us_port_data *port;
        int rc = 0;

        if ((usc == NULL) ||
            ((dir != IN) && (dir != OUT)))
                return -EINVAL;

        mutex_lock(&usc->cmd_lock);
        port = &usc->port[dir];
        if (port == NULL) {
                mutex_unlock(&usc->cmd_lock);
                return -EINVAL;
        }

        if (port->data == NULL) {
                mutex_unlock(&usc->cmd_lock);
                return 0;
        }

        rc = q6usm_memory_unmap(port->phys, dir, usc->session,
                        *((uint32_t *)port->ext));
        pr_debug("%s: data[%pK]phys[%llx][%pK]\n", __func__,
                (void *)port->data, (u64)port->phys, (void *)&port->phys);

        msm_audio_ion_free(port->client, port->handle);

        port->data = NULL;
        port->phys = 0;
        port->buf_size = 0;
        port->buf_cnt = 0;
        port->client = NULL;
        port->handle = NULL;

        mutex_unlock(&usc->cmd_lock);
        return rc;
}

int q6usm_us_param_buf_free(unsigned int dir,
                struct us_client *usc)
{
        struct us_port_data *port;
        int rc = 0;

        if ((usc == NULL) ||
            ((dir != IN) && (dir != OUT)))
                return -EINVAL;

        mutex_lock(&usc->cmd_lock);
        port = &usc->port[dir];
        if (port == NULL) {
                mutex_unlock(&usc->cmd_lock);
                return -EINVAL;
        }

        if (port->param_buf == NULL) {
                mutex_unlock(&usc->cmd_lock);
                return 0;
        }

        rc = q6usm_memory_unmap(port->param_phys, dir, usc->session,
                        *((uint32_t *)port->param_buf_mem_handle));
        pr_debug("%s: data[%pK]phys[%llx][%pK]\n", __func__,
                (void *)port->param_buf, (u64)port->param_phys,
                (void *)&port->param_phys);

        msm_audio_ion_free(port->param_client, port->param_handle);

        port->param_buf = NULL;
        port->param_phys = 0;
        port->param_buf_size = 0;
        port->param_client = NULL;
        port->param_handle = NULL;

        mutex_unlock(&usc->cmd_lock);
        return rc;
}

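/*
 * Tear down a client: free its port buffers, release the session slot,
 * deregister the per-session APR handle and drop the common-port reference.
 */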
void q6usm_us_client_free(struct us_client *usc)
{
        int loopcnt = 0;
        struct us_port_data *port;
        uint32_t *p_mem_handle = NULL;

        if ((usc == NULL) ||
            !(usc->session))
                return;

        for (loopcnt = 0; loopcnt <= OUT; ++loopcnt) {
                port = &usc->port[loopcnt];
                if (port->data == NULL)
                        continue;
                pr_debug("%s: loopcnt = %d\n", __func__, loopcnt);
                q6usm_us_client_buf_free(loopcnt, usc);
                q6usm_us_param_buf_free(loopcnt, usc);
        }
        q6usm_session_free(usc);
        apr_deregister(usc->apr);

        pr_debug("%s: APR De-Register\n", __func__);

        if (atomic_read(&this_mmap.ref_cnt) <= 0) {
                pr_err("%s: APR Common Port Already Closed\n", __func__);
                goto done;
        }

        atomic_dec(&this_mmap.ref_cnt);
        if (atomic_read(&this_mmap.ref_cnt) == 0) {
                apr_deregister(this_mmap.apr);
                pr_debug("%s: APR De-Register common port\n", __func__);
        }

done:
        p_mem_handle = (uint32_t *)usc->port[IN].ext;
        kfree(p_mem_handle);
        kfree(usc);
        pr_debug("%s:\n", __func__);
}

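/*
 * Allocate and register a client: reserve a session, register the
 * per-session APR callback and, for the first client only, the common
 * memory-map port. The four words behind p_mem_handle back port->ext and
 * port->param_buf_mem_handle of both directions.
 */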
struct us_client *q6usm_us_client_alloc(
        void (*cb)(uint32_t, uint32_t, uint32_t *, void *),
        void *priv)
{
        struct us_client *usc;
        uint32_t *p_mem_handle = NULL;
        int n;
        int lcnt = 0;

        usc = kzalloc(sizeof(struct us_client), GFP_KERNEL);
        if (usc == NULL)
                return NULL;

        p_mem_handle = kzalloc(sizeof(uint32_t) * 4, GFP_KERNEL);
        if (p_mem_handle == NULL) {
                kfree(usc);
                return NULL;
        }

        n = q6usm_session_alloc(usc);
        if (n <= 0)
                goto fail_session;
        usc->session = n;
        usc->cb = cb;
        usc->priv = priv;
        usc->apr = apr_register("ADSP", "USM",
                        (apr_fn)q6usm_callback,
                        ((usc->session) << 8 | 0x0001),
                        usc);

        if (usc->apr == NULL) {
                pr_err("%s: Registration with APR failed\n", __func__);
                goto fail;
        }
        pr_debug("%s: Registering the common port with APR\n", __func__);
        if (atomic_read(&this_mmap.ref_cnt) == 0) {
                this_mmap.apr = apr_register("ADSP", "USM",
                                (apr_fn)q6usm_mmapcallback,
                                0x0FFFFFFFF, &this_mmap);
                if (this_mmap.apr == NULL) {
                        pr_err("%s: USM port registration failed\n",
                                __func__);
                        goto fail;
                }
        }

        atomic_inc(&this_mmap.ref_cnt);
        init_waitqueue_head(&usc->cmd_wait);
        mutex_init(&usc->cmd_lock);
        for (lcnt = 0; lcnt <= OUT; ++lcnt) {
                mutex_init(&usc->port[lcnt].lock);
                spin_lock_init(&usc->port[lcnt].dsp_lock);
                usc->port[lcnt].ext = (void *)p_mem_handle++;
                usc->port[lcnt].param_buf_mem_handle = (void *)p_mem_handle++;
                pr_err("%s: usc->port[%d].ext=%pK;\n",
                        __func__, lcnt, usc->port[lcnt].ext);
        }
        atomic_set(&usc->cmd_state, 0);

        return usc;
fail:
        kfree(p_mem_handle);
        q6usm_us_client_free(usc);
        return NULL;
fail_session:
        kfree(p_mem_handle);
        kfree(usc);
        return NULL;
}

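/*
 * Allocate the data buffer for one direction from ION (page aligned) and
 * map it to the aDSP; the resulting map handle is kept behind port->ext.
 */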
int q6usm_us_client_buf_alloc(unsigned int dir,
                struct us_client *usc,
                unsigned int bufsz,
                unsigned int bufcnt)
{
        int rc = 0;
        struct us_port_data *port = NULL;
        unsigned int size = bufsz*bufcnt;
        size_t len;

        if ((usc == NULL) ||
            ((dir != IN) && (dir != OUT)) || (size == 0) ||
            (usc->session <= 0 || usc->session > USM_SESSION_MAX)) {
                pr_err("%s: wrong parameters: size=%d; bufcnt=%d\n",
                        __func__, size, bufcnt);
                return -EINVAL;
        }

        mutex_lock(&usc->cmd_lock);

        port = &usc->port[dir];

        /* The size to allocate should be multiple of 4K bytes */
        size = PAGE_ALIGN(size);

        rc = msm_audio_ion_alloc("ultrasound_client",
                        &port->client, &port->handle,
                        size, &port->phys,
                        &len, &port->data);

        if (rc) {
                pr_err("%s: US ION allocation failed, rc = %d\n",
                        __func__, rc);
                mutex_unlock(&usc->cmd_lock);
                return -ENOMEM;
        }

        port->buf_cnt = bufcnt;
        port->buf_size = bufsz;
        pr_debug("%s: data[%pK]; phys[%llx]; [%pK]\n", __func__,
                (void *)port->data,
                (u64)port->phys,
                (void *)&port->phys);

        rc = q6usm_memory_map(port->phys, dir, size, 1, usc->session,
                        (uint32_t *)port->ext);
        if (rc < 0) {
                pr_err("%s: CMD Memory_map failed\n", __func__);
                mutex_unlock(&usc->cmd_lock);
                q6usm_us_client_buf_free(dir, usc);
                q6usm_us_param_buf_free(dir, usc);
        } else {
                mutex_unlock(&usc->cmd_lock);
                rc = 0;
        }

        return rc;
}

int q6usm_us_param_buf_alloc(unsigned int dir,
                struct us_client *usc,
                unsigned int bufsz)
{
        int rc = 0;
        struct us_port_data *port = NULL;
        unsigned int size = bufsz;
        size_t len;

        if ((usc == NULL) ||
            ((dir != IN) && (dir != OUT)) ||
            (usc->session <= 0 || usc->session > USM_SESSION_MAX)) {
                pr_err("%s: wrong parameters: direction=%d, bufsz=%d\n",
                        __func__, dir, bufsz);
                return -EINVAL;
        }

        mutex_lock(&usc->cmd_lock);

        port = &usc->port[dir];

        if (bufsz == 0) {
                pr_debug("%s: bufsz=0, get/set param commands are forbidden\n",
                        __func__);
                port->param_buf = NULL;
                mutex_unlock(&usc->cmd_lock);
                return rc;
        }

        /* The size to allocate should be multiple of 4K bytes */
        size = PAGE_ALIGN(size);

        rc = msm_audio_ion_alloc("ultrasound_client",
                        &port->param_client, &port->param_handle,
                        size, &port->param_phys,
                        &len, &port->param_buf);

        if (rc) {
                pr_err("%s: US ION allocation failed, rc = %d\n",
                        __func__, rc);
                mutex_unlock(&usc->cmd_lock);
                return -ENOMEM;
        }

        port->param_buf_size = bufsz;
        pr_debug("%s: param_buf[%pK]; param_phys[%llx]; [%pK]\n", __func__,
                (void *)port->param_buf,
                (u64)port->param_phys,
                (void *)&port->param_phys);

        rc = q6usm_memory_map(port->param_phys, (IN | OUT), size, 1,
                        usc->session, (uint32_t *)port->param_buf_mem_handle);
        if (rc < 0) {
                pr_err("%s: CMD Memory_map failed\n", __func__);
                mutex_unlock(&usc->cmd_lock);
                q6usm_us_client_buf_free(dir, usc);
                q6usm_us_param_buf_free(dir, usc);
        } else {
                mutex_unlock(&usc->cmd_lock);
                rc = 0;
        }

        return rc;
}

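/*
 * APR callback of the common memory-map port: latches the returned map
 * handle and completes pending map/unmap command waits.
 */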
static int32_t q6usm_mmapcallback(struct apr_client_data *data, void *priv)
{
        uint32_t token;
        uint32_t *payload = data->payload;

        pr_debug("%s: ptr0[0x%x]; ptr1[0x%x]; opcode[0x%x]\n",
                __func__, payload[0], payload[1], data->opcode);
        pr_debug("%s: token[0x%x]; payload_size[%d]; src[%d]; dest[%d];\n",
                __func__, data->token, data->payload_size,
                data->src_port, data->dest_port);

        if (data->opcode == APR_BASIC_RSP_RESULT) {
                /* status field check */
                if (payload[1]) {
                        pr_err("%s: wrong response[%d] on cmd [%d]\n",
                                __func__, payload[1], payload[0]);
                } else {
                        token = data->token;
                        switch (payload[0]) {
                        case USM_CMD_SHARED_MEM_UNMAP_REGION:
                                if (atomic_read(&this_mmap.cmd_state)) {
                                        atomic_set(&this_mmap.cmd_state, 0);
                                        wake_up(&this_mmap.cmd_wait);
                                }
                                /* fallthrough */
                        case USM_CMD_SHARED_MEM_MAP_REGION:
                                /* For MEM_MAP, an additional answer is awaited, */
                                /* therefore, no wake-up here */
                                pr_debug("%s: cmd[0x%x]; result[0x%x]\n",
                                        __func__, payload[0], payload[1]);
                                break;
                        default:
                                pr_debug("%s: wrong command[0x%x]\n",
                                        __func__, payload[0]);
                                break;
                        }
                }
        } else {
                if (data->opcode == USM_CMDRSP_SHARED_MEM_MAP_REGION) {
                        this_mmap.mem_handle = payload[0];
                        pr_debug("%s: memory map handle = 0x%x",
                                __func__, payload[0]);
                        if (atomic_read(&this_mmap.cmd_state)) {
                                atomic_set(&this_mmap.cmd_state, 0);
                                wake_up(&this_mmap.cmd_wait);
                        }
                }
        }
        return 0;
}

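/*
 * Per-session APR callback: completes command waits, updates the
 * read/write ring positions on READ_DONE/WRITE_DONE and forwards events
 * to the client callback.
 */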
static int32_t q6usm_callback(struct apr_client_data *data, void *priv)
{
        struct us_client *usc = (struct us_client *)priv;
        unsigned long dsp_flags;
        uint32_t *payload = data->payload;
        uint32_t token = data->token;
        uint32_t opcode = Q6USM_EVENT_UNDEF;

        if (usc == NULL) {
                pr_err("%s: client info is NULL\n", __func__);
                return -EINVAL;
        }

        if (data->opcode == APR_BASIC_RSP_RESULT) {
                /* status field check */
                if (payload[1]) {
                        pr_err("%s: wrong response[%d] on cmd [%d]\n",
                                __func__, payload[1], payload[0]);
                        if (usc->cb)
                                usc->cb(data->opcode, token,
                                        (uint32_t *)data->payload, usc->priv);
                } else {
                        switch (payload[0]) {
                        case USM_SESSION_CMD_RUN:
                        case USM_STREAM_CMD_CLOSE:
                                if (token != usc->session) {
                                        pr_err("%s: wrong token[%d]\n",
                                                __func__, token);
                                        break;
                                }
                                /* fallthrough */
                        case USM_STREAM_CMD_OPEN_READ:
                        case USM_STREAM_CMD_OPEN_WRITE:
                        case USM_STREAM_CMD_SET_ENC_PARAM:
                        case USM_DATA_CMD_MEDIA_FORMAT_UPDATE:
                        case USM_SESSION_CMD_SIGNAL_DETECT_MODE:
                        case USM_STREAM_CMD_SET_PARAM:
                        case USM_STREAM_CMD_GET_PARAM:
                                if (atomic_read(&usc->cmd_state)) {
                                        atomic_set(&usc->cmd_state, 0);
                                        wake_up(&usc->cmd_wait);
                                }
                                if (usc->cb)
                                        usc->cb(data->opcode, token,
                                                (uint32_t *)data->payload,
                                                usc->priv);
                                break;
                        default:
                                break;
                        }
                }
                return 0;
        }

        switch (data->opcode) {
        case RESET_EVENTS: {
                pr_err("%s: Reset event is received: %d %d\n",
                        __func__,
                        data->reset_event,
                        data->reset_proc);

                opcode = RESET_EVENTS;

                apr_reset(this_mmap.apr);
                this_mmap.apr = NULL;

                apr_reset(usc->apr);
                usc->apr = NULL;

                break;
        }

        case USM_DATA_EVENT_READ_DONE: {
                struct us_port_data *port = &usc->port[OUT];

                opcode = Q6USM_EVENT_READ_DONE;
                spin_lock_irqsave(&port->dsp_lock, dsp_flags);
                if (payload[READDONE_IDX_STATUS]) {
                        pr_err("%s: wrong READDONE[%d]; token[%d]\n",
                                __func__,
                                payload[READDONE_IDX_STATUS],
                                token);
                        token = USM_WRONG_TOKEN;
                        spin_unlock_irqrestore(&port->dsp_lock,
                                dsp_flags);
                        break;
                }

                if (port->expected_token != token) {
                        u32 cpu_buf = port->cpu_buf;

                        pr_err("%s: expected[%d] != token[%d]\n",
                                __func__, port->expected_token, token);
                        pr_debug("%s: dsp_buf=%d; cpu_buf=%d;\n",
                                __func__, port->dsp_buf, cpu_buf);

                        token = USM_WRONG_TOKEN;
                        /* To prevent continued data handling */
                        port->expected_token = USM_WRONG_TOKEN;
                        spin_unlock_irqrestore(&port->dsp_lock,
                                dsp_flags);
                        break;
                } /* port->expected_token != data->token */

                port->expected_token = token + 1;
                if (port->expected_token == port->buf_cnt)
                        port->expected_token = 0;

                /* gap support */
                if (port->expected_token != port->cpu_buf) {
                        port->dsp_buf = port->expected_token;
                        token = port->dsp_buf; /* for callback */
                } else
                        port->dsp_buf = token;

                spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
                break;
        } /* case USM_DATA_EVENT_READ_DONE */

        case USM_DATA_EVENT_WRITE_DONE: {
                struct us_port_data *port = &usc->port[IN];

                opcode = Q6USM_EVENT_WRITE_DONE;
                if (payload[WRITEDONE_IDX_STATUS]) {
                        pr_err("%s: wrong WRITEDONE_IDX_STATUS[%d]\n",
                                __func__,
                                payload[WRITEDONE_IDX_STATUS]);
                        break;
                }

                spin_lock_irqsave(&port->dsp_lock, dsp_flags);
                port->dsp_buf = token + 1;
                if (port->dsp_buf == port->buf_cnt)
                        port->dsp_buf = 0;
                spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);

                break;
        } /* case USM_DATA_EVENT_WRITE_DONE */

        case USM_SESSION_EVENT_SIGNAL_DETECT_RESULT: {
                pr_debug("%s: US detect result: result=%d",
                        __func__,
                        payload[0]);
                opcode = Q6USM_EVENT_SIGNAL_DETECT_RESULT;

                break;
        } /* case USM_SESSION_EVENT_SIGNAL_DETECT_RESULT */

        default:
                return 0;

        } /* switch */

        if (usc->cb)
                usc->cb(opcode, token,
                        data->payload, usc->priv);

        return 0;
}

uint32_t q6usm_get_virtual_address(int dir,
                struct us_client *usc,
                struct vm_area_struct *vms)
{
        uint32_t ret = 0xffffffff;

        if (vms && (usc != NULL) && ((dir == IN) || (dir == OUT))) {
                struct us_port_data *port = &usc->port[dir];
                int size = PAGE_ALIGN(port->buf_size * port->buf_cnt);
                struct audio_buffer ab;

                ab.phys = port->phys;
                ab.data = port->data;
                ab.used = 1;
                ab.size = size;
                ab.actual_size = size;
                ab.handle = port->handle;
                ab.client = port->client;

                ret = msm_audio_ion_mmap(&ab, vms);
        }
        return ret;
}

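/*
 * Fill a per-session APR header; for commands the session id is used as
 * the token and cmd_state is armed for the completion wait.
 */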
static void q6usm_add_hdr(struct us_client *usc, struct apr_hdr *hdr,
                uint32_t pkt_size, bool cmd_flg)
{
        mutex_lock(&usc->cmd_lock);
        hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
                        APR_HDR_LEN(sizeof(struct apr_hdr)),
                        APR_PKT_VER);
        hdr->src_svc = ((struct apr_svc *)usc->apr)->id;
        hdr->src_domain = APR_DOMAIN_APPS;
        hdr->dest_svc = APR_SVC_USM;
        hdr->dest_domain = APR_DOMAIN_ADSP;
        hdr->src_port = (usc->session << 8) | 0x0001;
        hdr->dest_port = (usc->session << 8) | 0x0001;
        if (cmd_flg) {
                hdr->token = usc->session;
                atomic_set(&usc->cmd_state, 1);
        }
        hdr->pkt_size = pkt_size;
        mutex_unlock(&usc->cmd_lock);
}

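/* Translate the exported format id into the corresponding aDSP media format id. */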
static uint32_t q6usm_ext2int_format(uint32_t ext_format)
{
        uint32_t int_format = INVALID_FORMAT;

        switch (ext_format) {
        case FORMAT_USPS_EPOS:
                int_format = US_POINT_EPOS_FORMAT_V2;
                break;
        case FORMAT_USRAW:
                int_format = US_RAW_FORMAT_V2;
                break;
        case FORMAT_USPROX:
                int_format = US_PROX_FORMAT_V4;
                break;
        case FORMAT_USGES_SYNC:
                int_format = US_GES_SYNC_FORMAT;
                break;
        case FORMAT_USRAW_SYNC:
                int_format = US_RAW_SYNC_FORMAT;
                break;
        default:
                pr_err("%s: Invalid format[%d]\n", __func__, ext_format);
                break;
        }

        return int_format;
}

int q6usm_open_read(struct us_client *usc,
                uint32_t format)
{
        uint32_t int_format = INVALID_FORMAT;
        int rc = 0x00;
        struct usm_stream_cmd_open_read open;

        if ((usc == NULL) || (usc->apr == NULL)) {
                pr_err("%s: client or its apr is NULL\n", __func__);
                return -EINVAL;
        }

        pr_debug("%s: session[%d]", __func__, usc->session);

        q6usm_add_hdr(usc, &open.hdr, sizeof(open), true);
        open.hdr.opcode = USM_STREAM_CMD_OPEN_READ;
        open.src_endpoint = 0; /* AFE */
        open.pre_proc_top = 0; /* No preprocessing required */

        int_format = q6usm_ext2int_format(format);
        if (int_format == INVALID_FORMAT)
                return -EINVAL;

        open.uMode = STREAM_PRIORITY_NORMAL;
        open.format = int_format;

        rc = apr_send_pkt(usc->apr, (uint32_t *) &open);
        if (rc < 0) {
                pr_err("%s: open failed op[0x%x]rc[%d]\n",
                        __func__, open.hdr.opcode, rc);
                goto fail_cmd;
        }
        rc = wait_event_timeout(usc->cmd_wait,
                        (atomic_read(&usc->cmd_state) == 0),
                        Q6USM_TIMEOUT_JIFFIES);
        if (!rc) {
                rc = -ETIME;
                pr_err("%s: timeout, waited for OPEN_READ rc[%d]\n",
                        __func__, rc);
                goto fail_cmd;
        } else
                rc = 0;
fail_cmd:
        return rc;
}

int q6usm_enc_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg)
{
        uint32_t int_format = INVALID_FORMAT;
        struct usm_stream_cmd_encdec_cfg_blk enc_cfg_obj;
        struct usm_stream_cmd_encdec_cfg_blk *enc_cfg = &enc_cfg_obj;
        int rc = 0;
        uint32_t total_cfg_size =
                sizeof(struct usm_stream_cmd_encdec_cfg_blk);
        uint32_t round_params_size = 0;
        uint8_t is_allocated = 0;

        if ((usc == NULL) || (us_cfg == NULL)) {
                pr_err("%s: wrong input", __func__);
                return -EINVAL;
        }

        int_format = q6usm_ext2int_format(us_cfg->format_id);
        if (int_format == INVALID_FORMAT) {
                pr_err("%s: wrong input format[%d]",
                        __func__, us_cfg->format_id);
                return -EINVAL;
        }

        /* Transparent configuration data is after enc_cfg */
        /* Integer number of u32s is required */
        round_params_size = ((us_cfg->params_size + 3)/4) * 4;
        if (round_params_size > USM_MAX_CFG_DATA_SIZE) {
                /* A dynamically allocated encdec_cfg_blk is required */
                /* in addition to the static part */
                round_params_size -= USM_MAX_CFG_DATA_SIZE;
                total_cfg_size += round_params_size;
                enc_cfg = kzalloc(total_cfg_size, GFP_KERNEL);
                if (enc_cfg == NULL) {
                        pr_err("%s: enc_cfg[%d] allocation failed\n",
                                __func__, total_cfg_size);
                        return -ENOMEM;
                }
                is_allocated = 1;
        } else
                round_params_size = 0;

        q6usm_add_hdr(usc, &enc_cfg->hdr, total_cfg_size, true);

        enc_cfg->hdr.opcode = USM_STREAM_CMD_SET_ENC_PARAM;
        enc_cfg->param_id = USM_PARAM_ID_ENCDEC_ENC_CFG_BLK;
        enc_cfg->param_size = sizeof(struct usm_encode_cfg_blk) +
                        round_params_size;
        enc_cfg->enc_blk.frames_per_buf = 1;
        enc_cfg->enc_blk.format_id = int_format;
        enc_cfg->enc_blk.cfg_size = sizeof(struct usm_cfg_common) +
                        USM_MAX_CFG_DATA_SIZE +
                        round_params_size;
        memcpy(&(enc_cfg->enc_blk.cfg_common), &(us_cfg->cfg_common),
                sizeof(struct usm_cfg_common));

        /* Transparent data copy */
        memcpy(enc_cfg->enc_blk.transp_data, us_cfg->params,
                us_cfg->params_size);
        pr_debug("%s: cfg_size[%d], params_size[%d]\n",
                __func__,
                enc_cfg->enc_blk.cfg_size,
                us_cfg->params_size);
        pr_debug("%s: params[%d,%d,%d,%d, %d,%d,%d,%d]\n",
                __func__,
                enc_cfg->enc_blk.transp_data[0],
                enc_cfg->enc_blk.transp_data[1],
                enc_cfg->enc_blk.transp_data[2],
                enc_cfg->enc_blk.transp_data[3],
                enc_cfg->enc_blk.transp_data[4],
                enc_cfg->enc_blk.transp_data[5],
                enc_cfg->enc_blk.transp_data[6],
                enc_cfg->enc_blk.transp_data[7]
                );
        pr_debug("%s: srate:%d, ch=%d, bps= %d;\n",
                __func__, enc_cfg->enc_blk.cfg_common.sample_rate,
                enc_cfg->enc_blk.cfg_common.ch_cfg,
                enc_cfg->enc_blk.cfg_common.bits_per_sample);
        pr_debug("dmap:[0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x]; dev_id=0x%x\n",
                enc_cfg->enc_blk.cfg_common.data_map[0],
                enc_cfg->enc_blk.cfg_common.data_map[1],
                enc_cfg->enc_blk.cfg_common.data_map[2],
                enc_cfg->enc_blk.cfg_common.data_map[3],
                enc_cfg->enc_blk.cfg_common.data_map[4],
                enc_cfg->enc_blk.cfg_common.data_map[5],
                enc_cfg->enc_blk.cfg_common.data_map[6],
                enc_cfg->enc_blk.cfg_common.data_map[7],
                enc_cfg->enc_blk.cfg_common.dev_id);

        rc = apr_send_pkt(usc->apr, (uint32_t *) enc_cfg);
        if (rc < 0) {
                pr_err("%s: Command open failed\n", __func__);
                rc = -EINVAL;
                goto fail_cmd;
        }
        rc = wait_event_timeout(usc->cmd_wait,
                        (atomic_read(&usc->cmd_state) == 0),
                        Q6USM_TIMEOUT_JIFFIES);
        if (!rc) {
                rc = -ETIME;
                pr_err("%s: timeout opcode[0x%x]\n",
                        __func__, enc_cfg->hdr.opcode);
        } else
                rc = 0;

fail_cmd:
        if (is_allocated == 1)
                kfree(enc_cfg);

        return rc;
}

int q6usm_dec_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg)
{
        uint32_t int_format = INVALID_FORMAT;
        struct usm_stream_media_format_update dec_cfg_obj;
        struct usm_stream_media_format_update *dec_cfg = &dec_cfg_obj;

        int rc = 0;
        uint32_t total_cfg_size = sizeof(struct usm_stream_media_format_update);
        uint32_t round_params_size = 0;
        uint8_t is_allocated = 0;

        if ((usc == NULL) || (us_cfg == NULL)) {
                pr_err("%s: wrong input", __func__);
                return -EINVAL;
        }

        int_format = q6usm_ext2int_format(us_cfg->format_id);
        if (int_format == INVALID_FORMAT) {
                pr_err("%s: wrong input format[%d]",
                        __func__, us_cfg->format_id);
                return -EINVAL;
        }

        /* Transparent configuration data is after dec_cfg */
        /* Integer number of u32s is required */
        round_params_size = ((us_cfg->params_size + 3)/4) * 4;
        if (round_params_size > USM_MAX_CFG_DATA_SIZE) {
                /* A dynamically allocated encdec_cfg_blk is required */
                /* in addition to the static part */
                round_params_size -= USM_MAX_CFG_DATA_SIZE;
                total_cfg_size += round_params_size;
                dec_cfg = kzalloc(total_cfg_size, GFP_KERNEL);
                if (dec_cfg == NULL) {
                        pr_err("%s: dec_cfg[%d] allocation failed\n",
                                __func__, total_cfg_size);
                        return -ENOMEM;
                }
                is_allocated = 1;
        } else { /* static transp_data is enough */
                round_params_size = 0;
        }

        q6usm_add_hdr(usc, &dec_cfg->hdr, total_cfg_size, true);

        dec_cfg->hdr.opcode = USM_DATA_CMD_MEDIA_FORMAT_UPDATE;
        dec_cfg->format_id = int_format;
        dec_cfg->cfg_size = sizeof(struct usm_cfg_common) +
                        USM_MAX_CFG_DATA_SIZE +
                        round_params_size;
        memcpy(&(dec_cfg->cfg_common), &(us_cfg->cfg_common),
                sizeof(struct usm_cfg_common));
        /* Transparent data copy */
        memcpy(dec_cfg->transp_data, us_cfg->params, us_cfg->params_size);
        pr_debug("%s: cfg_size[%d], params_size[%d]; parambytes[%d,%d,%d,%d]\n",
                __func__,
                dec_cfg->cfg_size,
                us_cfg->params_size,
                dec_cfg->transp_data[0],
                dec_cfg->transp_data[1],
                dec_cfg->transp_data[2],
                dec_cfg->transp_data[3]
                );

        rc = apr_send_pkt(usc->apr, (uint32_t *) dec_cfg);
        if (rc < 0) {
                pr_err("%s: Command open failed\n", __func__);
                rc = -EINVAL;
                goto fail_cmd;
        }
        rc = wait_event_timeout(usc->cmd_wait,
                        (atomic_read(&usc->cmd_state) == 0),
                        Q6USM_TIMEOUT_JIFFIES);
        if (!rc) {
                rc = -ETIME;
                pr_err("%s: timeout opcode[0x%x]\n",
                        __func__, dec_cfg->hdr.opcode);
        } else
                rc = 0;

fail_cmd:
        if (is_allocated == 1)
                kfree(dec_cfg);

        return rc;
}

int q6usm_open_write(struct us_client *usc,
                uint32_t format)
{
        int rc = 0;
        uint32_t int_format = INVALID_FORMAT;
        struct usm_stream_cmd_open_write open;

        if ((usc == NULL) || (usc->apr == NULL)) {
                pr_err("%s: APR handle NULL\n", __func__);
                return -EINVAL;
        }

        pr_debug("%s: session[%d]", __func__, usc->session);

        q6usm_add_hdr(usc, &open.hdr, sizeof(open), true);
        open.hdr.opcode = USM_STREAM_CMD_OPEN_WRITE;

        int_format = q6usm_ext2int_format(format);
        if (int_format == INVALID_FORMAT) {
                pr_err("%s: wrong format[%d]", __func__, format);
                return -EINVAL;
        }

        open.format = int_format;

        rc = apr_send_pkt(usc->apr, (uint32_t *) &open);
        if (rc < 0) {
                pr_err("%s: open failed op[0x%x]rc[%d]\n",
                        __func__, open.hdr.opcode, rc);
                goto fail_cmd;
        }
        rc = wait_event_timeout(usc->cmd_wait,
                        (atomic_read(&usc->cmd_state) == 0),
                        Q6USM_TIMEOUT_JIFFIES);
        if (!rc) {
                rc = -ETIME;
                pr_err("%s: timeout. waited for OPEN_WRITE rc[%d]\n",
                        __func__, rc);
                goto fail_cmd;
        } else
                rc = 0;

fail_cmd:
        return rc;
}

int q6usm_run(struct us_client *usc, uint32_t flags,
                uint32_t msw_ts, uint32_t lsw_ts)
{
        struct usm_stream_cmd_run run;
        int rc = 0;

        if ((usc == NULL) || (usc->apr == NULL)) {
                pr_err("%s: APR handle NULL\n", __func__);
                return -EINVAL;
        }
        q6usm_add_hdr(usc, &run.hdr, sizeof(run), true);

        run.hdr.opcode = USM_SESSION_CMD_RUN;
        run.flags = flags;
        run.msw_ts = msw_ts;
        run.lsw_ts = lsw_ts;

        rc = apr_send_pkt(usc->apr, (uint32_t *) &run);
        if (rc < 0) {
                pr_err("%s: Command run failed[%d]\n", __func__, rc);
                goto fail_cmd;
        }

        rc = wait_event_timeout(usc->cmd_wait,
                        (atomic_read(&usc->cmd_state) == 0),
                        Q6USM_TIMEOUT_JIFFIES);
        if (!rc) {
                rc = -ETIME;
                pr_err("%s: timeout. waited for run success rc[%d]\n",
                        __func__, rc);
        } else
                rc = 0;

fail_cmd:
        return rc;
}

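/*
 * Queue one READ command per free buffer between cpu_buf and read_ind,
 * wrapping around the circular buffer; cpu_buf is restored if a send fails.
 */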
int q6usm_read(struct us_client *usc, uint32_t read_ind)
{
        struct usm_stream_cmd_read read;
        struct us_port_data *port = NULL;
        int rc = 0;
        u32 read_counter = 0;
        u32 loop_ind = 0;
        u64 buf_addr = 0;

        if ((usc == NULL) || (usc->apr == NULL)) {
                pr_err("%s: APR handle NULL\n", __func__);
                return -EINVAL;
        }
        port = &usc->port[OUT];

        if (read_ind > port->buf_cnt) {
                pr_err("%s: wrong read_ind[%d]\n",
                        __func__, read_ind);
                return -EINVAL;
        }
        if (read_ind == port->cpu_buf) {
                pr_err("%s: no free region\n", __func__);
                return 0;
        }

        if (read_ind > port->cpu_buf) { /* 1 range */
                read_counter = read_ind - port->cpu_buf;
        } else { /* 2 ranges */
                read_counter = (port->buf_cnt - port->cpu_buf) + read_ind;
        }

        q6usm_add_hdr(usc, &read.hdr, sizeof(read), false);

        read.hdr.opcode = USM_DATA_CMD_READ;
        read.buf_size = port->buf_size;
        buf_addr = (u64)(port->phys) + port->buf_size * (port->cpu_buf);
        read.buf_addr_lsw = lower_32_bits(buf_addr);
        read.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr);
        read.mem_map_handle = *((uint32_t *)(port->ext));

        for (loop_ind = 0; loop_ind < read_counter; ++loop_ind) {
                u32 temp_cpu_buf = port->cpu_buf;

                buf_addr = (u64)(port->phys) +
                                port->buf_size * (port->cpu_buf);
                read.buf_addr_lsw = lower_32_bits(buf_addr);
                read.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr);
                read.seq_id = port->cpu_buf;
                read.hdr.token = port->cpu_buf;
                read.counter = 1;

                ++(port->cpu_buf);
                if (port->cpu_buf == port->buf_cnt)
                        port->cpu_buf = 0;

                rc = apr_send_pkt(usc->apr, (uint32_t *) &read);

                if (rc < 0) {
                        port->cpu_buf = temp_cpu_buf;

                        pr_err("%s:read op[0x%x]rc[%d]\n",
                                __func__, read.hdr.opcode, rc);
                        break;
                }

                rc = 0;
        } /* bufs loop */

        return rc;
}

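/*
 * Queue WRITE commands from cpu_buf up to write_ind after checking that
 * the requested range lies inside the free region bounded by dsp_buf;
 * cpu_buf is restored if a send fails.
 */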
int q6usm_write(struct us_client *usc, uint32_t write_ind)
{
        int rc = 0;
        struct usm_stream_cmd_write cmd_write;
        struct us_port_data *port = NULL;
        u32 current_dsp_buf = 0;
        u64 buf_addr = 0;

        if ((usc == NULL) || (usc->apr == NULL)) {
                pr_err("%s: APR handle NULL\n", __func__);
                return -EINVAL;
        }
        port = &usc->port[IN];

        current_dsp_buf = port->dsp_buf;
        /* free region, caused by new dsp_buf report from DSP, */
        /* can be only extended */
        if (port->cpu_buf >= current_dsp_buf) {
                /* 2-part free region, including empty buffer */
                if ((write_ind <= port->cpu_buf) &&
                    (write_ind > current_dsp_buf)) {
                        pr_err("%s: wrong w_ind[%d]; d_buf=%d; c_buf=%d\n",
                                __func__, write_ind,
                                current_dsp_buf, port->cpu_buf);
                        return -EINVAL;
                }
        } else {
                /* 1-part free region */
                if ((write_ind <= port->cpu_buf) ||
                    (write_ind > current_dsp_buf)) {
                        pr_err("%s: wrong w_ind[%d]; d_buf=%d; c_buf=%d\n",
                                __func__, write_ind,
                                current_dsp_buf, port->cpu_buf);
                        return -EINVAL;
                }
        }

        q6usm_add_hdr(usc, &cmd_write.hdr, sizeof(cmd_write), false);

        cmd_write.hdr.opcode = USM_DATA_CMD_WRITE;
        cmd_write.buf_size = port->buf_size;
        buf_addr = (u64)(port->phys) + port->buf_size * (port->cpu_buf);
        cmd_write.buf_addr_lsw = lower_32_bits(buf_addr);
        cmd_write.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr);
        cmd_write.mem_map_handle = *((uint32_t *)(port->ext));
        cmd_write.res0 = 0;
        cmd_write.res1 = 0;
        cmd_write.res2 = 0;

        while (port->cpu_buf != write_ind) {
                u32 temp_cpu_buf = port->cpu_buf;

                buf_addr = (u64)(port->phys) +
                                port->buf_size * (port->cpu_buf);
                cmd_write.buf_addr_lsw = lower_32_bits(buf_addr);
                cmd_write.buf_addr_msw =
                                msm_audio_populate_upper_32_bits(buf_addr);
                cmd_write.seq_id = port->cpu_buf;
                cmd_write.hdr.token = port->cpu_buf;

                ++(port->cpu_buf);
                if (port->cpu_buf == port->buf_cnt)
                        port->cpu_buf = 0;

                rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_write);

                if (rc < 0) {
                        port->cpu_buf = temp_cpu_buf;
                        pr_err("%s:write op[0x%x];rc[%d];cpu_buf[%d]\n",
                                __func__, cmd_write.hdr.opcode,
                                rc, port->cpu_buf);
                        break;
                }

                rc = 0;
        }

        return rc;
}

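/*
 * The write ring is considered full when advancing cpu_buf by one would
 * reach dsp_buf, which is also reported back as the free region boundary.
 */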
bool q6usm_is_write_buf_full(struct us_client *usc, uint32_t *free_region)
{
        struct us_port_data *port = NULL;
        u32 cpu_buf = 0;

        if ((usc == NULL) || !free_region) {
                pr_err("%s: input data wrong\n", __func__);
                return false;
        }
        port = &usc->port[IN];
        cpu_buf = port->cpu_buf + 1;
        if (cpu_buf == port->buf_cnt)
                cpu_buf = 0;

        *free_region = port->dsp_buf;

        return cpu_buf == *free_region;
}

int q6usm_cmd(struct us_client *usc, int cmd)
{
        struct apr_hdr hdr;
        int rc = 0;
        atomic_t *state;

        if ((usc == NULL) || (usc->apr == NULL)) {
                pr_err("%s: APR handle NULL\n", __func__);
                return -EINVAL;
        }
        q6usm_add_hdr(usc, &hdr, sizeof(hdr), true);
        switch (cmd) {
        case CMD_CLOSE:
                hdr.opcode = USM_STREAM_CMD_CLOSE;
                state = &usc->cmd_state;
                break;

        default:
                pr_err("%s: Invalid command[%d]\n", __func__, cmd);
                goto fail_cmd;
        }

        rc = apr_send_pkt(usc->apr, (uint32_t *) &hdr);
        if (rc < 0) {
                pr_err("%s: Command 0x%x failed\n", __func__, hdr.opcode);
                goto fail_cmd;
        }
        rc = wait_event_timeout(usc->cmd_wait, (atomic_read(state) == 0),
                        Q6USM_TIMEOUT_JIFFIES);
        if (!rc) {
                rc = -ETIME;
                pr_err("%s: timeout. waited for response opcode[0x%x]\n",
                        __func__, hdr.opcode);
        } else
                rc = 0;
fail_cmd:
        return rc;
}

int q6usm_set_us_detection(struct us_client *usc,
                struct usm_session_cmd_detect_info *detect_info,
                uint16_t detect_info_size)
{
        int rc = 0;

        if ((usc == NULL) ||
            (detect_info_size == 0) ||
            (detect_info == NULL)) {
                pr_err("%s: wrong input: usc=0x%pK, inf_size=%d; info=0x%pK",
                        __func__,
                        usc,
                        detect_info_size,
                        detect_info);
                return -EINVAL;
        }

        q6usm_add_hdr(usc, &detect_info->hdr, detect_info_size, true);

        detect_info->hdr.opcode = USM_SESSION_CMD_SIGNAL_DETECT_MODE;

        rc = apr_send_pkt(usc->apr, (uint32_t *)detect_info);
        if (rc < 0) {
                pr_err("%s: Command signal detect failed\n", __func__);
                return -EINVAL;
        }
        rc = wait_event_timeout(usc->cmd_wait,
                        (atomic_read(&usc->cmd_state) == 0),
                        Q6USM_TIMEOUT_JIFFIES);
        if (!rc) {
                rc = -ETIME;
                pr_err("%s: CMD_SIGNAL_DETECT_MODE: timeout=%d\n",
                        __func__, Q6USM_TIMEOUT_JIFFIES);
        } else
                rc = 0;

        return rc;
}

int q6usm_set_us_stream_param(int dir, struct us_client *usc,
                uint32_t module_id, uint32_t param_id, uint32_t buf_size)
{
        int rc = 0;
        struct usm_stream_cmd_set_param cmd_set_param;
        struct us_port_data *port = NULL;

        if ((usc == NULL) || (usc->apr == NULL)) {
                pr_err("%s: APR handle NULL\n", __func__);
                return -EINVAL;
        }
        port = &usc->port[dir];

        q6usm_add_hdr(usc, &cmd_set_param.hdr, sizeof(cmd_set_param), true);

        cmd_set_param.hdr.opcode = USM_STREAM_CMD_SET_PARAM;
        cmd_set_param.buf_size = buf_size;
        cmd_set_param.buf_addr_msw =
                        msm_audio_populate_upper_32_bits(port->param_phys);
        cmd_set_param.buf_addr_lsw = lower_32_bits(port->param_phys);
        cmd_set_param.mem_map_handle =
                        *((uint32_t *)(port->param_buf_mem_handle));
        cmd_set_param.module_id = module_id;
        cmd_set_param.param_id = param_id;
        cmd_set_param.hdr.token = 0;

        rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_set_param);

        if (rc < 0) {
                pr_err("%s:write op[0x%x];rc[%d]\n",
                        __func__, cmd_set_param.hdr.opcode, rc);
        }

        rc = wait_event_timeout(usc->cmd_wait,
                        (atomic_read(&usc->cmd_state) == 0),
                        Q6USM_TIMEOUT_JIFFIES);
        if (!rc) {
                rc = -ETIME;
                pr_err("%s: CMD_SET_PARAM: timeout=%d\n",
                        __func__, Q6USM_TIMEOUT_JIFFIES);
        } else
                rc = 0;

        return rc;
}

int q6usm_get_us_stream_param(int dir, struct us_client *usc,
                uint32_t module_id, uint32_t param_id, uint32_t buf_size)
{
        int rc = 0;
        struct usm_stream_cmd_get_param cmd_get_param;
        struct us_port_data *port = NULL;

        if ((usc == NULL) || (usc->apr == NULL)) {
                pr_err("%s: APR handle NULL\n", __func__);
                return -EINVAL;
        }
        port = &usc->port[dir];

        q6usm_add_hdr(usc, &cmd_get_param.hdr, sizeof(cmd_get_param), true);

        cmd_get_param.hdr.opcode = USM_STREAM_CMD_GET_PARAM;
        cmd_get_param.buf_size = buf_size;
        cmd_get_param.buf_addr_msw =
                        msm_audio_populate_upper_32_bits(port->param_phys);
        cmd_get_param.buf_addr_lsw = lower_32_bits(port->param_phys);
        cmd_get_param.mem_map_handle =
                        *((uint32_t *)(port->param_buf_mem_handle));
        cmd_get_param.module_id = module_id;
        cmd_get_param.param_id = param_id;
        cmd_get_param.hdr.token = 0;

        rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_get_param);

        if (rc < 0) {
                pr_err("%s:read op[0x%x];rc[%d]\n",
                        __func__, cmd_get_param.hdr.opcode, rc);
        }

        rc = wait_event_timeout(usc->cmd_wait,
                        (atomic_read(&usc->cmd_state) == 0),
                        Q6USM_TIMEOUT_JIFFIES);
        if (!rc) {
                rc = -ETIME;
                pr_err("%s: CMD_GET_PARAM: timeout=%d\n",
                        __func__, Q6USM_TIMEOUT_JIFFIES);
        } else
                rc = 0;

        return rc;
}

int __init q6usm_init(void)
{
        pr_debug("%s\n", __func__);
        init_waitqueue_head(&this_mmap.cmd_wait);
        memset(session, 0, sizeof(session));
        return 0;
}