// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
 */
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <dsp/msm_audio_ion.h>
#include <dsp/apr_audio-v2.h>
#include <ipc/apr_us.h>
#include "q6usm.h"

#define ADSP_MEMORY_MAP_SHMEM8_4K_POOL 3

#define MEM_4K_OFFSET 4095
#define MEM_4K_MASK 0xfffff000

#define USM_SESSION_MAX 0x02 /* aDSP:USM limit */

#define READDONE_IDX_STATUS 0

#define WRITEDONE_IDX_STATUS 0

/* Standard timeout in the asynchronous ops */
#define Q6USM_TIMEOUT_JIFFIES	(1*HZ) /* 1 sec */

static DEFINE_MUTEX(session_lock);

static struct us_client *session[USM_SESSION_MAX];
static int32_t q6usm_mmapcallback(struct apr_client_data *data, void *priv);
static int32_t q6usm_callback(struct apr_client_data *data, void *priv);
static void q6usm_add_hdr(struct us_client *usc, struct apr_hdr *hdr,
			  uint32_t pkt_size, bool cmd_flg);

struct usm_mmap {
	atomic_t ref_cnt;
	atomic_t cmd_state;
	wait_queue_head_t cmd_wait;
	void *apr;
	int mem_handle;
};

static struct usm_mmap this_mmap;

static void q6usm_add_mmaphdr(struct apr_hdr *hdr,
			      uint32_t pkt_size, bool cmd_flg, u32 token)
{
	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				       APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	hdr->src_port = 0;
	hdr->dest_port = 0;
	if (cmd_flg) {
		hdr->token = token;
		atomic_set(&this_mmap.cmd_state, 1);
	}
	hdr->pkt_size = pkt_size;
}

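/*
 * Map a physical buffer region to the aDSP over the common (mmap) APR port.
 * The session and direction are encoded into the header token; on success
 * the handle returned by the aDSP is stored in *mem_handle.
 */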
static int q6usm_memory_map(phys_addr_t buf_add, int dir, uint32_t bufsz,
			    uint32_t bufcnt, uint32_t session,
			    uint32_t *mem_handle)
{
	struct usm_cmd_memory_map_region mem_region_map;
	int rc = 0;

	if (this_mmap.apr == NULL) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}

	q6usm_add_mmaphdr(&mem_region_map.hdr,
			  sizeof(struct usm_cmd_memory_map_region), true,
			  ((session << 8) | dir));

	mem_region_map.hdr.opcode = USM_CMD_SHARED_MEM_MAP_REGION;
	mem_region_map.mempool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;

	mem_region_map.num_regions = 1;
	mem_region_map.flags = 0;

	mem_region_map.shm_addr_lsw = lower_32_bits(buf_add);
	mem_region_map.shm_addr_msw =
			msm_audio_populate_upper_32_bits(buf_add);
	mem_region_map.mem_size_bytes = bufsz * bufcnt;

	rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_region_map);
	if (rc < 0) {
		pr_err("%s: mem_map op[0x%x]rc[%d]\n",
		       __func__, mem_region_map.hdr.opcode, rc);
		rc = -EINVAL;
		goto fail_cmd;
	}

	rc = wait_event_timeout(this_mmap.cmd_wait,
				(atomic_read(&this_mmap.cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout. waited for memory_map\n", __func__);
	} else {
		*mem_handle = this_mmap.mem_handle;
		rc = 0;
	}
fail_cmd:
	return rc;
}

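/*
 * Unmap a region previously mapped with q6usm_memory_map(), identified by
 * the aDSP memory-map handle, and wait for the aDSP acknowledgement.
 */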
int q6usm_memory_unmap(phys_addr_t buf_add, int dir, uint32_t session,
		       uint32_t mem_handle)
{
	struct usm_cmd_memory_unmap_region mem_unmap;
	int rc = 0;

	if (this_mmap.apr == NULL) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}

	q6usm_add_mmaphdr(&mem_unmap.hdr,
			  sizeof(struct usm_cmd_memory_unmap_region), true,
			  ((session << 8) | dir));
	mem_unmap.hdr.opcode = USM_CMD_SHARED_MEM_UNMAP_REGION;
	mem_unmap.mem_map_handle = mem_handle;

	rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_unmap);
	if (rc < 0) {
		pr_err("%s: mem_unmap op[0x%x] rc[%d]\n",
		       __func__, mem_unmap.hdr.opcode, rc);
		goto fail_cmd;
	}

	rc = wait_event_timeout(this_mmap.cmd_wait,
				(atomic_read(&this_mmap.cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout. waited for memory_unmap\n", __func__);
	} else
		rc = 0;
fail_cmd:
	return rc;
}

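/*
 * Allocate a free slot in the session[] table under session_lock and return
 * a 1-based session id (id 0 is reserved by the aDSP), or -ENOMEM if all
 * USM_SESSION_MAX sessions are in use.
 */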
static int q6usm_session_alloc(struct us_client *usc)
{
	int ind = 0;

	mutex_lock(&session_lock);
	for (ind = 0; ind < USM_SESSION_MAX; ++ind) {
		if (!session[ind]) {
			session[ind] = usc;
			mutex_unlock(&session_lock);
			++ind; /* session id: 0 reserved */
			pr_debug("%s: session[%d] was allocated\n",
				 __func__, ind);
			return ind;
		}
	}
	mutex_unlock(&session_lock);
	return -ENOMEM;
}

static void q6usm_session_free(struct us_client *usc)
{
	/* Session index was incremented during allocation */
	uint16_t ind = (uint16_t)usc->session - 1;

	pr_debug("%s: to free session[%d]\n", __func__, ind);
	if (ind < USM_SESSION_MAX) {
		mutex_lock(&session_lock);
		session[ind] = NULL;
		mutex_unlock(&session_lock);
	}
}

static int q6usm_us_client_buf_free(unsigned int dir,
				    struct us_client *usc)
{
	struct us_port_data *port;
	int rc = 0;

	if ((usc == NULL) ||
	    ((dir != IN) && (dir != OUT)))
		return -EINVAL;

	mutex_lock(&usc->cmd_lock);
	port = &usc->port[dir];
	if (port == NULL) {
		mutex_unlock(&usc->cmd_lock);
		return -EINVAL;
	}

	if (port->data == NULL) {
		mutex_unlock(&usc->cmd_lock);
		return 0;
	}

	rc = q6usm_memory_unmap(port->phys, dir, usc->session,
				*((uint32_t *)port->ext));
	pr_debug("%s: data[%pK]phys[%llx][%pK]\n", __func__,
		 (void *)port->data, (u64)port->phys, (void *)&port->phys);

	msm_audio_ion_free(port->dma_buf);

	port->data = NULL;
	port->phys = 0;
	port->buf_size = 0;
	port->buf_cnt = 0;
	port->dma_buf = NULL;

	mutex_unlock(&usc->cmd_lock);
	return rc;
}

int q6usm_us_param_buf_free(unsigned int dir,
			    struct us_client *usc)
{
	struct us_port_data *port;
	int rc = 0;

	if ((usc == NULL) ||
	    ((dir != IN) && (dir != OUT)))
		return -EINVAL;

	mutex_lock(&usc->cmd_lock);
	port = &usc->port[dir];
	if (port == NULL) {
		mutex_unlock(&usc->cmd_lock);
		return -EINVAL;
	}

	if (port->param_buf == NULL) {
		mutex_unlock(&usc->cmd_lock);
		return 0;
	}

	rc = q6usm_memory_unmap(port->param_phys, dir, usc->session,
				*((uint32_t *)port->param_buf_mem_handle));
	pr_debug("%s: data[%pK]phys[%llx][%pK]\n", __func__,
		 (void *)port->param_buf, (u64)port->param_phys,
		 (void *)&port->param_phys);

	msm_audio_ion_free(port->param_dma_buf);

	port->param_buf = NULL;
	port->param_phys = 0;
	port->param_buf_size = 0;
	port->param_dma_buf = NULL;

	mutex_unlock(&usc->cmd_lock);
	return rc;
}

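/*
 * Release all resources owned by a US client: data and parameter buffers for
 * both directions, the session slot, the per-client APR port and, when the
 * last client goes away, the common memory-map APR port.
 */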
void q6usm_us_client_free(struct us_client *usc)
{
	int loopcnt = 0;
	struct us_port_data *port;
	uint32_t *p_mem_handle = NULL;

	if ((usc == NULL) ||
	    !(usc->session))
		return;

	for (loopcnt = 0; loopcnt <= OUT; ++loopcnt) {
		port = &usc->port[loopcnt];
		if (port->data == NULL)
			continue;
		pr_debug("%s: loopcnt = %d\n", __func__, loopcnt);
		q6usm_us_client_buf_free(loopcnt, usc);
		q6usm_us_param_buf_free(loopcnt, usc);
	}
	q6usm_session_free(usc);
	apr_deregister(usc->apr);

	pr_debug("%s: APR De-Register\n", __func__);

	if (atomic_read(&this_mmap.ref_cnt) <= 0) {
		pr_err("%s: APR Common Port Already Closed\n", __func__);
		goto done;
	}

	atomic_dec(&this_mmap.ref_cnt);
	if (atomic_read(&this_mmap.ref_cnt) == 0) {
		apr_deregister(this_mmap.apr);
		pr_debug("%s: APR De-Register common port\n", __func__);
	}

done:
	p_mem_handle = (uint32_t *)usc->port[IN].ext;
	kfree(p_mem_handle);
	kfree(usc);
	pr_debug("%s:\n", __func__);
}

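/*
 * Allocate and initialise a US client: session id, per-client APR port,
 * the common memory-map APR port (on first use), wait queue, locks and the
 * per-port memory-handle storage. Returns NULL on any failure.
 */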
struct us_client *q6usm_us_client_alloc(
	void (*cb)(uint32_t, uint32_t, uint32_t *, void *),
	void *priv)
{
	struct us_client *usc;
	uint32_t *p_mem_handle = NULL;
	int n;
	int lcnt = 0;

	usc = kzalloc(sizeof(struct us_client), GFP_KERNEL);
	if (usc == NULL)
		return NULL;

	p_mem_handle = kzalloc(sizeof(uint32_t) * 4, GFP_KERNEL);
	if (p_mem_handle == NULL) {
		kfree(usc);
		return NULL;
	}

	n = q6usm_session_alloc(usc);
	if (n <= 0)
		goto fail_session;
	usc->session = n;
	usc->cb = cb;
	usc->priv = priv;
	usc->apr = apr_register("ADSP", "USM",
				(apr_fn)q6usm_callback,
				((usc->session) << 8 | 0x0001),
				usc);

	if (usc->apr == NULL) {
		pr_err("%s: Registration with APR failed\n", __func__);
		goto fail;
	}
	pr_debug("%s: Registering the common port with APR\n", __func__);
	if (atomic_read(&this_mmap.ref_cnt) == 0) {
		this_mmap.apr = apr_register("ADSP", "USM",
					     (apr_fn)q6usm_mmapcallback,
					     0x0FFFFFFFF, &this_mmap);
		if (this_mmap.apr == NULL) {
			pr_err("%s: USM port registration failed\n",
			       __func__);
			goto fail;
		}
	}

	atomic_inc(&this_mmap.ref_cnt);
	init_waitqueue_head(&usc->cmd_wait);
	mutex_init(&usc->cmd_lock);
	for (lcnt = 0; lcnt <= OUT; ++lcnt) {
		mutex_init(&usc->port[lcnt].lock);
		spin_lock_init(&usc->port[lcnt].dsp_lock);
		usc->port[lcnt].ext = (void *)p_mem_handle++;
		usc->port[lcnt].param_buf_mem_handle = (void *)p_mem_handle++;
		pr_err("%s: usc->port[%d].ext=%pK;\n",
		       __func__, lcnt, usc->port[lcnt].ext);
	}
	atomic_set(&usc->cmd_state, 0);

	return usc;
fail:
	kfree(p_mem_handle);
	q6usm_us_client_free(usc);
	return NULL;
fail_session:
	kfree(p_mem_handle);
	kfree(usc);
	return NULL;
}

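/*
 * Allocate the data buffer ring for one direction (bufsz * bufcnt bytes,
 * rounded up to a page) from audio ION and map it to the aDSP. The returned
 * memory-map handle is stored via port->ext.
 */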
int q6usm_us_client_buf_alloc(unsigned int dir,
			      struct us_client *usc,
			      unsigned int bufsz,
			      unsigned int bufcnt)
{
	int rc = 0;
	struct us_port_data *port = NULL;
	unsigned int size = bufsz*bufcnt;
	size_t len;

	if ((usc == NULL) ||
	    ((dir != IN) && (dir != OUT)) || (size == 0) ||
	    (usc->session <= 0 || usc->session > USM_SESSION_MAX)) {
		pr_err("%s: wrong parameters: size=%d; bufcnt=%d\n",
		       __func__, size, bufcnt);
		return -EINVAL;
	}

	mutex_lock(&usc->cmd_lock);

	port = &usc->port[dir];

	/* The size to allocate should be a multiple of 4K bytes */
	size = PAGE_ALIGN(size);

	rc = msm_audio_ion_alloc(&port->dma_buf,
				 size, &port->phys,
				 &len, &port->data);

	if (rc) {
		pr_err("%s: US ION allocation failed, rc = %d\n",
		       __func__, rc);
		mutex_unlock(&usc->cmd_lock);
		return -ENOMEM;
	}

	port->buf_cnt = bufcnt;
	port->buf_size = bufsz;
	pr_debug("%s: data[%pK]; phys[%llx]; [%pK]\n", __func__,
		 (void *)port->data,
		 (u64)port->phys,
		 (void *)&port->phys);

	rc = q6usm_memory_map(port->phys, dir, size, 1, usc->session,
			      (uint32_t *)port->ext);
	if (rc < 0) {
		pr_err("%s: CMD Memory_map failed\n", __func__);
		mutex_unlock(&usc->cmd_lock);
		q6usm_us_client_buf_free(dir, usc);
		q6usm_us_param_buf_free(dir, usc);
	} else {
		mutex_unlock(&usc->cmd_lock);
		rc = 0;
	}

	return rc;
}

int q6usm_us_param_buf_alloc(unsigned int dir,
			     struct us_client *usc,
			     unsigned int bufsz)
{
	int rc = 0;
	struct us_port_data *port = NULL;
	unsigned int size = bufsz;
	size_t len;

	if ((usc == NULL) ||
	    ((dir != IN) && (dir != OUT)) ||
	    (usc->session <= 0 || usc->session > USM_SESSION_MAX)) {
		pr_err("%s: wrong parameters: direction=%d, bufsz=%d\n",
		       __func__, dir, bufsz);
		return -EINVAL;
	}

	mutex_lock(&usc->cmd_lock);

	port = &usc->port[dir];

	if (bufsz == 0) {
		pr_debug("%s: bufsz=0, get/set param commands are forbidden\n",
			 __func__);
		port->param_buf = NULL;
		mutex_unlock(&usc->cmd_lock);
		return rc;
	}

	/* The size to allocate should be a multiple of 4K bytes */
	size = PAGE_ALIGN(size);

	rc = msm_audio_ion_alloc(&port->param_dma_buf,
				 size, &port->param_phys,
				 &len, &port->param_buf);

	if (rc) {
		pr_err("%s: US ION allocation failed, rc = %d\n",
		       __func__, rc);
		mutex_unlock(&usc->cmd_lock);
		return -ENOMEM;
	}

	port->param_buf_size = bufsz;
	pr_debug("%s: param_buf[%pK]; param_phys[%llx]; [%pK]\n", __func__,
		 (void *)port->param_buf,
		 (u64)port->param_phys,
		 (void *)&port->param_phys);

	rc = q6usm_memory_map(port->param_phys, (IN | OUT), size, 1,
			      usc->session,
			      (uint32_t *)port->param_buf_mem_handle);
	if (rc < 0) {
		pr_err("%s: CMD Memory_map failed\n", __func__);
		mutex_unlock(&usc->cmd_lock);
		q6usm_us_client_buf_free(dir, usc);
		q6usm_us_param_buf_free(dir, usc);
	} else {
		mutex_unlock(&usc->cmd_lock);
		rc = 0;
	}

	return rc;
}

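/*
 * APR callback for the common memory-map port. Completes pending map/unmap
 * commands by clearing cmd_state and waking the waiter; for a map response
 * it also records the memory-map handle returned by the aDSP.
 */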
static int32_t q6usm_mmapcallback(struct apr_client_data *data, void *priv)
{
	uint32_t token;
	uint32_t *payload = data->payload;

	if (data->payload_size < (2 * sizeof(uint32_t))) {
		pr_err("%s: payload has invalid size[%d]\n", __func__,
		       data->payload_size);
		return -EINVAL;
	}
	pr_debug("%s: ptr0[0x%x]; ptr1[0x%x]; opcode[0x%x]\n",
		 __func__, payload[0], payload[1], data->opcode);
	pr_debug("%s: token[0x%x]; payload_size[%d]; src[%d]; dest[%d];\n",
		 __func__, data->token, data->payload_size,
		 data->src_port, data->dest_port);

	if (data->opcode == APR_BASIC_RSP_RESULT) {
		/* status field check */
		if (payload[1]) {
			pr_err("%s: wrong response[%d] on cmd [%d]\n",
			       __func__, payload[1], payload[0]);
		} else {
			token = data->token;
			switch (payload[0]) {
			case USM_CMD_SHARED_MEM_UNMAP_REGION:
				if (atomic_read(&this_mmap.cmd_state)) {
					atomic_set(&this_mmap.cmd_state, 0);
					wake_up(&this_mmap.cmd_wait);
				}
				/* fallthrough */
			case USM_CMD_SHARED_MEM_MAP_REGION:
				/* For MEM_MAP an additional response is */
				/* awaited; therefore, no wake-up here */
				pr_debug("%s: cmd[0x%x]; result[0x%x]\n",
					 __func__, payload[0], payload[1]);
				break;
			default:
				pr_debug("%s: wrong command[0x%x]\n",
					 __func__, payload[0]);
				break;
			}
		}
	} else {
		if (data->opcode == USM_CMDRSP_SHARED_MEM_MAP_REGION) {
			this_mmap.mem_handle = payload[0];
			pr_debug("%s: memory map handle = 0x%x",
				 __func__, payload[0]);
			if (atomic_read(&this_mmap.cmd_state)) {
				atomic_set(&this_mmap.cmd_state, 0);
				wake_up(&this_mmap.cmd_wait);
			}
		}
	}
	return 0;
}

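/*
 * Per-session APR callback. Validates payload sizes, completes pending
 * stream/session commands, tracks the read/write ring positions on
 * READ_DONE/WRITE_DONE events and forwards events to the client callback.
 */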
static int32_t q6usm_callback(struct apr_client_data *data, void *priv)
{
	struct us_client *usc = (struct us_client *)priv;
	unsigned long dsp_flags;
	uint32_t *payload = data->payload;
	uint32_t token = data->token;
	uint32_t opcode = Q6USM_EVENT_UNDEF;

	if (usc == NULL) {
		pr_err("%s: client info is NULL\n", __func__);
		return -EINVAL;
	}

	if (data->opcode == APR_BASIC_RSP_RESULT) {
		if (data->payload_size < (2 * sizeof(uint32_t))) {
			pr_err("%s: payload has invalid size[%d]\n", __func__,
			       data->payload_size);
			return -EINVAL;
		}
		/* status field check */
		if (payload[1]) {
			pr_err("%s: wrong response[%d] on cmd [%d]\n",
			       __func__, payload[1], payload[0]);
			if (usc->cb)
				usc->cb(data->opcode, token,
					(uint32_t *)data->payload, usc->priv);
		} else {
			switch (payload[0]) {
			case USM_SESSION_CMD_RUN:
			case USM_STREAM_CMD_CLOSE:
				if (token != usc->session) {
					pr_err("%s: wrong token[%d]",
					       __func__, token);
					break;
				}
			case USM_STREAM_CMD_OPEN_READ:
			case USM_STREAM_CMD_OPEN_WRITE:
			case USM_STREAM_CMD_SET_ENC_PARAM:
			case USM_DATA_CMD_MEDIA_FORMAT_UPDATE:
			case USM_SESSION_CMD_SIGNAL_DETECT_MODE:
			case USM_STREAM_CMD_SET_PARAM:
			case USM_STREAM_CMD_GET_PARAM:
				if (atomic_read(&usc->cmd_state)) {
					atomic_set(&usc->cmd_state, 0);
					wake_up(&usc->cmd_wait);
				}
				if (usc->cb)
					usc->cb(data->opcode, token,
						(uint32_t *)data->payload,
						usc->priv);
				break;
			default:
				break;
			}
		}
		return 0;
	}

	switch (data->opcode) {
	case RESET_EVENTS: {
		pr_err("%s: Reset event is received: %d %d\n",
		       __func__,
		       data->reset_event,
		       data->reset_proc);

		opcode = RESET_EVENTS;

		apr_reset(this_mmap.apr);
		this_mmap.apr = NULL;

		apr_reset(usc->apr);
		usc->apr = NULL;

		break;
	}

	case USM_DATA_EVENT_READ_DONE: {
		struct us_port_data *port = &usc->port[OUT];

		opcode = Q6USM_EVENT_READ_DONE;
		spin_lock_irqsave(&port->dsp_lock, dsp_flags);
		if (data->payload_size <
		    (sizeof(uint32_t)*(READDONE_IDX_STATUS + 1))) {
			pr_err("%s: Invalid payload size for READDONE[%d]\n",
			       __func__, data->payload_size);
			return -EINVAL;
		}
		if (payload[READDONE_IDX_STATUS]) {
			pr_err("%s: wrong READDONE[%d]; token[%d]\n",
			       __func__,
			       payload[READDONE_IDX_STATUS],
			       token);
			token = USM_WRONG_TOKEN;
			spin_unlock_irqrestore(&port->dsp_lock,
					       dsp_flags);
			break;
		}

		if (port->expected_token != token) {
			u32 cpu_buf = port->cpu_buf;

			pr_err("%s: expected[%d] != token[%d]\n",
			       __func__, port->expected_token, token);
			pr_debug("%s: dsp_buf=%d; cpu_buf=%d;\n",
				 __func__, port->dsp_buf, cpu_buf);

			token = USM_WRONG_TOKEN;
			/* To prevent further data handling */
			port->expected_token = USM_WRONG_TOKEN;
			spin_unlock_irqrestore(&port->dsp_lock,
					       dsp_flags);
			break;
		} /* port->expected_token != data->token */

		port->expected_token = token + 1;
		if (port->expected_token == port->buf_cnt)
			port->expected_token = 0;

		/* gap support */
		if (port->expected_token != port->cpu_buf) {
			port->dsp_buf = port->expected_token;
			token = port->dsp_buf; /* for callback */
		} else
			port->dsp_buf = token;

		spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
		break;
	} /* case USM_DATA_EVENT_READ_DONE */

	case USM_DATA_EVENT_WRITE_DONE: {
		struct us_port_data *port = &usc->port[IN];

		opcode = Q6USM_EVENT_WRITE_DONE;
		if (data->payload_size <
		    (sizeof(uint32_t)*(WRITEDONE_IDX_STATUS + 1))) {
			pr_err("%s: Invalid payload size for WRITEDONE[%d]\n",
			       __func__, data->payload_size);
			return -EINVAL;
		}
		if (payload[WRITEDONE_IDX_STATUS]) {
			pr_err("%s: wrong WRITEDONE_IDX_STATUS[%d]\n",
			       __func__,
			       payload[WRITEDONE_IDX_STATUS]);
			break;
		}

		spin_lock_irqsave(&port->dsp_lock, dsp_flags);
		port->dsp_buf = token + 1;
		if (port->dsp_buf == port->buf_cnt)
			port->dsp_buf = 0;
		spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);

		break;
	} /* case USM_DATA_EVENT_WRITE_DONE */

	case USM_SESSION_EVENT_SIGNAL_DETECT_RESULT: {
		pr_debug("%s: US detect result: result=%d",
			 __func__,
			 payload[0]);
		opcode = Q6USM_EVENT_SIGNAL_DETECT_RESULT;

		break;
	} /* case USM_SESSION_EVENT_SIGNAL_DETECT_RESULT */

	default:
		return 0;

	} /* switch */

	if (usc->cb)
		usc->cb(opcode, token,
			data->payload, usc->priv);

	return 0;
}

uint32_t q6usm_get_virtual_address(int dir,
				   struct us_client *usc,
				   struct vm_area_struct *vms)
{
	uint32_t ret = 0xffffffff;

	if (vms && (usc != NULL) && ((dir == IN) || (dir == OUT))) {
		struct us_port_data *port = &usc->port[dir];
		int size = PAGE_ALIGN(port->buf_size * port->buf_cnt);
		struct audio_buffer ab;

		ab.phys = port->phys;
		ab.data = port->data;
		ab.used = 1;
		ab.size = size;
		ab.actual_size = size;
		ab.dma_buf = port->dma_buf;

		ret = msm_audio_ion_mmap(&ab, vms);
	}
	return ret;
}

static void q6usm_add_hdr(struct us_client *usc, struct apr_hdr *hdr,
			  uint32_t pkt_size, bool cmd_flg)
{
	mutex_lock(&usc->cmd_lock);
	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				       APR_HDR_LEN(sizeof(struct apr_hdr)),
				       APR_PKT_VER);
	hdr->src_svc = ((struct apr_svc *)usc->apr)->id;
	hdr->src_domain = APR_DOMAIN_APPS;
	hdr->dest_svc = APR_SVC_USM;
	hdr->dest_domain = APR_DOMAIN_ADSP;
	hdr->src_port = (usc->session << 8) | 0x0001;
	hdr->dest_port = (usc->session << 8) | 0x0001;
	if (cmd_flg) {
		hdr->token = usc->session;
		atomic_set(&usc->cmd_state, 1);
	}
	hdr->pkt_size = pkt_size;
	mutex_unlock(&usc->cmd_lock);
}

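/*
 * Translate an external (userspace) ultrasound format id into the
 * corresponding aDSP stream format; returns INVALID_FORMAT if unknown.
 */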
static uint32_t q6usm_ext2int_format(uint32_t ext_format)
{
	uint32_t int_format = INVALID_FORMAT;

	switch (ext_format) {
	case FORMAT_USPS_EPOS:
		int_format = US_POINT_EPOS_FORMAT_V2;
		break;
	case FORMAT_USRAW:
		int_format = US_RAW_FORMAT_V2;
		break;
	case FORMAT_USPROX:
		int_format = US_PROX_FORMAT_V4;
		break;
	case FORMAT_USGES_SYNC:
		int_format = US_GES_SYNC_FORMAT;
		break;
	case FORMAT_USRAW_SYNC:
		int_format = US_RAW_SYNC_FORMAT;
		break;
	default:
		pr_err("%s: Invalid format[%d]\n", __func__, ext_format);
		break;
	}

	return int_format;
}

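/*
 * Open the uplink (read) stream for the given format: send
 * USM_STREAM_CMD_OPEN_READ and wait for the aDSP acknowledgement.
 */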
int q6usm_open_read(struct us_client *usc,
		    uint32_t format)
{
	uint32_t int_format = INVALID_FORMAT;
	int rc = 0x00;
	struct usm_stream_cmd_open_read open;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: client or its apr is NULL\n", __func__);
		return -EINVAL;
	}

	pr_debug("%s: session[%d]", __func__, usc->session);

	q6usm_add_hdr(usc, &open.hdr, sizeof(open), true);
	open.hdr.opcode = USM_STREAM_CMD_OPEN_READ;
	open.src_endpoint = 0; /* AFE */
	open.pre_proc_top = 0; /* No preprocessing required */

	int_format = q6usm_ext2int_format(format);
	if (int_format == INVALID_FORMAT)
		return -EINVAL;

	open.uMode = STREAM_PRIORITY_NORMAL;
	open.format = int_format;

	rc = apr_send_pkt(usc->apr, (uint32_t *) &open);
	if (rc < 0) {
		pr_err("%s: open failed op[0x%x]rc[%d]\n",
		       __func__, open.hdr.opcode, rc);
		goto fail_cmd;
	}
	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout, waited for OPEN_READ rc[%d]\n",
		       __func__, rc);
		goto fail_cmd;
	} else
		rc = 0;
fail_cmd:
	return rc;
}

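/*
 * Send the encoder configuration block (common config plus transparent,
 * format-specific parameters) to the aDSP. If the transparent data does not
 * fit in the static USM_MAX_CFG_DATA_SIZE area, a larger packet is allocated
 * dynamically.
 */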
int q6usm_enc_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg)
{
	uint32_t int_format = INVALID_FORMAT;
	struct usm_stream_cmd_encdec_cfg_blk enc_cfg_obj;
	struct usm_stream_cmd_encdec_cfg_blk *enc_cfg = &enc_cfg_obj;
	int rc = 0;
	uint32_t total_cfg_size =
		sizeof(struct usm_stream_cmd_encdec_cfg_blk);
	uint32_t round_params_size = 0;
	uint8_t is_allocated = 0;

	if ((usc == NULL) || (us_cfg == NULL)) {
		pr_err("%s: wrong input", __func__);
		return -EINVAL;
	}

	int_format = q6usm_ext2int_format(us_cfg->format_id);
	if (int_format == INVALID_FORMAT) {
		pr_err("%s: wrong input format[%d]",
		       __func__, us_cfg->format_id);
		return -EINVAL;
	}

	/* Transparent configuration data is after enc_cfg */
	/* An integer number of u32s is required */
	round_params_size = ((us_cfg->params_size + 3)/4) * 4;
	if (round_params_size > USM_MAX_CFG_DATA_SIZE) {
		/* A dynamically allocated encdec_cfg_blk is required */
		/* beyond the static part */
		round_params_size -= USM_MAX_CFG_DATA_SIZE;
		total_cfg_size += round_params_size;
		enc_cfg = kzalloc(total_cfg_size, GFP_KERNEL);
		if (enc_cfg == NULL) {
			pr_err("%s: enc_cfg[%d] allocation failed\n",
			       __func__, total_cfg_size);
			return -ENOMEM;
		}
		is_allocated = 1;
	} else
		round_params_size = 0;

	q6usm_add_hdr(usc, &enc_cfg->hdr, total_cfg_size, true);

	enc_cfg->hdr.opcode = USM_STREAM_CMD_SET_ENC_PARAM;
	enc_cfg->param_id = USM_PARAM_ID_ENCDEC_ENC_CFG_BLK;
	enc_cfg->param_size = sizeof(struct usm_encode_cfg_blk) +
			      round_params_size;
	enc_cfg->enc_blk.frames_per_buf = 1;
	enc_cfg->enc_blk.format_id = int_format;
	enc_cfg->enc_blk.cfg_size = sizeof(struct usm_cfg_common) +
				    USM_MAX_CFG_DATA_SIZE +
				    round_params_size;
	memcpy(&(enc_cfg->enc_blk.cfg_common), &(us_cfg->cfg_common),
	       sizeof(struct usm_cfg_common));

	/* Transparent data copy */
	memcpy(enc_cfg->enc_blk.transp_data, us_cfg->params,
	       us_cfg->params_size);
	pr_debug("%s: cfg_size[%d], params_size[%d]\n",
		 __func__,
		 enc_cfg->enc_blk.cfg_size,
		 us_cfg->params_size);
	pr_debug("%s: params[%d,%d,%d,%d, %d,%d,%d,%d]\n",
		 __func__,
		 enc_cfg->enc_blk.transp_data[0],
		 enc_cfg->enc_blk.transp_data[1],
		 enc_cfg->enc_blk.transp_data[2],
		 enc_cfg->enc_blk.transp_data[3],
		 enc_cfg->enc_blk.transp_data[4],
		 enc_cfg->enc_blk.transp_data[5],
		 enc_cfg->enc_blk.transp_data[6],
		 enc_cfg->enc_blk.transp_data[7]
		);
	pr_debug("%s: srate:%d, ch=%d, bps= %d;\n",
		 __func__, enc_cfg->enc_blk.cfg_common.sample_rate,
		 enc_cfg->enc_blk.cfg_common.ch_cfg,
		 enc_cfg->enc_blk.cfg_common.bits_per_sample);
	pr_debug("dmap:[0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x]; dev_id=0x%x\n",
		 enc_cfg->enc_blk.cfg_common.data_map[0],
		 enc_cfg->enc_blk.cfg_common.data_map[1],
		 enc_cfg->enc_blk.cfg_common.data_map[2],
		 enc_cfg->enc_blk.cfg_common.data_map[3],
		 enc_cfg->enc_blk.cfg_common.data_map[4],
		 enc_cfg->enc_blk.cfg_common.data_map[5],
		 enc_cfg->enc_blk.cfg_common.data_map[6],
		 enc_cfg->enc_blk.cfg_common.data_map[7],
		 enc_cfg->enc_blk.cfg_common.dev_id);

	rc = apr_send_pkt(usc->apr, (uint32_t *) enc_cfg);
	if (rc < 0) {
		pr_err("%s: Command enc_cfg_blk failed\n", __func__);
		rc = -EINVAL;
		goto fail_cmd;
	}
	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout opcode[0x%x]\n",
		       __func__, enc_cfg->hdr.opcode);
	} else
		rc = 0;

fail_cmd:
	if (is_allocated == 1)
		kfree(enc_cfg);

	return rc;
}

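/*
 * Send the decoder media format update (common config plus transparent,
 * format-specific parameters) to the aDSP, allocating a larger packet when
 * the transparent data exceeds USM_MAX_CFG_DATA_SIZE.
 */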
int q6usm_dec_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg)
{
	uint32_t int_format = INVALID_FORMAT;
	struct usm_stream_media_format_update dec_cfg_obj;
	struct usm_stream_media_format_update *dec_cfg = &dec_cfg_obj;

	int rc = 0;
	uint32_t total_cfg_size =
		sizeof(struct usm_stream_media_format_update);
	uint32_t round_params_size = 0;
	uint8_t is_allocated = 0;

	if ((usc == NULL) || (us_cfg == NULL)) {
		pr_err("%s: wrong input", __func__);
		return -EINVAL;
	}

	int_format = q6usm_ext2int_format(us_cfg->format_id);
	if (int_format == INVALID_FORMAT) {
		pr_err("%s: wrong input format[%d]",
		       __func__, us_cfg->format_id);
		return -EINVAL;
	}

	/* Transparent configuration data is after dec_cfg */
	/* An integer number of u32s is required */
	round_params_size = ((us_cfg->params_size + 3)/4) * 4;
	if (round_params_size > USM_MAX_CFG_DATA_SIZE) {
		/* A dynamically allocated encdec_cfg_blk is required */
		/* beyond the static part */
		round_params_size -= USM_MAX_CFG_DATA_SIZE;
		total_cfg_size += round_params_size;
		dec_cfg = kzalloc(total_cfg_size, GFP_KERNEL);
		if (dec_cfg == NULL) {
			pr_err("%s: dec_cfg[%d] allocation failed\n",
			       __func__, total_cfg_size);
			return -ENOMEM;
		}
		is_allocated = 1;
	} else { /* static transp_data is enough */
		round_params_size = 0;
	}

	q6usm_add_hdr(usc, &dec_cfg->hdr, total_cfg_size, true);

	dec_cfg->hdr.opcode = USM_DATA_CMD_MEDIA_FORMAT_UPDATE;
	dec_cfg->format_id = int_format;
	dec_cfg->cfg_size = sizeof(struct usm_cfg_common) +
			    USM_MAX_CFG_DATA_SIZE +
			    round_params_size;
	memcpy(&(dec_cfg->cfg_common), &(us_cfg->cfg_common),
	       sizeof(struct usm_cfg_common));
	/* Transparent data copy */
	memcpy(dec_cfg->transp_data, us_cfg->params, us_cfg->params_size);
	pr_debug("%s: cfg_size[%d], params_size[%d]; parambytes[%d,%d,%d,%d]\n",
		 __func__,
		 dec_cfg->cfg_size,
		 us_cfg->params_size,
		 dec_cfg->transp_data[0],
		 dec_cfg->transp_data[1],
		 dec_cfg->transp_data[2],
		 dec_cfg->transp_data[3]
		);

	rc = apr_send_pkt(usc->apr, (uint32_t *) dec_cfg);
	if (rc < 0) {
		pr_err("%s: Command dec_cfg_blk failed\n", __func__);
		rc = -EINVAL;
		goto fail_cmd;
	}
	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout opcode[0x%x]\n",
		       __func__, dec_cfg->hdr.opcode);
	} else
		rc = 0;

fail_cmd:
	if (is_allocated == 1)
		kfree(dec_cfg);

	return rc;
}

int q6usm_open_write(struct us_client *usc,
		     uint32_t format)
{
	int rc = 0;
	uint32_t int_format = INVALID_FORMAT;
	struct usm_stream_cmd_open_write open;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}

	pr_debug("%s: session[%d]", __func__, usc->session);

	q6usm_add_hdr(usc, &open.hdr, sizeof(open), true);
	open.hdr.opcode = USM_STREAM_CMD_OPEN_WRITE;

	int_format = q6usm_ext2int_format(format);
	if (int_format == INVALID_FORMAT) {
		pr_err("%s: wrong format[%d]", __func__, format);
		return -EINVAL;
	}

	open.format = int_format;

	rc = apr_send_pkt(usc->apr, (uint32_t *) &open);
	if (rc < 0) {
		pr_err("%s: open failed op[0x%x]rc[%d]\n",
		       __func__, open.hdr.opcode, rc);
		goto fail_cmd;
	}
	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout. waited for OPEN_WRITE rc[%d]\n",
		       __func__, rc);
		goto fail_cmd;
	} else
		rc = 0;

fail_cmd:
	return rc;
}

int q6usm_run(struct us_client *usc, uint32_t flags,
	      uint32_t msw_ts, uint32_t lsw_ts)
{
	struct usm_stream_cmd_run run;
	int rc = 0;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}
	q6usm_add_hdr(usc, &run.hdr, sizeof(run), true);

	run.hdr.opcode = USM_SESSION_CMD_RUN;
	run.flags = flags;
	run.msw_ts = msw_ts;
	run.lsw_ts = lsw_ts;

	rc = apr_send_pkt(usc->apr, (uint32_t *) &run);
	if (rc < 0) {
		pr_err("%s: Command run failed[%d]\n", __func__, rc);
		goto fail_cmd;
	}

	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout. waited for run success rc[%d]\n",
		       __func__, rc);
	} else
		rc = 0;

fail_cmd:
	return rc;
}

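/*
 * Queue READ commands to the aDSP for every free region between cpu_buf and
 * read_ind, advancing cpu_buf through the ring; each buffer is addressed by
 * its physical offset within the mapped region.
 */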
int q6usm_read(struct us_client *usc, uint32_t read_ind)
{
	struct usm_stream_cmd_read read;
	struct us_port_data *port = NULL;
	int rc = 0;
	u32 read_counter = 0;
	u32 loop_ind = 0;
	u64 buf_addr = 0;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}
	port = &usc->port[OUT];

	if (read_ind > port->buf_cnt) {
		pr_err("%s: wrong read_ind[%d]\n",
		       __func__, read_ind);
		return -EINVAL;
	}
	if (read_ind == port->cpu_buf) {
		pr_err("%s: no free region\n", __func__);
		return 0;
	}

	if (read_ind > port->cpu_buf) { /* 1 range */
		read_counter = read_ind - port->cpu_buf;
	} else { /* 2 ranges */
		read_counter = (port->buf_cnt - port->cpu_buf) + read_ind;
	}

	q6usm_add_hdr(usc, &read.hdr, sizeof(read), false);

	read.hdr.opcode = USM_DATA_CMD_READ;
	read.buf_size = port->buf_size;
	buf_addr = (u64)(port->phys) + port->buf_size * (port->cpu_buf);
	read.buf_addr_lsw = lower_32_bits(buf_addr);
	read.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr);
	read.mem_map_handle = *((uint32_t *)(port->ext));

	for (loop_ind = 0; loop_ind < read_counter; ++loop_ind) {
		u32 temp_cpu_buf = port->cpu_buf;

		buf_addr = (u64)(port->phys) +
			   port->buf_size * (port->cpu_buf);
		read.buf_addr_lsw = lower_32_bits(buf_addr);
		read.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr);
		read.seq_id = port->cpu_buf;
		read.hdr.token = port->cpu_buf;
		read.counter = 1;

		++(port->cpu_buf);
		if (port->cpu_buf == port->buf_cnt)
			port->cpu_buf = 0;

		rc = apr_send_pkt(usc->apr, (uint32_t *) &read);

		if (rc < 0) {
			port->cpu_buf = temp_cpu_buf;

			pr_err("%s: read op[0x%x]rc[%d]\n",
			       __func__, read.hdr.opcode, rc);
			break;
		}

		rc = 0;
	} /* bufs loop */

	return rc;
}

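/*
 * Queue WRITE commands for the downlink ring from cpu_buf up to write_ind,
 * after validating that write_ind lies inside the free region reported by
 * the last WRITE_DONE (dsp_buf).
 */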
int q6usm_write(struct us_client *usc, uint32_t write_ind)
{
	int rc = 0;
	struct usm_stream_cmd_write cmd_write;
	struct us_port_data *port = NULL;
	u32 current_dsp_buf = 0;
	u64 buf_addr = 0;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}
	port = &usc->port[IN];

	current_dsp_buf = port->dsp_buf;
	/* The free region, updated by a new dsp_buf report from the DSP, */
	/* can only be extended */
	if (port->cpu_buf >= current_dsp_buf) {
		/* 2-part free region, including an empty buffer */
		if ((write_ind <= port->cpu_buf) &&
		    (write_ind > current_dsp_buf)) {
			pr_err("%s: wrong w_ind[%d]; d_buf=%d; c_buf=%d\n",
			       __func__, write_ind,
			       current_dsp_buf, port->cpu_buf);
			return -EINVAL;
		}
	} else {
		/* 1-part free region */
		if ((write_ind <= port->cpu_buf) ||
		    (write_ind > current_dsp_buf)) {
			pr_err("%s: wrong w_ind[%d]; d_buf=%d; c_buf=%d\n",
			       __func__, write_ind,
			       current_dsp_buf, port->cpu_buf);
			return -EINVAL;
		}
	}

	q6usm_add_hdr(usc, &cmd_write.hdr, sizeof(cmd_write), false);

	cmd_write.hdr.opcode = USM_DATA_CMD_WRITE;
	cmd_write.buf_size = port->buf_size;
	buf_addr = (u64)(port->phys) + port->buf_size * (port->cpu_buf);
	cmd_write.buf_addr_lsw = lower_32_bits(buf_addr);
	cmd_write.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr);
	cmd_write.mem_map_handle = *((uint32_t *)(port->ext));
	cmd_write.res0 = 0;
	cmd_write.res1 = 0;
	cmd_write.res2 = 0;

	while (port->cpu_buf != write_ind) {
		u32 temp_cpu_buf = port->cpu_buf;

		buf_addr = (u64)(port->phys) +
			   port->buf_size * (port->cpu_buf);
		cmd_write.buf_addr_lsw = lower_32_bits(buf_addr);
		cmd_write.buf_addr_msw =
				msm_audio_populate_upper_32_bits(buf_addr);
		cmd_write.seq_id = port->cpu_buf;
		cmd_write.hdr.token = port->cpu_buf;

		++(port->cpu_buf);
		if (port->cpu_buf == port->buf_cnt)
			port->cpu_buf = 0;

		rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_write);

		if (rc < 0) {
			port->cpu_buf = temp_cpu_buf;
			pr_err("%s: write op[0x%x];rc[%d];cpu_buf[%d]\n",
			       __func__, cmd_write.hdr.opcode,
			       rc, port->cpu_buf);
			break;
		}

		rc = 0;
	}

	return rc;
}

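/*
 * Return true when the downlink ring is full, i.e. when advancing cpu_buf by
 * one would collide with dsp_buf; *free_region is set to the current dsp_buf.
 */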
bool q6usm_is_write_buf_full(struct us_client *usc, uint32_t *free_region)
{
	struct us_port_data *port = NULL;
	u32 cpu_buf = 0;

	if ((usc == NULL) || !free_region) {
		pr_err("%s: input data wrong\n", __func__);
		return false;
	}
	port = &usc->port[IN];
	cpu_buf = port->cpu_buf + 1;
	if (cpu_buf == port->buf_cnt)
		cpu_buf = 0;

	*free_region = port->dsp_buf;

	return cpu_buf == *free_region;
}

int q6usm_cmd(struct us_client *usc, int cmd)
{
	struct apr_hdr hdr;
	int rc = 0;
	atomic_t *state;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}
	q6usm_add_hdr(usc, &hdr, sizeof(hdr), true);
	switch (cmd) {
	case CMD_CLOSE:
		hdr.opcode = USM_STREAM_CMD_CLOSE;
		state = &usc->cmd_state;
		break;

	default:
		pr_err("%s: Invalid command[%d]\n", __func__, cmd);
		goto fail_cmd;
	}

	rc = apr_send_pkt(usc->apr, (uint32_t *) &hdr);
	if (rc < 0) {
		pr_err("%s: Command 0x%x failed\n", __func__, hdr.opcode);
		goto fail_cmd;
	}
	rc = wait_event_timeout(usc->cmd_wait, (atomic_read(state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout. waited for response opcode[0x%x]\n",
		       __func__, hdr.opcode);
	} else
		rc = 0;
fail_cmd:
	return rc;
}

int q6usm_set_us_detection(struct us_client *usc,
			   struct usm_session_cmd_detect_info *detect_info,
			   uint16_t detect_info_size)
{
	int rc = 0;

	if ((usc == NULL) ||
	    (detect_info_size == 0) ||
	    (detect_info == NULL)) {
		pr_err("%s: wrong input: usc=0x%pK, inf_size=%d; info=0x%pK",
		       __func__,
		       usc,
		       detect_info_size,
		       detect_info);
		return -EINVAL;
	}

	q6usm_add_hdr(usc, &detect_info->hdr, detect_info_size, true);

	detect_info->hdr.opcode = USM_SESSION_CMD_SIGNAL_DETECT_MODE;

	rc = apr_send_pkt(usc->apr, (uint32_t *)detect_info);
	if (rc < 0) {
		pr_err("%s: Command signal detect failed\n", __func__);
		return -EINVAL;
	}
	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: CMD_SIGNAL_DETECT_MODE: timeout=%d\n",
		       __func__, Q6USM_TIMEOUT_JIFFIES);
	} else
		rc = 0;

	return rc;
}

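/*
 * Set a stream parameter: the parameter payload is expected to already be in
 * the mapped parameter buffer of the given port; only its address, size and
 * ids are sent to the aDSP with USM_STREAM_CMD_SET_PARAM.
 */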
int q6usm_set_us_stream_param(int dir, struct us_client *usc,
			      uint32_t module_id, uint32_t param_id,
			      uint32_t buf_size)
{
	int rc = 0;
	struct usm_stream_cmd_set_param cmd_set_param;
	struct us_port_data *port = NULL;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}
	port = &usc->port[dir];

	q6usm_add_hdr(usc, &cmd_set_param.hdr, sizeof(cmd_set_param), true);

	cmd_set_param.hdr.opcode = USM_STREAM_CMD_SET_PARAM;
	cmd_set_param.buf_size = buf_size;
	cmd_set_param.buf_addr_msw =
			msm_audio_populate_upper_32_bits(port->param_phys);
	cmd_set_param.buf_addr_lsw = lower_32_bits(port->param_phys);
	cmd_set_param.mem_map_handle =
			*((uint32_t *)(port->param_buf_mem_handle));
	cmd_set_param.module_id = module_id;
	cmd_set_param.param_id = param_id;
	cmd_set_param.hdr.token = 0;

	rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_set_param);

	if (rc < 0) {
		pr_err("%s: set_param op[0x%x];rc[%d]\n",
		       __func__, cmd_set_param.hdr.opcode, rc);
	}

	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: CMD_SET_PARAM: timeout=%d\n",
		       __func__, Q6USM_TIMEOUT_JIFFIES);
	} else
		rc = 0;

	return rc;
}

int q6usm_get_us_stream_param(int dir, struct us_client *usc,
			      uint32_t module_id, uint32_t param_id,
			      uint32_t buf_size)
{
	int rc = 0;
	struct usm_stream_cmd_get_param cmd_get_param;
	struct us_port_data *port = NULL;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}
	port = &usc->port[dir];

	q6usm_add_hdr(usc, &cmd_get_param.hdr, sizeof(cmd_get_param), true);

	cmd_get_param.hdr.opcode = USM_STREAM_CMD_GET_PARAM;
	cmd_get_param.buf_size = buf_size;
	cmd_get_param.buf_addr_msw =
			msm_audio_populate_upper_32_bits(port->param_phys);
	cmd_get_param.buf_addr_lsw = lower_32_bits(port->param_phys);
	cmd_get_param.mem_map_handle =
			*((uint32_t *)(port->param_buf_mem_handle));
	cmd_get_param.module_id = module_id;
	cmd_get_param.param_id = param_id;
	cmd_get_param.hdr.token = 0;

	rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_get_param);

	if (rc < 0) {
		pr_err("%s: get_param op[0x%x];rc[%d]\n",
		       __func__, cmd_get_param.hdr.opcode, rc);
	}

	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: CMD_GET_PARAM: timeout=%d\n",
		       __func__, Q6USM_TIMEOUT_JIFFIES);
	} else
		rc = 0;

	return rc;
}

int __init q6usm_init(void)
{
	pr_debug("%s\n", __func__);
	init_waitqueue_head(&this_mmap.cmd_wait);
	memset(session, 0, sizeof(session));
	return 0;
}