/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/slimbus/slimbus.h>
#include <mach/sps.h>
#include "slim-msm.h"
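
/*
 * msm_slim_rx_enqueue()/msm_slim_rx_dequeue() implement a fixed-size ring
 * of MSM_CONCUR_MSG received-message slots indexed by head/tail.  One slot
 * is always left empty so that head == tail unambiguously means "empty"
 * and (tail + 1) % MSM_CONCUR_MSG == head means "full".  Dequeue copies a
 * full 40-byte slot, which assumes each rx_msgs[] entry is sized for the
 * largest SLIMbus message (see slim-msm.h).  The asymmetric locking (plain
 * spin_lock in enqueue, spin_lock_irqsave in dequeue) suggests enqueue is
 * expected to run in interrupt context and dequeue in process context.
 */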
int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
{
	spin_lock(&dev->rx_lock);
	if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
		spin_unlock(&dev->rx_lock);
		dev_err(dev->dev, "RX QUEUE full!");
		return -EXFULL;
	}
	memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
	dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
	spin_unlock(&dev->rx_lock);
	return 0;
}

int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->rx_lock, flags);
	if (dev->tail == dev->head) {
		spin_unlock_irqrestore(&dev->rx_lock, flags);
		return -ENODATA;
	}
	memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40);
	dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
	spin_unlock_irqrestore(&dev->rx_lock, flags);
	return 0;
}

int msm_slim_get_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref = 0;
	int ret = pm_runtime_get_sync(dev->dev);
	if (ret >= 0) {
		ref = atomic_read(&dev->dev->power.usage_count);
		if (ref <= 0) {
			dev_err(dev->dev, "reference count non-positive:%d", ref);
			ret = -ENODEV;
		}
	}
	return ret;
#else
	return -ENODEV;
#endif
}

void msm_slim_put_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref;
	pm_runtime_mark_last_busy(dev->dev);
	ref = atomic_read(&dev->dev->power.usage_count);
	if (ref <= 0)
		dev_err(dev->dev, "reference count mismatch:%d", ref);
	else
		pm_runtime_put(dev->dev);
#endif
}
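
/*
 * Illustrative usage sketch (not part of the driver): callers bracket bus
 * activity with these helpers so the runtime-PM usage count stays
 * balanced, e.g.:
 *
 *	if (msm_slim_get_ctrl(dev) >= 0) {
 *		... perform transfer ...
 *		msm_slim_put_ctrl(dev);
 *	}
 */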

int msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
{
	int ret;
	struct sps_pipe *endpoint;
	struct sps_connect *config = &ep->config;

	/* Allocate the endpoint */
	endpoint = sps_alloc_endpoint();
	if (!endpoint) {
		dev_err(dev->dev, "sps_alloc_endpoint failed\n");
		return -ENOMEM;
	}

	/* Get default connection configuration for an endpoint */
	ret = sps_get_config(endpoint, config);
	if (ret) {
		dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
		goto sps_config_failed;
	}

	ep->sps = endpoint;
	return 0;

sps_config_failed:
	sps_free_endpoint(endpoint);
	return ret;
}

void msm_slim_free_endpoint(struct msm_slim_endp *ep)
{
	sps_free_endpoint(ep->sps);
	ep->sps = NULL;
}

int msm_slim_sps_mem_alloc(
		struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
{
	dma_addr_t phys;

	mem->size = len;
	mem->min_size = 0;
	mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);

	if (!mem->base) {
		dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
		return -ENOMEM;
	}

	mem->phys_base = phys;
	memset(mem->base, 0x00, mem->size);
	return 0;
}

void
msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
{
	dma_free_coherent(dev->dev, mem->size, mem->base, mem->phys_base);
	mem->size = 0;
	mem->base = NULL;
	mem->phys_base = 0;
}

void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pn)
{
	u32 set_cfg = DEF_WATERMARK | DEF_ALIGN | DEF_PACK | ENABLE_PORT;
	u32 int_port = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
					dev->ver));
	writel_relaxed(set_cfg, PGD_PORT(PGD_PORT_CFGn, pn, dev->ver));
	writel_relaxed(DEF_BLKSZ, PGD_PORT(PGD_PORT_BLKn, pn, dev->ver));
	writel_relaxed(DEF_TRANSZ, PGD_PORT(PGD_PORT_TRANn, pn, dev->ver));
	writel_relaxed((int_port | (1 << pn)), PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
					dev->ver));
	/* Make sure that port registers are updated before returning */
	mb();
}

int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
	struct msm_slim_endp *endpoint = &dev->pipes[pn];
	struct sps_connect *cfg = &endpoint->config;
	u32 stat;
	int ret = sps_get_config(dev->pipes[pn].sps, cfg);
	if (ret) {
		dev_err(dev->dev, "sps pipe-port get config error:%x\n", ret);
		return ret;
	}
	cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
			SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	if (dev->pipes[pn].connected) {
		ret = sps_set_config(dev->pipes[pn].sps, cfg);
		if (ret) {
			dev_err(dev->dev, "sps pipe-port set config error:%x\n",
					ret);
			return ret;
		}
	}

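	/*
	 * PGD_PORT_STATn[11:4] is read below to learn which BAM pipe backs
	 * this port; that field feeds the SPS pipe index for the connection
	 * (an assumption based on how the value is used here, not on
	 * published register documentation).
	 */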
	stat = readl_relaxed(PGD_PORT(PGD_PORT_STATn, (pn + dev->pipe_b),
				dev->ver));
	if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
		cfg->destination = dev->bam.hdl;
		cfg->source = SPS_DEV_HANDLE_MEM;
		cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->src_pipe_index = 0;
		dev_dbg(dev->dev, "flow src:pipe num:%d",
					cfg->dest_pipe_index);
		cfg->mode = SPS_MODE_DEST;
	} else {
		cfg->source = dev->bam.hdl;
		cfg->destination = SPS_DEV_HANDLE_MEM;
		cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->dest_pipe_index = 0;
		dev_dbg(dev->dev, "flow dest:pipe num:%d",
					cfg->src_pipe_index);
		cfg->mode = SPS_MODE_SRC;
	}
	/* Space for descriptor FIFOs */
	cfg->desc.size = MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec);
	cfg->config = SPS_CONFIG_DEFAULT;
	ret = sps_connect(dev->pipes[pn].sps, cfg);
	if (!ret) {
		dev->pipes[pn].connected = true;
		msm_hw_set_port(dev, pn + dev->pipe_b);
	}
	return ret;
}

int msm_config_port(struct slim_controller *ctrl, u8 pn)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	struct msm_slim_endp *endpoint;
	int ret = 0;
	if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
		ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
		return -EPROTONOSUPPORT;
	if (pn >= (MSM_SLIM_NPORTS - dev->pipe_b))
		return -ENODEV;

	endpoint = &dev->pipes[pn];
	ret = msm_slim_init_endpoint(dev, endpoint);
	dev_dbg(dev->dev, "sps endpoint init error code:%x\n", ret);
	return ret;
}

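/*
 * Poll one completed descriptor for a port.  When an iovec has been
 * consumed, its size and buffer address are reported through done_len and
 * done_buf, but the function still returns SLIM_P_INPROGRESS in that case;
 * callers must judge completion from done_len rather than the return code.
 * SLIM_P_DISCONNECT is returned only when the pipe is not connected.
 */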
enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
				u8 pn, u8 **done_buf, u32 *done_len)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
	struct sps_iovec sio;
	int ret;
	if (done_len)
		*done_len = 0;
	if (done_buf)
		*done_buf = NULL;
	if (!dev->pipes[pn].connected)
		return SLIM_P_DISCONNECT;
	ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
	if (!ret) {
		if (done_len)
			*done_len = sio.size;
		if (done_buf)
			*done_buf = (u8 *)sio.addr;
	}
	dev_dbg(dev->dev, "get iovec returned %d\n", ret);
	return SLIM_P_INPROGRESS;
}

int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf,
			u32 len, struct completion *comp)
{
	struct sps_register_event sreg;
	int ret;
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	if (pn >= 7)
		return -ENODEV;

	ctrl->ports[pn].xcomp = comp;
	sreg.options = (SPS_EVENT_DESC_DONE | SPS_EVENT_ERROR);
	sreg.mode = SPS_TRIGGER_WAIT;
	sreg.xfer_done = comp;
	sreg.callback = NULL;
	sreg.user = &ctrl->ports[pn];
	ret = sps_register_event(dev->pipes[pn].sps, &sreg);
	if (ret) {
		dev_dbg(dev->dev, "sps register event error:%x\n", ret);
		return ret;
	}
	ret = sps_transfer_one(dev->pipes[pn].sps, (u32)iobuf, len, NULL,
				SPS_IOVEC_FLAG_INT);
	dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);

	return ret;
}
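
/*
 * Illustrative sketch (assumed caller-side flow, not part of this file):
 * the submitted transfer signals the completion that was passed in, so a
 * synchronous caller would do roughly:
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	ret = msm_slim_port_xfer(ctrl, pn, iobuf, len, &done);
 *	if (!ret)
 *		wait_for_completion(&done);
 *	msm_slim_port_xfer_status(ctrl, pn, &done_buf, &done_len);
 */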

int msm_send_msg_buf(struct msm_slim_ctrl *dev, u32 *buf, u8 len, u32 tx_reg)
{
	int i;
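	/* len is in bytes; (len + 3) >> 2 rounds up to whole 32-bit words */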
	for (i = 0; i < (len + 3) >> 2; i++) {
		dev_dbg(dev->dev, "TX data:0x%x\n", buf[i]);
		writel_relaxed(buf[i], dev->base + tx_reg + (i * 4));
	}
	/* Guarantee that message is sent before returning */
	mb();
	return 0;
}

u32 *msm_get_msg_buf(struct msm_slim_ctrl *dev, int len)
{
	/*
	 * Currently we block a transaction until the current one completes.
	 * If multiple outstanding transactions are ever needed, a message
	 * queue should be used here instead of the single TX buffer.
	 */
	return dev->tx_buf;
}

static void
msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
{
	u32 *buf = ev->data.transfer.user;
	struct sps_iovec *iovec = &ev->data.transfer.iovec;

	/*
	 * Note: the virtual address needs to be offset by the same index
	 * as the physical address, or the actual virtual address can be
	 * passed in directly if the sps_mem_buffer is not needed.  If a
	 * completion is used instead, the virtual address is not available
	 * and must be calculated from the offset of the physical address.
	 */
	if (ev->event_id == SPS_EVENT_DESC_DONE) {

		pr_debug("buf = 0x%p, data = 0x%x\n", buf, *buf);

		pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
			iovec->addr, iovec->size, iovec->flags);

	} else {
		dev_err(dev->dev, "%s: unknown event %d\n",
			__func__, ev->event_id);
	}
}

static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
{
	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;
	msm_slim_rx_msgq_event(dev, notify);
}

/* Queue up Rx message buffer */
static int msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
{
	int ret;
	u32 flags = SPS_IOVEC_FLAG_INT;
	struct msm_slim_endp *endpoint = &dev->rx_msgq;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_pipe *pipe = endpoint->sps;

	/* Rx message queue buffers are 4 bytes in length */
	u8 *virt_addr = mem->base + (4 * ix);
	u32 phys_addr = mem->phys_base + (4 * ix);

	pr_debug("index:%d, phys:0x%x, virt:0x%p\n", ix, phys_addr, virt_addr);

	ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, flags);
	if (ret)
		dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);

	return ret;
}

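/*
 * Pull one 4-byte message word out of the RX message queue.  The consumed
 * iovec's physical address is mapped back to a buffer index via
 * (iovec.addr - mem->phys_base) / 4, the word is copied out at
 * data[offset], and the buffer is immediately re-posted to the pipe.
 */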
int msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
{
	struct msm_slim_endp *endpoint = &dev->rx_msgq;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_pipe *pipe = endpoint->sps;
	struct sps_iovec iovec;
	int index;
	int ret;

	ret = sps_get_iovec(pipe, &iovec);
	if (ret) {
		dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
		goto err_exit;
	}

	pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
		iovec.addr, iovec.size, iovec.flags);
	BUG_ON(iovec.addr < mem->phys_base);
	BUG_ON(iovec.addr >= mem->phys_base + mem->size);

	/* Calculate buffer index */
	index = (iovec.addr - mem->phys_base) / 4;
	*(data + offset) = *((u32 *)mem->base + index);

	pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index, *data);

	/* Add buffer back to the queue */
	(void)msm_slim_post_rx_msgq(dev, index);

err_exit:
	return ret;
}

static int msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev, u32 pipe_reg)
{
	int i, ret;
	u32 pipe_offset;
	struct msm_slim_endp *endpoint = &dev->rx_msgq;
	struct sps_connect *config = &endpoint->config;
	struct sps_mem_buffer *descr = &config->desc;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct completion *notify = &dev->rx_msgq_notify;

	struct sps_register_event sps_error_event; /* SPS_ERROR */
	struct sps_register_event sps_descr_event; /* DESCR_DONE */

	init_completion(notify);
	if (!dev->use_rx_msgqs)
		return 0;

	/* Allocate the endpoint */
	ret = msm_slim_init_endpoint(dev, endpoint);
	if (ret) {
		dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
		goto sps_init_endpoint_failed;
	}

	/* Get the pipe indices for the message queues */
	pipe_offset = (readl_relaxed(dev->base + pipe_reg) & 0xfc) >> 2;
	dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);

	config->mode = SPS_MODE_SRC;
	config->source = dev->bam.hdl;
	config->destination = SPS_DEV_HANDLE_MEM;
	config->src_pipe_index = pipe_offset;
	config->options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* Allocate memory for the FIFO descriptors */
	ret = msm_slim_sps_mem_alloc(dev, descr,
				MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
	if (ret) {
		dev_err(dev->dev, "unable to allocate SPS descriptors\n");
		goto alloc_descr_failed;
	}

	ret = sps_connect(endpoint->sps, config);
	if (ret) {
		dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
		goto sps_connect_failed;
	}

	memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));

	sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
	sps_descr_event.options = SPS_O_DESC_DONE;
	sps_descr_event.user = (void *)dev;
	sps_descr_event.xfer_done = notify;

	ret = sps_register_event(endpoint->sps, &sps_descr_event);
	if (ret) {
		dev_err(dev->dev, "sps_register_event() failed 0x%x\n", ret);
		goto sps_reg_event_failed;
	}

	/* Register callback for errors */
	memset(&sps_error_event, 0x00, sizeof(sps_error_event));
	sps_error_event.mode = SPS_TRIGGER_CALLBACK;
	sps_error_event.options = SPS_O_ERROR;
	sps_error_event.user = (void *)dev;
	sps_error_event.callback = msm_slim_rx_msgq_cb;

	ret = sps_register_event(endpoint->sps, &sps_error_event);
	if (ret) {
		dev_err(dev->dev, "sps_register_event() failed 0x%x\n", ret);
		goto sps_reg_event_failed;
	}

	/* Allocate memory for the message buffer(s), N descrs, 4-byte mesg */
	ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
	if (ret) {
		dev_err(dev->dev, "dma_alloc_coherent failed\n");
		goto alloc_buffer_failed;
	}

	/*
	 * Post one transfer_one() per 4-byte buffer.  Only
	 * MSM_SLIM_DESC_NUM - 1 buffers are posted, presumably because the
	 * descriptor FIFO keeps one slot unused to distinguish full from
	 * empty.
	 */
	for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
		ret = msm_slim_post_rx_msgq(dev, i);
		if (ret) {
			dev_err(dev->dev, "post_rx_msgq() failed 0x%x\n", ret);
			goto sps_transfer_failed;
		}
	}

	return 0;

sps_transfer_failed:
	msm_slim_sps_mem_free(dev, mem);
alloc_buffer_failed:
	memset(&sps_error_event, 0x00, sizeof(sps_error_event));
	sps_register_event(endpoint->sps, &sps_error_event);
sps_reg_event_failed:
	sps_disconnect(endpoint->sps);
sps_connect_failed:
	msm_slim_sps_mem_free(dev, descr);
alloc_descr_failed:
	msm_slim_free_endpoint(endpoint);
sps_init_endpoint_failed:
	dev->use_rx_msgqs = 0;
	return ret;
}

/* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */
int msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem,
			u32 pipe_reg, bool remote)
{
	int i, ret;
	u32 bam_handle = 0;	/* stays 0 when the BAM is already registered */
	struct sps_bam_props bam_props = {0};

	static struct sps_bam_sec_config_props sec_props = {
		.ees = {
			[0] = { /* LPASS */
				.vmid = 0,
				.pipe_mask = 0xFFFF98,
			},
			[1] = { /* Krait Apps */
				.vmid = 1,
				.pipe_mask = 0x3F000007,
			},
			[2] = { /* Modem */
				.vmid = 2,
				.pipe_mask = 0x00000060,
			},
		},
	};

	if (dev->bam.hdl)
		goto init_rx_msgq;
	bam_props.ee = dev->ee;
	bam_props.virt_addr = dev->bam.base;
	bam_props.phys_addr = bam_mem->start;
	bam_props.irq = dev->bam.irq;
	if (!remote) {
		bam_props.manage = SPS_BAM_MGR_LOCAL;
		bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
	} else {
		bam_props.manage = SPS_BAM_MGR_DEVICE_REMOTE |
					SPS_BAM_MGR_MULTI_EE;
		bam_props.sec_config = SPS_BAM_SEC_DO_NOT_CONFIG;
	}
	bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;

	bam_props.p_sec_config_props = &sec_props;

	bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* The first 7 pipes are reserved for message queues */
	for (i = 7; i < 32; i++) {
		/* Find the first data pipe owned by this EE (Apps) */
		if ((sec_props.ees[dev->ee].pipe_mask >> i) & 0x1)
			break;
	}
	dev->pipe_b = i - 7;
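	/*
	 * Worked example (assuming dev->ee == 1, the Krait Apps entry):
	 * pipe_mask 0x3F000007 has bits 0-2 and 24-29 set, so the first
	 * owned pipe at or above 7 is pipe 24 and dev->pipe_b = 24 - 7 = 17.
	 * Port n is then offset by dev->pipe_b when programming the port
	 * registers (see msm_slim_connect_pipe_port()).
	 */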

	/* Register the BAM device with the SPS driver */
	ret = sps_register_bam_device(&bam_props, &bam_handle);
	if (ret) {
		dev_err(dev->dev, "disabling BAM: reg-bam failed 0x%x\n", ret);
		dev->use_rx_msgqs = 0;
		goto init_rx_msgq;
	}
	dev->bam.hdl = bam_handle;
	dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%x\n", bam_handle);

init_rx_msgq:
	ret = msm_slim_init_rx_msgq(dev, pipe_reg);
	if (ret)
		dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
	if (ret && bam_handle) {
		sps_deregister_bam_device(bam_handle);
		dev->bam.hdl = 0L;
	}
	return ret;
}

void msm_slim_sps_exit(struct msm_slim_ctrl *dev, bool dereg)
{
	if (dev->use_rx_msgqs) {
		struct msm_slim_endp *endpoint = &dev->rx_msgq;
		struct sps_connect *config = &endpoint->config;
		struct sps_mem_buffer *descr = &config->desc;
		struct sps_mem_buffer *mem = &endpoint->buf;
		struct sps_register_event sps_event;
		memset(&sps_event, 0x00, sizeof(sps_event));
		msm_slim_sps_mem_free(dev, mem);
		sps_register_event(endpoint->sps, &sps_event);
		sps_disconnect(endpoint->sps);
		msm_slim_sps_mem_free(dev, descr);
		msm_slim_free_endpoint(endpoint);
	}
	if (dereg) {
		sps_deregister_bam_device(dev->bam.hdl);
		dev->bam.hdl = 0L;
	}
}

/* Slimbus QMI Messaging */
#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01 0x0020
#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_V01 0x0020
#define SLIMBUS_QMI_POWER_REQ_V01 0x0021
#define SLIMBUS_QMI_POWER_RESP_V01 0x0021

#define SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN 7
#define SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN 7
#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN 14
#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN 7

enum slimbus_mode_enum_type_v01 {
	/* To force a 32-bit signed enum. Do not change or use. */
	SLIMBUS_MODE_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
	SLIMBUS_MODE_SATELLITE_V01 = 1,
	SLIMBUS_MODE_MASTER_V01 = 2,
	SLIMBUS_MODE_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
};

enum slimbus_pm_enum_type_v01 {
	/* To force a 32-bit signed enum. Do not change or use. */
	SLIMBUS_PM_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
	SLIMBUS_PM_INACTIVE_V01 = 1,
	SLIMBUS_PM_ACTIVE_V01 = 2,
	SLIMBUS_PM_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
};

struct slimbus_select_inst_req_msg_v01 {
	/* Mandatory */
	/* Hardware Instance Selection */
	uint32_t instance;

	/* Optional */
	/* Optional Mode Request Operation */
	/* Must be set to true if mode is being passed */
	uint8_t mode_valid;
	enum slimbus_mode_enum_type_v01 mode;
};

struct slimbus_select_inst_resp_msg_v01 {
	/* Mandatory */
	/* Result Code */
	struct qmi_response_type_v01 resp;
};

struct slimbus_power_req_msg_v01 {
	/* Mandatory */
	/* Power Request Operation */
	enum slimbus_pm_enum_type_v01 pm_req;
};

struct slimbus_power_resp_msg_v01 {
	/* Mandatory */
	/* Result Code */
	struct qmi_response_type_v01 resp;
};

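/*
 * The elem_info tables below describe how each message is encoded as QMI
 * TLVs: tlv_type 0x01/0x02 mark mandatory elements, and (by the usual QMI
 * convention, assumed here) optional elements start at tlv_type 0x10,
 * with the validity flag and its value sharing the same tlv_type.
 */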
static struct elem_info slimbus_select_inst_req_msg_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(uint32_t),
		.is_array = NO_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct slimbus_select_inst_req_msg_v01,
				   instance),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(uint8_t),
		.is_array = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct slimbus_select_inst_req_msg_v01,
				   mode_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(enum slimbus_mode_enum_type_v01),
		.is_array = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct slimbus_select_inst_req_msg_v01,
				   mode),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.is_array = NO_ARRAY,
		.tlv_type = 0x00,
		.offset = 0,
		.ei_array = NULL,
	},
};

static struct elem_info slimbus_select_inst_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.is_array = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct slimbus_select_inst_resp_msg_v01,
				   resp),
		.ei_array = get_qmi_response_type_v01_ei(),
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.is_array = NO_ARRAY,
		.tlv_type = 0x00,
		.offset = 0,
		.ei_array = NULL,
	},
};

static struct elem_info slimbus_power_req_msg_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(enum slimbus_pm_enum_type_v01),
		.is_array = NO_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct slimbus_power_req_msg_v01, pm_req),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.is_array = NO_ARRAY,
		.tlv_type = 0x00,
		.offset = 0,
		.ei_array = NULL,
	},
};

static struct elem_info slimbus_power_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.is_array = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct slimbus_power_resp_msg_v01, resp),
		.ei_array = get_qmi_response_type_v01_ei(),
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.is_array = NO_ARRAY,
		.tlv_type = 0x00,
		.offset = 0,
		.ei_array = NULL,
	},
};

static void msm_slim_qmi_recv_msg(struct kthread_work *work)
{
	int rc;
	struct msm_slim_qmi *qmi =
		container_of(work, struct msm_slim_qmi, kwork);

	rc = qmi_recv_msg(qmi->handle);
	if (rc < 0)
		pr_err("%s: Error receiving QMI message\n", __func__);
}

static void msm_slim_qmi_notify(struct qmi_handle *handle,
				enum qmi_event_type event, void *notify_priv)
{
	struct msm_slim_ctrl *dev = notify_priv;
	struct msm_slim_qmi *qmi = &dev->qmi;

	switch (event) {
	case QMI_RECV_MSG:
		queue_kthread_work(&qmi->kworker, &qmi->kwork);
		break;
	default:
		break;
	}
}

static const char *get_qmi_error(struct qmi_response_type_v01 *r)
{
	if (r->result == QMI_RESULT_SUCCESS_V01 || r->error == QMI_ERR_NONE_V01)
		return "No Error";
	else if (r->error == QMI_ERR_NO_MEMORY_V01)
		return "Out of Memory";
	else if (r->error == QMI_ERR_INTERNAL_V01)
		return "Unexpected error occurred";
	else if (r->error == QMI_ERR_INCOMPATIBLE_STATE_V01)
		return "Slimbus s/w already configured to a different mode";
	else if (r->error == QMI_ERR_INVALID_ID_V01)
		return "Slimbus hardware instance is not valid";
	else
		return "Unknown error";
}

static int msm_slim_qmi_send_select_inst_req(struct msm_slim_ctrl *dev,
				struct slimbus_select_inst_req_msg_v01 *req)
{
	struct slimbus_select_inst_resp_msg_v01 resp = { { 0, 0 } };
	struct msg_desc req_desc, resp_desc;
	int rc;

	req_desc.msg_id = SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01;
	req_desc.max_msg_len = SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN;
	req_desc.ei_array = slimbus_select_inst_req_msg_v01_ei;

	resp_desc.msg_id = SLIMBUS_QMI_SELECT_INSTANCE_RESP_V01;
	resp_desc.max_msg_len = SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN;
	resp_desc.ei_array = slimbus_select_inst_resp_msg_v01_ei;

	rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req, sizeof(*req),
				&resp_desc, &resp, sizeof(resp), 5000);
	if (rc < 0) {
		pr_err("%s: QMI send req failed %d\n", __func__, rc);
		return rc;
	}

	/* Check the response */
	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		pr_err("%s: QMI request failed 0x%x (%s)\n", __func__,
			resp.resp.result, get_qmi_error(&resp.resp));
		return -EREMOTEIO;
	}

	return 0;
}

static int msm_slim_qmi_send_power_request(struct msm_slim_ctrl *dev,
				struct slimbus_power_req_msg_v01 *req)
{
	struct slimbus_power_resp_msg_v01 resp = { { 0, 0 } };
	struct msg_desc req_desc, resp_desc;
	int rc;

	req_desc.msg_id = SLIMBUS_QMI_POWER_REQ_V01;
	req_desc.max_msg_len = SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN;
	req_desc.ei_array = slimbus_power_req_msg_v01_ei;

	resp_desc.msg_id = SLIMBUS_QMI_POWER_RESP_V01;
	resp_desc.max_msg_len = SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN;
	resp_desc.ei_array = slimbus_power_resp_msg_v01_ei;

	rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req, sizeof(*req),
				&resp_desc, &resp, sizeof(resp), 5000);
	if (rc < 0) {
		pr_err("%s: QMI send req failed %d\n", __func__, rc);
		return rc;
	}

	/* Check the response */
	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		pr_err("%s: QMI request failed 0x%x (%s)\n", __func__,
			resp.resp.result, get_qmi_error(&resp.resp));
		return -EREMOTEIO;
	}

	return 0;
}

int msm_slim_qmi_init(struct msm_slim_ctrl *dev, bool apps_is_master)
{
	int rc = 0;
	struct qmi_handle *handle;
	struct slimbus_select_inst_req_msg_v01 req;

	init_kthread_worker(&dev->qmi.kworker);

	dev->qmi.task = kthread_run(kthread_worker_fn,
			&dev->qmi.kworker, "msm_slim_qmi_clnt%d", dev->ctrl.nr);

	if (IS_ERR(dev->qmi.task)) {
		pr_err("%s: Failed to create QMI client kthread\n", __func__);
		return -ENOMEM;
	}

	init_kthread_work(&dev->qmi.kwork, msm_slim_qmi_recv_msg);

	handle = qmi_handle_create(msm_slim_qmi_notify, dev);
	if (!handle) {
		rc = -ENOMEM;
		pr_err("%s: QMI client handle alloc failed\n", __func__);
		goto qmi_handle_create_failed;
	}

	rc = qmi_connect_to_service(handle, SLIMBUS_QMI_SVC_ID,
						SLIMBUS_QMI_INS_ID);
	if (rc < 0) {
		pr_err("%s: QMI server not found\n", __func__);
		goto qmi_connect_to_service_failed;
	}

	/* Instance is 0-based */
	req.instance = dev->ctrl.nr - 1;
	req.mode_valid = 1;

	/*
	 * Mode indicates the role requested of the ADSP: when the apps
	 * processor is the bus master, the ADSP is asked to run as a
	 * satellite, and vice versa.
	 */
	if (apps_is_master)
		req.mode = SLIMBUS_MODE_SATELLITE_V01;
	else
		req.mode = SLIMBUS_MODE_MASTER_V01;

	dev->qmi.handle = handle;

	rc = msm_slim_qmi_send_select_inst_req(dev, &req);
	if (rc) {
		pr_err("%s: failed to select h/w instance\n", __func__);
		goto qmi_select_instance_failed;
	}

	return 0;

qmi_select_instance_failed:
	dev->qmi.handle = NULL;
qmi_connect_to_service_failed:
	qmi_handle_destroy(handle);
qmi_handle_create_failed:
	flush_kthread_worker(&dev->qmi.kworker);
	kthread_stop(dev->qmi.task);
	dev->qmi.task = NULL;
	return rc;
}

void msm_slim_qmi_exit(struct msm_slim_ctrl *dev)
{
	qmi_handle_destroy(dev->qmi.handle);
	flush_kthread_worker(&dev->qmi.kworker);
	kthread_stop(dev->qmi.task);
	dev->qmi.task = NULL;
	dev->qmi.handle = NULL;
}

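/*
 * Vote the ADSP-side SLIMbus instance active or inactive.  Assumed call
 * flow (inferred from how these helpers fit together, not from callers in
 * this file): msm_slim_qmi_init() once at probe/resume, then
 * msm_slim_qmi_power_request(dev, true/false) around bus activity, and
 * msm_slim_qmi_exit() at teardown.
 */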
int msm_slim_qmi_power_request(struct msm_slim_ctrl *dev, bool active)
{
	struct slimbus_power_req_msg_v01 req;

	if (active)
		req.pm_req = SLIMBUS_PM_ACTIVE_V01;
	else
		req.pm_req = SLIMBUS_PM_INACTIVE_V01;

	return msm_slim_qmi_send_power_request(dev, &req);
}