/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/slimbus/slimbus.h>
#include <mach/sps.h>
#include "slim-msm.h"

int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
{
	spin_lock(&dev->rx_lock);
	if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
		spin_unlock(&dev->rx_lock);
		dev_err(dev->dev, "RX QUEUE full!");
		return -EXFULL;
	}
	memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
	dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
	spin_unlock(&dev->rx_lock);
	return 0;
}

int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->rx_lock, flags);
	if (dev->tail == dev->head) {
		spin_unlock_irqrestore(&dev->rx_lock, flags);
		return -ENODATA;
	}
	/* Each slot in the rx ring holds one fixed-size 40-byte message */
	memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40);
	dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
	spin_unlock_irqrestore(&dev->rx_lock, flags);
	return 0;
}
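
/*
 * Usage sketch (illustrative only, not driver code): the enqueue side
 * typically runs from the interrupt path, while a consumer drains one
 * fixed 40-byte slot at a time ('rx_buf' is a hypothetical caller buffer).
 *
 *	u32 rx_buf[10];
 *	while (!msm_slim_rx_dequeue(dev, (u8 *)rx_buf))
 *		...process one 40-byte message in rx_buf...
 */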

int msm_slim_get_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref = 0;
	int ret = pm_runtime_get_sync(dev->dev);
	if (ret >= 0) {
		ref = atomic_read(&dev->dev->power.usage_count);
		if (ref <= 0) {
			dev_err(dev->dev, "reference count -ve:%d", ref);
			ret = -ENODEV;
		}
	}
	return ret;
#else
	return -ENODEV;
#endif
}

void msm_slim_put_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref;
	pm_runtime_mark_last_busy(dev->dev);
	ref = atomic_read(&dev->dev->power.usage_count);
	if (ref <= 0)
		dev_err(dev->dev, "reference count mismatch:%d", ref);
	else
		pm_runtime_put(dev->dev);
#endif
}
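
/*
 * Runtime-PM sketch (illustrative, not driver code): bus accesses are
 * bracketed by a get/put vote so the controller stays powered only while
 * it is actually in use.
 *
 *	ret = msm_slim_get_ctrl(dev);
 *	if (ret < 0)
 *		return ret;
 *	...touch controller registers...
 *	msm_slim_put_ctrl(dev);
 */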

int msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
{
	int ret;
	struct sps_pipe *endpoint;
	struct sps_connect *config = &ep->config;

	/* Allocate the endpoint */
	endpoint = sps_alloc_endpoint();
	if (!endpoint) {
		dev_err(dev->dev, "sps_alloc_endpoint failed\n");
		return -ENOMEM;
	}

	/* Get default connection configuration for an endpoint */
	ret = sps_get_config(endpoint, config);
	if (ret) {
		dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
		goto sps_config_failed;
	}

	ep->sps = endpoint;
	return 0;

sps_config_failed:
	sps_free_endpoint(endpoint);
	return ret;
}

void msm_slim_free_endpoint(struct msm_slim_endp *ep)
{
	sps_free_endpoint(ep->sps);
	ep->sps = NULL;
}
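
/*
 * Endpoint lifecycle sketch (illustrative, not driver code): allocate and
 * pre-load the default config, connect with the tweaked config, then free
 * once disconnected.
 *
 *	struct msm_slim_endp ep;
 *	if (!msm_slim_init_endpoint(dev, &ep)) {
 *		...adjust ep.config, sps_connect(ep.sps, &ep.config)...
 *		msm_slim_free_endpoint(&ep);
 *	}
 */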

int msm_slim_sps_mem_alloc(
		struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
{
	dma_addr_t phys;

	mem->size = len;
	mem->min_size = 0;
	mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);

	if (!mem->base) {
		dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
		return -ENOMEM;
	}

	mem->phys_base = phys;
	memset(mem->base, 0x00, mem->size);
	return 0;
}

void
msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
{
	dma_free_coherent(dev->dev, mem->size, mem->base, mem->phys_base);
	mem->size = 0;
	mem->base = NULL;
	mem->phys_base = 0;
}
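
/*
 * Allocation sketch (illustrative, not driver code): a descriptor FIFO
 * sized for MSM_SLIM_DESC_NUM iovecs, released with the matching helper.
 *
 *	struct sps_mem_buffer fifo;
 *	if (!msm_slim_sps_mem_alloc(dev, &fifo,
 *			MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec)))
 *		msm_slim_sps_mem_free(dev, &fifo);
 */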

void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pn)
{
	u32 set_cfg = DEF_WATERMARK | DEF_ALIGN | DEF_PACK | ENABLE_PORT;
	u32 int_port = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
					dev->ver));
	writel_relaxed(set_cfg, PGD_PORT(PGD_PORT_CFGn, pn, dev->ver));
	writel_relaxed(DEF_BLKSZ, PGD_PORT(PGD_PORT_BLKn, pn, dev->ver));
	writel_relaxed(DEF_TRANSZ, PGD_PORT(PGD_PORT_TRANn, pn, dev->ver));
	writel_relaxed((int_port | 1 << pn), PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
					dev->ver));
	/* Make sure that port registers are updated before returning */
	mb();
}

int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
	struct msm_slim_endp *endpoint = &dev->pipes[pn];
	struct sps_connect *cfg = &endpoint->config;
	u32 stat;
	int ret = sps_get_config(dev->pipes[pn].sps, cfg);
	if (ret) {
		dev_err(dev->dev, "sps pipe-port get config error%x\n", ret);
		return ret;
	}
	cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
			SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	if (dev->pipes[pn].connected) {
		ret = sps_set_config(dev->pipes[pn].sps, cfg);
		if (ret) {
			dev_err(dev->dev, "sps pipe-port set config error:%x\n",
					ret);
			return ret;
		}
	}

	/* Bits 11:4 of the port status register hold the BAM pipe index */
	stat = readl_relaxed(PGD_PORT(PGD_PORT_STATn, (pn + dev->pipe_b),
				dev->ver));
	if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
		cfg->destination = dev->bam.hdl;
		cfg->source = SPS_DEV_HANDLE_MEM;
		cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->src_pipe_index = 0;
		dev_dbg(dev->dev, "flow src:pipe num:%d",
				cfg->dest_pipe_index);
		cfg->mode = SPS_MODE_DEST;
	} else {
		cfg->source = dev->bam.hdl;
		cfg->destination = SPS_DEV_HANDLE_MEM;
		cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->dest_pipe_index = 0;
		dev_dbg(dev->dev, "flow dest:pipe num:%d",
				cfg->src_pipe_index);
		cfg->mode = SPS_MODE_SRC;
	}
	/* Space for descriptor FIFOs */
	cfg->desc.size = MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec);
	cfg->config = SPS_CONFIG_DEFAULT;
	ret = sps_connect(dev->pipes[pn].sps, cfg);
	if (!ret) {
		dev->pipes[pn].connected = true;
		msm_hw_set_port(dev, pn + dev->pipe_b);
	}
	return ret;
}

int msm_config_port(struct slim_controller *ctrl, u8 pn)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	struct msm_slim_endp *endpoint;
	int ret = 0;
	if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
		ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
		return -EPROTONOSUPPORT;
	if (pn >= (MSM_SLIM_NPORTS - dev->pipe_b))
		return -ENODEV;

	endpoint = &dev->pipes[pn];
	ret = msm_slim_init_endpoint(dev, endpoint);
	dev_dbg(dev->dev, "init endpoint error code:%x\n", ret);
	return ret;
}

enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
				u8 pn, u8 **done_buf, u32 *done_len)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
	struct sps_iovec sio;
	int ret;
	if (done_len)
		*done_len = 0;
	if (done_buf)
		*done_buf = NULL;
	if (!dev->pipes[pn].connected)
		return SLIM_P_DISCONNECT;
	ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
	if (!ret) {
		if (done_len)
			*done_len = sio.size;
		if (done_buf)
			*done_buf = (u8 *)sio.addr;
	}
	dev_dbg(dev->dev, "get iovec returned %d\n", ret);
	return SLIM_P_INPROGRESS;
}
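
/*
 * Polling sketch (illustrative, not driver code): after a transfer has
 * been submitted, a caller can poll for a completed descriptor. Note the
 * function reports SLIM_P_INPROGRESS even when a descriptor was reaped;
 * a non-NULL done_buf is what signals completion of one buffer.
 *
 *	u8 *buf = NULL;
 *	u32 len = 0;
 *	if (msm_slim_port_xfer_status(ctrl, pn, &buf, &len) !=
 *			SLIM_P_DISCONNECT && buf)
 *		...'len' bytes finished at 'buf'...
 */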

int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf,
			u32 len, struct completion *comp)
{
	struct sps_register_event sreg;
	int ret;
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	if (pn >= 7)
		return -ENODEV;

	ctrl->ports[pn].xcomp = comp;
	sreg.options = (SPS_EVENT_DESC_DONE | SPS_EVENT_ERROR);
	sreg.mode = SPS_TRIGGER_WAIT;
	sreg.xfer_done = comp;
	sreg.callback = NULL;
	sreg.user = &ctrl->ports[pn];
	ret = sps_register_event(dev->pipes[pn].sps, &sreg);
	if (ret) {
		dev_dbg(dev->dev, "sps register event error:%x\n", ret);
		return ret;
	}
	ret = sps_transfer_one(dev->pipes[pn].sps, (u32)iobuf, len, NULL,
				SPS_IOVEC_FLAG_INT);
	dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);

	return ret;
}
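
/*
 * Submit-and-wait sketch (illustrative, not driver code): the completion
 * handed to msm_slim_port_xfer() is signalled by the SPS DESC_DONE event,
 * after which the finished descriptor can be reaped (done_buf/done_len are
 * assumed to be declared by the caller).
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *	if (!msm_slim_port_xfer(ctrl, pn, iobuf, len, &done)) {
 *		wait_for_completion(&done);
 *		msm_slim_port_xfer_status(ctrl, pn, &done_buf, &done_len);
 *	}
 */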

int msm_send_msg_buf(struct msm_slim_ctrl *dev, u32 *buf, u8 len, u32 tx_reg)
{
	int i;
	for (i = 0; i < (len + 3) >> 2; i++) {
		dev_dbg(dev->dev, "TX data:0x%x\n", buf[i]);
		writel_relaxed(buf[i], dev->base + tx_reg + (i * 4));
	}
	/* Guarantee that message is sent before returning */
	mb();
	return 0;
}

u32 *msm_get_msg_buf(struct msm_slim_ctrl *dev, int len)
{
	/*
	 * Currently, a transaction is blocked until the one in flight
	 * completes, so a single TX buffer suffices. If multiple
	 * outstanding transactions are ever needed, use a message queue.
	 */
	return dev->tx_buf;
}

static void
msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
{
	u32 *buf = ev->data.transfer.user;
	struct sps_iovec *iovec = &ev->data.transfer.iovec;

	/*
	 * Note: the virtual address needs to be offset by the same index
	 * as the physical address, or just pass in the actual virtual
	 * address if the sps_mem_buffer is not needed. If a completion is
	 * used instead, the virtual address won't be available and must be
	 * calculated from the offset of the physical address.
	 */
	if (ev->event_id == SPS_EVENT_DESC_DONE) {

		pr_debug("buf = 0x%p, data = 0x%x\n", buf, *buf);

		pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
			iovec->addr, iovec->size, iovec->flags);

	} else {
		dev_err(dev->dev, "%s: unknown event %d\n",
					__func__, ev->event_id);
	}
}

static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
{
	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;
	msm_slim_rx_msgq_event(dev, notify);
}

/* Queue up Rx message buffer */
static int msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
{
	int ret;
	u32 flags = SPS_IOVEC_FLAG_INT;
	struct msm_slim_endp *endpoint = &dev->rx_msgq;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_pipe *pipe = endpoint->sps;

	/* Rx message queue buffers are 4 bytes in length */
	u8 *virt_addr = mem->base + (4 * ix);
	u32 phys_addr = mem->phys_base + (4 * ix);

	pr_debug("index:%d, phys:0x%x, virt:0x%p\n", ix, phys_addr, virt_addr);

	ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, flags);
	if (ret)
		dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);

	return ret;
}

int msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
{
	struct msm_slim_endp *endpoint = &dev->rx_msgq;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_pipe *pipe = endpoint->sps;
	struct sps_iovec iovec;
	int index;
	int ret;

	ret = sps_get_iovec(pipe, &iovec);
	if (ret) {
		dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
		goto err_exit;
	}

	pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
		iovec.addr, iovec.size, iovec.flags);
	BUG_ON(iovec.addr < mem->phys_base);
	BUG_ON(iovec.addr >= mem->phys_base + mem->size);

	/* Calculate buffer index */
	index = (iovec.addr - mem->phys_base) / 4;
	*(data + offset) = *((u32 *)mem->base + index);

	pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index,
		*(data + offset));

	/* Add buffer back to the queue */
	(void)msm_slim_post_rx_msgq(dev, index);

err_exit:
	return ret;
}
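
/*
 * Read sketch (illustrative, not driver code): each completed descriptor
 * carries one 4-byte word, so a longer SLIMbus message is reassembled by
 * calling msm_slim_rx_msgq_get() once per word with an increasing offset
 * ('num_words' here is a hypothetical count known to the caller).
 *
 *	u32 msg[10];
 *	int i;
 *	for (i = 0; i < num_words; i++)
 *		if (msm_slim_rx_msgq_get(dev, msg, i))
 *			break;
 */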

int msm_slim_connect_endp(struct msm_slim_ctrl *dev,
				struct msm_slim_endp *endpoint,
				struct completion *notify)
{
	int i, ret;
	struct sps_register_event sps_error_event; /* SPS_ERROR */
	struct sps_register_event sps_descr_event; /* DESCR_DONE */
	struct sps_connect *config = &endpoint->config;

	ret = sps_connect(endpoint->sps, config);
	if (ret) {
		dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
		return ret;
	}

	memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));

	sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
	sps_descr_event.options = SPS_O_DESC_DONE;
	sps_descr_event.user = (void *)dev;
	sps_descr_event.xfer_done = notify;

	ret = sps_register_event(endpoint->sps, &sps_descr_event);
	if (ret) {
		dev_err(dev->dev, "sps_register_event failed 0x%x\n", ret);
		goto sps_reg_event_failed;
	}

	/* Register callback for errors */
	memset(&sps_error_event, 0x00, sizeof(sps_error_event));
	sps_error_event.mode = SPS_TRIGGER_CALLBACK;
	sps_error_event.options = SPS_O_ERROR;
	sps_error_event.user = (void *)dev;
	sps_error_event.callback = msm_slim_rx_msgq_cb;

	ret = sps_register_event(endpoint->sps, &sps_error_event);
	if (ret) {
		dev_err(dev->dev, "sps_register_event failed 0x%x\n", ret);
		goto sps_reg_event_failed;
	}

	/*
	 * Call transfer_one for each 4-byte buffer.
	 * Use (buf->size / 4) - 1 for the number of buffers to post.
	 */

	/* Setup the transfer */
	for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
		ret = msm_slim_post_rx_msgq(dev, i);
		if (ret) {
			dev_err(dev->dev, "post_rx_msgq() failed 0x%x\n", ret);
			goto sps_transfer_failed;
		}
	}

	return 0;
sps_transfer_failed:
	memset(&sps_error_event, 0x00, sizeof(sps_error_event));
	sps_register_event(endpoint->sps, &sps_error_event);
sps_reg_event_failed:
	sps_disconnect(endpoint->sps);
	return ret;
}

static int msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev, u32 pipe_reg)
{
	int ret;
	u32 pipe_offset;
	struct msm_slim_endp *endpoint = &dev->rx_msgq;
	struct sps_connect *config = &endpoint->config;
	struct sps_mem_buffer *descr = &config->desc;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct completion *notify = &dev->rx_msgq_notify;

	init_completion(notify);
	if (dev->use_rx_msgqs == MSM_MSGQ_DISABLED)
		return 0;

	/* Allocate the endpoint */
	ret = msm_slim_init_endpoint(dev, endpoint);
	if (ret) {
		dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
		goto sps_init_endpoint_failed;
	}

	/* Get the pipe indices for the message queues */
	pipe_offset = (readl_relaxed(dev->base + pipe_reg) & 0xfc) >> 2;
	dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);

	config->mode = SPS_MODE_SRC;
	config->source = dev->bam.hdl;
	config->destination = SPS_DEV_HANDLE_MEM;
	config->src_pipe_index = pipe_offset;
	config->options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* Allocate memory for the FIFO descriptors */
	ret = msm_slim_sps_mem_alloc(dev, descr,
				MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
	if (ret) {
		dev_err(dev->dev, "unable to allocate SPS descriptors\n");
		goto alloc_descr_failed;
	}

	/* Allocate memory for the message buffers: N descrs, 4-byte messages */
	ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
	if (ret) {
		dev_err(dev->dev, "dma_alloc_coherent failed\n");
		goto alloc_buffer_failed;
	}

	ret = msm_slim_connect_endp(dev, endpoint, notify);

	if (!ret) {
		dev->use_rx_msgqs = MSM_MSGQ_ENABLED;
		return 0;
	}

	msm_slim_sps_mem_free(dev, mem);
alloc_buffer_failed:
	msm_slim_sps_mem_free(dev, descr);
alloc_descr_failed:
	msm_slim_free_endpoint(endpoint);
sps_init_endpoint_failed:
	dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
	return ret;
}

/* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */
int msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem,
			u32 pipe_reg, bool remote)
{
	int i, ret;
	u32 bam_handle = 0;
	struct sps_bam_props bam_props = {0};

	static struct sps_bam_sec_config_props sec_props = {
		.ees = {
			[0] = {		/* LPASS */
				.vmid = 0,
				.pipe_mask = 0xFFFF98,
			},
			[1] = {		/* Krait Apps */
				.vmid = 1,
				.pipe_mask = 0x3F000007,
			},
			[2] = {		/* Modem */
				.vmid = 2,
				.pipe_mask = 0x00000060,
			},
		},
	};

	if (dev->bam.hdl)
		goto init_rx_msgq;
	bam_props.ee = dev->ee;
	bam_props.virt_addr = dev->bam.base;
	bam_props.phys_addr = bam_mem->start;
	bam_props.irq = dev->bam.irq;
	if (!remote) {
		bam_props.manage = SPS_BAM_MGR_LOCAL;
		bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
	} else {
		bam_props.manage = SPS_BAM_MGR_DEVICE_REMOTE |
					SPS_BAM_MGR_MULTI_EE;
		bam_props.sec_config = SPS_BAM_SEC_DO_NOT_CONFIG;
	}
	bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;

	bam_props.p_sec_config_props = &sec_props;

	bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* First 7 pipes (mask bits) are for message Qs */
	for (i = 7; i < 32; i++) {
		/* Check what pipes are owned by Apps. */
		if ((sec_props.ees[dev->ee].pipe_mask >> i) & 0x1)
			break;
	}
	dev->pipe_b = i - 7;

	/* Register the BAM device with the SPS driver */
	ret = sps_register_bam_device(&bam_props, &bam_handle);
	if (ret) {
		dev_err(dev->dev, "disabling BAM: reg-bam failed 0x%x\n", ret);
		dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
		goto init_rx_msgq;
	}
	dev->bam.hdl = bam_handle;
	dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%x\n", bam_handle);

init_rx_msgq:
	ret = msm_slim_init_rx_msgq(dev, pipe_reg);
	if (ret)
		dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
	if (ret && bam_handle) {
		sps_deregister_bam_device(bam_handle);
		dev->bam.hdl = 0L;
	}
	return ret;
}
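
/*
 * Bring-up sketch (illustrative, not driver code): probe-time setup
 * registers the BAM and message-queue endpoints; teardown mirrors it.
 *
 *	ret = msm_slim_sps_init(dev, bam_mem, pipe_reg, false);
 *	...
 *	msm_slim_sps_exit(dev, true);	(true also deregisters the BAM)
 */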

void msm_slim_sps_exit(struct msm_slim_ctrl *dev, bool dereg)
{
	if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED) {
		struct msm_slim_endp *endpoint = &dev->rx_msgq;
		struct sps_connect *config = &endpoint->config;
		struct sps_mem_buffer *descr = &config->desc;
		struct sps_mem_buffer *mem = &endpoint->buf;
		struct sps_register_event sps_event;
		memset(&sps_event, 0x00, sizeof(sps_event));
		msm_slim_sps_mem_free(dev, mem);
		sps_register_event(endpoint->sps, &sps_event);
		sps_disconnect(endpoint->sps);
		msm_slim_sps_mem_free(dev, descr);
		msm_slim_free_endpoint(endpoint);
		dev->use_rx_msgqs = MSM_MSGQ_RESET;
	}
	if (dereg) {
		sps_deregister_bam_device(dev->bam.hdl);
		dev->bam.hdl = 0L;
	}
}

/* Slimbus QMI Messaging */
#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01 0x0020
#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_V01 0x0020
#define SLIMBUS_QMI_POWER_REQ_V01 0x0021
#define SLIMBUS_QMI_POWER_RESP_V01 0x0021

#define SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN 7
#define SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN 7
#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN 14
#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN 7

enum slimbus_mode_enum_type_v01 {
	/* To force a 32-bit signed enum. Do not change or use */
	SLIMBUS_MODE_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
	SLIMBUS_MODE_SATELLITE_V01 = 1,
	SLIMBUS_MODE_MASTER_V01 = 2,
	SLIMBUS_MODE_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
};

enum slimbus_pm_enum_type_v01 {
	/* To force a 32-bit signed enum. Do not change or use */
	SLIMBUS_PM_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
	SLIMBUS_PM_INACTIVE_V01 = 1,
	SLIMBUS_PM_ACTIVE_V01 = 2,
	SLIMBUS_PM_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
};

struct slimbus_select_inst_req_msg_v01 {
	/* Mandatory */
	/* Hardware Instance Selection */
	uint32_t instance;

	/* Optional */
	/* Optional Mode Request Operation */
	/* Must be set to true if mode is being passed */
	uint8_t mode_valid;
	enum slimbus_mode_enum_type_v01 mode;
};

struct slimbus_select_inst_resp_msg_v01 {
	/* Mandatory */
	/* Result Code */
	struct qmi_response_type_v01 resp;
};

struct slimbus_power_req_msg_v01 {
	/* Mandatory */
	/* Power Request Operation */
	enum slimbus_pm_enum_type_v01 pm_req;
};

struct slimbus_power_resp_msg_v01 {
	/* Mandatory */
	/* Result Code */
	struct qmi_response_type_v01 resp;
};
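
/*
 * Encoding sketch (illustrative, not driver code): how a select-instance
 * request is populated before being serialized by the elem_info tables
 * below (instance maps to TLV 0x01, mode_valid/mode to TLV 0x10).
 *
 *	struct slimbus_select_inst_req_msg_v01 req = {
 *		.instance = 0,
 *		.mode_valid = 1,
 *		.mode = SLIMBUS_MODE_MASTER_V01,
 *	};
 */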

static struct elem_info slimbus_select_inst_req_msg_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(uint32_t),
		.is_array = NO_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct slimbus_select_inst_req_msg_v01,
				   instance),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(uint8_t),
		.is_array = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct slimbus_select_inst_req_msg_v01,
				   mode_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(enum slimbus_mode_enum_type_v01),
		.is_array = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct slimbus_select_inst_req_msg_v01,
				   mode),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.is_array = NO_ARRAY,
		.tlv_type = 0x00,
		.offset = 0,
		.ei_array = NULL,
	},
};

static struct elem_info slimbus_select_inst_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.is_array = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct slimbus_select_inst_resp_msg_v01,
				   resp),
		.ei_array = get_qmi_response_type_v01_ei(),
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.is_array = NO_ARRAY,
		.tlv_type = 0x00,
		.offset = 0,
		.ei_array = NULL,
	},
};

static struct elem_info slimbus_power_req_msg_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(enum slimbus_pm_enum_type_v01),
		.is_array = NO_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct slimbus_power_req_msg_v01, pm_req),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.is_array = NO_ARRAY,
		.tlv_type = 0x00,
		.offset = 0,
		.ei_array = NULL,
	},
};

static struct elem_info slimbus_power_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.is_array = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct slimbus_power_resp_msg_v01, resp),
		.ei_array = get_qmi_response_type_v01_ei(),
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.is_array = NO_ARRAY,
		.tlv_type = 0x00,
		.offset = 0,
		.ei_array = NULL,
	},
};

static void msm_slim_qmi_recv_msg(struct kthread_work *work)
{
	int rc;
	struct msm_slim_qmi *qmi =
		container_of(work, struct msm_slim_qmi, kwork);

	rc = qmi_recv_msg(qmi->handle);
	if (rc < 0)
		pr_err("%s: Error receiving QMI message\n", __func__);
}

static void msm_slim_qmi_notify(struct qmi_handle *handle,
				enum qmi_event_type event, void *notify_priv)
{
	struct msm_slim_ctrl *dev = notify_priv;
	struct msm_slim_qmi *qmi = &dev->qmi;

	switch (event) {
	case QMI_RECV_MSG:
		queue_kthread_work(&qmi->kworker, &qmi->kwork);
		break;
	default:
		break;
	}
}

static const char *get_qmi_error(struct qmi_response_type_v01 *r)
{
	if (r->result == QMI_RESULT_SUCCESS_V01 || r->error == QMI_ERR_NONE_V01)
		return "No Error";
	else if (r->error == QMI_ERR_NO_MEMORY_V01)
		return "Out of Memory";
	else if (r->error == QMI_ERR_INTERNAL_V01)
		return "Unexpected error occurred";
	else if (r->error == QMI_ERR_INCOMPATIBLE_STATE_V01)
		return "Slimbus s/w already configured to a different mode";
	else if (r->error == QMI_ERR_INVALID_ID_V01)
		return "Slimbus hardware instance is not valid";
	else
		return "Unknown error";
}

static int msm_slim_qmi_send_select_inst_req(struct msm_slim_ctrl *dev,
				struct slimbus_select_inst_req_msg_v01 *req)
{
	struct slimbus_select_inst_resp_msg_v01 resp = { { 0, 0 } };
	struct msg_desc req_desc, resp_desc;
	int rc;

	req_desc.msg_id = SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01;
	req_desc.max_msg_len = SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN;
	req_desc.ei_array = slimbus_select_inst_req_msg_v01_ei;

	resp_desc.msg_id = SLIMBUS_QMI_SELECT_INSTANCE_RESP_V01;
	resp_desc.max_msg_len = SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN;
	resp_desc.ei_array = slimbus_select_inst_resp_msg_v01_ei;

	rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req, sizeof(*req),
					&resp_desc, &resp, sizeof(resp), 5000);
	if (rc < 0) {
		pr_err("%s: QMI send req failed %d\n", __func__, rc);
		return rc;
	}

	/* Check the response */
	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		pr_err("%s: QMI request failed 0x%x (%s)\n", __func__,
				resp.resp.result, get_qmi_error(&resp.resp));
		return -EREMOTEIO;
	}

	return 0;
}

static int msm_slim_qmi_send_power_request(struct msm_slim_ctrl *dev,
				struct slimbus_power_req_msg_v01 *req)
{
	struct slimbus_power_resp_msg_v01 resp = { { 0, 0 } };
	struct msg_desc req_desc, resp_desc;
	int rc;

	req_desc.msg_id = SLIMBUS_QMI_POWER_REQ_V01;
	req_desc.max_msg_len = SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN;
	req_desc.ei_array = slimbus_power_req_msg_v01_ei;

	resp_desc.msg_id = SLIMBUS_QMI_POWER_RESP_V01;
	resp_desc.max_msg_len = SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN;
	resp_desc.ei_array = slimbus_power_resp_msg_v01_ei;

	rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req, sizeof(*req),
					&resp_desc, &resp, sizeof(resp), 5000);
	if (rc < 0) {
		pr_err("%s: QMI send req failed %d\n", __func__, rc);
		return rc;
	}

	/* Check the response */
	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		pr_err("%s: QMI request failed 0x%x (%s)\n", __func__,
				resp.resp.result, get_qmi_error(&resp.resp));
		return -EREMOTEIO;
	}

	return 0;
}

int msm_slim_qmi_init(struct msm_slim_ctrl *dev, bool apps_is_master)
{
	int rc = 0;
	struct qmi_handle *handle;
	struct slimbus_select_inst_req_msg_v01 req;

	init_kthread_worker(&dev->qmi.kworker);

	dev->qmi.task = kthread_run(kthread_worker_fn,
			&dev->qmi.kworker, "msm_slim_qmi_clnt%d", dev->ctrl.nr);

	if (IS_ERR(dev->qmi.task)) {
		pr_err("%s: Failed to create QMI client kthread\n", __func__);
		return -ENOMEM;
	}

	init_kthread_work(&dev->qmi.kwork, msm_slim_qmi_recv_msg);

	handle = qmi_handle_create(msm_slim_qmi_notify, dev);
	if (!handle) {
		rc = -ENOMEM;
		pr_err("%s: QMI client handle alloc failed\n", __func__);
		goto qmi_handle_create_failed;
	}

	rc = qmi_connect_to_service(handle, SLIMBUS_QMI_SVC_ID,
						SLIMBUS_QMI_INS_ID);
	if (rc < 0) {
		pr_err("%s: QMI server not found\n", __func__);
		goto qmi_connect_to_service_failed;
	}

	/* Instance is 0-based */
	req.instance = dev->ctrl.nr - 1;
	req.mode_valid = 1;

	/* Mode indicates the role of the ADSP */
	if (apps_is_master)
		req.mode = SLIMBUS_MODE_SATELLITE_V01;
	else
		req.mode = SLIMBUS_MODE_MASTER_V01;

	dev->qmi.handle = handle;

	rc = msm_slim_qmi_send_select_inst_req(dev, &req);
	if (rc) {
		pr_err("%s: failed to select h/w instance\n", __func__);
		goto qmi_select_instance_failed;
	}

	return 0;

qmi_select_instance_failed:
	dev->qmi.handle = NULL;
qmi_connect_to_service_failed:
	qmi_handle_destroy(handle);
qmi_handle_create_failed:
	flush_kthread_worker(&dev->qmi.kworker);
	kthread_stop(dev->qmi.task);
	dev->qmi.task = NULL;
	return rc;
}

void msm_slim_qmi_exit(struct msm_slim_ctrl *dev)
{
	qmi_handle_destroy(dev->qmi.handle);
	flush_kthread_worker(&dev->qmi.kworker);
	kthread_stop(dev->qmi.task);
	dev->qmi.task = NULL;
	dev->qmi.handle = NULL;
}

int msm_slim_qmi_power_request(struct msm_slim_ctrl *dev, bool active)
{
	struct slimbus_power_req_msg_v01 req;

	if (active)
		req.pm_req = SLIMBUS_PM_ACTIVE_V01;
	else
		req.pm_req = SLIMBUS_PM_INACTIVE_V01;

	return msm_slim_qmi_send_power_request(dev, &req);
}
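
/*
 * End-to-end sketch (illustrative, not driver code): bring up the QMI
 * link, vote the bus active around use, and tear down symmetrically.
 *
 *	if (!msm_slim_qmi_init(dev, apps_is_master)) {
 *		msm_slim_qmi_power_request(dev, true);
 *		...
 *		msm_slim_qmi_power_request(dev, false);
 *		msm_slim_qmi_exit(dev);
 *	}
 */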
955}