/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/slimbus/slimbus.h>
#include <mach/sps.h>
#include "slim-msm.h"

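/*
 * Push one received message into the circular RX buffer shared with the
 * interrupt path. Returns -EXFULL when all MSM_CONCUR_MSG slots are in use.
 */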
int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
{
	spin_lock(&dev->rx_lock);
	if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
		spin_unlock(&dev->rx_lock);
		dev_err(dev->dev, "RX QUEUE full!");
		return -EXFULL;
	}
	memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
	dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
	spin_unlock(&dev->rx_lock);
	return 0;
}

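/*
 * Pop the oldest message from the circular RX buffer. The full 40-byte
 * slot is copied out, so buf must be at least that large; returns
 * -ENODATA when the buffer is empty.
 */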
int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->rx_lock, flags);
	if (dev->tail == dev->head) {
		spin_unlock_irqrestore(&dev->rx_lock, flags);
		return -ENODATA;
	}
	memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40);
	dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
	spin_unlock_irqrestore(&dev->rx_lock, flags);
	return 0;
}

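/*
 * Take a runtime-PM reference on the controller, resuming it if needed.
 * Returns a negative errno if the resume fails, if the usage count looks
 * inconsistent, or if runtime PM is not compiled in.
 */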
int msm_slim_get_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref = 0;
	int ret = pm_runtime_get_sync(dev->dev);
	if (ret >= 0) {
		ref = atomic_read(&dev->dev->power.usage_count);
		if (ref <= 0) {
			dev_err(dev->dev, "reference count -ve:%d", ref);
			ret = -ENODEV;
		}
	}
	return ret;
#else
	return -ENODEV;
#endif
}
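
/*
 * Drop the runtime-PM reference taken by msm_slim_get_ctrl() and mark the
 * device busy so the autosuspend timer restarts.
 */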
void msm_slim_put_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref;
	pm_runtime_mark_last_busy(dev->dev);
	ref = atomic_read(&dev->dev->power.usage_count);
	if (ref <= 0)
		dev_err(dev->dev, "reference count mismatch:%d", ref);
	else
		pm_runtime_put(dev->dev);
#endif
}

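/*
 * Allocate an SPS endpoint and prime ep->config with the default
 * connection settings; callers fill in mode and pipe indices afterwards.
 */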
int msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
{
	int ret;
	struct sps_pipe *endpoint;
	struct sps_connect *config = &ep->config;

	/* Allocate the endpoint */
	endpoint = sps_alloc_endpoint();
	if (!endpoint) {
		dev_err(dev->dev, "sps_alloc_endpoint failed\n");
		return -ENOMEM;
	}

	/* Get default connection configuration for an endpoint */
	ret = sps_get_config(endpoint, config);
	if (ret) {
		dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
		goto sps_config_failed;
	}

	ep->sps = endpoint;
	return 0;

sps_config_failed:
	sps_free_endpoint(endpoint);
	return ret;
}

void msm_slim_free_endpoint(struct msm_slim_endp *ep)
{
	sps_free_endpoint(ep->sps);
	ep->sps = NULL;
}

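/*
 * Allocate a zeroed DMA-coherent buffer of 'len' bytes for SPS use and
 * record its virtual and physical base addresses in 'mem'.
 */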
int msm_slim_sps_mem_alloc(
		struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
{
	dma_addr_t phys;

	mem->size = len;
	mem->min_size = 0;
	mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);

	if (!mem->base) {
		dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
		return -ENOMEM;
	}

	mem->phys_base = phys;
	memset(mem->base, 0x00, mem->size);
	return 0;
}

void
msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
{
	dma_free_coherent(dev->dev, mem->size, mem->base, mem->phys_base);
	mem->size = 0;
	mem->base = NULL;
	mem->phys_base = 0;
}

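/*
 * Program a manager port with default watermark, alignment, packing,
 * block and transaction sizes, enable it, and unmask its interrupt.
 */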
void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pn)
{
	u32 set_cfg = DEF_WATERMARK | DEF_ALIGN | DEF_PACK | ENABLE_PORT;
	u32 int_port = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
						dev->ver));
	writel_relaxed(set_cfg, PGD_PORT(PGD_PORT_CFGn, pn, dev->ver));
	writel_relaxed(DEF_BLKSZ, PGD_PORT(PGD_PORT_BLKn, pn, dev->ver));
	writel_relaxed(DEF_TRANSZ, PGD_PORT(PGD_PORT_TRANn, pn, dev->ver));
	writel_relaxed((int_port | (1 << pn)), PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
						dev->ver));
	/* Make sure that port registers are updated before returning */
	mb();
}

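/*
 * Connect a data port to its BAM pipe. The pipe index is read back from
 * the port status register, and the transfer direction (BAM-to-memory or
 * memory-to-BAM) follows the SLIMbus flow configured for the port.
 */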
int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
	struct msm_slim_endp *endpoint = &dev->pipes[pn];
	struct sps_connect *cfg = &endpoint->config;
	u32 stat;
	int ret = sps_get_config(dev->pipes[pn].sps, cfg);
	if (ret) {
		dev_err(dev->dev, "sps pipe-port get config error 0x%x\n",
				ret);
		return ret;
	}
	cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	if (dev->pipes[pn].connected) {
		ret = sps_set_config(dev->pipes[pn].sps, cfg);
		if (ret) {
			dev_err(dev->dev, "sps pipe-port set config error 0x%x\n",
					ret);
			return ret;
		}
	}

	stat = readl_relaxed(PGD_PORT(PGD_PORT_STATn, (pn + dev->pipe_b),
					dev->ver));
	if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
		cfg->destination = dev->bam.hdl;
		cfg->source = SPS_DEV_HANDLE_MEM;
		cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->src_pipe_index = 0;
		dev_dbg(dev->dev, "flow src:pipe num:%d",
					cfg->dest_pipe_index);
		cfg->mode = SPS_MODE_DEST;
	} else {
		cfg->source = dev->bam.hdl;
		cfg->destination = SPS_DEV_HANDLE_MEM;
		cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->dest_pipe_index = 0;
		dev_dbg(dev->dev, "flow dest:pipe num:%d",
					cfg->src_pipe_index);
		cfg->mode = SPS_MODE_SRC;
	}
	/* Space for descriptor FIFOs */
	cfg->desc.size = MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec);
	cfg->config = SPS_CONFIG_DEFAULT;
	ret = sps_connect(dev->pipes[pn].sps, cfg);
	if (!ret) {
		dev->pipes[pn].connected = true;
		msm_hw_set_port(dev, pn + dev->pipe_b);
	}
	return ret;
}

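/*
 * Controller hook invoked when a port is requested; half-duplex and
 * multi-channel requests are not supported by this controller.
 */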
int msm_config_port(struct slim_controller *ctrl, u8 pn)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	struct msm_slim_endp *endpoint;
	int ret = 0;
	if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
		ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
		return -EPROTONOSUPPORT;
	if (pn >= (MSM_SLIM_NPORTS - dev->pipe_b))
		return -ENODEV;

	endpoint = &dev->pipes[pn];
	ret = msm_slim_init_endpoint(dev, endpoint);
	dev_dbg(dev->dev, "sps endpoint init error code:%x\n", ret);
	return ret;
}

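/*
 * Report the status of the most recent transfer on a port: a completed
 * descriptor fills in the done buffer and length, a disconnected pipe
 * reports SLIM_P_DISCONNECT, and anything else is still in progress.
 */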
enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
				u8 pn, u8 **done_buf, u32 *done_len)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
	struct sps_iovec sio;
	int ret;
	if (done_len)
		*done_len = 0;
	if (done_buf)
		*done_buf = NULL;
	if (!dev->pipes[pn].connected)
		return SLIM_P_DISCONNECT;
	ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
	if (!ret) {
		if (done_len)
			*done_len = sio.size;
		if (done_buf)
			*done_buf = (u8 *)sio.addr;
	}
	dev_dbg(dev->dev, "get iovec returned %d\n", ret);
	return SLIM_P_INPROGRESS;
}

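/*
 * Submit a single port transfer and arrange for 'comp' to be completed
 * when its descriptor is done. Only port numbers 0-6 are accepted here.
 */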
int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf,
			u32 len, struct completion *comp)
{
	struct sps_register_event sreg;
	int ret;
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	if (pn >= 7)
		return -ENODEV;

	ctrl->ports[pn].xcomp = comp;
	sreg.options = (SPS_EVENT_DESC_DONE | SPS_EVENT_ERROR);
	sreg.mode = SPS_TRIGGER_WAIT;
	sreg.xfer_done = comp;
	sreg.callback = NULL;
	sreg.user = &ctrl->ports[pn];
	ret = sps_register_event(dev->pipes[pn].sps, &sreg);
	if (ret) {
		dev_dbg(dev->dev, "sps register event error:%x\n", ret);
		return ret;
	}
	ret = sps_transfer_one(dev->pipes[pn].sps, (u32)iobuf, len, NULL,
				SPS_IOVEC_FLAG_INT);
	dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);

	return ret;
}

/* Queue up Tx message buffer */
static int msm_slim_post_tx_msgq(struct msm_slim_ctrl *dev, u8 *buf, int len)
{
	int ret;
	struct msm_slim_endp *endpoint = &dev->tx_msgq;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_pipe *pipe = endpoint->sps;
	int ix = (buf - (u8 *)mem->base) / SLIM_MSGQ_BUF_LEN;

	u32 phys_addr = mem->phys_base + (SLIM_MSGQ_BUF_LEN * ix);

	for (ret = 0; ret < ((len + 3) >> 2); ret++)
		pr_debug("BAM TX buf[%d]:0x%x", ret, ((u32 *)buf)[ret]);

	ret = sps_transfer_one(pipe, phys_addr, ((len + 3) & 0xFC), NULL,
				SPS_IOVEC_FLAG_EOT);
	if (ret)
		dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);

	return ret;
}

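/*
 * Hand out the next free TX message-queue buffer: the first call after a
 * (re)connect returns the base of the buffer area, and later calls
 * reclaim a completed descriptor from the pipe to find the free slot.
 */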
static u32 *msm_slim_tx_msgq_return(struct msm_slim_ctrl *dev)
{
	struct msm_slim_endp *endpoint = &dev->tx_msgq;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_pipe *pipe = endpoint->sps;
	struct sps_iovec iovec;
	int ret;

	/* first transaction after establishing connection */
	if (dev->tx_idx == -1) {
		dev->tx_idx = 0;
		return mem->base;
	}
	ret = sps_get_iovec(pipe, &iovec);
	if (ret || iovec.addr == 0) {
		dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
		return NULL;
	}

	/* Calculate buffer index */
	dev->tx_idx = (iovec.addr - mem->phys_base) / SLIM_MSGQ_BUF_LEN;

	return (u32 *)((u8 *)mem->base + (dev->tx_idx * SLIM_MSGQ_BUF_LEN));
}

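/*
 * Send a message staged by msm_get_msg_buf(): through the TX message
 * queue when it is enabled, otherwise word-by-word over AHB writes to
 * tx_reg, with a barrier to make sure the message hits the hardware.
 */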
int msm_send_msg_buf(struct msm_slim_ctrl *dev, u32 *buf, u8 len, u32 tx_reg)
{
	if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED) {
		int i;
		for (i = 0; i < (len + 3) >> 2; i++) {
			dev_dbg(dev->dev, "AHB TX data:0x%x\n", buf[i]);
			writel_relaxed(buf[i], dev->base + tx_reg + (i * 4));
		}
		/* Guarantee that message is sent before returning */
		mb();
		return 0;
	}
	return msm_slim_post_tx_msgq(dev, (u8 *)buf, len);
}

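/*
 * Stage a TX buffer for msm_send_msg_buf(): the single AHB bounce buffer
 * when message queues are off, or the next free message-queue slot.
 */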
u32 *msm_get_msg_buf(struct msm_slim_ctrl *dev, int len)
{
	/*
	 * Currently we block a transaction until the current one completes.
	 * In case we need multiple transactions, use message Q
	 */
	if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED)
		return dev->tx_buf;

	return msm_slim_tx_msgq_return(dev);
}

static void
msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
{
	u32 *buf = ev->data.transfer.user;
	struct sps_iovec *iovec = &ev->data.transfer.iovec;

	/*
	 * Note the virtual address needs to be offset by the same index
	 * as the physical address or just pass in the actual virtual address
	 * if the sps_mem_buffer is not needed. Note that if completion is
	 * used, the virtual address won't be available and will need to be
	 * calculated based on the offset of the physical address
	 */
	if (ev->event_id == SPS_EVENT_DESC_DONE) {

		pr_debug("buf = 0x%p, data = 0x%x\n", buf, *buf);

		pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
			iovec->addr, iovec->size, iovec->flags);

	} else {
		dev_err(dev->dev, "%s: unknown event %d\n",
					__func__, ev->event_id);
	}
}

static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
{
	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;
	msm_slim_rx_msgq_event(dev, notify);
}

/* Queue up Rx message buffer */
static int msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
{
	int ret;
	u32 flags = SPS_IOVEC_FLAG_INT;
	struct msm_slim_endp *endpoint = &dev->rx_msgq;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_pipe *pipe = endpoint->sps;

	/* Rx message queue buffers are 4 bytes in length */
	u8 *virt_addr = mem->base + (4 * ix);
	u32 phys_addr = mem->phys_base + (4 * ix);

	pr_debug("index:%d, phys:0x%x, virt:0x%p\n", ix, phys_addr, virt_addr);

	ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, flags);
	if (ret)
		dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);

	return ret;
}

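/*
 * Read one 4-byte word from the RX message queue into data[offset] and
 * immediately repost the consumed buffer to the pipe.
 */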
int msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
{
	struct msm_slim_endp *endpoint = &dev->rx_msgq;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_pipe *pipe = endpoint->sps;
	struct sps_iovec iovec;
	int index;
	int ret;

	ret = sps_get_iovec(pipe, &iovec);
	if (ret) {
		dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
		goto err_exit;
	}

	pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
			iovec.addr, iovec.size, iovec.flags);
	BUG_ON(iovec.addr < mem->phys_base);
	BUG_ON(iovec.addr >= mem->phys_base + mem->size);

	/* Calculate buffer index */
	index = (iovec.addr - mem->phys_base) / 4;
	*(data + offset) = *((u32 *)mem->base + index);

	pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index, *data);

	/* Add buffer back to the queue */
	(void)msm_slim_post_rx_msgq(dev, index);

err_exit:
	return ret;
}

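/*
 * Connect an initialized message-queue endpoint to the BAM and register
 * its error callback. A non-NULL 'notify' is additionally registered for
 * descriptor-done events. On success the RX queue has all but one of its
 * descriptors preposted, while the TX queue resets its buffer index.
 */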
int msm_slim_connect_endp(struct msm_slim_ctrl *dev,
				struct msm_slim_endp *endpoint,
				struct completion *notify)
{
	int i, ret;
	struct sps_register_event sps_error_event; /* SPS_ERROR */
	struct sps_register_event sps_descr_event; /* DESCR_DONE */
	struct sps_connect *config = &endpoint->config;

	ret = sps_connect(endpoint->sps, config);
	if (ret) {
		dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
		return ret;
	}

	memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));

	if (notify) {
		sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
		sps_descr_event.options = SPS_O_DESC_DONE;
		sps_descr_event.user = (void *)dev;
		sps_descr_event.xfer_done = notify;

		ret = sps_register_event(endpoint->sps, &sps_descr_event);
		if (ret) {
			dev_err(dev->dev, "sps_register_event() failed 0x%x\n",
					ret);
			goto sps_reg_event_failed;
		}
	}

	/* Register callback for errors */
	memset(&sps_error_event, 0x00, sizeof(sps_error_event));
	sps_error_event.mode = SPS_TRIGGER_CALLBACK;
	sps_error_event.options = SPS_O_ERROR;
	sps_error_event.user = (void *)dev;
	sps_error_event.callback = msm_slim_rx_msgq_cb;

	ret = sps_register_event(endpoint->sps, &sps_error_event);
	if (ret) {
		dev_err(dev->dev, "sps_register_event() failed 0x%x\n", ret);
		goto sps_reg_event_failed;
	}

	/*
	 * Call transfer_one for each 4-byte buffer.
	 * Use (buf->size/4) - 1 for the number of buffers to post.
	 */

	if (endpoint == &dev->rx_msgq) {
		/* Setup the transfer */
		for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
			ret = msm_slim_post_rx_msgq(dev, i);
			if (ret) {
				dev_err(dev->dev,
					"post_rx_msgq() failed 0x%x\n", ret);
				goto sps_transfer_failed;
			}
		}
		dev->use_rx_msgqs = MSM_MSGQ_ENABLED;
	} else {
		dev->tx_idx = -1;
		dev->use_tx_msgqs = MSM_MSGQ_ENABLED;
	}

	return 0;
sps_transfer_failed:
	memset(&sps_error_event, 0x00, sizeof(sps_error_event));
	sps_register_event(endpoint->sps, &sps_error_event);
sps_reg_event_failed:
	sps_disconnect(endpoint->sps);
	return ret;
}

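/*
 * Set up the RX message-queue endpoint: resolve its pipe index from
 * 'pipe_reg', allocate the descriptor FIFO and 4-byte message buffers,
 * and connect it. Any failure disables RX message queues.
 */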
static int msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev, u32 pipe_reg)
{
	int ret;
	u32 pipe_offset;
	struct msm_slim_endp *endpoint = &dev->rx_msgq;
	struct sps_connect *config = &endpoint->config;
	struct sps_mem_buffer *descr = &config->desc;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct completion *notify = &dev->rx_msgq_notify;

	init_completion(notify);
	if (dev->use_rx_msgqs == MSM_MSGQ_DISABLED)
		return 0;

	/* Allocate the endpoint */
	ret = msm_slim_init_endpoint(dev, endpoint);
	if (ret) {
		dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
		goto sps_init_endpoint_failed;
	}

	/* Get the pipe indices for the message queues */
	pipe_offset = (readl_relaxed(dev->base + pipe_reg) & 0xfc) >> 2;
	dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);

	config->mode = SPS_MODE_SRC;
	config->source = dev->bam.hdl;
	config->destination = SPS_DEV_HANDLE_MEM;
	config->src_pipe_index = pipe_offset;
	config->options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* Allocate memory for the FIFO descriptors */
	ret = msm_slim_sps_mem_alloc(dev, descr,
				MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
	if (ret) {
		dev_err(dev->dev, "unable to allocate SPS descriptors\n");
		goto alloc_descr_failed;
	}

	/* Allocate memory for the message buffer(s), N descrs, 4-byte mesg */
	ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
	if (ret) {
		dev_err(dev->dev, "dma_alloc_coherent failed\n");
		goto alloc_buffer_failed;
	}

	ret = msm_slim_connect_endp(dev, endpoint, notify);

	if (!ret)
		return 0;

	msm_slim_sps_mem_free(dev, mem);
alloc_buffer_failed:
	msm_slim_sps_mem_free(dev, descr);
alloc_descr_failed:
	msm_slim_free_endpoint(endpoint);
sps_init_endpoint_failed:
	dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
	return ret;
}

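/*
 * Set up the TX message-queue endpoint (pipe index one past the RX
 * pipe), allocate its descriptor FIFO and SLIM_MSGQ_BUF_LEN-sized
 * buffers, and connect it. Any failure disables TX message queues.
 */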
static int msm_slim_init_tx_msgq(struct msm_slim_ctrl *dev, u32 pipe_reg)
{
	int ret;
	u32 pipe_offset;
	struct msm_slim_endp *endpoint = &dev->tx_msgq;
	struct sps_connect *config = &endpoint->config;
	struct sps_mem_buffer *descr = &config->desc;
	struct sps_mem_buffer *mem = &endpoint->buf;

	if (dev->use_tx_msgqs == MSM_MSGQ_DISABLED)
		return 0;

	/* Allocate the endpoint */
	ret = msm_slim_init_endpoint(dev, endpoint);
	if (ret) {
		dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
		goto sps_init_endpoint_failed;
	}

	/* Get the pipe indices for the message queues */
	pipe_offset = (readl_relaxed(dev->base + pipe_reg) & 0xfc) >> 2;
	pipe_offset += 1;
	dev_dbg(dev->dev, "TX Message queue pipe offset %d\n", pipe_offset);

	config->mode = SPS_MODE_DEST;
	config->source = SPS_DEV_HANDLE_MEM;
	config->destination = dev->bam.hdl;
	config->dest_pipe_index = pipe_offset;
	config->src_pipe_index = 0;
	config->options = SPS_O_ERROR | SPS_O_NO_Q |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* Allocate memory for the FIFO descriptors */
	ret = msm_slim_sps_mem_alloc(dev, descr,
				MSM_TX_BUFS * sizeof(struct sps_iovec));
	if (ret) {
		dev_err(dev->dev, "unable to allocate SPS descriptors\n");
		goto alloc_descr_failed;
	}

	/* Allocate memory for the message buffer(s), N descrs, 40-byte mesg */
	ret = msm_slim_sps_mem_alloc(dev, mem, MSM_TX_BUFS * SLIM_MSGQ_BUF_LEN);
	if (ret) {
		dev_err(dev->dev, "dma_alloc_coherent failed\n");
		goto alloc_buffer_failed;
	}
	ret = msm_slim_connect_endp(dev, endpoint, NULL);

	if (!ret)
		return 0;

	msm_slim_sps_mem_free(dev, mem);
alloc_buffer_failed:
	msm_slim_sps_mem_free(dev, descr);
alloc_descr_failed:
	msm_slim_free_endpoint(endpoint);
sps_init_endpoint_failed:
	dev->use_tx_msgqs = MSM_MSGQ_DISABLED;
	return ret;
}

/* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */
int msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem,
			u32 pipe_reg, bool remote)
{
	int i, ret;
	u32 bam_handle;
	struct sps_bam_props bam_props = {0};

	static struct sps_bam_sec_config_props sec_props = {
		.ees = {
			[0] = { /* LPASS */
				.vmid = 0,
				.pipe_mask = 0xFFFF98,
			},
			[1] = { /* Krait Apps */
				.vmid = 1,
				.pipe_mask = 0x3F000007,
			},
			[2] = { /* Modem */
				.vmid = 2,
				.pipe_mask = 0x00000060,
			},
		},
	};

	if (dev->bam.hdl) {
		bam_handle = dev->bam.hdl;
		goto init_msgq;
	}
	bam_props.ee = dev->ee;
	bam_props.virt_addr = dev->bam.base;
	bam_props.phys_addr = bam_mem->start;
	bam_props.irq = dev->bam.irq;
	if (!remote) {
		bam_props.manage = SPS_BAM_MGR_LOCAL;
		bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
	} else {
		bam_props.manage = SPS_BAM_MGR_DEVICE_REMOTE |
					SPS_BAM_MGR_MULTI_EE;
		bam_props.sec_config = SPS_BAM_SEC_DO_NOT_CONFIG;
	}
	bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;

	bam_props.p_sec_config_props = &sec_props;

	bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* The first 7 pipes are reserved for the message queues */
	for (i = 7; i < 32; i++) {
		/* Check what pipes are owned by Apps. */
		if ((sec_props.ees[dev->ee].pipe_mask >> i) & 0x1)
			break;
	}
	dev->pipe_b = i - 7;

	/* Register the BAM device with the SPS driver */
	ret = sps_register_bam_device(&bam_props, &bam_handle);
	if (ret) {
		dev_err(dev->dev, "disabling BAM: reg-bam failed 0x%x\n", ret);
		dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
		dev->use_tx_msgqs = MSM_MSGQ_DISABLED;
		return ret;
	}
	dev->bam.hdl = bam_handle;
	dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%x\n", bam_handle);

init_msgq:
	ret = msm_slim_init_rx_msgq(dev, pipe_reg);
	if (ret)
		dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
	if (ret && bam_handle)
		dev->use_rx_msgqs = MSM_MSGQ_DISABLED;

	ret = msm_slim_init_tx_msgq(dev, pipe_reg);
	if (ret)
		dev_err(dev->dev, "msm_slim_init_tx_msgq failed 0x%x\n", ret);
	if (ret && bam_handle)
		dev->use_tx_msgqs = MSM_MSGQ_DISABLED;

	if (dev->use_tx_msgqs == MSM_MSGQ_DISABLED &&
		dev->use_rx_msgqs == MSM_MSGQ_DISABLED && bam_handle) {
		sps_deregister_bam_device(bam_handle);
		dev->bam.hdl = 0L;
	}

	return ret;
}

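/* Disconnect a message-queue endpoint and mark it for reconnection. */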
void msm_slim_disconnect_endp(struct msm_slim_ctrl *dev,
				struct msm_slim_endp *endpoint,
				enum msm_slim_msgq *msgq_flag)
{
	if (*msgq_flag == MSM_MSGQ_ENABLED) {
		sps_disconnect(endpoint->sps);
		*msgq_flag = MSM_MSGQ_RESET;
	}
}

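/*
 * Tear down a message-queue endpoint: free its message buffers, clear
 * the registered events, disconnect the pipe and release the descriptor
 * FIFO.
 */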
static void msm_slim_remove_ep(struct msm_slim_ctrl *dev,
				struct msm_slim_endp *endpoint,
				enum msm_slim_msgq *msgq_flag)
{
	struct sps_connect *config = &endpoint->config;
	struct sps_mem_buffer *descr = &config->desc;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_register_event sps_event;
	memset(&sps_event, 0x00, sizeof(sps_event));
	msm_slim_sps_mem_free(dev, mem);
	sps_register_event(endpoint->sps, &sps_event);
	if (*msgq_flag == MSM_MSGQ_ENABLED) {
		msm_slim_disconnect_endp(dev, endpoint, msgq_flag);
		msm_slim_free_endpoint(endpoint);
	}
	msm_slim_sps_mem_free(dev, descr);
}

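/*
 * Remove both message-queue endpoints and, if 'dereg' is set, deregister
 * the BAM device from the SPS driver as well.
 */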
void msm_slim_sps_exit(struct msm_slim_ctrl *dev, bool dereg)
{
	if (dev->use_rx_msgqs >= MSM_MSGQ_ENABLED)
		msm_slim_remove_ep(dev, &dev->rx_msgq, &dev->use_rx_msgqs);
	if (dev->use_tx_msgqs >= MSM_MSGQ_ENABLED)
		msm_slim_remove_ep(dev, &dev->tx_msgq, &dev->use_tx_msgqs);
	if (dereg) {
		sps_deregister_bam_device(dev->bam.hdl);
		dev->bam.hdl = 0L;
	}
}

/* Slimbus QMI Messaging */
#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01 0x0020
#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_V01 0x0020
#define SLIMBUS_QMI_POWER_REQ_V01 0x0021
#define SLIMBUS_QMI_POWER_RESP_V01 0x0021

#define SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN 7
#define SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN 7
#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN 14
#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN 7

enum slimbus_mode_enum_type_v01 {
	/* To force a 32 bit signed enum. Do not change or use */
	SLIMBUS_MODE_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
	SLIMBUS_MODE_SATELLITE_V01 = 1,
	SLIMBUS_MODE_MASTER_V01 = 2,
	SLIMBUS_MODE_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
};

enum slimbus_pm_enum_type_v01 {
	/* To force a 32 bit signed enum. Do not change or use */
	SLIMBUS_PM_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
	SLIMBUS_PM_INACTIVE_V01 = 1,
	SLIMBUS_PM_ACTIVE_V01 = 2,
	SLIMBUS_PM_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
};

struct slimbus_select_inst_req_msg_v01 {
	/* Mandatory */
	/* Hardware Instance Selection */
	uint32_t instance;

	/* Optional */
	/* Optional Mode Request Operation */
	/* Must be set to true if mode is being passed */
	uint8_t mode_valid;
	enum slimbus_mode_enum_type_v01 mode;
};

struct slimbus_select_inst_resp_msg_v01 {
	/* Mandatory */
	/* Result Code */
	struct qmi_response_type_v01 resp;
};

struct slimbus_power_req_msg_v01 {
	/* Mandatory */
	/* Power Request Operation */
	enum slimbus_pm_enum_type_v01 pm_req;
};

struct slimbus_power_resp_msg_v01 {
	/* Mandatory */
	/* Result Code */
	struct qmi_response_type_v01 resp;
};

static struct elem_info slimbus_select_inst_req_msg_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(uint32_t),
		.is_array = NO_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct slimbus_select_inst_req_msg_v01,
				   instance),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(uint8_t),
		.is_array = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct slimbus_select_inst_req_msg_v01,
				   mode_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(enum slimbus_mode_enum_type_v01),
		.is_array = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct slimbus_select_inst_req_msg_v01,
				   mode),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.is_array = NO_ARRAY,
		.tlv_type = 0x00,
		.offset = 0,
		.ei_array = NULL,
	},
};

static struct elem_info slimbus_select_inst_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.is_array = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct slimbus_select_inst_resp_msg_v01,
				   resp),
		.ei_array = get_qmi_response_type_v01_ei(),
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.is_array = NO_ARRAY,
		.tlv_type = 0x00,
		.offset = 0,
		.ei_array = NULL,
	},
};

static struct elem_info slimbus_power_req_msg_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(enum slimbus_pm_enum_type_v01),
		.is_array = NO_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct slimbus_power_req_msg_v01, pm_req),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.is_array = NO_ARRAY,
		.tlv_type = 0x00,
		.offset = 0,
		.ei_array = NULL,
	},
};

static struct elem_info slimbus_power_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.is_array = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct slimbus_power_resp_msg_v01, resp),
		.ei_array = get_qmi_response_type_v01_ei(),
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.is_array = NO_ARRAY,
		.tlv_type = 0x00,
		.offset = 0,
		.ei_array = NULL,
	},
};

static void msm_slim_qmi_recv_msg(struct kthread_work *work)
{
	int rc;
	struct msm_slim_qmi *qmi =
		container_of(work, struct msm_slim_qmi, kwork);

	rc = qmi_recv_msg(qmi->handle);
	if (rc < 0)
		pr_err("%s: Error receiving QMI message\n", __func__);
}

static void msm_slim_qmi_notify(struct qmi_handle *handle,
				enum qmi_event_type event, void *notify_priv)
{
	struct msm_slim_ctrl *dev = notify_priv;
	struct msm_slim_qmi *qmi = &dev->qmi;

	switch (event) {
	case QMI_RECV_MSG:
		queue_kthread_work(&qmi->kworker, &qmi->kwork);
		break;
	default:
		break;
	}
}

static const char *get_qmi_error(struct qmi_response_type_v01 *r)
{
	if (r->result == QMI_RESULT_SUCCESS_V01 || r->error == QMI_ERR_NONE_V01)
		return "No Error";
	else if (r->error == QMI_ERR_NO_MEMORY_V01)
		return "Out of Memory";
	else if (r->error == QMI_ERR_INTERNAL_V01)
		return "Unexpected error occurred";
	else if (r->error == QMI_ERR_INCOMPATIBLE_STATE_V01)
		return "Slimbus s/w already configured to a different mode";
	else if (r->error == QMI_ERR_INVALID_ID_V01)
		return "Slimbus hardware instance is not valid";
	else
		return "Unknown error";
}

static int msm_slim_qmi_send_select_inst_req(struct msm_slim_ctrl *dev,
				struct slimbus_select_inst_req_msg_v01 *req)
{
	struct slimbus_select_inst_resp_msg_v01 resp = { { 0, 0 } };
	struct msg_desc req_desc, resp_desc;
	int rc;

	req_desc.msg_id = SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01;
	req_desc.max_msg_len = SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN;
	req_desc.ei_array = slimbus_select_inst_req_msg_v01_ei;

	resp_desc.msg_id = SLIMBUS_QMI_SELECT_INSTANCE_RESP_V01;
	resp_desc.max_msg_len = SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN;
	resp_desc.ei_array = slimbus_select_inst_resp_msg_v01_ei;

	rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req, sizeof(*req),
				&resp_desc, &resp, sizeof(resp), 5000);
	if (rc < 0) {
		pr_err("%s: QMI send req failed %d\n", __func__, rc);
		return rc;
	}

	/* Check the response */
	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		pr_err("%s: QMI request failed 0x%x (%s)\n", __func__,
			resp.resp.result, get_qmi_error(&resp.resp));
		return -EREMOTEIO;
	}

	return 0;
}

static int msm_slim_qmi_send_power_request(struct msm_slim_ctrl *dev,
				struct slimbus_power_req_msg_v01 *req)
{
	struct slimbus_power_resp_msg_v01 resp = { { 0, 0 } };
	struct msg_desc req_desc, resp_desc;
	int rc;

	req_desc.msg_id = SLIMBUS_QMI_POWER_REQ_V01;
	req_desc.max_msg_len = SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN;
	req_desc.ei_array = slimbus_power_req_msg_v01_ei;

	resp_desc.msg_id = SLIMBUS_QMI_POWER_RESP_V01;
	resp_desc.max_msg_len = SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN;
	resp_desc.ei_array = slimbus_power_resp_msg_v01_ei;

	rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req, sizeof(*req),
				&resp_desc, &resp, sizeof(resp), 5000);
	if (rc < 0) {
		pr_err("%s: QMI send req failed %d\n", __func__, rc);
		return rc;
	}

	/* Check the response */
	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		pr_err("%s: QMI request failed 0x%x (%s)\n", __func__,
			resp.resp.result, get_qmi_error(&resp.resp));
		return -EREMOTEIO;
	}

	return 0;
}

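/*
 * Bring up the QMI client: start a worker thread for inbound messages,
 * connect to the Slimbus QMI service, and select the hardware instance,
 * reporting satellite mode for the ADSP when apps is the bus master.
 */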
int msm_slim_qmi_init(struct msm_slim_ctrl *dev, bool apps_is_master)
{
	int rc = 0;
	struct qmi_handle *handle;
	struct slimbus_select_inst_req_msg_v01 req;

	init_kthread_worker(&dev->qmi.kworker);

	dev->qmi.task = kthread_run(kthread_worker_fn,
			&dev->qmi.kworker, "msm_slim_qmi_clnt%d", dev->ctrl.nr);

	if (IS_ERR(dev->qmi.task)) {
		pr_err("%s: Failed to create QMI client kthread\n", __func__);
		return -ENOMEM;
	}

	init_kthread_work(&dev->qmi.kwork, msm_slim_qmi_recv_msg);

	handle = qmi_handle_create(msm_slim_qmi_notify, dev);
	if (!handle) {
		rc = -ENOMEM;
		pr_err("%s: QMI client handle alloc failed\n", __func__);
		goto qmi_handle_create_failed;
	}

	rc = qmi_connect_to_service(handle, SLIMBUS_QMI_SVC_ID,
						SLIMBUS_QMI_INS_ID);
	if (rc < 0) {
		pr_err("%s: QMI server not found\n", __func__);
		goto qmi_connect_to_service_failed;
	}

	/* Instance is 0 based */
	req.instance = dev->ctrl.nr - 1;
	req.mode_valid = 1;

	/* Mode indicates the role of the ADSP */
	if (apps_is_master)
		req.mode = SLIMBUS_MODE_SATELLITE_V01;
	else
		req.mode = SLIMBUS_MODE_MASTER_V01;

	dev->qmi.handle = handle;

	rc = msm_slim_qmi_send_select_inst_req(dev, &req);
	if (rc) {
		pr_err("%s: failed to select h/w instance\n", __func__);
		goto qmi_select_instance_failed;
	}

	return 0;

qmi_select_instance_failed:
	dev->qmi.handle = NULL;
qmi_connect_to_service_failed:
	qmi_handle_destroy(handle);
qmi_handle_create_failed:
	flush_kthread_worker(&dev->qmi.kworker);
	kthread_stop(dev->qmi.task);
	dev->qmi.task = NULL;
	return rc;
}

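/* Tear down the QMI client handle and stop its worker thread. */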
void msm_slim_qmi_exit(struct msm_slim_ctrl *dev)
{
	qmi_handle_destroy(dev->qmi.handle);
	flush_kthread_worker(&dev->qmi.kworker);
	kthread_stop(dev->qmi.task);
	dev->qmi.task = NULL;
	dev->qmi.handle = NULL;
}

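/* Request an active or inactive bus power state from the QMI service. */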
int msm_slim_qmi_power_request(struct msm_slim_ctrl *dev, bool active)
{
	struct slimbus_power_req_msg_v01 req;

	if (active)
		req.pm_req = SLIMBUS_PM_ACTIVE_V01;
	else
		req.pm_req = SLIMBUS_PM_INACTIVE_V01;

	return msm_slim_qmi_send_power_request(dev, &req);
}