/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/slimbus/slimbus.h>
#include <mach/sps.h>
#include "slim-msm.h"

int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
{
        spin_lock(&dev->rx_lock);
        if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
                spin_unlock(&dev->rx_lock);
                dev_err(dev->dev, "RX QUEUE full!\n");
                return -EXFULL;
        }
        memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
        dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
        spin_unlock(&dev->rx_lock);
        return 0;
}

int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->rx_lock, flags);
        if (dev->tail == dev->head) {
                spin_unlock_irqrestore(&dev->rx_lock, flags);
                return -ENODATA;
        }
        /* Each queue slot is 40 bytes; the caller's buffer must be that big */
        memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40);
        dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
        spin_unlock_irqrestore(&dev->rx_lock, flags);
        return 0;
}
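
/*
 * Illustrative sketch (not part of the driver): the enqueue/dequeue pair
 * above implements a circular buffer that keeps one slot empty to tell
 * "full" from "empty", so at most MSM_CONCUR_MSG - 1 messages can be
 * pending at once. A consumer would typically drain it like:
 *
 *        u8 msg[40];
 *
 *        while (msm_slim_rx_dequeue(dev, msg) != -ENODATA)
 *                process_rx_msg(dev, msg);   (process_rx_msg is hypothetical)
 */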

int msm_slim_get_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
        int ref = 0;
        int ret = pm_runtime_get_sync(dev->dev);

        if (ret >= 0) {
                ref = atomic_read(&dev->dev->power.usage_count);
                if (ref <= 0) {
                        dev_err(dev->dev, "reference count negative:%d\n", ref);
                        ret = -ENODEV;
                }
        }
        return ret;
#else
        return -ENODEV;
#endif
}

void msm_slim_put_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
        int ref;

        pm_runtime_mark_last_busy(dev->dev);
        ref = atomic_read(&dev->dev->power.usage_count);
        if (ref <= 0)
                dev_err(dev->dev, "reference count mismatch:%d\n", ref);
        else
                pm_runtime_put(dev->dev);
#endif
}
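
/*
 * Illustrative sketch (not part of the driver): callers are expected to
 * bracket bus activity with the get/put pair above so runtime PM keeps the
 * controller powered exactly while it is in use:
 *
 *        ret = msm_slim_get_ctrl(dev);
 *        if (ret < 0)
 *                return ret;
 *        ret = do_transfer(dev);           (do_transfer is hypothetical)
 *        msm_slim_put_ctrl(dev);
 *
 * With CONFIG_PM_RUNTIME disabled, msm_slim_get_ctrl() always returns
 * -ENODEV, so no transfer should be attempted.
 */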

int msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
{
        int ret;
        struct sps_pipe *endpoint;
        struct sps_connect *config = &ep->config;

        /* Allocate the endpoint */
        endpoint = sps_alloc_endpoint();
        if (!endpoint) {
                dev_err(dev->dev, "sps_alloc_endpoint failed\n");
                return -ENOMEM;
        }

        /* Get default connection configuration for an endpoint */
        ret = sps_get_config(endpoint, config);
        if (ret) {
                dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
                goto sps_config_failed;
        }

        ep->sps = endpoint;
        return 0;

sps_config_failed:
        sps_free_endpoint(endpoint);
        return ret;
}

void msm_slim_free_endpoint(struct msm_slim_endp *ep)
{
        sps_free_endpoint(ep->sps);
        ep->sps = NULL;
}
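
/*
 * Illustrative sketch (not part of the driver): init/free bound the
 * endpoint lifecycle around the SPS connection:
 *
 *        ret = msm_slim_init_endpoint(dev, ep);   (alloc + default config)
 *        if (ret)
 *                return ret;
 *        ... fill in ep->config, then sps_connect(ep->sps, &ep->config) ...
 *        ... teardown: sps_disconnect(ep->sps); msm_slim_free_endpoint(ep);
 */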

int msm_slim_sps_mem_alloc(
        struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
{
        dma_addr_t phys;

        mem->size = len;
        mem->min_size = 0;
        mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);

        if (!mem->base) {
                dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
                return -ENOMEM;
        }

        mem->phys_base = phys;
        memset(mem->base, 0x00, mem->size);
        return 0;
}

void
msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
{
        dma_free_coherent(dev->dev, mem->size, mem->base, mem->phys_base);
        mem->size = 0;
        mem->base = NULL;
        mem->phys_base = 0;
}
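
/*
 * Illustrative note: msm_slim_sps_mem_alloc() returns one coherent buffer
 * with both its kernel virtual address (mem->base) and its DMA address
 * (mem->phys_base); SPS descriptors take the DMA address while the driver
 * reads and writes through the virtual one. For example, a descriptor FIFO
 * for MSM_SLIM_DESC_NUM transfers would be allocated as:
 *
 *        ret = msm_slim_sps_mem_alloc(dev, &config->desc,
 *                        MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
 */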

void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pn)
{
        u32 set_cfg = DEF_WATERMARK | DEF_ALIGN | DEF_PACK | ENABLE_PORT;
        u32 int_port = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
                                        dev->ver));
        writel_relaxed(set_cfg, PGD_PORT(PGD_PORT_CFGn, pn, dev->ver));
        writel_relaxed(DEF_BLKSZ, PGD_PORT(PGD_PORT_BLKn, pn, dev->ver));
        writel_relaxed(DEF_TRANSZ, PGD_PORT(PGD_PORT_TRANn, pn, dev->ver));
        writel_relaxed((int_port | (1 << pn)), PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
                                        dev->ver));
        /* Make sure that port registers are updated before returning */
        mb();
}

int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
        struct msm_slim_endp *endpoint = &dev->pipes[pn];
        struct sps_connect *cfg = &endpoint->config;
        u32 stat;
        int ret = sps_get_config(dev->pipes[pn].sps, cfg);

        if (ret) {
                dev_err(dev->dev, "sps pipe-port get config error:%x\n", ret);
                return ret;
        }
        cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
                        SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

        if (dev->pipes[pn].connected) {
                ret = sps_set_config(dev->pipes[pn].sps, cfg);
                if (ret) {
                        dev_err(dev->dev, "sps pipe-port set config error:%x\n",
                                        ret);
                        return ret;
                }
        }

        stat = readl_relaxed(PGD_PORT(PGD_PORT_STATn, (pn + dev->pipe_b),
                                        dev->ver));
        if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
                cfg->destination = dev->bam.hdl;
                cfg->source = SPS_DEV_HANDLE_MEM;
                cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
                cfg->src_pipe_index = 0;
                dev_dbg(dev->dev, "flow src:pipe num:%d",
                                        cfg->dest_pipe_index);
                cfg->mode = SPS_MODE_DEST;
        } else {
                cfg->source = dev->bam.hdl;
                cfg->destination = SPS_DEV_HANDLE_MEM;
                cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
                cfg->dest_pipe_index = 0;
                dev_dbg(dev->dev, "flow dest:pipe num:%d",
                                        cfg->src_pipe_index);
                cfg->mode = SPS_MODE_SRC;
        }
        /* Space for descriptor FIFOs */
        cfg->desc.size = MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec);
        cfg->config = SPS_CONFIG_DEFAULT;
        ret = sps_connect(dev->pipes[pn].sps, cfg);
        if (!ret) {
                dev->pipes[pn].connected = true;
                msm_hw_set_port(dev, pn + dev->pipe_b);
        }
        return ret;
}
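
/*
 * Worked example (illustrative): msm_slim_connect_pipe_port() treats the
 * 8-bit field at bits 11:4 of PGD_PORT_STATn as the BAM pipe index backing
 * the port. If stat reads 0x150, then ((0x150 & (0xFF << 4)) >> 4) = 0x15,
 * i.e. the port is wired to BAM pipe 21.
 */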

int msm_config_port(struct slim_controller *ctrl, u8 pn)
{
        struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
        struct msm_slim_endp *endpoint;
        int ret = 0;

        if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
                ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
                return -EPROTONOSUPPORT;
        if (pn >= (MSM_SLIM_NPORTS - dev->pipe_b))
                return -ENODEV;

        endpoint = &dev->pipes[pn];
        ret = msm_slim_init_endpoint(dev, endpoint);
        dev_dbg(dev->dev, "init_endpoint error code:%x\n", ret);
        return ret;
}

enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
                                u8 pn, u8 **done_buf, u32 *done_len)
{
        struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
        struct sps_iovec sio;
        int ret;

        if (done_len)
                *done_len = 0;
        if (done_buf)
                *done_buf = NULL;
        if (!dev->pipes[pn].connected)
                return SLIM_P_DISCONNECT;
        ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
        if (!ret) {
                if (done_len)
                        *done_len = sio.size;
                if (done_buf)
                        *done_buf = (u8 *)sio.addr;
        }
        dev_dbg(dev->dev, "get iovec returned %d\n", ret);
        return SLIM_P_INPROGRESS;
}

int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf,
                        u32 len, struct completion *comp)
{
        struct sps_register_event sreg;
        int ret;
        struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);

        if (pn >= 7)
                return -ENODEV;

        ctrl->ports[pn].xcomp = comp;
        sreg.options = (SPS_EVENT_DESC_DONE | SPS_EVENT_ERROR);
        sreg.mode = SPS_TRIGGER_WAIT;
        sreg.xfer_done = comp;
        sreg.callback = NULL;
        sreg.user = &ctrl->ports[pn];
        ret = sps_register_event(dev->pipes[pn].sps, &sreg);
        if (ret) {
                dev_dbg(dev->dev, "sps register event error:%x\n", ret);
                return ret;
        }
        ret = sps_transfer_one(dev->pipes[pn].sps, (u32)iobuf, len, NULL,
                                SPS_IOVEC_FLAG_INT);
        dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);

        return ret;
}
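
/*
 * Illustrative sketch (not part of the driver): a client submits a port
 * transfer, waits on the completion it passed in, and then collects the
 * finished buffer:
 *
 *        DECLARE_COMPLETION_ONSTACK(done);
 *        u8 *done_buf;
 *        u32 done_len;
 *
 *        ret = msm_slim_port_xfer(ctrl, pn, iobuf, len, &done);
 *        if (!ret) {
 *                wait_for_completion(&done);
 *                msm_slim_port_xfer_status(ctrl, pn, &done_buf, &done_len);
 *        }
 */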

int msm_send_msg_buf(struct msm_slim_ctrl *dev, u32 *buf, u8 len, u32 tx_reg)
{
        int i;

        /* Push the message as 32-bit words, rounding the length up */
        for (i = 0; i < (len + 3) >> 2; i++) {
                dev_dbg(dev->dev, "TX data:0x%x\n", buf[i]);
                writel_relaxed(buf[i], dev->base + tx_reg + (i * 4));
        }
        /* Guarantee that message is sent before returning */
        mb();
        return 0;
}
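
/*
 * Worked example: (len + 3) >> 2 rounds the byte count up to whole 32-bit
 * words, so a 6-byte message is pushed as (6 + 3) >> 2 = 2 register writes.
 */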

u32 *msm_get_msg_buf(struct msm_slim_ctrl *dev, int len)
{
        /*
         * Currently a transaction blocks until the one in progress completes.
         * If multiple concurrent transactions are needed, use a message queue.
         */
        return dev->tx_buf;
}

static void
msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
{
        u32 *buf = ev->data.transfer.user;
        struct sps_iovec *iovec = &ev->data.transfer.iovec;

        /*
         * Note that the virtual address needs to be offset by the same index
         * as the physical address, or just pass in the actual virtual address
         * if the sps_mem_buffer is not needed. If a completion is used
         * instead, the virtual address won't be available and will need to
         * be calculated from the offset of the physical address.
         */
        if (ev->event_id == SPS_EVENT_DESC_DONE) {
                pr_debug("buf = 0x%p, data = 0x%x\n", buf, *buf);
                pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
                                iovec->addr, iovec->size, iovec->flags);
        } else {
                dev_err(dev->dev, "%s: unknown event %d\n",
                                __func__, ev->event_id);
        }
}

static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
{
        struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;

        msm_slim_rx_msgq_event(dev, notify);
}

/* Queue up Rx message buffer */
static int msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
{
        int ret;
        u32 flags = SPS_IOVEC_FLAG_INT;
        struct msm_slim_endp *endpoint = &dev->rx_msgq;
        struct sps_mem_buffer *mem = &endpoint->buf;
        struct sps_pipe *pipe = endpoint->sps;

        /* Rx message queue buffers are 4 bytes in length */
        u8 *virt_addr = mem->base + (4 * ix);
        u32 phys_addr = mem->phys_base + (4 * ix);

        pr_debug("index:%d, phys:0x%x, virt:0x%p\n", ix, phys_addr, virt_addr);

        ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, flags);
        if (ret)
                dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);

        return ret;
}

int msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
{
        struct msm_slim_endp *endpoint = &dev->rx_msgq;
        struct sps_mem_buffer *mem = &endpoint->buf;
        struct sps_pipe *pipe = endpoint->sps;
        struct sps_iovec iovec;
        int index;
        int ret;

        ret = sps_get_iovec(pipe, &iovec);
        if (ret) {
                dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
                goto err_exit;
        }

        pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
                        iovec.addr, iovec.size, iovec.flags);
        BUG_ON(iovec.addr < mem->phys_base);
        BUG_ON(iovec.addr >= mem->phys_base + mem->size);

        /* Calculate the buffer index from the completed physical address */
        index = (iovec.addr - mem->phys_base) / 4;
        *(data + offset) = *((u32 *)mem->base + index);

        pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index,
                        *(data + offset));

        /* Add the buffer back to the queue */
        (void)msm_slim_post_rx_msgq(dev, index);

err_exit:
        return ret;
}
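
/*
 * Worked example (illustrative): with mem->phys_base = 0x1000 and a
 * completed iovec.addr of 0x100C, the 4-byte buffer index is
 * (0x100C - 0x1000) / 4 = 3, so the message word is read from
 * ((u32 *)mem->base)[3] and that buffer is then re-queued.
 */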

static int msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev, u32 pipe_reg)
{
        int i, ret;
        u32 pipe_offset;
        struct msm_slim_endp *endpoint = &dev->rx_msgq;
        struct sps_connect *config = &endpoint->config;
        struct sps_mem_buffer *descr = &config->desc;
        struct sps_mem_buffer *mem = &endpoint->buf;
        struct completion *notify = &dev->rx_msgq_notify;

        struct sps_register_event sps_error_event; /* SPS_ERROR */
        struct sps_register_event sps_descr_event; /* DESCR_DONE */

        init_completion(notify);
        if (!dev->use_rx_msgqs)
                return 0;

        /* Allocate the endpoint */
        ret = msm_slim_init_endpoint(dev, endpoint);
        if (ret) {
                dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
                goto sps_init_endpoint_failed;
        }

        /* Get the pipe indices for the message queues */
        pipe_offset = (readl_relaxed(dev->base + pipe_reg) & 0xfc) >> 2;
        dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);

        config->mode = SPS_MODE_SRC;
        config->source = dev->bam.hdl;
        config->destination = SPS_DEV_HANDLE_MEM;
        config->src_pipe_index = pipe_offset;
        config->options = SPS_O_DESC_DONE | SPS_O_ERROR |
                                SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

        /* Allocate memory for the FIFO descriptors */
        ret = msm_slim_sps_mem_alloc(dev, descr,
                                MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
        if (ret) {
                dev_err(dev->dev, "unable to allocate SPS descriptors\n");
                goto alloc_descr_failed;
        }

        ret = sps_connect(endpoint->sps, config);
        if (ret) {
                dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
                goto sps_connect_failed;
        }

        memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));

        sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
        sps_descr_event.options = SPS_O_DESC_DONE;
        sps_descr_event.user = (void *)dev;
        sps_descr_event.xfer_done = notify;

        ret = sps_register_event(endpoint->sps, &sps_descr_event);
        if (ret) {
                dev_err(dev->dev, "sps_register_event() failed 0x%x\n", ret);
                goto sps_reg_event_failed;
        }

        /* Register callback for errors */
        memset(&sps_error_event, 0x00, sizeof(sps_error_event));
        sps_error_event.mode = SPS_TRIGGER_CALLBACK;
        sps_error_event.options = SPS_O_ERROR;
        sps_error_event.user = (void *)dev;
        sps_error_event.callback = msm_slim_rx_msgq_cb;

        ret = sps_register_event(endpoint->sps, &sps_error_event);
        if (ret) {
                dev_err(dev->dev, "sps_register_event() failed 0x%x\n", ret);
                goto sps_reg_event_failed;
        }

        /* Allocate the message buffers: one 4-byte message per descriptor */
        ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
        if (ret) {
                dev_err(dev->dev, "dma_alloc_coherent failed\n");
                goto alloc_buffer_failed;
        }

        /*
         * Call transfer_one for each 4-byte buffer.
         * Use (buf->size / 4) - 1 for the number of buffers to post.
         */

        /* Set up the transfers */
        for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
                ret = msm_slim_post_rx_msgq(dev, i);
                if (ret) {
                        dev_err(dev->dev, "post_rx_msgq() failed 0x%x\n", ret);
                        goto sps_transfer_failed;
                }
        }

        return 0;

sps_transfer_failed:
        msm_slim_sps_mem_free(dev, mem);
alloc_buffer_failed:
        memset(&sps_error_event, 0x00, sizeof(sps_error_event));
        sps_register_event(endpoint->sps, &sps_error_event);
sps_reg_event_failed:
        sps_disconnect(endpoint->sps);
sps_connect_failed:
        msm_slim_sps_mem_free(dev, descr);
alloc_descr_failed:
        msm_slim_free_endpoint(endpoint);
sps_init_endpoint_failed:
        dev->use_rx_msgqs = 0;
        return ret;
}

/* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */
int msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem,
                        u32 pipe_reg, bool remote)
{
        int i, ret;
        u32 bam_handle = 0;
        struct sps_bam_props bam_props = {0};

        static struct sps_bam_sec_config_props sec_props = {
                .ees = {
                        [0] = { /* LPASS */
                                .vmid = 0,
                                .pipe_mask = 0xFFFF98,
                        },
                        [1] = { /* Krait Apps */
                                .vmid = 1,
                                .pipe_mask = 0x3F000007,
                        },
                        [2] = { /* Modem */
                                .vmid = 2,
                                .pipe_mask = 0x00000060,
                        },
                },
        };

        bam_props.ee = dev->ee;
        bam_props.virt_addr = dev->bam.base;
        bam_props.phys_addr = bam_mem->start;
        bam_props.irq = dev->bam.irq;
        if (!remote) {
                bam_props.manage = SPS_BAM_MGR_LOCAL;
                bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
        } else {
                bam_props.manage = SPS_BAM_MGR_DEVICE_REMOTE |
                                        SPS_BAM_MGR_MULTI_EE;
                bam_props.sec_config = SPS_BAM_SEC_DO_NOT_CONFIG;
        }
        bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;

        bam_props.p_sec_config_props = &sec_props;

        bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
                                SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

        /* The first 7 pipes are reserved for message queues */
        for (i = 7; i < 32; i++) {
                /* Find the first data pipe owned by this EE */
                if ((sec_props.ees[dev->ee].pipe_mask >> i) & 0x1)
                        break;
        }
        dev->pipe_b = i - 7;

        /* Register the BAM device with the SPS driver */
        ret = sps_register_bam_device(&bam_props, &bam_handle);
        if (ret) {
                dev_err(dev->dev, "disabling BAM: reg-bam failed 0x%x\n", ret);
                dev->use_rx_msgqs = 0;
                goto init_rx_msgq;
        }
        dev->bam.hdl = bam_handle;
        dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%x\n", bam_handle);

init_rx_msgq:
        ret = msm_slim_init_rx_msgq(dev, pipe_reg);
        if (ret)
                dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
        if (ret && bam_handle) {
                sps_deregister_bam_device(bam_handle);
                dev->bam.hdl = 0L;
        }
        return ret;
}
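
/*
 * Worked example (illustrative): assuming dev->ee == 1 (the Krait apps EE
 * above), the pipe mask is 0x3F000007. Pipes 0-6 are reserved for message
 * queues, so the scan starts at bit 7; the first apps-owned pipe found is
 * bit 24, giving dev->pipe_b = 24 - 7 = 17 as the offset of the first
 * usable data pipe.
 */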

void msm_slim_sps_exit(struct msm_slim_ctrl *dev)
{
        if (dev->use_rx_msgqs) {
                struct msm_slim_endp *endpoint = &dev->rx_msgq;
                struct sps_connect *config = &endpoint->config;
                struct sps_mem_buffer *descr = &config->desc;
                struct sps_mem_buffer *mem = &endpoint->buf;
                struct sps_register_event sps_event;

                memset(&sps_event, 0x00, sizeof(sps_event));
                msm_slim_sps_mem_free(dev, mem);
                sps_register_event(endpoint->sps, &sps_event);
                sps_disconnect(endpoint->sps);
                msm_slim_sps_mem_free(dev, descr);
                msm_slim_free_endpoint(endpoint);
                sps_deregister_bam_device(dev->bam.hdl);
        }
}

/* Slimbus QMI Messaging */
#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01 0x0020
#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_V01 0x0020
#define SLIMBUS_QMI_POWER_REQ_V01 0x0021
#define SLIMBUS_QMI_POWER_RESP_V01 0x0021

enum slimbus_mode_enum_type_v01 {
        /* To force a 32-bit signed enum. Do not change or use. */
        SLIMBUS_MODE_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
        SLIMBUS_MODE_SATELLITE_V01 = 1,
        SLIMBUS_MODE_MASTER_V01 = 2,
        SLIMBUS_MODE_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
};

enum slimbus_pm_enum_type_v01 {
        /* To force a 32-bit signed enum. Do not change or use. */
        SLIMBUS_PM_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
        SLIMBUS_PM_INACTIVE_V01 = 1,
        SLIMBUS_PM_ACTIVE_V01 = 2,
        SLIMBUS_PM_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
};

struct slimbus_select_inst_req_msg_v01 {
        /* Mandatory */
        /* Hardware Instance Selection */
        uint32_t instance;

        /* Optional */
        /* Optional Mode Request Operation */
        /* Must be set to true if mode is being passed */
        uint8_t mode_valid;
        enum slimbus_mode_enum_type_v01 mode;
};

struct slimbus_select_inst_resp_msg_v01 {
        /* Mandatory */
        /* Result Code */
        struct qmi_response_type_v01 resp;
};

struct slimbus_power_req_msg_v01 {
        /* Mandatory */
        /* Power Request Operation */
        enum slimbus_pm_enum_type_v01 pm_req;
};

struct slimbus_power_resp_msg_v01 {
        /* Mandatory */
        /* Result Code */
        struct qmi_response_type_v01 resp;
};

static struct elem_info slimbus_select_inst_req_msg_v01_ei[] = {
        {
                .data_type = QMI_UNSIGNED_4_BYTE,
                .elem_len = 1,
                .elem_size = sizeof(uint32_t),
                .is_array = NO_ARRAY,
                .tlv_type = 0x01,
                .offset = offsetof(struct slimbus_select_inst_req_msg_v01,
                                   instance),
                .ei_array = NULL,
        },
        {
                .data_type = QMI_OPT_FLAG,
                .elem_len = 1,
                .elem_size = sizeof(uint8_t),
                .is_array = NO_ARRAY,
                .tlv_type = 0x10,
                .offset = offsetof(struct slimbus_select_inst_req_msg_v01,
                                   mode_valid),
                .ei_array = NULL,
        },
        {
                .data_type = QMI_UNSIGNED_4_BYTE,
                .elem_len = 1,
                .elem_size = sizeof(enum slimbus_mode_enum_type_v01),
                .is_array = NO_ARRAY,
                .tlv_type = 0x10,
                .offset = offsetof(struct slimbus_select_inst_req_msg_v01,
                                   mode),
                .ei_array = NULL,
        },
        {
                .data_type = QMI_EOTI,
                .elem_len = 0,
                .elem_size = 0,
                .is_array = NO_ARRAY,
                .tlv_type = 0x00,
                .offset = 0,
                .ei_array = NULL,
        },
};

static struct elem_info slimbus_select_inst_resp_msg_v01_ei[] = {
        {
                .data_type = QMI_STRUCT,
                .elem_len = 1,
                .elem_size = sizeof(struct qmi_response_type_v01),
                .is_array = NO_ARRAY,
                .tlv_type = 0x02,
                .offset = offsetof(struct slimbus_select_inst_resp_msg_v01,
                                   resp),
                .ei_array = get_qmi_response_type_v01_ei(),
        },
        {
                .data_type = QMI_EOTI,
                .elem_len = 0,
                .elem_size = 0,
                .is_array = NO_ARRAY,
                .tlv_type = 0x00,
                .offset = 0,
                .ei_array = NULL,
        },
};

static struct elem_info slimbus_power_req_msg_v01_ei[] = {
        {
                .data_type = QMI_UNSIGNED_4_BYTE,
                .elem_len = 1,
                .elem_size = sizeof(enum slimbus_pm_enum_type_v01),
                .is_array = NO_ARRAY,
                .tlv_type = 0x01,
                .offset = offsetof(struct slimbus_power_req_msg_v01, pm_req),
                .ei_array = NULL,
        },
        {
                .data_type = QMI_EOTI,
                .elem_len = 0,
                .elem_size = 0,
                .is_array = NO_ARRAY,
                .tlv_type = 0x00,
                .offset = 0,
                .ei_array = NULL,
        },
};

static struct elem_info slimbus_power_resp_msg_v01_ei[] = {
        {
                .data_type = QMI_STRUCT,
                .elem_len = 1,
                .elem_size = sizeof(struct qmi_response_type_v01),
                .is_array = NO_ARRAY,
                .tlv_type = 0x02,
                .offset = offsetof(struct slimbus_power_resp_msg_v01, resp),
                .ei_array = get_qmi_response_type_v01_ei(),
        },
        {
                .data_type = QMI_EOTI,
                .elem_len = 0,
                .elem_size = 0,
                .is_array = NO_ARRAY,
                .tlv_type = 0x00,
                .offset = 0,
                .ei_array = NULL,
        },
};
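
/*
 * Illustrative note: each elem_info entry above maps one C struct member to
 * a QMI TLV on the wire: tlv_type is the tag (0x01/0x02 for mandatory
 * fields, 0x10 and up for optional ones, by QMI convention), offsetof()
 * locates the member, and the QMI_EOTI entry terminates the array. The
 * optional "mode" field shares tag 0x10 with its mode_valid flag and is
 * only encoded when that flag is set.
 */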

static void msm_slim_qmi_recv_msg(struct kthread_work *work)
{
        int rc;
        struct msm_slim_qmi *qmi =
                container_of(work, struct msm_slim_qmi, kwork);

        rc = qmi_recv_msg(qmi->handle);
        if (rc < 0)
                pr_err("%s: Error receiving QMI message\n", __func__);
}

static void msm_slim_qmi_notify(struct qmi_handle *handle,
                                enum qmi_event_type event, void *notify_priv)
{
        struct msm_slim_ctrl *dev = notify_priv;
        struct msm_slim_qmi *qmi = &dev->qmi;

        switch (event) {
        case QMI_RECV_MSG:
                queue_kthread_work(&qmi->kworker, &qmi->kwork);
                break;
        default:
                break;
        }
}

static const char *get_qmi_error(struct qmi_response_type_v01 *r)
{
        if (r->result == QMI_RESULT_SUCCESS_V01 || r->error == QMI_ERR_NONE_V01)
                return "No Error";
        else if (r->error == QMI_ERR_NO_MEMORY_V01)
                return "Out of Memory";
        else if (r->error == QMI_ERR_INTERNAL_V01)
                return "Unexpected error occurred";
        else if (r->error == QMI_ERR_INCOMPATIBLE_STATE_V01)
                return "Slimbus s/w already configured to a different mode";
        else if (r->error == QMI_ERR_INVALID_ID_V01)
                return "Slimbus hardware instance is not valid";
        else
                return "Unknown error";
}

static int msm_slim_qmi_send_select_inst_req(struct msm_slim_ctrl *dev,
                                struct slimbus_select_inst_req_msg_v01 *req)
{
        struct slimbus_select_inst_resp_msg_v01 resp = { { 0, 0 } };
        struct msg_desc req_desc, resp_desc;
        int rc;

        req_desc.msg_id = SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01;
        req_desc.max_msg_len = sizeof(*req);
        req_desc.ei_array = slimbus_select_inst_req_msg_v01_ei;

        resp_desc.msg_id = SLIMBUS_QMI_SELECT_INSTANCE_RESP_V01;
        resp_desc.max_msg_len = sizeof(resp);
        resp_desc.ei_array = slimbus_select_inst_resp_msg_v01_ei;

        rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req, sizeof(*req),
                                &resp_desc, &resp, sizeof(resp), 5000);
        if (rc < 0) {
                pr_err("%s: QMI send req failed %d\n", __func__, rc);
                return rc;
        }

        /* Check the response */
        if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
                pr_err("%s: QMI request failed 0x%x (%s)\n", __func__,
                        resp.resp.result, get_qmi_error(&resp.resp));
                return -EREMOTEIO;
        }

        return 0;
}

static int msm_slim_qmi_send_power_request(struct msm_slim_ctrl *dev,
                                struct slimbus_power_req_msg_v01 *req)
{
        struct slimbus_power_resp_msg_v01 resp = { { 0, 0 } };
        struct msg_desc req_desc, resp_desc;
        int rc;

        req_desc.msg_id = SLIMBUS_QMI_POWER_REQ_V01;
        req_desc.max_msg_len = sizeof(*req);
        req_desc.ei_array = slimbus_power_req_msg_v01_ei;

        resp_desc.msg_id = SLIMBUS_QMI_POWER_RESP_V01;
        resp_desc.max_msg_len = sizeof(resp);
        resp_desc.ei_array = slimbus_power_resp_msg_v01_ei;

        rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req, sizeof(*req),
                                &resp_desc, &resp, sizeof(resp), 5000);
        if (rc < 0) {
                pr_err("%s: QMI send req failed %d\n", __func__, rc);
                return rc;
        }

        /* Check the response */
        if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
                pr_err("%s: QMI request failed 0x%x (%s)\n", __func__,
                        resp.resp.result, get_qmi_error(&resp.resp));
                return -EREMOTEIO;
        }

        return 0;
}

int msm_slim_qmi_init(struct msm_slim_ctrl *dev, bool apps_is_master)
{
        int rc = 0;
        struct qmi_handle *handle;
        struct slimbus_select_inst_req_msg_v01 req;

        init_kthread_worker(&dev->qmi.kworker);

        dev->qmi.task = kthread_run(kthread_worker_fn,
                        &dev->qmi.kworker, "msm_slim_qmi_clnt%d", dev->ctrl.nr);

        if (IS_ERR(dev->qmi.task)) {
                pr_err("%s: Failed to create QMI client kthread\n", __func__);
                return -ENOMEM;
        }

        init_kthread_work(&dev->qmi.kwork, msm_slim_qmi_recv_msg);

        handle = qmi_handle_create(msm_slim_qmi_notify, dev);
        if (!handle) {
                rc = -ENOMEM;
                pr_err("%s: QMI client handle alloc failed\n", __func__);
                goto qmi_handle_create_failed;
        }

        rc = qmi_connect_to_service(handle, SLIMBUS_QMI_SVC_ID,
                                        SLIMBUS_QMI_INS_ID);
        if (rc < 0) {
                pr_err("%s: QMI server not found\n", __func__);
                goto qmi_connect_to_service_failed;
        }

        /* Instance is 0-based */
        req.instance = dev->ctrl.nr - 1;
        req.mode_valid = 1;

        /* Mode indicates the role of the ADSP: satellite if apps is master */
        if (apps_is_master)
                req.mode = SLIMBUS_MODE_SATELLITE_V01;
        else
                req.mode = SLIMBUS_MODE_MASTER_V01;

        dev->qmi.handle = handle;

        rc = msm_slim_qmi_send_select_inst_req(dev, &req);
        if (rc) {
                pr_err("%s: failed to select h/w instance\n", __func__);
                goto qmi_select_instance_failed;
        }

        return 0;

qmi_select_instance_failed:
        dev->qmi.handle = NULL;
qmi_connect_to_service_failed:
        qmi_handle_destroy(handle);
qmi_handle_create_failed:
        flush_kthread_worker(&dev->qmi.kworker);
        kthread_stop(dev->qmi.task);
        dev->qmi.task = NULL;
        return rc;
}
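
/*
 * Illustrative sketch (not part of the driver): a controller probe path
 * would bring up the QMI link and then vote the bus active:
 *
 *        ret = msm_slim_qmi_init(dev, false);   (false: the ADSP is master)
 *        if (!ret)
 *                ret = msm_slim_qmi_power_request(dev, true);
 */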

void msm_slim_qmi_exit(struct msm_slim_ctrl *dev)
{
        qmi_handle_destroy(dev->qmi.handle);
        flush_kthread_worker(&dev->qmi.kworker);
        kthread_stop(dev->qmi.task);
        dev->qmi.task = NULL;
        dev->qmi.handle = NULL;
}

int msm_slim_qmi_power_request(struct msm_slim_ctrl *dev, bool active)
{
        struct slimbus_power_req_msg_v01 req;

        if (active)
                req.pm_req = SLIMBUS_PM_ACTIVE_V01;
        else
                req.pm_req = SLIMBUS_PM_INACTIVE_V01;

        return msm_slim_qmi_send_power_request(dev, &req);
}
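
/*
 * Illustrative sketch (not part of the driver): the power request pairs
 * naturally with runtime PM, e.g. voting SLIMBUS_PM_INACTIVE_V01 from the
 * runtime-suspend path:
 *
 *        static int msm_slim_runtime_suspend(struct device *device)
 *        {
 *                ...
 *                return msm_slim_qmi_power_request(dev, false);
 *        }
 *
 * with the mirror-image msm_slim_qmi_power_request(dev, true) call in
 * runtime-resume. (msm_slim_runtime_suspend here is a hypothetical name.)
 */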