/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/slimbus/slimbus.h>
#include <mach/sps.h>
#include "slim-msm.h"

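/*
 * RX message ring: dev->rx_msgs is a circular buffer of MSM_CONCUR_MSG
 * fixed-size slots, with head as the consumer index and tail as the
 * producer index; one slot is kept empty to tell "full" apart from
 * "empty". The enqueue side takes the plain spin_lock since it is
 * expected to run in interrupt context, while dequeue copies out a
 * whole 40-byte slot.
 */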
int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
{
	spin_lock(&dev->rx_lock);
	if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
		spin_unlock(&dev->rx_lock);
		dev_err(dev->dev, "RX QUEUE full!");
		return -EXFULL;
	}
	memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
	dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
	spin_unlock(&dev->rx_lock);
	return 0;
}

int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->rx_lock, flags);
	if (dev->tail == dev->head) {
		spin_unlock_irqrestore(&dev->rx_lock, flags);
		return -ENODATA;
	}
	memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40);
	dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
	spin_unlock_irqrestore(&dev->rx_lock, flags);
	return 0;
}

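/*
 * Runtime-PM vote helpers: msm_slim_get_ctrl() takes a synchronous
 * usage-count vote via pm_runtime_get_sync() (resuming the controller
 * if needed); msm_slim_put_ctrl() updates the last-busy timestamp and
 * drops the vote. The usage_count reads below are only sanity checks
 * for unbalanced get/put calls.
 */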
int msm_slim_get_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref = 0;
	int ret = pm_runtime_get_sync(dev->dev);
	if (ret >= 0) {
		ref = atomic_read(&dev->dev->power.usage_count);
		if (ref <= 0) {
			dev_err(dev->dev, "reference count -ve:%d", ref);
			ret = -ENODEV;
		}
	}
	return ret;
#else
	return -ENODEV;
#endif
}

void msm_slim_put_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref;
	pm_runtime_mark_last_busy(dev->dev);
	ref = atomic_read(&dev->dev->power.usage_count);
	if (ref <= 0)
		dev_err(dev->dev, "reference count mismatch:%d", ref);
	else
		pm_runtime_put(dev->dev);
#endif
}

irqreturn_t msm_slim_port_irq_handler(struct msm_slim_ctrl *dev, u32 pstat)
{
	int i;
	u32 int_en = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
							dev->ver));
	/*
	 * This is a port interrupt we did not enable; ignore it. This can
	 * happen when an overflow/underflow is reported after its interrupt
	 * was disabled because the client had no buffers available.
	 */
	if ((pstat & int_en) == 0)
		return IRQ_HANDLED;
	for (i = dev->port_b; i < MSM_SLIM_NPORTS; i++) {
		if (pstat & (1 << i)) {
			u32 val = readl_relaxed(PGD_PORT(PGD_PORT_STATn,
							i, dev->ver));
			if (val & MSM_PORT_OVERFLOW) {
				dev->ctrl.ports[i-dev->port_b].err =
							SLIM_P_OVERFLOW;
			} else if (val & MSM_PORT_UNDERFLOW) {
				dev->ctrl.ports[i-dev->port_b].err =
							SLIM_P_UNDERFLOW;
			}
		}
	}
	/*
	 * Disable port interrupt here. Re-enable when more
	 * buffers are provided for this port.
	 */
	writel_relaxed((int_en & (~pstat)),
			PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
					dev->ver));
	/* clear port interrupts */
	writel_relaxed(pstat, PGD_THIS_EE(PGD_PORT_INT_CL_EEn,
							dev->ver));
	pr_info("disabled overflow/underflow for port(s), mask:0x%x", pstat);

	/*
	 * Guarantee that port interrupt bit(s) clearing writes go
	 * through before exiting ISR
	 */
	mb();
	return IRQ_HANDLED;
}

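/*
 * SPS endpoint lifecycle: sps_alloc_endpoint() allocates a pipe handle
 * and sps_get_config() seeds its default connection parameters; the
 * caller later fills in the config and calls sps_connect(). Teardown is
 * the mirror image: sps_disconnect(), then sps_free_endpoint().
 */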
int msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
{
	int ret;
	struct sps_pipe *endpoint;
	struct sps_connect *config = &ep->config;

	/* Allocate the endpoint */
	endpoint = sps_alloc_endpoint();
	if (!endpoint) {
		dev_err(dev->dev, "sps_alloc_endpoint failed\n");
		return -ENOMEM;
	}

	/* Get default connection configuration for an endpoint */
	ret = sps_get_config(endpoint, config);
	if (ret) {
		dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
		goto sps_config_failed;
	}

	ep->sps = endpoint;
	return 0;

sps_config_failed:
	sps_free_endpoint(endpoint);
	return ret;
}

void msm_slim_free_endpoint(struct msm_slim_endp *ep)
{
	sps_free_endpoint(ep->sps);
	ep->sps = NULL;
}

int msm_slim_sps_mem_alloc(
		struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
{
	dma_addr_t phys;

	mem->size = len;
	mem->min_size = 0;
	mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);

	if (!mem->base) {
		dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
		return -ENOMEM;
	}

	mem->phys_base = phys;
	memset(mem->base, 0x00, mem->size);
	return 0;
}

void
msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
{
	dma_free_coherent(dev->dev, mem->size, mem->base, mem->phys_base);
	mem->size = 0;
	mem->base = NULL;
	mem->phys_base = 0;
}

void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pn)
{
	u32 set_cfg = DEF_WATERMARK | DEF_ALIGN | DEF_PACK | ENABLE_PORT;
	writel_relaxed(set_cfg, PGD_PORT(PGD_PORT_CFGn, pn, dev->ver));
	writel_relaxed(DEF_BLKSZ, PGD_PORT(PGD_PORT_BLKn, pn, dev->ver));
	writel_relaxed(DEF_TRANSZ, PGD_PORT(PGD_PORT_TRANn, pn, dev->ver));
	/* Make sure that port registers are updated before returning */
	mb();
}

static void msm_slim_disconn_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
	struct msm_slim_endp *endpoint = &dev->pipes[pn];
	struct sps_register_event sps_event;
	writel_relaxed(0, PGD_PORT(PGD_PORT_CFGn, (pn + dev->port_b),
					dev->ver));
	/* Make sure port register is updated */
	mb();
	memset(&sps_event, 0, sizeof(sps_event));
	sps_register_event(endpoint->sps, &sps_event);
	sps_disconnect(endpoint->sps);
	dev->pipes[pn].connected = false;
}

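/*
 * Pipe/port plumbing: controller port pn is backed by hardware port
 * (pn + dev->port_b). A SLIM_SRC port transmits onto the bus, so its
 * BAM pipe consumes from system memory (SPS_MODE_DEST); a SLIM_SINK
 * port receives, so its pipe produces into system memory
 * (SPS_MODE_SRC). The hardware advertises the BAM pipe number for a
 * port in bits 11:4 of PGD_PORT_STATn, which is what the
 * (stat & (0xFF << 4)) >> 4 extraction below recovers.
 */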
int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
	struct msm_slim_endp *endpoint = &dev->pipes[pn];
	struct sps_connect *cfg = &endpoint->config;
	u32 stat;
	int ret = sps_get_config(dev->pipes[pn].sps, cfg);
	if (ret) {
		dev_err(dev->dev, "sps pipe-port get config error%x\n", ret);
		return ret;
	}
	cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	if (dev->pipes[pn].connected &&
			dev->ctrl.ports[pn].state == SLIM_P_CFG) {
		return -EISCONN;
	} else if (dev->pipes[pn].connected) {
		writel_relaxed(0, PGD_PORT(PGD_PORT_CFGn, (pn + dev->port_b),
						dev->ver));
		/* Make sure port disabling goes through */
		mb();
		/* Is pipe already connected in desired direction? */
		if ((dev->ctrl.ports[pn].flow == SLIM_SRC &&
			cfg->mode == SPS_MODE_DEST) ||
			(dev->ctrl.ports[pn].flow == SLIM_SINK &&
			 cfg->mode == SPS_MODE_SRC)) {
			msm_hw_set_port(dev, pn + dev->port_b);
			return 0;
		}
		msm_slim_disconn_pipe_port(dev, pn);
	}

	stat = readl_relaxed(PGD_PORT(PGD_PORT_STATn, (pn + dev->port_b),
					dev->ver));
	if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
		cfg->destination = dev->bam.hdl;
		cfg->source = SPS_DEV_HANDLE_MEM;
		cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->src_pipe_index = 0;
		dev_dbg(dev->dev, "flow src:pipe num:%d",
					cfg->dest_pipe_index);
		cfg->mode = SPS_MODE_DEST;
	} else {
		cfg->source = dev->bam.hdl;
		cfg->destination = SPS_DEV_HANDLE_MEM;
		cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->dest_pipe_index = 0;
		dev_dbg(dev->dev, "flow dest:pipe num:%d",
					cfg->src_pipe_index);
		cfg->mode = SPS_MODE_SRC;
	}
	/* Space for descriptor FIFOs */
	ret = msm_slim_sps_mem_alloc(dev, &cfg->desc,
				MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
	if (ret)
		pr_err("mem alloc for descr failed:%d", ret);
	else
		ret = sps_connect(dev->pipes[pn].sps, cfg);

	if (!ret) {
		dev->pipes[pn].connected = true;
		msm_hw_set_port(dev, pn + dev->port_b);
	}
	return ret;
}

int msm_alloc_port(struct slim_controller *ctrl, u8 pn)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	struct msm_slim_endp *endpoint;
	int ret = 0;
	if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
		ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
		return -EPROTONOSUPPORT;
	if (pn >= (MSM_SLIM_NPORTS - dev->port_b))
		return -ENODEV;

	endpoint = &dev->pipes[pn];
	ret = msm_slim_init_endpoint(dev, endpoint);
	dev_dbg(dev->dev, "sps init endpoint error code:%x\n", ret);
	return ret;
}

void msm_dealloc_port(struct slim_controller *ctrl, u8 pn)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	struct msm_slim_endp *endpoint;
	if (pn >= (MSM_SLIM_NPORTS - dev->port_b))
		return;
	endpoint = &dev->pipes[pn];
	if (dev->pipes[pn].connected)
		msm_slim_disconn_pipe_port(dev, pn);
	if (endpoint->sps) {
		struct sps_connect *config = &endpoint->config;
		msm_slim_free_endpoint(endpoint);
		msm_slim_sps_mem_free(dev, &config->desc);
	}
}

enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
				u8 pn, u8 **done_buf, u32 *done_len)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
	struct sps_iovec sio;
	int ret;
	if (done_len)
		*done_len = 0;
	if (done_buf)
		*done_buf = NULL;
	if (!dev->pipes[pn].connected)
		return SLIM_P_DISCONNECT;
	ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
	if (!ret) {
		if (done_len)
			*done_len = sio.size;
		if (done_buf)
			*done_buf = (u8 *)sio.addr;
	}
	dev_dbg(dev->dev, "get iovec returned %d\n", ret);
	return SLIM_P_INPROGRESS;
}

static void msm_slim_port_cb(struct sps_event_notify *ev)
{
	struct completion *comp = ev->data.transfer.user;
	struct sps_iovec *iovec = &ev->data.transfer.iovec;

	if (ev->event_id == SPS_EVENT_DESC_DONE) {
		pr_debug("desc done iovec = (0x%x 0x%x 0x%x)\n",
			iovec->addr, iovec->size, iovec->flags);
	} else {
		pr_err("%s: ERR event %d\n",
					__func__, ev->event_id);
	}
	if (comp)
		complete(comp);
}

int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf,
			u32 len, struct completion *comp)
{
	struct sps_register_event sreg;
	int ret;
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	if (pn >= 7)
		return -ENODEV;

	sreg.options = (SPS_EVENT_DESC_DONE|SPS_EVENT_ERROR);
	sreg.mode = SPS_TRIGGER_WAIT;
	sreg.xfer_done = NULL;
	sreg.callback = msm_slim_port_cb;
	sreg.user = NULL;
	ret = sps_register_event(dev->pipes[pn].sps, &sreg);
	if (ret) {
		dev_dbg(dev->dev, "sps register event error:%x\n", ret);
		return ret;
	}
	ret = sps_transfer_one(dev->pipes[pn].sps, (u32)iobuf, len, comp,
				SPS_IOVEC_FLAG_INT);
	dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);
	if (!ret) {
		/* Enable port interrupts */
		u32 int_port = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
						dev->ver));
		if (!(int_port & (1 << (dev->port_b + pn))))
			writel_relaxed((int_port | (1 << (dev->port_b + pn))),
				PGD_THIS_EE(PGD_PORT_INT_EN_EEn, dev->ver));
		/* Make sure that port registers are updated before returning */
		mb();
	}

	return ret;
}

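/*
 * Illustrative usage sketch (not part of this file): a client that owns
 * the data buffer submits a transfer, waits on its completion, then
 * polls the transfer status. The caller-side variable names are
 * assumptions for the example.
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *	u8 *done_buf;
 *	u32 done_len;
 *
 *	if (!msm_slim_port_xfer(ctrl, pn, iobuf, len, &done)) {
 *		wait_for_completion(&done);
 *		msm_slim_port_xfer_status(ctrl, pn, &done_buf, &done_len);
 *	}
 *
 * The completion pointer rides along as the descriptor's "user" field
 * in sps_transfer_one() and is signalled from msm_slim_port_cb().
 */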
/* Queue up Tx message buffer */
static int msm_slim_post_tx_msgq(struct msm_slim_ctrl *dev, u8 *buf, int len)
{
	int ret;
	struct msm_slim_endp *endpoint = &dev->tx_msgq;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_pipe *pipe = endpoint->sps;
	int ix = (buf - (u8 *)mem->base) / SLIM_MSGQ_BUF_LEN;

	u32 phys_addr = mem->phys_base + (SLIM_MSGQ_BUF_LEN * ix);

	for (ret = 0; ret < ((len + 3) >> 2); ret++)
		pr_debug("BAM TX buf[%d]:0x%x", ret, ((u32 *)buf)[ret]);

	ret = sps_transfer_one(pipe, phys_addr, ((len + 3) & 0xFC), NULL,
				SPS_IOVEC_FLAG_EOT);
	if (ret)
		dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);

	return ret;
}

static u32 *msm_slim_tx_msgq_return(struct msm_slim_ctrl *dev)
{
	struct msm_slim_endp *endpoint = &dev->tx_msgq;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_pipe *pipe = endpoint->sps;
	struct sps_iovec iovec;
	int ret;

	/* first transaction after establishing connection */
	if (dev->tx_idx == -1) {
		dev->tx_idx = 0;
		return mem->base;
	}
	ret = sps_get_iovec(pipe, &iovec);
	if (ret || iovec.addr == 0) {
		dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
		return NULL;
	}

	/* Calculate buffer index */
	dev->tx_idx = (iovec.addr - mem->phys_base) / SLIM_MSGQ_BUF_LEN;

	return (u32 *)((u8 *)mem->base + (dev->tx_idx * SLIM_MSGQ_BUF_LEN));
}

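/*
 * TX buffer recycling: the TX message queue is a pool of MSM_TX_BUFS
 * buffers of SLIM_MSGQ_BUF_LEN bytes each. tx_idx == -1 is the sentinel
 * for the first transaction after connect, which simply hands out
 * buffer 0; after that, msm_slim_tx_msgq_return() reclaims the buffer
 * whose descriptor the BAM most recently completed, recovering its
 * index from the iovec's physical address. The ((len + 3) & 0xFC) in
 * msm_slim_post_tx_msgq() rounds the submitted length up to a whole
 * number of 32-bit words.
 */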
int msm_send_msg_buf(struct msm_slim_ctrl *dev, u32 *buf, u8 len, u32 tx_reg)
{
	if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED) {
		int i;
		for (i = 0; i < (len + 3) >> 2; i++) {
			dev_dbg(dev->dev, "AHB TX data:0x%x\n", buf[i]);
			writel_relaxed(buf[i], dev->base + tx_reg + (i * 4));
		}
		/* Guarantee that message is sent before returning */
		mb();
		return 0;
	}
	return msm_slim_post_tx_msgq(dev, (u8 *)buf, len);
}

u32 *msm_get_msg_buf(struct msm_slim_ctrl *dev, int len)
{
	/*
	 * Without message queues there is a single TX buffer, so a
	 * transaction blocks until the current one completes. Message
	 * queues allow multiple transactions to be outstanding.
	 */
	if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED)
		return dev->tx_buf;

	return msm_slim_tx_msgq_return(dev);
}

static void
msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
{
	u32 *buf = ev->data.transfer.user;
	struct sps_iovec *iovec = &ev->data.transfer.iovec;

	/*
	 * Note: the virtual address needs to be offset by the same index
	 * as the physical address, or the actual virtual address can be
	 * passed in directly if the sps_mem_buffer is not needed. If a
	 * completion is used instead, the virtual address won't be
	 * available and must be calculated from the offset of the
	 * physical address.
	 */
	if (ev->event_id == SPS_EVENT_DESC_DONE) {
		pr_debug("buf = 0x%p, data = 0x%x\n", buf, *buf);
		pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
			iovec->addr, iovec->size, iovec->flags);
	} else {
		dev_err(dev->dev, "%s: unknown event %d\n",
					__func__, ev->event_id);
	}
}

static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
{
	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;
	msm_slim_rx_msgq_event(dev, notify);
}

/* Queue up Rx message buffer */
static int msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
{
	int ret;
	u32 flags = SPS_IOVEC_FLAG_INT;
	struct msm_slim_endp *endpoint = &dev->rx_msgq;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_pipe *pipe = endpoint->sps;

	/* Rx message queue buffers are 4 bytes in length */
	u8 *virt_addr = mem->base + (4 * ix);
	u32 phys_addr = mem->phys_base + (4 * ix);

	pr_debug("index:%d, phys:0x%x, virt:0x%p\n", ix, phys_addr, virt_addr);

	ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, flags);
	if (ret)
		dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);

	return ret;
}

int msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
{
	struct msm_slim_endp *endpoint = &dev->rx_msgq;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_pipe *pipe = endpoint->sps;
	struct sps_iovec iovec;
	int index;
	int ret;

	ret = sps_get_iovec(pipe, &iovec);
	if (ret) {
		dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
		goto err_exit;
	}

	pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
		iovec.addr, iovec.size, iovec.flags);
	BUG_ON(iovec.addr < mem->phys_base);
	BUG_ON(iovec.addr >= mem->phys_base + mem->size);

	/* Calculate buffer index */
	index = (iovec.addr - mem->phys_base) / 4;
	*(data + offset) = *((u32 *)mem->base + index);

	pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index, *data);

	/* Add buffer back to the queue */
	(void)msm_slim_post_rx_msgq(dev, index);

err_exit:
	return ret;
}

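/*
 * RX message queue layout: one 4-byte buffer per descriptor, so a
 * message longer than one word is read out with repeated
 * msm_slim_rx_msgq_get() calls that step 'offset' through the caller's
 * word buffer; e.g. a 12-byte message would take three calls with
 * offset 0, 1 and 2. Each consumed buffer is immediately re-posted to
 * the BAM.
 */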
int msm_slim_connect_endp(struct msm_slim_ctrl *dev,
				struct msm_slim_endp *endpoint,
				struct completion *notify)
{
	int i, ret;
	struct sps_register_event sps_error_event; /* SPS_ERROR */
	struct sps_register_event sps_descr_event; /* DESCR_DONE */
	struct sps_connect *config = &endpoint->config;

	ret = sps_connect(endpoint->sps, config);
	if (ret) {
		dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
		return ret;
	}

	memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));

	if (notify) {
		sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
		sps_descr_event.options = SPS_O_DESC_DONE;
		sps_descr_event.user = (void *)dev;
		sps_descr_event.xfer_done = notify;

		ret = sps_register_event(endpoint->sps, &sps_descr_event);
		if (ret) {
			dev_err(dev->dev, "sps_register_event() failed 0x%x\n",
					ret);
			goto sps_reg_event_failed;
		}
	}

	/* Register callback for errors */
	memset(&sps_error_event, 0x00, sizeof(sps_error_event));
	sps_error_event.mode = SPS_TRIGGER_CALLBACK;
	sps_error_event.options = SPS_O_ERROR;
	sps_error_event.user = (void *)dev;
	sps_error_event.callback = msm_slim_rx_msgq_cb;

	ret = sps_register_event(endpoint->sps, &sps_error_event);
	if (ret) {
		dev_err(dev->dev, "sps_register_event() failed 0x%x\n", ret);
		goto sps_reg_event_failed;
	}

	/*
	 * Call transfer_one for each 4-byte buffer.
	 * Use (buf->size/4) - 1 for the number of buffers to post.
	 */

	if (endpoint == &dev->rx_msgq) {
		/* Setup the transfer */
		for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
			ret = msm_slim_post_rx_msgq(dev, i);
			if (ret) {
				dev_err(dev->dev,
					"post_rx_msgq() failed 0x%x\n", ret);
				goto sps_transfer_failed;
			}
		}
		dev->use_rx_msgqs = MSM_MSGQ_ENABLED;
	} else {
		dev->tx_idx = -1;
		dev->use_tx_msgqs = MSM_MSGQ_ENABLED;
	}

	return 0;
sps_transfer_failed:
	memset(&sps_error_event, 0x00, sizeof(sps_error_event));
	sps_register_event(endpoint->sps, &sps_error_event);
sps_reg_event_failed:
	sps_disconnect(endpoint->sps);
	return ret;
}

static int msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev, u32 pipe_reg)
{
	int ret;
	u32 pipe_offset;
	struct msm_slim_endp *endpoint = &dev->rx_msgq;
	struct sps_connect *config = &endpoint->config;
	struct sps_mem_buffer *descr = &config->desc;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct completion *notify = &dev->rx_msgq_notify;

	init_completion(notify);
	if (dev->use_rx_msgqs == MSM_MSGQ_DISABLED)
		return 0;

	/* Allocate the endpoint */
	ret = msm_slim_init_endpoint(dev, endpoint);
	if (ret) {
		dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
		goto sps_init_endpoint_failed;
	}

	/* Get the pipe indices for the message queues */
	pipe_offset = (readl_relaxed(dev->base + pipe_reg) & 0xfc) >> 2;
	dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);

	config->mode = SPS_MODE_SRC;
	config->source = dev->bam.hdl;
	config->destination = SPS_DEV_HANDLE_MEM;
	config->src_pipe_index = pipe_offset;
	config->options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* Allocate memory for the FIFO descriptors */
	ret = msm_slim_sps_mem_alloc(dev, descr,
				MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
	if (ret) {
		dev_err(dev->dev, "unable to allocate SPS descriptors\n");
		goto alloc_descr_failed;
	}

	/* Allocate memory for the message buffer(s), N descrs, 4-byte mesg */
	ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
	if (ret) {
		dev_err(dev->dev, "dma_alloc_coherent failed\n");
		goto alloc_buffer_failed;
	}

	ret = msm_slim_connect_endp(dev, endpoint, notify);

	if (!ret)
		return 0;

	msm_slim_sps_mem_free(dev, mem);
alloc_buffer_failed:
	msm_slim_sps_mem_free(dev, descr);
alloc_descr_failed:
	msm_slim_free_endpoint(endpoint);
sps_init_endpoint_failed:
	dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
	return ret;
}

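/*
 * The TX message queue below mirrors the RX setup but runs in the
 * opposite direction (memory to BAM) and uses the BAM pipe immediately
 * after the RX message queue pipe, hence the pipe_offset += 1.
 */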
static int msm_slim_init_tx_msgq(struct msm_slim_ctrl *dev, u32 pipe_reg)
{
	int ret;
	u32 pipe_offset;
	struct msm_slim_endp *endpoint = &dev->tx_msgq;
	struct sps_connect *config = &endpoint->config;
	struct sps_mem_buffer *descr = &config->desc;
	struct sps_mem_buffer *mem = &endpoint->buf;

	if (dev->use_tx_msgqs == MSM_MSGQ_DISABLED)
		return 0;

	/* Allocate the endpoint */
	ret = msm_slim_init_endpoint(dev, endpoint);
	if (ret) {
		dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
		goto sps_init_endpoint_failed;
	}

	/* Get the pipe indices for the message queues */
	pipe_offset = (readl_relaxed(dev->base + pipe_reg) & 0xfc) >> 2;
	pipe_offset += 1;
	dev_dbg(dev->dev, "TX Message queue pipe offset %d\n", pipe_offset);

	config->mode = SPS_MODE_DEST;
	config->source = SPS_DEV_HANDLE_MEM;
	config->destination = dev->bam.hdl;
	config->dest_pipe_index = pipe_offset;
	config->src_pipe_index = 0;
	config->options = SPS_O_ERROR | SPS_O_NO_Q |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* Allocate memory for the FIFO descriptors */
	ret = msm_slim_sps_mem_alloc(dev, descr,
				MSM_TX_BUFS * sizeof(struct sps_iovec));
	if (ret) {
		dev_err(dev->dev, "unable to allocate SPS descriptors\n");
		goto alloc_descr_failed;
	}

	/* Allocate memory for the message buffer(s), N descrs, 40-byte mesg */
	ret = msm_slim_sps_mem_alloc(dev, mem, MSM_TX_BUFS * SLIM_MSGQ_BUF_LEN);
	if (ret) {
		dev_err(dev->dev, "dma_alloc_coherent failed\n");
		goto alloc_buffer_failed;
	}
	ret = msm_slim_connect_endp(dev, endpoint, NULL);

	if (!ret)
		return 0;

	msm_slim_sps_mem_free(dev, mem);
alloc_buffer_failed:
	msm_slim_sps_mem_free(dev, descr);
alloc_descr_failed:
	msm_slim_free_endpoint(endpoint);
sps_init_endpoint_failed:
	dev->use_tx_msgqs = MSM_MSGQ_DISABLED;
	return ret;
}

/* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */
int msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem,
			u32 pipe_reg, bool remote)
{
	int i, ret;
	u32 bam_handle;
	struct sps_bam_props bam_props = {0};

	static struct sps_bam_sec_config_props sec_props = {
		.ees = {
			[0] = {		/* LPASS */
				.vmid = 0,
				.pipe_mask = 0xFFFF98,
			},
			[1] = {		/* Krait Apps */
				.vmid = 1,
				.pipe_mask = 0x3F000007,
			},
			[2] = {		/* Modem */
				.vmid = 2,
				.pipe_mask = 0x00000060,
			},
		},
	};

	if (dev->bam.hdl) {
		bam_handle = dev->bam.hdl;
		goto init_msgq;
	}
	bam_props.ee = dev->ee;
	bam_props.virt_addr = dev->bam.base;
	bam_props.phys_addr = bam_mem->start;
	bam_props.irq = dev->bam.irq;
	if (!remote) {
		bam_props.manage = SPS_BAM_MGR_LOCAL;
		bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
	} else {
		bam_props.manage = SPS_BAM_MGR_DEVICE_REMOTE |
					SPS_BAM_MGR_MULTI_EE;
		bam_props.sec_config = SPS_BAM_SEC_DO_NOT_CONFIG;
	}
	bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;

	bam_props.p_sec_config_props = &sec_props;

	bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* override apps channel pipes if specified in platform-data or DT */
	if (dev->pdata.apps_pipes)
		sec_props.ees[dev->ee].pipe_mask = dev->pdata.apps_pipes;

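	/*
	 * The first 7 BAM pipes carry message queues, so hardware port
	 * numbers are offset by 7 from BAM pipe numbers. The scan below
	 * finds the first Apps-owned pipe past those 7; dev->port_b then
	 * names the hardware port backing controller port 0.
	 */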
	/* First 7 bits are for message Qs */
	for (i = 7; i < 32; i++) {
		/* Check what pipes are owned by Apps. */
		if ((sec_props.ees[dev->ee].pipe_mask >> i) & 0x1)
			break;
	}
	dev->port_b = i - 7;

	/* Register the BAM device with the SPS driver */
	ret = sps_register_bam_device(&bam_props, &bam_handle);
	if (ret) {
		dev_err(dev->dev, "disabling BAM: reg-bam failed 0x%x\n", ret);
		dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
		dev->use_tx_msgqs = MSM_MSGQ_DISABLED;
		return ret;
	}
	dev->bam.hdl = bam_handle;
	dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%x\n", bam_handle);

init_msgq:
	ret = msm_slim_init_rx_msgq(dev, pipe_reg);
	if (ret)
		dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
	if (ret && bam_handle)
		dev->use_rx_msgqs = MSM_MSGQ_DISABLED;

	ret = msm_slim_init_tx_msgq(dev, pipe_reg);
	if (ret)
		dev_err(dev->dev, "msm_slim_init_tx_msgq failed 0x%x\n", ret);
	if (ret && bam_handle)
		dev->use_tx_msgqs = MSM_MSGQ_DISABLED;

	if (dev->use_tx_msgqs == MSM_MSGQ_DISABLED &&
		dev->use_rx_msgqs == MSM_MSGQ_DISABLED && bam_handle) {
		sps_deregister_bam_device(bam_handle);
		dev->bam.hdl = 0L;
	}

	return ret;
}

void msm_slim_disconnect_endp(struct msm_slim_ctrl *dev,
					struct msm_slim_endp *endpoint,
					enum msm_slim_msgq *msgq_flag)
{
	if (*msgq_flag == MSM_MSGQ_ENABLED) {
		sps_disconnect(endpoint->sps);
		*msgq_flag = MSM_MSGQ_RESET;
	}
}

static void msm_slim_remove_ep(struct msm_slim_ctrl *dev,
					struct msm_slim_endp *endpoint,
					enum msm_slim_msgq *msgq_flag)
{
	struct sps_connect *config = &endpoint->config;
	struct sps_mem_buffer *descr = &config->desc;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_register_event sps_event;
	memset(&sps_event, 0x00, sizeof(sps_event));
	msm_slim_sps_mem_free(dev, mem);
	sps_register_event(endpoint->sps, &sps_event);
	if (*msgq_flag == MSM_MSGQ_ENABLED) {
		msm_slim_disconnect_endp(dev, endpoint, msgq_flag);
		msm_slim_free_endpoint(endpoint);
	}
	msm_slim_sps_mem_free(dev, descr);
}

void msm_slim_sps_exit(struct msm_slim_ctrl *dev, bool dereg)
{
	if (dev->use_rx_msgqs >= MSM_MSGQ_ENABLED)
		msm_slim_remove_ep(dev, &dev->rx_msgq, &dev->use_rx_msgqs);
	if (dev->use_tx_msgqs >= MSM_MSGQ_ENABLED)
		msm_slim_remove_ep(dev, &dev->tx_msgq, &dev->use_tx_msgqs);
	if (dereg) {
		int i;
		for (i = dev->port_b; i < MSM_SLIM_NPORTS; i++) {
			if (dev->pipes[i - dev->port_b].connected)
				msm_dealloc_port(&dev->ctrl,
						i - dev->port_b);
		}
		sps_deregister_bam_device(dev->bam.hdl);
		dev->bam.hdl = 0L;
	}
}

/* Slimbus QMI Messaging */
#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01 0x0020
#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_V01 0x0020
#define SLIMBUS_QMI_POWER_REQ_V01 0x0021
#define SLIMBUS_QMI_POWER_RESP_V01 0x0021

#define SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN 7
#define SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN 7
#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN 14
#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN 7

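/*
 * QMI handshake with the remote SLIMbus service: the client first sends
 * a select-instance request naming the hardware instance and the role
 * it expects the remote (ADSP) side to take, then votes the bus
 * active/inactive with power requests. The elem_info tables below
 * describe the TLV wire encoding of each message for the kernel QMI
 * marshalling code.
 */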
enum slimbus_mode_enum_type_v01 {
	/* To force a 32 bit signed enum. Do not change or use */
	SLIMBUS_MODE_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
	SLIMBUS_MODE_SATELLITE_V01 = 1,
	SLIMBUS_MODE_MASTER_V01 = 2,
	SLIMBUS_MODE_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
};

enum slimbus_pm_enum_type_v01 {
	/* To force a 32 bit signed enum. Do not change or use */
	SLIMBUS_PM_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
	SLIMBUS_PM_INACTIVE_V01 = 1,
	SLIMBUS_PM_ACTIVE_V01 = 2,
	SLIMBUS_PM_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
};

struct slimbus_select_inst_req_msg_v01 {
	/* Mandatory */
	/* Hardware Instance Selection */
	uint32_t instance;

	/* Optional */
	/* Optional Mode Request Operation */
	/* Must be set to true if mode is being passed */
	uint8_t mode_valid;
	enum slimbus_mode_enum_type_v01 mode;
};

struct slimbus_select_inst_resp_msg_v01 {
	/* Mandatory */
	/* Result Code */
	struct qmi_response_type_v01 resp;
};

struct slimbus_power_req_msg_v01 {
	/* Mandatory */
	/* Power Request Operation */
	enum slimbus_pm_enum_type_v01 pm_req;
};

struct slimbus_power_resp_msg_v01 {
	/* Mandatory */
	/* Result Code */
	struct qmi_response_type_v01 resp;
};

static struct elem_info slimbus_select_inst_req_msg_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(uint32_t),
		.is_array = NO_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct slimbus_select_inst_req_msg_v01,
				   instance),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(uint8_t),
		.is_array = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct slimbus_select_inst_req_msg_v01,
				   mode_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(enum slimbus_mode_enum_type_v01),
		.is_array = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct slimbus_select_inst_req_msg_v01,
				   mode),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.is_array = NO_ARRAY,
		.tlv_type = 0x00,
		.offset = 0,
		.ei_array = NULL,
	},
};

static struct elem_info slimbus_select_inst_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.is_array = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct slimbus_select_inst_resp_msg_v01,
				   resp),
		.ei_array = get_qmi_response_type_v01_ei(),
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.is_array = NO_ARRAY,
		.tlv_type = 0x00,
		.offset = 0,
		.ei_array = NULL,
	},
};

static struct elem_info slimbus_power_req_msg_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(enum slimbus_pm_enum_type_v01),
		.is_array = NO_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct slimbus_power_req_msg_v01, pm_req),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.is_array = NO_ARRAY,
		.tlv_type = 0x00,
		.offset = 0,
		.ei_array = NULL,
	},
};

static struct elem_info slimbus_power_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.is_array = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct slimbus_power_resp_msg_v01, resp),
		.ei_array = get_qmi_response_type_v01_ei(),
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.is_array = NO_ARRAY,
		.tlv_type = 0x00,
		.offset = 0,
		.ei_array = NULL,
	},
};

static void msm_slim_qmi_recv_msg(struct kthread_work *work)
{
	int rc;
	struct msm_slim_qmi *qmi =
			container_of(work, struct msm_slim_qmi, kwork);

	rc = qmi_recv_msg(qmi->handle);
	if (rc < 0)
		pr_err("%s: Error receiving QMI message\n", __func__);
}

static void msm_slim_qmi_notify(struct qmi_handle *handle,
				enum qmi_event_type event, void *notify_priv)
{
	struct msm_slim_ctrl *dev = notify_priv;
	struct msm_slim_qmi *qmi = &dev->qmi;

	switch (event) {
	case QMI_RECV_MSG:
		queue_kthread_work(&qmi->kworker, &qmi->kwork);
		break;
	default:
		break;
	}
}

static const char *get_qmi_error(struct qmi_response_type_v01 *r)
{
	if (r->result == QMI_RESULT_SUCCESS_V01 || r->error == QMI_ERR_NONE_V01)
		return "No Error";
	else if (r->error == QMI_ERR_NO_MEMORY_V01)
		return "Out of Memory";
	else if (r->error == QMI_ERR_INTERNAL_V01)
		return "Unexpected error occurred";
	else if (r->error == QMI_ERR_INCOMPATIBLE_STATE_V01)
		return "Slimbus s/w already configured to a different mode";
	else if (r->error == QMI_ERR_INVALID_ID_V01)
		return "Slimbus hardware instance is not valid";
	else
		return "Unknown error";
}

static int msm_slim_qmi_send_select_inst_req(struct msm_slim_ctrl *dev,
				struct slimbus_select_inst_req_msg_v01 *req)
{
	struct slimbus_select_inst_resp_msg_v01 resp = { { 0, 0 } };
	struct msg_desc req_desc, resp_desc;
	int rc;

	req_desc.msg_id = SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01;
	req_desc.max_msg_len = SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN;
	req_desc.ei_array = slimbus_select_inst_req_msg_v01_ei;

	resp_desc.msg_id = SLIMBUS_QMI_SELECT_INSTANCE_RESP_V01;
	resp_desc.max_msg_len = SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN;
	resp_desc.ei_array = slimbus_select_inst_resp_msg_v01_ei;

	rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req, sizeof(*req),
					&resp_desc, &resp, sizeof(resp), 5000);
	if (rc < 0) {
		pr_err("%s: QMI send req failed %d\n", __func__, rc);
		return rc;
	}

	/* Check the response */
	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		pr_err("%s: QMI request failed 0x%x (%s)\n", __func__,
				resp.resp.result, get_qmi_error(&resp.resp));
		return -EREMOTEIO;
	}

	return 0;
}

static int msm_slim_qmi_send_power_request(struct msm_slim_ctrl *dev,
				struct slimbus_power_req_msg_v01 *req)
{
	struct slimbus_power_resp_msg_v01 resp = { { 0, 0 } };
	struct msg_desc req_desc, resp_desc;
	int rc;

	req_desc.msg_id = SLIMBUS_QMI_POWER_REQ_V01;
	req_desc.max_msg_len = SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN;
	req_desc.ei_array = slimbus_power_req_msg_v01_ei;

	resp_desc.msg_id = SLIMBUS_QMI_POWER_RESP_V01;
	resp_desc.max_msg_len = SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN;
	resp_desc.ei_array = slimbus_power_resp_msg_v01_ei;

	rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req, sizeof(*req),
					&resp_desc, &resp, sizeof(resp), 5000);
	if (rc < 0) {
		pr_err("%s: QMI send req failed %d\n", __func__, rc);
		return rc;
	}

	/* Check the response */
	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		pr_err("%s: QMI request failed 0x%x (%s)\n", __func__,
				resp.resp.result, get_qmi_error(&resp.resp));
		return -EREMOTEIO;
	}

	return 0;
}

int msm_slim_qmi_init(struct msm_slim_ctrl *dev, bool apps_is_master)
{
	int rc = 0;
	struct qmi_handle *handle;
	struct slimbus_select_inst_req_msg_v01 req;

	init_kthread_worker(&dev->qmi.kworker);

	dev->qmi.task = kthread_run(kthread_worker_fn,
			&dev->qmi.kworker, "msm_slim_qmi_clnt%d", dev->ctrl.nr);

	if (IS_ERR(dev->qmi.task)) {
		pr_err("%s: Failed to create QMI client kthread\n", __func__);
		return -ENOMEM;
	}

	init_kthread_work(&dev->qmi.kwork, msm_slim_qmi_recv_msg);

	handle = qmi_handle_create(msm_slim_qmi_notify, dev);
	if (!handle) {
		rc = -ENOMEM;
		pr_err("%s: QMI client handle alloc failed\n", __func__);
		goto qmi_handle_create_failed;
	}

	rc = qmi_connect_to_service(handle, SLIMBUS_QMI_SVC_ID,
						SLIMBUS_QMI_INS_ID);
	if (rc < 0) {
		pr_err("%s: QMI server not found\n", __func__);
		goto qmi_connect_to_service_failed;
	}

	/* Instance is 0 based */
	req.instance = dev->ctrl.nr - 1;
	req.mode_valid = 1;

	/* Mode indicates the role of the ADSP */
	if (apps_is_master)
		req.mode = SLIMBUS_MODE_SATELLITE_V01;
	else
		req.mode = SLIMBUS_MODE_MASTER_V01;

	dev->qmi.handle = handle;

	rc = msm_slim_qmi_send_select_inst_req(dev, &req);
	if (rc) {
		pr_err("%s: failed to select h/w instance\n", __func__);
		goto qmi_select_instance_failed;
	}

	return 0;

qmi_select_instance_failed:
	dev->qmi.handle = NULL;
qmi_connect_to_service_failed:
	qmi_handle_destroy(handle);
qmi_handle_create_failed:
	flush_kthread_worker(&dev->qmi.kworker);
	kthread_stop(dev->qmi.task);
	dev->qmi.task = NULL;
	return rc;
}

void msm_slim_qmi_exit(struct msm_slim_ctrl *dev)
{
	qmi_handle_destroy(dev->qmi.handle);
	flush_kthread_worker(&dev->qmi.kworker);
	kthread_stop(dev->qmi.task);
	dev->qmi.task = NULL;
	dev->qmi.handle = NULL;
}

int msm_slim_qmi_power_request(struct msm_slim_ctrl *dev, bool active)
{
	struct slimbus_power_req_msg_v01 req;

	if (active)
		req.pm_req = SLIMBUS_PM_ACTIVE_V01;
	else
		req.pm_req = SLIMBUS_PM_INACTIVE_V01;

	return msm_slim_qmi_send_power_request(dev, &req);
}
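
/*
 * Illustrative call sequence (not part of this file), assuming the Apps
 * processor is the bus master and 'dev' is an initialized controller:
 *
 *	if (!msm_slim_qmi_init(dev, true))
 *		msm_slim_qmi_power_request(dev, true);
 *	...
 *	msm_slim_qmi_power_request(dev, false);
 *	msm_slim_qmi_exit(dev);
 *
 * The first power request votes the bus active; the later one drops
 * that vote before tearing the QMI client down.
 */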