/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/slimbus/slimbus.h>
#include <mach/sps.h>
#include "slim-msm.h"

int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
{
	spin_lock(&dev->rx_lock);
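	/* The circular queue is full when advancing tail would overtake head */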
	if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
		spin_unlock(&dev->rx_lock);
		dev_err(dev->dev, "RX QUEUE full!");
		return -EXFULL;
	}
	memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
	dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
	spin_unlock(&dev->rx_lock);
	return 0;
}

int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->rx_lock, flags);
	if (dev->tail == dev->head) {
		spin_unlock_irqrestore(&dev->rx_lock, flags);
		return -ENODATA;
	}
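	/* Each RX queue slot holds one complete message of up to 40 bytes */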
	memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40);
	dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
	spin_unlock_irqrestore(&dev->rx_lock, flags);
	return 0;
}

int msm_slim_get_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref = 0;
	int ret = pm_runtime_get_sync(dev->dev);
	if (ret >= 0) {
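		/* usage_count should be positive after a successful get_sync */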
		ref = atomic_read(&dev->dev->power.usage_count);
		if (ref <= 0) {
			dev_err(dev->dev, "reference count -ve:%d", ref);
			ret = -ENODEV;
		}
	}
	return ret;
#else
	return -ENODEV;
#endif
}

void msm_slim_put_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref;
	pm_runtime_mark_last_busy(dev->dev);
	ref = atomic_read(&dev->dev->power.usage_count);
	if (ref <= 0)
		dev_err(dev->dev, "reference count mismatch:%d", ref);
	else
		pm_runtime_put(dev->dev);
#endif
}

int msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
{
	int ret;
	struct sps_pipe *endpoint;
	struct sps_connect *config = &ep->config;

	/* Allocate the endpoint */
	endpoint = sps_alloc_endpoint();
	if (!endpoint) {
		dev_err(dev->dev, "sps_alloc_endpoint failed\n");
		return -ENOMEM;
	}

	/* Get default connection configuration for an endpoint */
	ret = sps_get_config(endpoint, config);
	if (ret) {
		dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
		goto sps_config_failed;
	}

	ep->sps = endpoint;
	return 0;

sps_config_failed:
	sps_free_endpoint(endpoint);
	return ret;
}

void msm_slim_free_endpoint(struct msm_slim_endp *ep)
{
	sps_free_endpoint(ep->sps);
	ep->sps = NULL;
}

int msm_slim_sps_mem_alloc(
		struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
{
	dma_addr_t phys;

	mem->size = len;
	mem->min_size = 0;
	mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);

	if (!mem->base) {
		dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
		return -ENOMEM;
	}

	mem->phys_base = phys;
	memset(mem->base, 0x00, mem->size);
	return 0;
}

void
msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
{
	dma_free_coherent(dev->dev, mem->size, mem->base, mem->phys_base);
	mem->size = 0;
	mem->base = NULL;
	mem->phys_base = 0;
}

void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pn)
{
	u32 set_cfg = DEF_WATERMARK | DEF_ALIGN | DEF_PACK | ENABLE_PORT;
	u32 int_port = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
					dev->ver));
	writel_relaxed(set_cfg, PGD_PORT(PGD_PORT_CFGn, pn, dev->ver));
	writel_relaxed(DEF_BLKSZ, PGD_PORT(PGD_PORT_BLKn, pn, dev->ver));
	writel_relaxed(DEF_TRANSZ, PGD_PORT(PGD_PORT_TRANn, pn, dev->ver));
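	/* Enable this port's interrupt in the EE's port-interrupt mask */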
	writel_relaxed((int_port | 1 << pn), PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
					dev->ver));
	/* Make sure that port registers are updated before returning */
	mb();
}

int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
	struct msm_slim_endp *endpoint = &dev->pipes[pn];
	struct sps_connect *cfg = &endpoint->config;
	u32 stat;
	int ret = sps_get_config(dev->pipes[pn].sps, cfg);
	if (ret) {
		dev_err(dev->dev, "sps pipe-port get config error:%x\n", ret);
		return ret;
	}
	cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
			SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	if (dev->pipes[pn].connected) {
		ret = sps_set_config(dev->pipes[pn].sps, cfg);
		if (ret) {
			dev_err(dev->dev, "sps pipe-port set config error:%x\n",
					ret);
			return ret;
		}
	}

	stat = readl_relaxed(PGD_PORT(PGD_PORT_STATn, (pn + dev->pipe_b),
				dev->ver));
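	/* Bits [11:4] of PGD_PORT_STATn report the BAM pipe for this port */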
	if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
		cfg->destination = dev->bam.hdl;
		cfg->source = SPS_DEV_HANDLE_MEM;
		cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->src_pipe_index = 0;
		dev_dbg(dev->dev, "flow src:pipe num:%d",
					cfg->dest_pipe_index);
		cfg->mode = SPS_MODE_DEST;
	} else {
		cfg->source = dev->bam.hdl;
		cfg->destination = SPS_DEV_HANDLE_MEM;
		cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->dest_pipe_index = 0;
		dev_dbg(dev->dev, "flow dest:pipe num:%d",
					cfg->src_pipe_index);
		cfg->mode = SPS_MODE_SRC;
	}
	/* Space for descriptor FIFOs */
	cfg->desc.size = MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec);
	cfg->config = SPS_CONFIG_DEFAULT;
	ret = sps_connect(dev->pipes[pn].sps, cfg);
	if (!ret) {
		dev->pipes[pn].connected = true;
		msm_hw_set_port(dev, pn + dev->pipe_b);
	}
	return ret;
}

int msm_config_port(struct slim_controller *ctrl, u8 pn)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	struct msm_slim_endp *endpoint;
	int ret = 0;
	if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
		ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
		return -EPROTONOSUPPORT;
	if (pn >= (MSM_SLIM_NPORTS - dev->pipe_b))
		return -ENODEV;

	endpoint = &dev->pipes[pn];
	ret = msm_slim_init_endpoint(dev, endpoint);
	dev_dbg(dev->dev, "init endpoint ret code:%x\n", ret);
	return ret;
}

enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
				u8 pn, u8 **done_buf, u32 *done_len)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
	struct sps_iovec sio;
	int ret;
	if (done_len)
		*done_len = 0;
	if (done_buf)
		*done_buf = NULL;
	if (!dev->pipes[pn].connected)
		return SLIM_P_DISCONNECT;
	ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
	if (!ret) {
		if (done_len)
			*done_len = sio.size;
		if (done_buf)
			*done_buf = (u8 *)sio.addr;
	}
	dev_dbg(dev->dev, "get iovec returned %d\n", ret);
	return SLIM_P_INPROGRESS;
}

int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf,
			u32 len, struct completion *comp)
{
	struct sps_register_event sreg;
	int ret;
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
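	/* Pipes 0-6 are reserved for message queues (see msm_slim_sps_init) */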
	if (pn >= 7)
		return -ENODEV;

	ctrl->ports[pn].xcomp = comp;
	sreg.options = (SPS_EVENT_DESC_DONE | SPS_EVENT_ERROR);
	sreg.mode = SPS_TRIGGER_WAIT;
	sreg.xfer_done = comp;
	sreg.callback = NULL;
	sreg.user = &ctrl->ports[pn];
	ret = sps_register_event(dev->pipes[pn].sps, &sreg);
	if (ret) {
		dev_dbg(dev->dev, "sps register event error:%x\n", ret);
		return ret;
	}
	ret = sps_transfer_one(dev->pipes[pn].sps, (u32)iobuf, len, NULL,
				SPS_IOVEC_FLAG_INT);
	dev_dbg(dev->dev, "sps submit xfer ret code:%x\n", ret);

	return ret;
}

int msm_send_msg_buf(struct msm_slim_ctrl *dev, u32 *buf, u8 len, u32 tx_reg)
{
	int i;
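	/* Round the byte count up to whole 32-bit words */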
	for (i = 0; i < (len + 3) >> 2; i++) {
		dev_dbg(dev->dev, "TX data:0x%x\n", buf[i]);
		writel_relaxed(buf[i], dev->base + tx_reg + (i * 4));
	}
	/* Guarantee that message is sent before returning */
	mb();
	return 0;
}

u32 *msm_get_msg_buf(struct msm_slim_ctrl *dev, int len)
{
	/*
	 * Currently we block a transaction until the current one completes.
	 * In case we need multiple transactions, use message Q
	 */
	return dev->tx_buf;
}

static void
msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
{
	u32 *buf = ev->data.transfer.user;
	struct sps_iovec *iovec = &ev->data.transfer.iovec;

	/*
	 * Note the virtual address needs to be offset by the same index
	 * as the physical address or just pass in the actual virtual address
	 * if the sps_mem_buffer is not needed. Note that if completion is
	 * used, the virtual address won't be available and will need to be
	 * calculated based on the offset of the physical address
	 */
	if (ev->event_id == SPS_EVENT_DESC_DONE) {

		pr_debug("buf = 0x%p, data = 0x%x\n", buf, *buf);

		pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
			iovec->addr, iovec->size, iovec->flags);

	} else {
		dev_err(dev->dev, "%s: unknown event %d\n",
					__func__, ev->event_id);
	}
}

static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
{
	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;
	msm_slim_rx_msgq_event(dev, notify);
}

/* Queue up Rx message buffer */
static int msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
{
	int ret;
	u32 flags = SPS_IOVEC_FLAG_INT;
	struct msm_slim_endp *endpoint = &dev->rx_msgq;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_pipe *pipe = endpoint->sps;

	/* Rx message queue buffers are 4 bytes in length */
	u8 *virt_addr = mem->base + (4 * ix);
	u32 phys_addr = mem->phys_base + (4 * ix);

	pr_debug("index:%d, phys:0x%x, virt:0x%p\n", ix, phys_addr, virt_addr);

	ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, flags);
	if (ret)
		dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);

	return ret;
}

int msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
{
	struct msm_slim_endp *endpoint = &dev->rx_msgq;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_pipe *pipe = endpoint->sps;
	struct sps_iovec iovec;
	int index;
	int ret;

	ret = sps_get_iovec(pipe, &iovec);
	if (ret) {
		dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
		goto err_exit;
	}

	pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
		iovec.addr, iovec.size, iovec.flags);
	BUG_ON(iovec.addr < mem->phys_base);
	BUG_ON(iovec.addr >= mem->phys_base + mem->size);

	/* Calculate buffer index */
	index = (iovec.addr - mem->phys_base) / 4;
	*(data + offset) = *((u32 *)mem->base + index);

	pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index, *data);

	/* Add buffer back to the queue */
	(void)msm_slim_post_rx_msgq(dev, index);

err_exit:
	return ret;
}

static int msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev, u32 pipe_reg)
{
	int i, ret;
	u32 pipe_offset;
	struct msm_slim_endp *endpoint = &dev->rx_msgq;
	struct sps_connect *config = &endpoint->config;
	struct sps_mem_buffer *descr = &config->desc;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct completion *notify = &dev->rx_msgq_notify;

	struct sps_register_event sps_error_event; /* SPS_ERROR */
	struct sps_register_event sps_descr_event; /* DESCR_DONE */

	init_completion(notify);
	if (!dev->use_rx_msgqs)
		return 0;

	/* Allocate the endpoint */
	ret = msm_slim_init_endpoint(dev, endpoint);
	if (ret) {
		dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
		goto sps_init_endpoint_failed;
	}

	/* Get the pipe indices for the message queues */
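	/* Bits [7:2] of the pipe register hold this EE's pipe offset */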
	pipe_offset = (readl_relaxed(dev->base + pipe_reg) & 0xfc) >> 2;
	dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);

	config->mode = SPS_MODE_SRC;
	config->source = dev->bam.hdl;
	config->destination = SPS_DEV_HANDLE_MEM;
	config->src_pipe_index = pipe_offset;
	config->options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* Allocate memory for the FIFO descriptors */
	ret = msm_slim_sps_mem_alloc(dev, descr,
				MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
	if (ret) {
		dev_err(dev->dev, "unable to allocate SPS descriptors\n");
		goto alloc_descr_failed;
	}

	ret = sps_connect(endpoint->sps, config);
	if (ret) {
		dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
		goto sps_connect_failed;
	}

	memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));

	sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
	sps_descr_event.options = SPS_O_DESC_DONE;
	sps_descr_event.user = (void *)dev;
	sps_descr_event.xfer_done = notify;

	ret = sps_register_event(endpoint->sps, &sps_descr_event);
	if (ret) {
		dev_err(dev->dev, "sps_register_event() failed 0x%x\n", ret);
		goto sps_reg_event_failed;
	}

	/* Register callback for errors */
	memset(&sps_error_event, 0x00, sizeof(sps_error_event));
	sps_error_event.mode = SPS_TRIGGER_CALLBACK;
	sps_error_event.options = SPS_O_ERROR;
	sps_error_event.user = (void *)dev;
	sps_error_event.callback = msm_slim_rx_msgq_cb;

	ret = sps_register_event(endpoint->sps, &sps_error_event);
	if (ret) {
		dev_err(dev->dev, "sps_register_event() failed 0x%x\n", ret);
		goto sps_reg_event_failed;
	}

	/* Allocate memory for the message buffer(s), N descrs, 4-byte msgs */
	ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
	if (ret) {
		dev_err(dev->dev, "dma_alloc_coherent failed\n");
		goto alloc_buffer_failed;
	}

	/*
	 * Call transfer_one for each 4-byte buffer
	 * Use (buf->size/4) - 1 for the number of buffers to post
	 */

	/* Setup the transfer */
	for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
		ret = msm_slim_post_rx_msgq(dev, i);
		if (ret) {
			dev_err(dev->dev, "post_rx_msgq() failed 0x%x\n", ret);
			goto sps_transfer_failed;
		}
	}

	return 0;

sps_transfer_failed:
	msm_slim_sps_mem_free(dev, mem);
alloc_buffer_failed:
	memset(&sps_error_event, 0x00, sizeof(sps_error_event));
	sps_register_event(endpoint->sps, &sps_error_event);
sps_reg_event_failed:
	sps_disconnect(endpoint->sps);
sps_connect_failed:
	msm_slim_sps_mem_free(dev, descr);
alloc_descr_failed:
	msm_slim_free_endpoint(endpoint);
sps_init_endpoint_failed:
	dev->use_rx_msgqs = 0;
	return ret;
}

/* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */
int msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem,
			u32 pipe_reg, bool remote)
{
	int i, ret;
	u32 bam_handle = 0;
	struct sps_bam_props bam_props = {0};

	static struct sps_bam_sec_config_props sec_props = {
		.ees = {
			[0] = { /* LPASS */
				.vmid = 0,
				.pipe_mask = 0xFFFF98,
			},
			[1] = { /* Krait Apps */
				.vmid = 1,
				.pipe_mask = 0x3F000007,
			},
			[2] = { /* Modem */
				.vmid = 2,
				.pipe_mask = 0x00000060,
			},
		},
	};

	bam_props.ee = dev->ee;
	bam_props.virt_addr = dev->bam.base;
	bam_props.phys_addr = bam_mem->start;
	bam_props.irq = dev->bam.irq;
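	/* A remotely-managed BAM must not be security-configured locally */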
	if (!remote) {
		bam_props.manage = SPS_BAM_MGR_LOCAL;
		bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
	} else {
		bam_props.manage = SPS_BAM_MGR_DEVICE_REMOTE |
					SPS_BAM_MGR_MULTI_EE;
		bam_props.sec_config = SPS_BAM_SEC_DO_NOT_CONFIG;
	}
	bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;

	bam_props.p_sec_config_props = &sec_props;

	bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* First 7 pipes are reserved for message Qs */
	for (i = 7; i < 32; i++) {
		/* Check what pipes are owned by Apps. */
		if ((sec_props.ees[dev->ee].pipe_mask >> i) & 0x1)
			break;
	}
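	/* pipe_b: offset from a controller port number to its Apps-owned pipe */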
	dev->pipe_b = i - 7;

	/* Register the BAM device with the SPS driver */
	ret = sps_register_bam_device(&bam_props, &bam_handle);
	if (ret) {
		dev_err(dev->dev, "disabling BAM: reg-bam failed 0x%x\n", ret);
		dev->use_rx_msgqs = 0;
		goto init_rx_msgq;
	}
	dev->bam.hdl = bam_handle;
	dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%x\n", bam_handle);

init_rx_msgq:
	ret = msm_slim_init_rx_msgq(dev, pipe_reg);
	if (ret)
		dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
	if (ret && bam_handle) {
		sps_deregister_bam_device(bam_handle);
		dev->bam.hdl = 0L;
	}
	return ret;
}

void msm_slim_sps_exit(struct msm_slim_ctrl *dev)
{
	if (dev->use_rx_msgqs) {
		struct msm_slim_endp *endpoint = &dev->rx_msgq;
		struct sps_connect *config = &endpoint->config;
		struct sps_mem_buffer *descr = &config->desc;
		struct sps_mem_buffer *mem = &endpoint->buf;
		struct sps_register_event sps_event;
		memset(&sps_event, 0x00, sizeof(sps_event));
		msm_slim_sps_mem_free(dev, mem);
		sps_register_event(endpoint->sps, &sps_event);
		sps_disconnect(endpoint->sps);
		msm_slim_sps_mem_free(dev, descr);
		msm_slim_free_endpoint(endpoint);
		sps_deregister_bam_device(dev->bam.hdl);
	}
}