/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <asm/dma-iommu.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gcd.h>
#include <linux/msm-sps.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/slimbus/slimbus.h>
#include "slim-msm.h"

/* Pipe Number Offset Mask */
#define P_OFF_MASK		0x3FC
#define MSM_SLIM_VA_START	(0x40000000)
#define MSM_SLIM_VA_SIZE	(0xC0000000)

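/*
 * Received messages are staged in a ring of MSM_CONCUR_MSG fixed-size
 * slots. One slot is always left unused so that
 * "(tail + 1) % MSM_CONCUR_MSG == head" unambiguously means full and
 * "tail == head" means empty, with no separate element count needed.
 */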
int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
{
	spin_lock(&dev->rx_lock);
	if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
		spin_unlock(&dev->rx_lock);
		dev_err(dev->dev, "RX QUEUE full!");
		return -EXFULL;
	}
	memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
	dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
	spin_unlock(&dev->rx_lock);
	return 0;
}

int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->rx_lock, flags);
	if (dev->tail == dev->head) {
		spin_unlock_irqrestore(&dev->rx_lock, flags);
		return -ENODATA;
	}
	memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40);
	dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
	spin_unlock_irqrestore(&dev->rx_lock, flags);
	return 0;
}

int msm_slim_get_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM
	int ref = 0;
	int ret = pm_runtime_get_sync(dev->dev);

	if (ret >= 0) {
		ref = atomic_read(&dev->dev->power.usage_count);
		if (ref <= 0) {
			SLIM_WARN(dev, "reference count -ve:%d", ref);
			ret = -ENODEV;
		}
	}
	return ret;
#else
	return -ENODEV;
#endif
}

void msm_slim_put_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM
	int ref;

	pm_runtime_mark_last_busy(dev->dev);
	ref = atomic_read(&dev->dev->power.usage_count);
	if (ref <= 0)
		SLIM_WARN(dev, "reference count mismatch:%d", ref);
	else
		pm_runtime_put_sync(dev->dev);
#endif
}

irqreturn_t msm_slim_port_irq_handler(struct msm_slim_ctrl *dev, u32 pstat)
{
	int i;
	u32 int_en = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
							dev->ver));
	/*
	 * A port interrupt other than the ones we enabled; ignore it.
	 * This can happen if overflow/underflow was reported for a port
	 * whose interrupt was disabled because the client had no buffers
	 * available.
	 */
	if ((pstat & int_en) == 0)
		return IRQ_HANDLED;
	for (i = 0; i < dev->port_nums; i++) {
		struct msm_slim_endp *endpoint = &dev->pipes[i];

		if (pstat & (1 << endpoint->port_b)) {
			u32 val = readl_relaxed(PGD_PORT(PGD_PORT_STATn,
					endpoint->port_b, dev->ver));
			if (val & MSM_PORT_OVERFLOW)
				dev->ctrl.ports[i].err = SLIM_P_OVERFLOW;
			else if (val & MSM_PORT_UNDERFLOW)
				dev->ctrl.ports[i].err = SLIM_P_UNDERFLOW;
		}
	}
	/*
	 * Disable the port interrupt here. It is re-enabled when more
	 * buffers are provided for this port.
	 */
	writel_relaxed((int_en & (~pstat)),
			PGD_THIS_EE(PGD_PORT_INT_EN_EEn, dev->ver));
	/* clear port interrupts */
	writel_relaxed(pstat, PGD_THIS_EE(PGD_PORT_INT_CL_EEn, dev->ver));
	SLIM_INFO(dev, "disabled overflow/underflow for port 0x%x", pstat);

	/*
	 * Guarantee that the writes clearing the port interrupt bit(s)
	 * go through before exiting the ISR.
	 */
	mb();
	return IRQ_HANDLED;
}

int msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
{
	int ret;
	struct sps_pipe *endpoint;
	struct sps_connect *config = &ep->config;

	/* Allocate the endpoint */
	endpoint = sps_alloc_endpoint();
	if (!endpoint) {
		dev_err(dev->dev, "sps_alloc_endpoint failed\n");
		return -ENOMEM;
	}

	/* Get default connection configuration for an endpoint */
	ret = sps_get_config(endpoint, config);
	if (ret) {
		dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
		goto sps_config_failed;
	}

	ep->sps = endpoint;
	return 0;

sps_config_failed:
	sps_free_endpoint(endpoint);
	return ret;
}

void msm_slim_free_endpoint(struct msm_slim_endp *ep)
{
	sps_free_endpoint(ep->sps);
	ep->sps = NULL;
}

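/*
 * Create and attach an ARM IOMMU mapping for the SLIMbus BAM when a
 * separate IOMMU context-bank device was described for the controller.
 * Any stale mapping from a previous attach is torn down first, and
 * stage-1 translation is optionally bypassed when s1_bypass is set.
 */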
static int msm_slim_iommu_attach(struct msm_slim_ctrl *ctrl_dev)
{
	struct dma_iommu_mapping *iommu_map;
	dma_addr_t va_start = MSM_SLIM_VA_START;
	size_t va_size = MSM_SLIM_VA_SIZE;
	int bypass = 1;
	struct device *dev;

	if (unlikely(!ctrl_dev))
		return -EINVAL;

	if (!ctrl_dev->iommu_desc.cb_dev)
		return 0;

	if (!IS_ERR_OR_NULL(ctrl_dev->iommu_desc.iommu_map)) {
		arm_iommu_detach_device(ctrl_dev->iommu_desc.cb_dev);
		arm_iommu_release_mapping(ctrl_dev->iommu_desc.iommu_map);
		ctrl_dev->iommu_desc.iommu_map = NULL;
		SLIM_INFO(ctrl_dev, "NGD IOMMU Detach complete\n");
	}

	dev = ctrl_dev->iommu_desc.cb_dev;
	iommu_map = arm_iommu_create_mapping(&platform_bus_type,
						va_start, va_size);
	if (IS_ERR(iommu_map)) {
		dev_err(dev, "%s iommu_create_mapping failure\n", __func__);
		return PTR_ERR(iommu_map);
	}

	if (ctrl_dev->iommu_desc.s1_bypass) {
		if (iommu_domain_set_attr(iommu_map->domain,
					DOMAIN_ATTR_S1_BYPASS, &bypass)) {
			dev_err(dev, "%s Can't bypass s1 translation\n",
				__func__);
			arm_iommu_release_mapping(iommu_map);
			return -EIO;
		}
	}

	if (arm_iommu_attach_device(dev, iommu_map)) {
		dev_err(dev, "%s can't arm_iommu_attach_device\n", __func__);
		arm_iommu_release_mapping(iommu_map);
		return -EIO;
	}
	ctrl_dev->iommu_desc.iommu_map = iommu_map;
	SLIM_INFO(ctrl_dev, "NGD IOMMU Attach complete\n");
	return 0;
}

int msm_slim_sps_mem_alloc(
		struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
{
	dma_addr_t phys;
	struct device *dma_dev = dev->iommu_desc.cb_dev ?
					dev->iommu_desc.cb_dev : dev->dev;

	mem->size = len;
	mem->min_size = 0;
	mem->base = dma_alloc_coherent(dma_dev, mem->size, &phys, GFP_KERNEL);

	if (!mem->base) {
		dev_err(dma_dev, "dma_alloc_coherent(%d) failed\n", len);
		return -ENOMEM;
	}

	mem->phys_base = phys;
	memset(mem->base, 0x00, mem->size);
	return 0;
}

void
msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
{
	/* Free through the same device the buffer was allocated from */
	struct device *dma_dev = dev->iommu_desc.cb_dev ?
					dev->iommu_desc.cb_dev : dev->dev;

	if (mem->base && mem->phys_base)
		dma_free_coherent(dma_dev, mem->size, mem->base,
				mem->phys_base);
	else
		dev_err(dev->dev, "can't DMA free: NULL base or phys_base\n");
	mem->size = 0;
	mem->base = NULL;
	mem->phys_base = 0;
}

void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pipenum, u8 portnum)
{
	struct slim_controller *ctrl;
	struct slim_ch *chan;
	struct msm_slim_pshpull_parm *parm;
	u32 set_cfg = 0;
	struct slim_port_cfg cfg;

	if (!dev) {
		pr_err("%s:Dev node is null\n", __func__);
		return;
	}
	if (portnum >= dev->port_nums) {
		pr_err("%s:Invalid port\n", __func__);
		return;
	}
	/* Read the port config only after dev and portnum are validated */
	cfg = dev->ctrl.ports[portnum].cfg;
	ctrl = &dev->ctrl;
	chan = ctrl->ports[portnum].ch;
	parm = &dev->pipes[portnum].psh_pull;

	if (cfg.watermark)
		set_cfg = (cfg.watermark << 1);
	else
		set_cfg = DEF_WATERMARK;

	if (cfg.port_opts & SLIM_OPT_NO_PACK)
		set_cfg |= DEF_NO_PACK;
	else
		set_cfg |= DEF_PACK;

	if (cfg.port_opts & SLIM_OPT_ALIGN_MSB)
		set_cfg |= DEF_ALIGN_MSB;
	else
		set_cfg |= DEF_ALIGN_LSB;

	set_cfg |= ENABLE_PORT;

	writel_relaxed(set_cfg, PGD_PORT(PGD_PORT_CFGn, pipenum, dev->ver));
	writel_relaxed(DEF_BLKSZ, PGD_PORT(PGD_PORT_BLKn, pipenum, dev->ver));
	writel_relaxed(DEF_TRANSZ, PGD_PORT(PGD_PORT_TRANn, pipenum, dev->ver));

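	/*
	 * For push/pull protocols the PSHPLL register packs the parameters
	 * computed by msm_slim_calc_pshpull_parm(): bits [31:16] hold the
	 * number of samples moved per repeat period, bits [15:0] the
	 * repeat period itself.
	 */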
	if (chan->prot == SLIM_PUSH || chan->prot == SLIM_PULL) {
		set_cfg = 0;
		set_cfg |= ((0xFFFF & parm->num_samples) << 16);
		set_cfg |= (0xFFFF & parm->rpt_period);
		writel_relaxed(set_cfg, PGD_PORT(PGD_PORT_PSHPLLn,
				pipenum, dev->ver));
	}
	/* Make sure that port registers are updated before returning */
	mb();
}

static void msm_slim_disconn_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
	struct msm_slim_endp *endpoint = &dev->pipes[pn];
	struct sps_register_event sps_event;
	u32 int_port = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
					dev->ver));
	writel_relaxed(0, PGD_PORT(PGD_PORT_CFGn, (endpoint->port_b),
					dev->ver));
	writel_relaxed((int_port & ~(1 << endpoint->port_b)),
			PGD_THIS_EE(PGD_PORT_INT_EN_EEn, dev->ver));
	/* Make sure port register is updated */
	mb();
	memset(&sps_event, 0, sizeof(sps_event));
	sps_register_event(endpoint->sps, &sps_event);
	sps_disconnect(endpoint->sps);
	dev->pipes[pn].connected = false;
}

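/*
 * Worked example (assuming a superframe frequency of 4000/sec): for a
 * 44.1 kHz channel, round_off = DIV_ROUND_UP(44100, 4000) = 12 and
 * divisor = gcd(12 * 4000, 44100) = 300, which reduces the ratio
 * 44100/48000 to lowest terms: num_samples = 147 samples are moved
 * every rpt_period = 160 units.
 */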
static void msm_slim_calc_pshpull_parm(struct msm_slim_ctrl *dev,
					u8 pn, struct slim_ch *prop)
{
	struct msm_slim_endp *endpoint = &dev->pipes[pn];
	struct msm_slim_pshpull_parm *parm = &endpoint->psh_pull;
	int chan_freq, round_off, divisor, super_freq;

	super_freq = dev->ctrl.a_framer->superfreq;

	if (prop->baser == SLIM_RATE_4000HZ)
		chan_freq = 4000 * prop->ratem;
	else if (prop->baser == SLIM_RATE_11025HZ)
		chan_freq = 11025 * prop->ratem;
	else
		chan_freq = prop->baser * prop->ratem;

	/*
	 * If the channel frequency is a multiple of the super frame
	 * frequency, the ISO protocol is suggested.
	 */
	if (!(chan_freq % super_freq)) {
		prop->prot = SLIM_HARD_ISO;
		return;
	}
	round_off = DIV_ROUND_UP(chan_freq, super_freq);
	divisor = gcd(round_off * super_freq, chan_freq);
	parm->num_samples = chan_freq / divisor;
	parm->rpt_period = (round_off * super_freq) / divisor;
}

int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
	struct msm_slim_endp *endpoint;
	struct sps_connect *cfg;
	struct slim_ch *prop;
	u32 stat;
	int ret;

	if (!dev || pn >= dev->port_nums)
		return -ENODEV;
	endpoint = &dev->pipes[pn];
	cfg = &endpoint->config;
	prop = dev->ctrl.ports[pn].ch;

	ret = sps_get_config(dev->pipes[pn].sps, cfg);
	if (ret) {
		dev_err(dev->dev, "sps pipe-port get config error%x\n", ret);
		return ret;
	}
	cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	if (prop->prot == SLIM_PUSH || prop->prot == SLIM_PULL)
		msm_slim_calc_pshpull_parm(dev, pn, prop);

	if (dev->pipes[pn].connected &&
			dev->ctrl.ports[pn].state == SLIM_P_CFG) {
		return -EISCONN;
	} else if (dev->pipes[pn].connected) {
		writel_relaxed(0, PGD_PORT(PGD_PORT_CFGn,
				(endpoint->port_b), dev->ver));
		/* Make sure port disabling goes through */
		mb();
		/* Is the pipe already connected in the desired direction? */
		if ((dev->ctrl.ports[pn].flow == SLIM_SRC &&
			cfg->mode == SPS_MODE_DEST) ||
			(dev->ctrl.ports[pn].flow == SLIM_SINK &&
			 cfg->mode == SPS_MODE_SRC)) {
			msm_hw_set_port(dev, endpoint->port_b, pn);
			return 0;
		}
		msm_slim_disconn_pipe_port(dev, pn);
	}

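	/*
	 * Bits [11:4] of the port status register report the BAM pipe
	 * number backing this port; use it as the SPS pipe index.
	 */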
	stat = readl_relaxed(PGD_PORT(PGD_PORT_STATn, endpoint->port_b,
					dev->ver));
	if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
		cfg->destination = dev->bam.hdl;
		cfg->source = SPS_DEV_HANDLE_MEM;
		cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->src_pipe_index = 0;
		dev_dbg(dev->dev, "flow src:pipe num:%d",
					cfg->dest_pipe_index);
		cfg->mode = SPS_MODE_DEST;
	} else {
		cfg->source = dev->bam.hdl;
		cfg->destination = SPS_DEV_HANDLE_MEM;
		cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->dest_pipe_index = 0;
		dev_dbg(dev->dev, "flow dest:pipe num:%d",
					cfg->src_pipe_index);
		cfg->mode = SPS_MODE_SRC;
	}
	/* Space for descriptor FIFOs */
	ret = msm_slim_sps_mem_alloc(dev, &cfg->desc,
				MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
	if (ret)
		pr_err("mem alloc for descr failed:%d", ret);
	else
		ret = sps_connect(dev->pipes[pn].sps, cfg);

	if (!ret) {
		dev->pipes[pn].connected = true;
		msm_hw_set_port(dev, endpoint->port_b, pn);
	}
	return ret;
}

int msm_alloc_port(struct slim_controller *ctrl, u8 pn)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	struct msm_slim_endp *endpoint;
	int ret = 0;

	if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
		ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
		return -EPROTONOSUPPORT;
	if (pn >= dev->port_nums)
		return -ENODEV;

	ret = msm_slim_iommu_attach(dev);
	if (ret)
		return ret;

	endpoint = &dev->pipes[pn];
	ret = msm_slim_init_endpoint(dev, endpoint);
	dev_dbg(dev->dev, "init endpoint error code:%x\n", ret);
	return ret;
}

void msm_dealloc_port(struct slim_controller *ctrl, u8 pn)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	struct msm_slim_endp *endpoint;

	if (pn >= dev->port_nums)
		return;
	endpoint = &dev->pipes[pn];
	if (dev->pipes[pn].connected) {
		struct sps_connect *config = &endpoint->config;

		msm_slim_disconn_pipe_port(dev, pn);
		msm_slim_sps_mem_free(dev, &config->desc);
	}
	if (endpoint->sps)
		msm_slim_free_endpoint(endpoint);
}

enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
				u8 pn, phys_addr_t *done_buf, u32 *done_len)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
	struct sps_iovec sio;
	int ret;

	if (done_len)
		*done_len = 0;
	if (done_buf)
		*done_buf = 0;
	if (!dev->pipes[pn].connected)
		return SLIM_P_DISCONNECT;
	ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
	if (!ret) {
		if (done_len)
			*done_len = sio.size;
		if (done_buf)
			*done_buf = (phys_addr_t)sio.addr;
	}
	dev_dbg(dev->dev, "get iovec returned %d\n", ret);
	return SLIM_P_INPROGRESS;
}

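/*
 * When an IOMMU context-bank device is present, client buffers are
 * mapped through it so the BAM sees IOVAs from the mapping created in
 * msm_slim_iommu_attach(); otherwise the controller device is used.
 */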
static dma_addr_t msm_slim_iommu_map(struct msm_slim_ctrl *dev, void *buf_addr,
				     u32 len)
{
	dma_addr_t ret;
	struct device *devp = dev->iommu_desc.cb_dev ? dev->iommu_desc.cb_dev :
							dev->dev;

	ret = dma_map_single(devp, buf_addr, len, DMA_BIDIRECTIONAL);

	if (dma_mapping_error(devp, ret))
		return DMA_ERROR_CODE;

	return ret;
}

static void msm_slim_iommu_unmap(struct msm_slim_ctrl *dev, dma_addr_t buf_addr,
				 u32 len)
{
	struct device *devp = dev->iommu_desc.cb_dev ? dev->iommu_desc.cb_dev :
							dev->dev;

	dma_unmap_single(devp, buf_addr, len, DMA_BIDIRECTIONAL);
}

static void msm_slim_port_cb(struct sps_event_notify *ev)
{
	struct msm_slim_ctrl *dev = ev->user;
	struct completion *comp = ev->data.transfer.user;
	struct sps_iovec *iovec = &ev->data.transfer.iovec;

	if (ev->event_id == SPS_EVENT_DESC_DONE)
		pr_debug("desc done iovec = (0x%x 0x%x 0x%x)\n",
			iovec->addr, iovec->size, iovec->flags);
	else
		pr_err("%s: ERR event %d\n", __func__, ev->event_id);

	if (dev)
		msm_slim_iommu_unmap(dev, iovec->addr, iovec->size);
	if (comp)
		complete(comp);
}

int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, void *buf,
			u32 len, struct completion *comp)
{
	struct sps_register_event sreg;
	int ret;
	dma_addr_t dma_buf;
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);

	if (pn >= dev->port_nums)
		return -ENODEV;

	if (!dev->pipes[pn].connected)
		return -ENOTCONN;

	dma_buf = msm_slim_iommu_map(dev, buf, len);
	if (dma_buf == DMA_ERROR_CODE) {
		dev_err(dev->dev, "error DMA mapping buffers\n");
		return -ENOMEM;
	}

	sreg.options = (SPS_EVENT_DESC_DONE | SPS_EVENT_ERROR);
	sreg.mode = SPS_TRIGGER_WAIT;
	sreg.xfer_done = NULL;
	sreg.callback = msm_slim_port_cb;
	sreg.user = dev;
	ret = sps_register_event(dev->pipes[pn].sps, &sreg);
	if (ret) {
		dev_dbg(dev->dev, "sps register event error:%x\n", ret);
		msm_slim_iommu_unmap(dev, dma_buf, len);
		return ret;
	}
	ret = sps_transfer_one(dev->pipes[pn].sps, dma_buf, len, comp,
				SPS_IOVEC_FLAG_INT);
	dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);
	if (!ret) {
		/* Enable port interrupts */
		u32 int_port = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
						dev->ver));
		if (!(int_port & (1 << (dev->pipes[pn].port_b))))
			writel_relaxed((int_port |
				(1 << dev->pipes[pn].port_b)),
				PGD_THIS_EE(PGD_PORT_INT_EN_EEn, dev->ver));
		/* Make sure that port registers are updated before returning */
		mb();
	} else {
		msm_slim_iommu_unmap(dev, dma_buf, len);
	}

	return ret;
}

/* Queue up Tx message buffer */
static int msm_slim_post_tx_msgq(struct msm_slim_ctrl *dev, u8 *buf, int len)
{
	int ret;
	struct msm_slim_endp *endpoint = &dev->tx_msgq;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_pipe *pipe = endpoint->sps;
	int ix = (buf - (u8 *)mem->base);

	phys_addr_t phys_addr = mem->phys_base + ix;

	for (ret = 0; ret < ((len + 3) >> 2); ret++)
		pr_debug("BAM TX buf[%d]:0x%x", ret, ((u32 *)buf)[ret]);

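	/* Round the length up to a whole number of 32-bit words */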
	ret = sps_transfer_one(pipe, phys_addr, ((len + 3) & 0xFC), NULL,
				SPS_IOVEC_FLAG_EOT);
	if (ret)
		dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);

	return ret;
}

void msm_slim_tx_msg_return(struct msm_slim_ctrl *dev, int err)
{
	struct msm_slim_endp *endpoint = &dev->tx_msgq;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_pipe *pipe = endpoint->sps;
	struct sps_iovec iovec;
	int idx, ret = 0;
	phys_addr_t addr;

	if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED) {
		/* use 1 buffer, non-blocking writes are not possible */
		if (dev->wr_comp[0]) {
			struct completion *comp = dev->wr_comp[0];

			dev->wr_comp[0] = NULL;
			complete(comp);
		}
		return;
	}
	while (!ret) {
		memset(&iovec, 0, sizeof(iovec));
		ret = sps_get_iovec(pipe, &iovec);
		addr = DESC_FULL_ADDR(iovec.flags, iovec.addr);
		if (ret || addr == 0) {
			if (ret)
				pr_err("SLIM TX get IOVEC failed:%d", ret);
			return;
		}
		if (addr == dev->bulk.wr_dma) {
			dma_unmap_single(dev->dev, dev->bulk.wr_dma,
					 dev->bulk.size, DMA_TO_DEVICE);
			if (!dev->bulk.cb)
				SLIM_WARN(dev, "no callback for bulk WR?");
			else
				dev->bulk.cb(dev->bulk.ctx, err);
			dev->bulk.in_progress = false;
			pm_runtime_mark_last_busy(dev->dev);
			return;
		} else if (addr < mem->phys_base ||
			   (addr > (mem->phys_base +
				    (MSM_TX_BUFS * SLIM_MSGQ_BUF_LEN)))) {
			SLIM_WARN(dev, "BUF out of bounds:base:0x%pa, io:0x%pa",
					&mem->phys_base, &addr);
			continue;
		}
		idx = (int)((addr - mem->phys_base) / SLIM_MSGQ_BUF_LEN);
		if (dev->wr_comp[idx]) {
			struct completion *comp = dev->wr_comp[idx];

			dev->wr_comp[idx] = NULL;
			complete(comp);
		}
		if (err) {
			int i;
			u32 *buf32 = (u32 *)mem->base +
					(idx * (SLIM_MSGQ_BUF_LEN >> 2));

			/* print the descriptor that resulted in the error */
			for (i = 0; i < (SLIM_MSGQ_BUF_LEN >> 2); i++)
				SLIM_WARN(dev, "err desc[%d]:0x%x", i,
						buf32[i]);
		}
		/* reclaim all packets that were delivered out of order */
		if (idx != dev->tx_head)
			SLIM_WARN(dev, "SLIM OUT OF ORDER TX:idx:%d, head:%d",
					idx, dev->tx_head);
		dev->tx_head = (dev->tx_head + 1) % MSM_TX_BUFS;
	}
}

static u32 *msm_slim_modify_tx_buf(struct msm_slim_ctrl *dev,
					struct completion *comp)
{
	struct msm_slim_endp *endpoint = &dev->tx_msgq;
	struct sps_mem_buffer *mem = &endpoint->buf;
	u32 *retbuf = NULL;

	if ((dev->tx_tail + 1) % MSM_TX_BUFS == dev->tx_head)
		return NULL;

	retbuf = (u32 *)((u8 *)mem->base +
				(dev->tx_tail * SLIM_MSGQ_BUF_LEN));
	dev->wr_comp[dev->tx_tail] = comp;
	dev->tx_tail = (dev->tx_tail + 1) % MSM_TX_BUFS;
	return retbuf;
}
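
/*
 * Get a free TX message-queue buffer, returning the registered
 * completion for each buffer the hardware has consumed in the
 * meantime. If the ring is full, keep reclaiming and retrying for
 * roughly two superframes before giving up.
 */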
u32 *msm_slim_manage_tx_msgq(struct msm_slim_ctrl *dev, bool getbuf,
				struct completion *comp, int err)
{
	int ret = 0;
	int retries = 0;
	u32 *retbuf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->tx_buf_lock, flags);
	if (!getbuf) {
		msm_slim_tx_msg_return(dev, err);
		spin_unlock_irqrestore(&dev->tx_buf_lock, flags);
		return NULL;
	}

	retbuf = msm_slim_modify_tx_buf(dev, comp);
	if (retbuf) {
		spin_unlock_irqrestore(&dev->tx_buf_lock, flags);
		return retbuf;
	}

	do {
		msm_slim_tx_msg_return(dev, err);
		retbuf = msm_slim_modify_tx_buf(dev, comp);
		if (!retbuf)
			ret = -EAGAIN;
		else {
			if (retries > 0)
				SLIM_INFO(dev, "SLIM TX retrieved:%d retries",
							retries);
			spin_unlock_irqrestore(&dev->tx_buf_lock, flags);
			return retbuf;
		}

		/*
		 * The superframe size varies with the clock gear, and
		 * 1 superframe will consume at least 1 message if the
		 * HW is in good condition. With INIT_MX_RETRIES, make
		 * sure we wait for ~2 superframes before deciding the
		 * HW couldn't process the descriptors.
		 */
		udelay(50);
		retries++;
	} while (ret && (retries < INIT_MX_RETRIES));

	spin_unlock_irqrestore(&dev->tx_buf_lock, flags);
	return NULL;
}

int msm_send_msg_buf(struct msm_slim_ctrl *dev, u32 *buf, u8 len, u32 tx_reg)
{
	if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED) {
		int i;

		for (i = 0; i < (len + 3) >> 2; i++) {
			dev_dbg(dev->dev, "AHB TX data:0x%x\n", buf[i]);
			writel_relaxed(buf[i], dev->base + tx_reg + (i * 4));
		}
		/* Guarantee that message is sent before returning */
		mb();
		return 0;
	}
	return msm_slim_post_tx_msgq(dev, (u8 *)buf, len);
}

u32 *msm_get_msg_buf(struct msm_slim_ctrl *dev, int len,
			struct completion *comp)
{
	/*
	 * Currently we block a transaction until the current one completes.
	 * In case we need multiple transactions, use message Q
	 */
	if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED) {
		dev->wr_comp[0] = comp;
		return dev->tx_buf;
	}

	return msm_slim_manage_tx_msgq(dev, true, comp, 0);
}

static void
msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
{
	if (ev->event_id == SPS_EVENT_DESC_DONE)
		complete(&dev->rx_msgq_notify);
	else
		dev_err(dev->dev, "%s: unknown event %d\n",
					__func__, ev->event_id);
}

static void
msm_slim_handle_rx(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
{
	int ret = 0;
	u32 mc = 0;
	u32 mt = 0;
	u8 msg_len = 0;

	if (ev->event_id != SPS_EVENT_EOT) {
		dev_err(dev->dev, "%s: unknown event %d\n",
					__func__, ev->event_id);
		return;
	}

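	/*
	 * RX descriptors deliver the message 4 bytes at a time. The first
	 * word carries the header: bits [4:0] are the message length in
	 * bytes, bits [7:5] the message type (MT) and bits [15:8] the
	 * message code (MC). Drain words until the whole message has been
	 * assembled, then hand it to the controller's rx_slim() handler.
	 */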
	do {
		ret = msm_slim_rx_msgq_get(dev, dev->current_rx_buf,
					   dev->current_count);
		if (ret == -ENODATA) {
			return;
		} else if (ret) {
			SLIM_ERR(dev, "rx_msgq_get() failed 0x%x\n", ret);
			return;
		}

		/* Traverse first byte of message for message length */
		if (dev->current_count++ == 0) {
			msg_len = *(dev->current_rx_buf) & 0x1F;
			mt = (*(dev->current_rx_buf) >> 5) & 0x7;
			mc = (*(dev->current_rx_buf) >> 8) & 0xff;
			dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
		}

		msg_len = (msg_len < 4) ? 0 : (msg_len - 4);

		if (!msg_len) {
			dev->rx_slim(dev, (u8 *)dev->current_rx_buf);
			dev->current_count = 0;
		}

	} while (1);
}

static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
{
	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;

	/* is this the manager controller or the NGD controller? */
	if (dev->ctrl.wakeup)
		msm_slim_rx_msgq_event(dev, notify);
	else
		msm_slim_handle_rx(dev, notify);
}

/* Queue up Rx message buffer */
static int msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
{
	int ret;
	struct msm_slim_endp *endpoint = &dev->rx_msgq;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_pipe *pipe = endpoint->sps;

	/* Rx message queue buffers are 4 bytes in length */
	u8 *virt_addr = mem->base + (4 * ix);
	phys_addr_t phys_addr = mem->phys_base + (4 * ix);

	ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, 0);
	if (ret)
		dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);

	return ret;
}

int msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
{
	struct msm_slim_endp *endpoint = &dev->rx_msgq;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_pipe *pipe = endpoint->sps;
	struct sps_iovec iovec;
	phys_addr_t addr;
	int index;
	int ret;

	ret = sps_get_iovec(pipe, &iovec);
	if (ret) {
		dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
		goto err_exit;
	}

	addr = DESC_FULL_ADDR(iovec.flags, iovec.addr);
	pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
			iovec.addr, iovec.size, iovec.flags);

	/* no more descriptors */
	if (!ret && (iovec.addr == 0) && (iovec.size == 0)) {
		ret = -ENODATA;
		goto err_exit;
	}

	/* Calculate buffer index */
	index = (addr - mem->phys_base) / 4;
	*(data + offset) = *((u32 *)mem->base + index);

	pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index, *data);

	/* Add buffer back to the queue */
	(void)msm_slim_post_rx_msgq(dev, index);

err_exit:
	return ret;
}

int msm_slim_connect_endp(struct msm_slim_ctrl *dev,
				struct msm_slim_endp *endpoint)
{
	int i, ret;
	struct sps_register_event sps_error_event; /* SPS_ERROR */
	struct sps_register_event sps_descr_event; /* DESCR_DONE */
	struct sps_connect *config = &endpoint->config;
	unsigned long flags;

	ret = sps_connect(endpoint->sps, config);
	if (ret) {
		dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
		return ret;
	}

	memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));

	if (endpoint == &dev->rx_msgq) {
		sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
		sps_descr_event.options = SPS_O_EOT;
		sps_descr_event.user = (void *)dev;
		sps_descr_event.callback = msm_slim_rx_msgq_cb;
		sps_descr_event.xfer_done = NULL;

		ret = sps_register_event(endpoint->sps, &sps_descr_event);
		if (ret) {
			dev_err(dev->dev, "sps_register_event() failed 0x%x\n",
				ret);
			goto sps_reg_event_failed;
		}
	}

	/* Register callback for errors */
	memset(&sps_error_event, 0x00, sizeof(sps_error_event));
	sps_error_event.mode = SPS_TRIGGER_CALLBACK;
	sps_error_event.options = SPS_O_ERROR;
	sps_error_event.user = (void *)dev;
	sps_error_event.callback = msm_slim_rx_msgq_cb;

	ret = sps_register_event(endpoint->sps, &sps_error_event);
	if (ret) {
		dev_err(dev->dev, "sps_register_event() failed 0x%x\n", ret);
		goto sps_reg_event_failed;
	}

	/*
	 * Call transfer_one for each 4-byte buffer.
	 * Use (buf->size / 4) - 1 for the number of buffers to post.
	 */

	if (endpoint == &dev->rx_msgq) {
		/* Setup the transfer */
		for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
			ret = msm_slim_post_rx_msgq(dev, i);
			if (ret) {
				dev_err(dev->dev,
					"post_rx_msgq() failed 0x%x\n", ret);
				goto sps_transfer_failed;
			}
		}
		dev->use_rx_msgqs = MSM_MSGQ_ENABLED;
	} else {
		spin_lock_irqsave(&dev->tx_buf_lock, flags);
		dev->tx_tail = 0;
		dev->tx_head = 0;
		for (i = 0; i < MSM_TX_BUFS; i++)
			dev->wr_comp[i] = NULL;
		spin_unlock_irqrestore(&dev->tx_buf_lock, flags);
		dev->use_tx_msgqs = MSM_MSGQ_ENABLED;
	}

	return 0;
sps_transfer_failed:
	memset(&sps_error_event, 0x00, sizeof(sps_error_event));
	sps_register_event(endpoint->sps, &sps_error_event);
sps_reg_event_failed:
	sps_disconnect(endpoint->sps);
	return ret;
}

static int msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev, u32 pipe_reg)
{
	int ret;
	u32 pipe_offset;
	struct msm_slim_endp *endpoint = &dev->rx_msgq;
	struct sps_connect *config = &endpoint->config;
	struct sps_mem_buffer *descr = &config->desc;
	struct sps_mem_buffer *mem = &endpoint->buf;

	if (dev->use_rx_msgqs == MSM_MSGQ_DISABLED)
		return 0;

	/* Allocate the endpoint */
	ret = msm_slim_init_endpoint(dev, endpoint);
	if (ret) {
		dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
		goto sps_init_endpoint_failed;
	}

	/* Get the pipe indices for the message queues */
	pipe_offset = (readl_relaxed(dev->base + pipe_reg) & P_OFF_MASK) >> 2;
	dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);

	config->mode = SPS_MODE_SRC;
	config->source = dev->bam.hdl;
	config->destination = SPS_DEV_HANDLE_MEM;
	config->src_pipe_index = pipe_offset;
	config->options = SPS_O_EOT | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* Allocate memory for the FIFO descriptors */
	ret = msm_slim_sps_mem_alloc(dev, descr,
				MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
	if (ret) {
		dev_err(dev->dev, "unable to allocate SPS descriptors\n");
		goto alloc_descr_failed;
	}

	/* Allocate memory for the message buffer(s), N descrs, 4-byte mesg */
	ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
	if (ret) {
		dev_err(dev->dev, "dma_alloc_coherent failed\n");
		goto alloc_buffer_failed;
	}

	ret = msm_slim_connect_endp(dev, endpoint);

	if (!ret)
		return 0;

	msm_slim_sps_mem_free(dev, mem);
alloc_buffer_failed:
	msm_slim_sps_mem_free(dev, descr);
alloc_descr_failed:
	msm_slim_free_endpoint(endpoint);
sps_init_endpoint_failed:
	dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
	return ret;
}

static int msm_slim_init_tx_msgq(struct msm_slim_ctrl *dev, u32 pipe_reg)
{
	int ret;
	u32 pipe_offset;
	struct msm_slim_endp *endpoint = &dev->tx_msgq;
	struct sps_connect *config = &endpoint->config;
	struct sps_mem_buffer *descr = &config->desc;
	struct sps_mem_buffer *mem = &endpoint->buf;

	if (dev->use_tx_msgqs == MSM_MSGQ_DISABLED)
		return 0;

	/* Allocate the endpoint */
	ret = msm_slim_init_endpoint(dev, endpoint);
	if (ret) {
		dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
		goto sps_init_endpoint_failed;
	}

	/* Get the pipe indices for the message queues */
	pipe_offset = (readl_relaxed(dev->base + pipe_reg) & P_OFF_MASK) >> 2;
	pipe_offset += 1;
	dev_dbg(dev->dev, "TX Message queue pipe offset %d\n", pipe_offset);

	config->mode = SPS_MODE_DEST;
	config->source = SPS_DEV_HANDLE_MEM;
	config->destination = dev->bam.hdl;
	config->dest_pipe_index = pipe_offset;
	config->src_pipe_index = 0;
	config->options = SPS_O_ERROR | SPS_O_NO_Q |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* Desc and TX buf are circular queues */
	/* Allocate memory for the FIFO descriptors */
	ret = msm_slim_sps_mem_alloc(dev, descr,
				(MSM_TX_BUFS + 1) * sizeof(struct sps_iovec));
	if (ret) {
		dev_err(dev->dev, "unable to allocate SPS descriptors\n");
		goto alloc_descr_failed;
	}

	/* Allocate TX buffer from which descriptors are created */
	ret = msm_slim_sps_mem_alloc(dev, mem, ((MSM_TX_BUFS + 1) *
				SLIM_MSGQ_BUF_LEN));
	if (ret) {
		dev_err(dev->dev, "dma_alloc_coherent failed\n");
		goto alloc_buffer_failed;
	}
	ret = msm_slim_connect_endp(dev, endpoint);

	if (!ret)
		return 0;

	msm_slim_sps_mem_free(dev, mem);
alloc_buffer_failed:
	msm_slim_sps_mem_free(dev, descr);
alloc_descr_failed:
	msm_slim_free_endpoint(endpoint);
sps_init_endpoint_failed:
	dev->use_tx_msgqs = MSM_MSGQ_DISABLED;
	return ret;
}

static int msm_slim_data_port_assign(struct msm_slim_ctrl *dev)
{
	int i, data_ports = 0;

	/* The first 7 pipes (mask bits) are reserved for the message Qs */
	for (i = 7; i < 32; i++) {
		/* Check what pipes are owned by Apps. */
		if ((dev->pdata.apps_pipes >> i) & 0x1) {
			if (dev->pipes)
				dev->pipes[data_ports].port_b = i - 7;
			data_ports++;
		}
	}
	return data_ports;
}

/* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */
int msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem,
			u32 pipe_reg, bool remote)
{
	int ret;
	unsigned long bam_handle;
	struct sps_bam_props bam_props = {0};

	static struct sps_bam_sec_config_props sec_props = {
		.ees = {
			[0] = {		/* LPASS */
				.vmid = 0,
				.pipe_mask = 0xFFFF98,
			},
			[1] = {		/* Krait Apps */
				.vmid = 1,
				.pipe_mask = 0x3F000007,
			},
			[2] = {		/* Modem */
				.vmid = 2,
				.pipe_mask = 0x00000060,
			},
		},
	};

	if (dev->bam.hdl) {
		bam_handle = dev->bam.hdl;
		goto init_pipes;
	}
	bam_props.ee = dev->ee;
	bam_props.virt_addr = dev->bam.base;
	bam_props.phys_addr = bam_mem->start;
	bam_props.irq = dev->bam.irq;
	if (!remote) {
		bam_props.manage = SPS_BAM_MGR_LOCAL;
		bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
	} else {
		bam_props.manage = SPS_BAM_MGR_DEVICE_REMOTE |
					SPS_BAM_MGR_MULTI_EE;
		bam_props.sec_config = SPS_BAM_SEC_DO_NOT_CONFIG;
	}
	bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;

	bam_props.p_sec_config_props = &sec_props;

	bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* override apps channel pipes if specified in platform-data or DT */
	if (dev->pdata.apps_pipes)
		sec_props.ees[dev->ee].pipe_mask = dev->pdata.apps_pipes;

	/* Register the BAM device with the SPS driver */
	ret = sps_register_bam_device(&bam_props, &bam_handle);
	if (ret) {
		dev_err(dev->dev, "disabling BAM: reg-bam failed 0x%x\n", ret);
		dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
		dev->use_tx_msgqs = MSM_MSGQ_DISABLED;
		return ret;
	}
	dev->bam.hdl = bam_handle;
	dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%lx\n", bam_handle);

init_pipes:
	if (dev->port_nums)
		goto init_msgq;

	/* get the # of ports first */
	dev->port_nums = msm_slim_data_port_assign(dev);
	if (dev->port_nums && !dev->pipes) {
		dev->pipes = kcalloc(dev->port_nums,
					sizeof(struct msm_slim_endp),
					GFP_KERNEL);
		if (!dev->pipes) {
			dev_err(dev->dev, "no memory for data ports");
			sps_deregister_bam_device(bam_handle);
			return -ENOMEM;
		}
		/* assign the ports now */
		msm_slim_data_port_assign(dev);
	}

init_msgq:
	ret = msm_slim_iommu_attach(dev);
	if (ret) {
		sps_deregister_bam_device(bam_handle);
		return ret;
	}

	ret = msm_slim_init_rx_msgq(dev, pipe_reg);
	if (ret)
		dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
	if (ret && bam_handle)
		dev->use_rx_msgqs = MSM_MSGQ_DISABLED;

	ret = msm_slim_init_tx_msgq(dev, pipe_reg);
	if (ret)
		dev_err(dev->dev, "msm_slim_init_tx_msgq failed 0x%x\n", ret);
	if (ret && bam_handle)
		dev->use_tx_msgqs = MSM_MSGQ_DISABLED;

	/*
	 * If the command interface over BAM fails, the register interface
	 * is used for commands instead.
	 * It is possible that other BAM usecases (e.g. apps channels) will
	 * still need BAM. Since BAM is successfully initialized, we can
	 * continue using it for non-command use cases.
	 */

	return 0;
}

void msm_slim_disconnect_endp(struct msm_slim_ctrl *dev,
				struct msm_slim_endp *endpoint,
				enum msm_slim_msgq *msgq_flag)
{
	if (*msgq_flag >= MSM_MSGQ_ENABLED) {
		sps_disconnect(endpoint->sps);
		*msgq_flag = MSM_MSGQ_RESET;
	}
}

static int msm_slim_discard_rx_data(struct msm_slim_ctrl *dev,
				    struct msm_slim_endp *endpoint)
{
	struct sps_iovec sio;
	int desc_num = 0, ret = 0;

	ret = sps_get_unused_desc_num(endpoint->sps, &desc_num);
	if (ret) {
		dev_err(dev->dev, "sps_get_unused_desc_num() failed 0x%x\n",
			ret);
		return ret;
	}
	while (desc_num--)
		sps_get_iovec(endpoint->sps, &sio);
	return ret;
}

static void msm_slim_remove_ep(struct msm_slim_ctrl *dev,
				struct msm_slim_endp *endpoint,
				enum msm_slim_msgq *msgq_flag)
{
	struct sps_connect *config = &endpoint->config;
	struct sps_mem_buffer *descr = &config->desc;
	struct sps_mem_buffer *mem = &endpoint->buf;

	msm_slim_sps_mem_free(dev, mem);
	msm_slim_sps_mem_free(dev, descr);
	msm_slim_free_endpoint(endpoint);
}

void msm_slim_deinit_ep(struct msm_slim_ctrl *dev,
			struct msm_slim_endp *endpoint,
			enum msm_slim_msgq *msgq_flag)
{
	int ret = 0;
	struct sps_connect *config = &endpoint->config;

	if (*msgq_flag == MSM_MSGQ_ENABLED) {
		if (config->mode == SPS_MODE_SRC) {
			ret = msm_slim_discard_rx_data(dev, endpoint);
			if (ret)
				SLIM_WARN(dev, "discarding Rx data failed\n");
		}
		msm_slim_disconnect_endp(dev, endpoint, msgq_flag);
		msm_slim_remove_ep(dev, endpoint, msgq_flag);
	}
}

static void msm_slim_sps_unreg_event(struct sps_pipe *sps)
{
	struct sps_register_event sps_event;

	memset(&sps_event, 0x00, sizeof(sps_event));
	/* Disable interrupt and signal notification for Rx/Tx pipe */
	sps_register_event(sps, &sps_event);
}

void msm_slim_sps_exit(struct msm_slim_ctrl *dev, bool dereg)
{
	int i;

	if (dev->use_rx_msgqs >= MSM_MSGQ_ENABLED)
		msm_slim_sps_unreg_event(dev->rx_msgq.sps);
	if (dev->use_tx_msgqs >= MSM_MSGQ_ENABLED)
		msm_slim_sps_unreg_event(dev->tx_msgq.sps);

	for (i = 0; i < dev->port_nums; i++) {
		if (dev->pipes[i].connected)
			msm_slim_disconn_pipe_port(dev, i);
	}

	if (dereg) {
		for (i = 0; i < dev->port_nums; i++) {
			if (dev->pipes[i].connected)
				msm_dealloc_port(&dev->ctrl, i);
		}
		sps_deregister_bam_device(dev->bam.hdl);
		dev->bam.hdl = 0L;
		kfree(dev->pipes);
		dev->pipes = NULL;
	}
	dev->port_nums = 0;
}

/* Slimbus QMI Messaging */
#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01 0x0020
#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_V01 0x0020
#define SLIMBUS_QMI_POWER_REQ_V01 0x0021
#define SLIMBUS_QMI_POWER_RESP_V01 0x0021
#define SLIMBUS_QMI_CHECK_FRAMER_STATUS_REQ 0x0022
#define SLIMBUS_QMI_CHECK_FRAMER_STATUS_RESP 0x0022
#define SLIMBUS_QMI_DEFERRED_STATUS_REQ 0x0023
#define SLIMBUS_QMI_DEFERRED_STATUS_RESP 0x0023

#define SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN 14
#define SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN 7
#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN 14
#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN 7
#define SLIMBUS_QMI_CHECK_FRAMER_STAT_RESP_MAX_MSG_LEN 7
#define SLIMBUS_QMI_DEFERRED_STATUS_REQ_MSG_MAX_MSG_LEN 0
#define SLIMBUS_QMI_DEFERRED_STATUS_RESP_STAT_MSG_MAX_MSG_LEN 7

enum slimbus_mode_enum_type_v01 {
	/* To force a 32 bit signed enum. Do not change or use */
	SLIMBUS_MODE_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
	SLIMBUS_MODE_SATELLITE_V01 = 1,
	SLIMBUS_MODE_MASTER_V01 = 2,
	SLIMBUS_MODE_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
};

enum slimbus_pm_enum_type_v01 {
	/* To force a 32 bit signed enum. Do not change or use */
	SLIMBUS_PM_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
	SLIMBUS_PM_INACTIVE_V01 = 1,
	SLIMBUS_PM_ACTIVE_V01 = 2,
	SLIMBUS_PM_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
};

enum slimbus_resp_enum_type_v01 {
	SLIMBUS_RESP_ENUM_TYPE_MIN_VAL_V01 = INT_MIN,
	SLIMBUS_RESP_SYNCHRONOUS_V01 = 1,
	SLIMBUS_RESP_DEFERRED_V01 = 2,
	SLIMBUS_RESP_ENUM_TYPE_MAX_VAL_V01 = INT_MAX,
};

struct slimbus_select_inst_req_msg_v01 {
	/* Mandatory */
	/* Hardware Instance Selection */
	uint32_t instance;

	/* Optional */
	/* Optional Mode Request Operation */
	/* Must be set to true if mode is being passed */
	uint8_t mode_valid;
	enum slimbus_mode_enum_type_v01 mode;
};

struct slimbus_select_inst_resp_msg_v01 {
	/* Mandatory */
	/* Result Code */
	struct qmi_response_type_v01 resp;
};

struct slimbus_power_req_msg_v01 {
	/* Mandatory */
	/* Power Request Operation */
	enum slimbus_pm_enum_type_v01 pm_req;

	/* Optional */
	/* Optional Deferred Response type Operation */
	/* Must be set to true if type is being passed */
	uint8_t resp_type_valid;
	enum slimbus_resp_enum_type_v01 resp_type;
};

struct slimbus_power_resp_msg_v01 {
	/* Mandatory */
	/* Result Code */
	struct qmi_response_type_v01 resp;
};

struct slimbus_chkfrm_resp_msg {
	/* Mandatory */
	/* Result Code */
	struct qmi_response_type_v01 resp;
};

struct slimbus_deferred_status_resp {
	struct qmi_response_type_v01 resp;
};

static struct elem_info slimbus_select_inst_req_msg_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(uint32_t),
		.is_array = NO_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct slimbus_select_inst_req_msg_v01,
				   instance),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(uint8_t),
		.is_array = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct slimbus_select_inst_req_msg_v01,
				   mode_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(enum slimbus_mode_enum_type_v01),
		.is_array = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct slimbus_select_inst_req_msg_v01,
				   mode),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.is_array = NO_ARRAY,
		.tlv_type = 0x00,
		.offset = 0,
		.ei_array = NULL,
	},
};

static struct elem_info slimbus_select_inst_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.is_array = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct slimbus_select_inst_resp_msg_v01,
				   resp),
		.ei_array = get_qmi_response_type_v01_ei(),
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.is_array = NO_ARRAY,
		.tlv_type = 0x00,
		.offset = 0,
		.ei_array = NULL,
	},
};

static struct elem_info slimbus_power_req_msg_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(enum slimbus_pm_enum_type_v01),
		.is_array = NO_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct slimbus_power_req_msg_v01, pm_req),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(uint8_t),
		.is_array = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct slimbus_power_req_msg_v01,
				   resp_type_valid),
	},
	{
		.data_type = QMI_SIGNED_4_BYTE_ENUM,
		.elem_len = 1,
		.elem_size = sizeof(enum slimbus_resp_enum_type_v01),
		.is_array = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct slimbus_power_req_msg_v01,
				   resp_type),
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.is_array = NO_ARRAY,
		.tlv_type = 0x00,
		.offset = 0,
		.ei_array = NULL,
	},
};

static struct elem_info slimbus_power_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.is_array = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct slimbus_power_resp_msg_v01, resp),
		.ei_array = get_qmi_response_type_v01_ei(),
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.is_array = NO_ARRAY,
		.tlv_type = 0x00,
		.offset = 0,
		.ei_array = NULL,
	},
};

static struct elem_info slimbus_chkfrm_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.is_array = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct slimbus_chkfrm_resp_msg, resp),
		.ei_array = get_qmi_response_type_v01_ei(),
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.is_array = NO_ARRAY,
		.tlv_type = 0x00,
		.offset = 0,
		.ei_array = NULL,
	},
};

static struct elem_info slimbus_deferred_status_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.is_array = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct slimbus_deferred_status_resp,
				   resp),
		.ei_array = get_qmi_response_type_v01_ei(),
	},
	{
		.data_type = QMI_EOTI,
		.is_array = NO_ARRAY,
	},
};

static void msm_slim_qmi_recv_msg(struct kthread_work *work)
{
	int rc;
	struct msm_slim_qmi *qmi =
		container_of(work, struct msm_slim_qmi, kwork);

	/* Drain all packets received */
	do {
		rc = qmi_recv_msg(qmi->handle);
	} while (rc == 0);
	if (rc != -ENOMSG)
		pr_err("%s: Error receiving QMI message:%d\n", __func__, rc);
}

static void msm_slim_qmi_notify(struct qmi_handle *handle,
				enum qmi_event_type event, void *notify_priv)
{
	struct msm_slim_ctrl *dev = notify_priv;
	struct msm_slim_qmi *qmi = &dev->qmi;

	switch (event) {
	case QMI_RECV_MSG:
		kthread_queue_work(&qmi->kworker, &qmi->kwork);
		break;
	default:
		break;
	}
}

static const char *get_qmi_error(struct qmi_response_type_v01 *r)
{
	if (r->result == QMI_RESULT_SUCCESS_V01 || r->error == QMI_ERR_NONE_V01)
		return "No Error";
	else if (r->error == QMI_ERR_NO_MEMORY_V01)
		return "Out of Memory";
	else if (r->error == QMI_ERR_INTERNAL_V01)
		return "Unexpected error occurred";
	else if (r->error == QMI_ERR_INCOMPATIBLE_STATE_V01)
		return "Slimbus s/w already configured to a different mode";
	else if (r->error == QMI_ERR_INVALID_ID_V01)
		return "Slimbus hardware instance is not valid";
	else
		return "Unknown error";
}

static int msm_slim_qmi_send_select_inst_req(struct msm_slim_ctrl *dev,
				struct slimbus_select_inst_req_msg_v01 *req)
{
	struct slimbus_select_inst_resp_msg_v01 resp = { { 0, 0 } };
	struct msg_desc req_desc, resp_desc;
	int rc;

	req_desc.msg_id = SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01;
	req_desc.max_msg_len = SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN;
	req_desc.ei_array = slimbus_select_inst_req_msg_v01_ei;

	resp_desc.msg_id = SLIMBUS_QMI_SELECT_INSTANCE_RESP_V01;
	resp_desc.max_msg_len = SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN;
	resp_desc.ei_array = slimbus_select_inst_resp_msg_v01_ei;

	rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req, sizeof(*req),
			&resp_desc, &resp, sizeof(resp), SLIM_QMI_RESP_TOUT);
	if (rc < 0) {
		SLIM_ERR(dev, "%s: QMI send req failed %d\n", __func__, rc);
		return rc;
	}

	/* Check the response */
	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		SLIM_ERR(dev, "%s: QMI request failed 0x%x (%s)\n", __func__,
				resp.resp.result, get_qmi_error(&resp.resp));
		return -EREMOTEIO;
	}

	return 0;
}

static void slim_qmi_resp_cb(struct qmi_handle *handle, unsigned int msg_id,
				void *msg, void *resp_cb_data, int stat)
{
	struct slimbus_power_resp_msg_v01 *resp = msg;
	struct msm_slim_ctrl *dev = resp_cb_data;

	if (msg_id != SLIMBUS_QMI_POWER_RESP_V01)
		SLIM_WARN(dev, "incorrect msg id in qmi-resp CB:0x%x", msg_id);
	else if (resp->resp.result != QMI_RESULT_SUCCESS_V01)
		SLIM_ERR(dev, "%s: QMI power failed 0x%x (%s)\n", __func__,
				resp->resp.result, get_qmi_error(&resp->resp));

	complete(&dev->qmi.defer_comp);
}

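/*
 * Power requests may be answered synchronously or deferred by the remote
 * side. In the deferred case the request is sent with
 * qmi_send_req_nowait() and the result arrives later through
 * slim_qmi_resp_cb(), which completes qmi.defer_comp;
 * msm_slim_qmi_deferred_status_req() then waits on that completion.
 */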
static int msm_slim_qmi_send_power_request(struct msm_slim_ctrl *dev,
				struct slimbus_power_req_msg_v01 *req)
{
	struct slimbus_power_resp_msg_v01 *resp =
		(struct slimbus_power_resp_msg_v01 *)&dev->qmi.resp;
	struct msg_desc req_desc;
	struct msg_desc *resp_desc = &dev->qmi.resp_desc;
	int rc;

	req_desc.msg_id = SLIMBUS_QMI_POWER_REQ_V01;
	req_desc.max_msg_len = SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN;
	req_desc.ei_array = slimbus_power_req_msg_v01_ei;

	resp_desc->msg_id = SLIMBUS_QMI_POWER_RESP_V01;
	resp_desc->max_msg_len = SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN;
	resp_desc->ei_array = slimbus_power_resp_msg_v01_ei;

	if (dev->qmi.deferred_resp)
		rc = qmi_send_req_nowait(dev->qmi.handle, &req_desc, req,
					sizeof(*req), resp_desc, resp,
					sizeof(*resp), slim_qmi_resp_cb, dev);
	else
		rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req,
					sizeof(*req), resp_desc, resp,
					sizeof(*resp), SLIM_QMI_RESP_TOUT);
	if (rc < 0)
		SLIM_ERR(dev, "%s: QMI send req failed %d\n", __func__, rc);

	if (rc < 0 || dev->qmi.deferred_resp)
		return rc;

	/* Check the response */
	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		SLIM_ERR(dev, "%s: QMI request failed 0x%x (%s)\n", __func__,
				resp->resp.result, get_qmi_error(&resp->resp));
		return -EREMOTEIO;
	}

	return 0;
}

int msm_slim_qmi_init(struct msm_slim_ctrl *dev, bool apps_is_master)
{
	int rc = 0;
	struct qmi_handle *handle;
	struct slimbus_select_inst_req_msg_v01 req;

	if (dev->qmi.handle || dev->qmi.task) {
		pr_err("%s: Destroying stale QMI client handle\n", __func__);
		msm_slim_qmi_exit(dev);
	}

	kthread_init_worker(&dev->qmi.kworker);
	init_completion(&dev->qmi.defer_comp);

	dev->qmi.task = kthread_run(kthread_worker_fn,
			&dev->qmi.kworker, "msm_slim_qmi_clnt%d", dev->ctrl.nr);

	if (IS_ERR(dev->qmi.task)) {
		pr_err("%s: Failed to create QMI client kthread\n", __func__);
		dev->qmi.task = NULL;
		return -ENOMEM;
	}

	kthread_init_work(&dev->qmi.kwork, msm_slim_qmi_recv_msg);

	handle = qmi_handle_create(msm_slim_qmi_notify, dev);
	if (!handle) {
		rc = -ENOMEM;
		pr_err("%s: QMI client handle alloc failed\n", __func__);
		goto qmi_handle_create_failed;
	}

	rc = qmi_connect_to_service(handle, SLIMBUS_QMI_SVC_ID,
						SLIMBUS_QMI_SVC_V1,
						SLIMBUS_QMI_INS_ID);
	if (rc < 0) {
		SLIM_ERR(dev, "%s: QMI server not found\n", __func__);
		goto qmi_connect_to_service_failed;
	}

	/* Instance is 0 based */
	req.instance = (dev->ctrl.nr >> 1);
	req.mode_valid = 1;

	/* Mode indicates the role of the ADSP */
	if (apps_is_master)
		req.mode = SLIMBUS_MODE_SATELLITE_V01;
	else
		req.mode = SLIMBUS_MODE_MASTER_V01;

	dev->qmi.handle = handle;

	rc = msm_slim_qmi_send_select_inst_req(dev, &req);
	if (rc) {
		pr_err("%s: failed to select h/w instance\n", __func__);
		goto qmi_select_instance_failed;
	}

	return 0;

qmi_select_instance_failed:
	dev->qmi.handle = NULL;
qmi_connect_to_service_failed:
	qmi_handle_destroy(handle);
qmi_handle_create_failed:
	kthread_flush_worker(&dev->qmi.kworker);
	kthread_stop(dev->qmi.task);
	dev->qmi.task = NULL;
	return rc;
}

void msm_slim_qmi_exit(struct msm_slim_ctrl *dev)
{
	if (!dev->qmi.handle || !dev->qmi.task)
		return;
	qmi_handle_destroy(dev->qmi.handle);
	kthread_flush_worker(&dev->qmi.kworker);
	kthread_stop(dev->qmi.task);
	dev->qmi.task = NULL;
	dev->qmi.handle = NULL;
}

int msm_slim_qmi_power_request(struct msm_slim_ctrl *dev, bool active)
{
	struct slimbus_power_req_msg_v01 req;

	if (active)
		req.pm_req = SLIMBUS_PM_ACTIVE_V01;
	else
		req.pm_req = SLIMBUS_PM_INACTIVE_V01;

	if (dev->qmi.deferred_resp) {
		req.resp_type = SLIMBUS_RESP_DEFERRED_V01;
		req.resp_type_valid = 1;
	} else {
		req.resp_type_valid = 0;
	}

	return msm_slim_qmi_send_power_request(dev, &req);
}

int msm_slim_qmi_check_framer_request(struct msm_slim_ctrl *dev)
{
	struct slimbus_chkfrm_resp_msg resp = { { 0, 0 } };
	struct msg_desc req_desc, resp_desc;
	int rc;

	req_desc.msg_id = SLIMBUS_QMI_CHECK_FRAMER_STATUS_REQ;
	req_desc.max_msg_len = 0;
	req_desc.ei_array = NULL;

	resp_desc.msg_id = SLIMBUS_QMI_CHECK_FRAMER_STATUS_RESP;
	resp_desc.max_msg_len = SLIMBUS_QMI_CHECK_FRAMER_STAT_RESP_MAX_MSG_LEN;
	resp_desc.ei_array = slimbus_chkfrm_resp_msg_v01_ei;

	rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, NULL, 0,
		&resp_desc, &resp, sizeof(resp), SLIM_QMI_RESP_TOUT);
	if (rc < 0) {
		SLIM_ERR(dev, "%s: QMI send req failed %d\n", __func__, rc);
		return rc;
	}
	/* Check the response */
	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		SLIM_ERR(dev, "%s: QMI request failed 0x%x (%s)\n",
			__func__, resp.resp.result, get_qmi_error(&resp.resp));
		return -EREMOTEIO;
	}
	return 0;
}

int msm_slim_qmi_deferred_status_req(struct msm_slim_ctrl *dev)
{
	struct slimbus_deferred_status_resp resp = { { 0, 0 } };
	struct msg_desc req_desc, resp_desc;
	int rc;

	req_desc.msg_id = SLIMBUS_QMI_DEFERRED_STATUS_REQ;
	req_desc.max_msg_len = 0;
	req_desc.ei_array = NULL;

	resp_desc.msg_id = SLIMBUS_QMI_DEFERRED_STATUS_RESP;
	resp_desc.max_msg_len =
		SLIMBUS_QMI_DEFERRED_STATUS_RESP_STAT_MSG_MAX_MSG_LEN;
	resp_desc.ei_array = slimbus_deferred_status_resp_msg_v01_ei;

	rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, NULL, 0,
		&resp_desc, &resp, sizeof(resp), SLIM_QMI_RESP_TOUT);
	if (rc < 0) {
		SLIM_ERR(dev, "%s: QMI send req failed %d\n", __func__, rc);
		return rc;
	}
	/* Check the response */
	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		SLIM_ERR(dev, "%s: QMI request failed 0x%x (%s)\n",
			__func__, resp.resp.result, get_qmi_error(&resp.resp));
		return -EREMOTEIO;
	}

	/* wait for the deferred response */
	rc = wait_for_completion_timeout(&dev->qmi.defer_comp, HZ);
	if (rc == 0) {
		SLIM_WARN(dev, "slimbus power deferred response not rcvd\n");
		return -ETIMEDOUT;
	}
	/* Check what response we got in the callback */
	if (dev->qmi.resp.result != QMI_RESULT_SUCCESS_V01) {
		SLIM_WARN(dev, "QMI power req failed in CB");
		return -EREMOTEIO;
	}

	return 0;
}