blob: 3f99b2bc0f78cc7083aa1e7b85d495d8c6aa917c [file] [log] [blame]
Sagar Dhariabe37c9c2016-11-28 23:06:58 -07001/* Copyright (c) 2011-2015, 2017 The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12#include <linux/irq.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/slab.h>
16#include <linux/io.h>
17#include <linux/interrupt.h>
18#include <linux/platform_device.h>
19#include <linux/slimbus/slimbus.h>
20#include <linux/delay.h>
21#include <linux/kthread.h>
22#include <linux/clk.h>
23#include <linux/pm_runtime.h>
24#include <linux/of.h>
25#include <linux/of_slimbus.h>
26#include <linux/msm-sps.h>
27#include <linux/qdsp6v2/apr.h>
28#include "slim-msm.h"
29
/* Platform driver/device name and bus root clock */
#define MSM_SLIM_NAME "msm_slim_ctrl"
#define SLIM_ROOT_FREQ 24576000	/* SLIMbus root frequency in Hz */

/*
 * Number of QTI devices on this bus. When the device with logical address
 * (QC_MSM_DEVS - 1) enumerates, runtime PM is enabled (see msm_slim_rxwq).
 */
#define QC_MSM_DEVS 5

/* Manager registers (byte offsets from the controller register base) */
enum mgr_reg {
	MGR_CFG = 0x200,
	MGR_STATUS = 0x204,
	MGR_RX_MSGQ_CFG = 0x208,
	MGR_INT_EN = 0x210,
	MGR_INT_STAT = 0x214,
	MGR_INT_CLR = 0x218,
	MGR_TX_MSG = 0x230,	/* TX message FIFO window */
	MGR_RX_MSG = 0x270,	/* RX message FIFO window */
	MGR_IE_STAT = 0x2F0,
	MGR_VE_STAT = 0x300,
};

/* Bits of MGR_CFG: enable manager and route messages via BAM queues */
enum msg_cfg {
	MGR_CFG_ENABLE = 1,
	MGR_CFG_RX_MSGQ_EN = 1 << 1,
	MGR_CFG_TX_MSGQ_EN_HIGH = 1 << 2,
	MGR_CFG_TX_MSGQ_EN_LOW = 1 << 3,
};
/* Message queue types */
enum msm_slim_msgq_type {
	MSGQ_RX = 0,
	MSGQ_TX_LOW = 1,
	MSGQ_TX_HIGH = 2,
};
/* Framer registers (byte offsets from the controller register base) */
enum frm_reg {
	FRM_CFG = 0x400,
	FRM_STAT = 0x404,
	FRM_INT_EN = 0x410,
	FRM_INT_STAT = 0x414,
	FRM_INT_CLR = 0x418,
	FRM_WAKEUP = 0x41C,	/* written to exit clock pause */
	FRM_CLKCTL_DONE = 0x420,
	FRM_IE_STAT = 0x430,
	FRM_VE_STAT = 0x440,
};

/* Interface registers (byte offsets from the controller register base) */
enum intf_reg {
	INTF_CFG = 0x600,
	INTF_STAT = 0x604,
	INTF_INT_EN = 0x610,
	INTF_INT_STAT = 0x614,
	INTF_INT_CLR = 0x618,
	INTF_IE_STAT = 0x630,
	INTF_VE_STAT = 0x640,
};

/*
 * Bits of MGR_INT_STAT / MGR_INT_CLR / MGR_INT_EN.
 * Note: 1 << 31 shifts into the sign bit of int; this relies on the
 * kernel-wide GCC convention for signed shifts (enum constants cannot
 * be made unsigned without widening past int).
 */
enum mgr_intr {
	MGR_INT_RECFG_DONE = 1 << 24,
	MGR_INT_TX_NACKED_2 = 1 << 25,
	MGR_INT_MSG_BUF_CONTE = 1 << 26,
	MGR_INT_RX_MSG_RCVD = 1 << 30,
	MGR_INT_TX_MSG_SENT = 1 << 31,
};

/* Bit positions within FRM_CFG */
enum frm_cfg {
	FRM_ACTIVE = 1,
	CLK_GEAR = 7,
	ROOT_FREQ = 11,
	REF_CLK_GEAR = 15,
	INTR_WAKE = 19,
};
100
101static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev);
102
103static int msm_sat_enqueue(struct msm_slim_sat *sat, u32 *buf, u8 len)
104{
105 struct msm_slim_ctrl *dev = sat->dev;
106 unsigned long flags;
107
108 spin_lock_irqsave(&sat->lock, flags);
109 if ((sat->stail + 1) % SAT_CONCUR_MSG == sat->shead) {
110 spin_unlock_irqrestore(&sat->lock, flags);
111 dev_err(dev->dev, "SAT QUEUE full!");
112 return -EXFULL;
113 }
114 memcpy(sat->sat_msgs[sat->stail], (u8 *)buf, len);
115 sat->stail = (sat->stail + 1) % SAT_CONCUR_MSG;
116 spin_unlock_irqrestore(&sat->lock, flags);
117 return 0;
118}
119
/*
 * msm_sat_dequeue() - pop the oldest queued satellite message
 * @sat: satellite whose message ring is drained
 * @buf: destination buffer; must hold at least 40 bytes because an entire
 *       slot is always copied regardless of the message's actual length
 *
 * Runs on the satellite workqueue; sat->lock serializes against
 * msm_sat_enqueue() in IRQ / RX-thread context.
 *
 * Return: 0 on success, -ENODATA if the ring is empty.
 */
static int msm_sat_dequeue(struct msm_slim_sat *sat, u8 *buf)
{
	unsigned long flags;

	spin_lock_irqsave(&sat->lock, flags);
	if (sat->stail == sat->shead) {
		spin_unlock_irqrestore(&sat->lock, flags);
		return -ENODATA;
	}
	/* Copy the whole slot; callers re-parse the length from buf[0] */
	memcpy(buf, sat->sat_msgs[sat->shead], 40);
	sat->shead = (sat->shead + 1) % SAT_CONCUR_MSG;
	spin_unlock_irqrestore(&sat->lock, flags);
	return 0;
}
134
135static void msm_get_eaddr(u8 *e_addr, u32 *buffer)
136{
137 e_addr[0] = (buffer[1] >> 24) & 0xff;
138 e_addr[1] = (buffer[1] >> 16) & 0xff;
139 e_addr[2] = (buffer[1] >> 8) & 0xff;
140 e_addr[3] = buffer[1] & 0xff;
141 e_addr[4] = (buffer[0] >> 24) & 0xff;
142 e_addr[5] = (buffer[0] >> 16) & 0xff;
143}
144
145static bool msm_is_sat_dev(u8 *e_addr)
146{
147 if (e_addr[5] == QC_MFGID_LSB && e_addr[4] == QC_MFGID_MSB &&
148 e_addr[2] != QC_CHIPID_SL &&
149 (e_addr[1] == QC_DEVID_SAT1 || e_addr[1] == QC_DEVID_SAT2))
150 return true;
151 return false;
152}
153
154static struct msm_slim_sat *addr_to_sat(struct msm_slim_ctrl *dev, u8 laddr)
155{
156 struct msm_slim_sat *sat = NULL;
157 int i = 0;
158
159 while (!sat && i < dev->nsats) {
160 if (laddr == dev->satd[i]->satcl.laddr)
161 sat = dev->satd[i];
162 i++;
163 }
164 return sat;
165}
166
/*
 * msm_slim_interrupt() - top-level ISR for the SLIMbus manager.
 *
 * Handles three interrupt sources in order:
 *  1. TX done / TX NACK: on NACK, dumps manager/framer/interface state and
 *     records -EIO in dev->err before releasing the TX message slot.
 *  2. RX message received: reads the message out of the MGR_RX_MSG window
 *     and dispatches by message type/code (satellite traffic, enumeration
 *     reports, value/information replies, error dumps).
 *  3. Reconfiguration done: completes dev->reconf.
 * Finally it chains to the port-interrupt handler if any port bits are set.
 *
 * Every interrupt-clear write is followed by mb() so the clear reaches the
 * hardware before completions are signalled or work is queued — the
 * ordering of these statements is intentional.
 */
static irqreturn_t msm_slim_interrupt(int irq, void *d)
{
	struct msm_slim_ctrl *dev = d;
	u32 pstat;
	u32 stat = readl_relaxed(dev->base + MGR_INT_STAT);

	if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2) {
		if (stat & MGR_INT_TX_MSG_SENT)
			writel_relaxed(MGR_INT_TX_MSG_SENT,
					dev->base + MGR_INT_CLR);
		else {
			/* TX was NACKed: capture all status registers for the dump */
			u32 mgr_stat = readl_relaxed(dev->base + MGR_STATUS);
			u32 mgr_ie_stat = readl_relaxed(dev->base +
						MGR_IE_STAT);
			u32 frm_stat = readl_relaxed(dev->base + FRM_STAT);
			u32 frm_cfg = readl_relaxed(dev->base + FRM_CFG);
			u32 frm_intr_stat = readl_relaxed(dev->base +
						FRM_INT_STAT);
			u32 frm_ie_stat = readl_relaxed(dev->base +
						FRM_IE_STAT);
			u32 intf_stat = readl_relaxed(dev->base + INTF_STAT);
			u32 intf_intr_stat = readl_relaxed(dev->base +
						INTF_INT_STAT);
			u32 intf_ie_stat = readl_relaxed(dev->base +
						INTF_IE_STAT);

			writel_relaxed(MGR_INT_TX_NACKED_2,
					dev->base + MGR_INT_CLR);
			pr_err("TX Nack MGR dump:int_stat:0x%x, mgr_stat:0x%x",
					stat, mgr_stat);
			pr_err("TX Nack MGR dump:ie_stat:0x%x", mgr_ie_stat);
			pr_err("TX Nack FRM dump:int_stat:0x%x, frm_stat:0x%x",
					frm_intr_stat, frm_stat);
			pr_err("TX Nack FRM dump:frm_cfg:0x%x, ie_stat:0x%x",
					frm_cfg, frm_ie_stat);
			pr_err("TX Nack INTF dump:intr_st:0x%x, intf_stat:0x%x",
					intf_intr_stat, intf_stat);
			pr_err("TX Nack INTF dump:ie_stat:0x%x", intf_ie_stat);

			dev->err = -EIO;
		}
		/*
		 * Guarantee that interrupt clear bit write goes through before
		 * signalling completion/exiting ISR
		 */
		mb();
		msm_slim_manage_tx_msgq(dev, false, NULL);
	}
	if (stat & MGR_INT_RX_MSG_RCVD) {
		u32 rx_buf[10];
		u32 mc, mt;
		u8 len, i;

		/* First word carries the 5-bit length; read remaining words */
		rx_buf[0] = readl_relaxed(dev->base + MGR_RX_MSG);
		len = rx_buf[0] & 0x1F;
		for (i = 1; i < ((len + 3) >> 2); i++) {
			rx_buf[i] = readl_relaxed(dev->base + MGR_RX_MSG +
						(4 * i));
			dev_dbg(dev->dev, "reading data: %x\n", rx_buf[i]);
		}
		mt = (rx_buf[0] >> 5) & 0x7;
		mc = (rx_buf[0] >> 8) & 0xff;
		dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
		if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
			/* User-referred messages belong to a satellite */
			u8 laddr = (u8)((rx_buf[0] >> 16) & 0xFF);
			struct msm_slim_sat *sat = addr_to_sat(dev, laddr);

			if (sat)
				msm_sat_enqueue(sat, rx_buf, len);
			else
				dev_err(dev->dev, "unknown sat:%d message",
						laddr);
			writel_relaxed(MGR_INT_RX_MSG_RCVD,
					dev->base + MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through before
			 * queuing work
			 */
			mb();
			if (sat)
				queue_work(sat->wq, &sat->wd);
		} else if (mt == SLIM_MSG_MT_CORE &&
				mc == SLIM_MSG_MC_REPORT_PRESENT) {
			/* Device enumeration: hand off to the RX thread */
			u8 e_addr[6];

			msm_get_eaddr(e_addr, rx_buf);
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mt == SLIM_MSG_MT_CORE &&
				mc == SLIM_MSG_MC_REPORT_ABSENT) {
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);

		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			/* Transaction replies are queued for the RX thread */
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			/* Log the reported information element inline */
			u8 *buf = (u8 *)rx_buf;
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;

			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
					l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		} else {
			dev_err(dev->dev, "Unexpected MC,%x MT:%x, len:%d",
					mc, mt, len);
			for (i = 0; i < ((len + 3) >> 2); i++)
				dev_err(dev->dev, "error msg: %x", rx_buf[i]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		}
	}
	if (stat & MGR_INT_RECFG_DONE) {
		writel_relaxed(MGR_INT_RECFG_DONE, dev->base + MGR_INT_CLR);
		/*
		 * Guarantee that CLR bit write goes through
		 * before exiting ISR
		 */
		mb();
		complete(&dev->reconf);
	}
	/* Chain to the per-port interrupt handler if any port bits are set */
	pstat = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_ST_EEn, dev->ver));
	if (pstat != 0)
		return msm_slim_port_irq_handler(dev, pstat);

	return IRQ_HANDLED;
}
332
333static int msm_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
334{
335 DECLARE_COMPLETION_ONSTACK(done);
336 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
337 u32 *pbuf;
338 u8 *puc;
339 int timeout;
340 int msgv = -1;
341 u8 la = txn->la;
342 u8 mc = (u8)(txn->mc & 0xFF);
343 /*
344 * Voting for runtime PM: Slimbus has 2 possible use cases:
345 * 1. messaging
346 * 2. Data channels
347 * Messaging case goes through messaging slots and data channels
348 * use their own slots
349 * This "get" votes for messaging bandwidth
350 */
351 if (!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG))
352 msgv = msm_slim_get_ctrl(dev);
353 if (msgv >= 0)
354 dev->state = MSM_CTRL_AWAKE;
355 mutex_lock(&dev->tx_lock);
356 if (dev->state == MSM_CTRL_ASLEEP ||
357 ((!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
358 dev->state == MSM_CTRL_IDLE)) {
359 dev_err(dev->dev, "runtime or system PM suspended state");
360 mutex_unlock(&dev->tx_lock);
361 if (msgv >= 0)
362 msm_slim_put_ctrl(dev);
363 return -EBUSY;
364 }
365 if (txn->mt == SLIM_MSG_MT_CORE &&
366 mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION) {
367 if (dev->reconf_busy) {
368 wait_for_completion(&dev->reconf);
369 dev->reconf_busy = false;
370 }
371 /* This "get" votes for data channels */
372 if (dev->ctrl.sched.usedslots != 0 &&
373 !dev->chan_active) {
374 int chv = msm_slim_get_ctrl(dev);
375
376 if (chv >= 0)
377 dev->chan_active = true;
378 }
379 }
380 txn->rl--;
381 pbuf = msm_get_msg_buf(dev, txn->rl, &done);
382 dev->err = 0;
383
384 if (txn->dt == SLIM_MSG_DEST_ENUMADDR) {
385 mutex_unlock(&dev->tx_lock);
386 if (msgv >= 0)
387 msm_slim_put_ctrl(dev);
388 return -EPROTONOSUPPORT;
389 }
390 if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
391 (mc == SLIM_MSG_MC_CONNECT_SOURCE ||
392 mc == SLIM_MSG_MC_CONNECT_SINK ||
393 mc == SLIM_MSG_MC_DISCONNECT_PORT))
394 la = dev->pgdla;
395 if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
396 *pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 0, la);
397 else
398 *pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 1, la);
399 if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
400 puc = ((u8 *)pbuf) + 3;
401 else
402 puc = ((u8 *)pbuf) + 2;
403 if (txn->rbuf)
404 *(puc++) = txn->tid;
405 if ((txn->mt == SLIM_MSG_MT_CORE) &&
406 ((mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
407 mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
408 (mc >= SLIM_MSG_MC_REQUEST_VALUE &&
409 mc <= SLIM_MSG_MC_CHANGE_VALUE))) {
410 *(puc++) = (txn->ec & 0xFF);
411 *(puc++) = (txn->ec >> 8)&0xFF;
412 }
413 if (txn->wbuf)
414 memcpy(puc, txn->wbuf, txn->len);
415 if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
416 (mc == SLIM_MSG_MC_CONNECT_SOURCE ||
417 mc == SLIM_MSG_MC_CONNECT_SINK ||
418 mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
419 if (mc != SLIM_MSG_MC_DISCONNECT_PORT)
420 dev->err = msm_slim_connect_pipe_port(dev, *puc);
421 else {
422 /*
423 * Remove channel disconnects master-side ports from
424 * channel. No need to send that again on the bus
425 * Only disable port
426 */
427 writel_relaxed(0, PGD_PORT(PGD_PORT_CFGn,
428 dev->pipes[*puc].port_b, dev->ver));
429 mutex_unlock(&dev->tx_lock);
430 if (msgv >= 0)
431 msm_slim_put_ctrl(dev);
432 return 0;
433 }
434 if (dev->err) {
435 dev_err(dev->dev, "pipe-port connect err:%d", dev->err);
436 mutex_unlock(&dev->tx_lock);
437 if (msgv >= 0)
438 msm_slim_put_ctrl(dev);
439 return dev->err;
440 }
441 *(puc) = (u8)dev->pipes[*puc].port_b;
442 }
443 if (txn->mt == SLIM_MSG_MT_CORE &&
444 mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION)
445 dev->reconf_busy = true;
446 msm_send_msg_buf(dev, pbuf, txn->rl, MGR_TX_MSG);
447 timeout = wait_for_completion_timeout(&done, HZ);
448 if (mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
449 if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
450 SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
451 timeout) {
452 timeout = wait_for_completion_timeout(&dev->reconf, HZ);
453 dev->reconf_busy = false;
454 if (timeout) {
455 clk_disable_unprepare(dev->rclk);
456 disable_irq(dev->irq);
457 }
458 }
459 if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
460 SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
461 !timeout) {
462 dev->reconf_busy = false;
463 dev_err(dev->dev, "clock pause failed");
464 mutex_unlock(&dev->tx_lock);
465 return -ETIMEDOUT;
466 }
467 if (txn->mt == SLIM_MSG_MT_CORE &&
468 txn->mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
469 if (dev->ctrl.sched.usedslots == 0 &&
470 dev->chan_active) {
471 dev->chan_active = false;
472 msm_slim_put_ctrl(dev);
473 }
474 }
475 }
476 mutex_unlock(&dev->tx_lock);
477 if (msgv >= 0)
478 msm_slim_put_ctrl(dev);
479
480 if (!timeout)
481 dev_err(dev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
482 txn->mt);
483
484 return timeout ? dev->err : -ETIMEDOUT;
485}
486
487static void msm_slim_wait_retry(struct msm_slim_ctrl *dev)
488{
489 int msec_per_frm = 0;
490 int sfr_per_sec;
491 /* Wait for 1 superframe, or default time and then retry */
492 sfr_per_sec = dev->framer.superfreq /
493 (1 << (SLIM_MAX_CLK_GEAR - dev->ctrl.clkgear));
494 if (sfr_per_sec)
495 msec_per_frm = MSEC_PER_SEC / sfr_per_sec;
496 if (msec_per_frm < DEF_RETRY_MS)
497 msec_per_frm = DEF_RETRY_MS;
498 msleep(msec_per_frm);
499}
500static int msm_set_laddr(struct slim_controller *ctrl, const u8 *ea,
501 u8 elen, u8 laddr)
502{
503 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
504 struct completion done;
505 int timeout, ret, retries = 0;
506 u32 *buf;
507retry_laddr:
508 init_completion(&done);
509 mutex_lock(&dev->tx_lock);
510 buf = msm_get_msg_buf(dev, 9, &done);
511 if (buf == NULL)
512 return -ENOMEM;
513 buf[0] = SLIM_MSG_ASM_FIRST_WORD(9, SLIM_MSG_MT_CORE,
514 SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
515 SLIM_MSG_DEST_LOGICALADDR,
516 ea[5] | ea[4] << 8);
517 buf[1] = ea[3] | (ea[2] << 8) | (ea[1] << 16) | (ea[0] << 24);
518 buf[2] = laddr;
519
520 ret = msm_send_msg_buf(dev, buf, 9, MGR_TX_MSG);
521 timeout = wait_for_completion_timeout(&done, HZ);
522 if (!timeout)
523 dev->err = -ETIMEDOUT;
524 if (dev->err) {
525 ret = dev->err;
526 dev->err = 0;
527 }
528 mutex_unlock(&dev->tx_lock);
529 if (ret) {
530 pr_err("set LADDR:0x%x failed:ret:%d, retrying", laddr, ret);
531 if (retries < INIT_MX_RETRIES) {
532 msm_slim_wait_retry(dev);
533 retries++;
534 goto retry_laddr;
535 } else {
536 pr_err("set LADDR failed after retrying:ret:%d", ret);
537 }
538 }
539 return ret;
540}
541
/*
 * msm_clk_pause_wakeup() - bring the bus out of clock pause.
 *
 * Re-enables the controller IRQ (disabled when clock pause completed in
 * msm_xfer_msg), restarts the root clock, and pokes the framer wakeup
 * register. Always returns 0.
 */
static int msm_clk_pause_wakeup(struct slim_controller *ctrl)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);

	enable_irq(dev->irq);
	clk_prepare_enable(dev->rclk);
	writel_relaxed(1, dev->base + FRM_WAKEUP);
	/* Make sure framer wakeup write goes through before exiting function */
	mb();
	/*
	 * Workaround: Currently, slave is reporting lost-sync messages
	 * after slimbus comes out of clock pause.
	 * Transaction with slave fail before slave reports that message
	 * Give some time for that report to come
	 * Slimbus wakes up in clock gear 10 at 24.576MHz. With each superframe
	 * being 250 usecs, we wait for 20 superframes here to ensure
	 * we get the message
	 */
	usleep_range(4950, 5000);
	return 0;
}
563
/*
 * msm_sat_define_ch() - handle a satellite channel define/activate/control
 * request parsed from a user-referred message.
 * @sat: requesting satellite
 * @buf: raw message bytes (header + channel list)
 * @len: message length in bytes
 * @mc: user message code (SLIM_USR_MC_CHAN_CTRL, _DEFINE_CHAN or
 *      _DEF_ACT_CHAN)
 *
 * For CHAN_CTRL the operation in buf[3] is applied to the (possibly
 * grouped) channels listed from buf[5]. For define/define+activate, the
 * channel numbers start at buf[8]; unknown channels are allocated in
 * sat->satch[] and the whole list is defined as a group.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int msm_sat_define_ch(struct msm_slim_sat *sat, u8 *buf, u8 len, u8 mc)
{
	struct msm_slim_ctrl *dev = sat->dev;
	enum slim_ch_control oper;
	int i;
	int ret = 0;

	if (mc == SLIM_USR_MC_CHAN_CTRL) {
		/* Find the first listed channel in the satellite's table */
		for (i = 0; i < sat->nsatch; i++) {
			if (buf[5] == sat->satch[i].chan)
				break;
		}
		if (i >= sat->nsatch)
			return -ENOTCONN;
		oper = ((buf[3] & 0xC0) >> 6);
		/* part of grp. activating/removing 1 will take care of rest */
		ret = slim_control_ch(&sat->satcl, sat->satch[i].chanh, oper,
					false);
		if (!ret) {
			/* Count pending remove/define per listed channel */
			for (i = 5; i < len; i++) {
				int j;

				for (j = 0; j < sat->nsatch; j++) {
					if (buf[i] != sat->satch[j].chan)
						continue;

					if (oper == SLIM_CH_REMOVE)
						sat->satch[j].req_rem++;
					else
						sat->satch[j].req_def++;
					break;
				}
			}
		}
	} else {
		u16 chh[40];
		struct slim_ch prop;
		u32 exp;
		u16 *grph = NULL;
		u8 coeff, cc;
		u8 prrate = buf[6];

		if (len <= 8)
			return -EINVAL;
		for (i = 8; i < len; i++) {
			int j = 0;

			for (j = 0; j < sat->nsatch; j++) {
				if (sat->satch[j].chan == buf[i]) {
					chh[i - 8] = sat->satch[j].chanh;
					break;
				}
			}
			if (j < sat->nsatch) {
				/* Channel already known; just re-query it */
				u16 dummy;

				ret = slim_query_ch(&sat->satcl, buf[i],
							&dummy);
				if (ret)
					return ret;
				if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
					sat->satch[j].req_def++;
				/* First channel in group from satellite */
				if (i == 8)
					grph = &sat->satch[j].chanh;
				continue;
			}
			if (sat->nsatch >= MSM_MAX_SATCH)
				return -EXFULL;
			/* New channel: j == sat->nsatch here, so this
			 * fills the next free satch[] slot.
			 */
			ret = slim_query_ch(&sat->satcl, buf[i], &chh[i - 8]);
			if (ret)
				return ret;
			sat->satch[j].chan = buf[i];
			sat->satch[j].chanh = chh[i - 8];
			if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
				sat->satch[j].req_def++;
			if (i == 8)
				grph = &sat->satch[j].chanh;
			sat->nsatch++;
		}
		/* Decode channel properties from the message header bytes */
		prop.dataf = (enum slim_ch_dataf)((buf[3] & 0xE0) >> 5);
		/*
		 * NOTE(review): mask 0xC0 with shift >> 5 yields even values
		 * 0/2/4/6 — a >> 6 shift looks intended; confirm against the
		 * satellite message format before changing.
		 */
		prop.auxf = (enum slim_ch_auxf)((buf[4] & 0xC0) >> 5);
		prop.baser = SLIM_RATE_4000HZ;
		if (prrate & 0x8)
			prop.baser = SLIM_RATE_11025HZ;
		else
			prop.baser = SLIM_RATE_4000HZ;
		prop.prot = (enum slim_ch_proto)(buf[5] & 0x0F);
		prop.sampleszbits = (buf[4] & 0x1F)*SLIM_CL_PER_SL;
		exp = (u32)((buf[5] & 0xF0) >> 4);
		coeff = (buf[4] & 0x20) >> 5;
		cc = (coeff ? 3 : 1);
		prop.ratem = cc * (1 << exp);
		/* i > 9 means more than one channel was listed */
		if (i > 9)
			ret = slim_define_ch(&sat->satcl, &prop, chh, len - 8,
						true, &chh[0]);
		else
			ret = slim_define_ch(&sat->satcl, &prop,
						chh, 1, true, &chh[0]);
		dev_dbg(dev->dev, "define sat grp returned:%d", ret);
		if (ret)
			return ret;
		else if (grph)
			*grph = chh[0];

		/* part of group so activating 1 will take care of rest */
		if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
			ret = slim_control_ch(&sat->satcl,
						chh[0],
						SLIM_CH_ACTIVATE, false);
	}
	return ret;
}
677
/*
 * msm_slim_rxwq() - process one message from the generic RX queue.
 *
 * Called from the RX kthread. Dequeues a single message and dispatches on
 * its type/code: device enumeration (assign logical address, detect the
 * PGD device and satellites), transaction replies (complete the pending
 * txn via slim_msg_response), and information reports (logged).
 */
static void msm_slim_rxwq(struct msm_slim_ctrl *dev)
{
	u8 buf[40];
	u8 mc, mt, len;
	int i, ret;

	if ((msm_slim_rx_dequeue(dev, (u8 *)buf)) != -ENODATA) {
		len = buf[0] & 0x1F;
		mt = (buf[0] >> 5) & 0x7;
		mc = buf[1];
		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 laddr;
			u8 e_addr[6];

			/* Enumeration address arrives in reverse byte order */
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			ret = slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr,
						false);
			/* Is this QTI ported generic device? */
			if (!ret && e_addr[5] == QC_MFGID_LSB &&
				e_addr[4] == QC_MFGID_MSB &&
				e_addr[1] == QC_DEVID_PGD &&
				e_addr[2] != QC_CHIPID_SL)
				dev->pgdla = laddr;
			/* Enable runtime PM once the last QTI device appears */
			if (!ret && !pm_runtime_enabled(dev->dev) &&
				laddr == (QC_MSM_DEVS - 1))
				pm_runtime_enable(dev->dev);

			if (!ret && msm_is_sat_dev(e_addr)) {
				struct msm_slim_sat *sat = addr_to_sat(dev,
								laddr);
				if (!sat)
					sat = msm_slim_alloc_sat(dev);
				if (!sat)
					return;

				sat->satcl.laddr = laddr;
				msm_sat_enqueue(sat, (u32 *)buf, len);
				queue_work(sat->wq, &sat->wd);
			}
			if (ret)
				pr_err("assign laddr failed, error:%d", ret);
		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			u8 tid = buf[3];

			dev_dbg(dev->dev, "tid:%d, len:%d\n", tid, len - 4);
			slim_msg_response(&dev->ctrl, &buf[4], tid,
						len - 4);
			pm_runtime_mark_last_busy(dev->dev);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;

			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
					l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
		} else {
			dev_err(dev->dev, "unexpected message:mc:%x, mt:%x",
					mc, mt);
			for (i = 0; i < len; i++)
				dev_err(dev->dev, "error msg: %x", buf[i]);

		}
	} else
		dev_err(dev->dev, "rxwq called and no dequeue");
}
750
/*
 * slim_sat_rxprocess() - satellite workqueue handler.
 *
 * Drains the satellite's message ring and services each request:
 * REPORT_PRESENT (device add + capability exchange, including subsystem-
 * restart cleanup), address queries, channel define/activate/control,
 * reconfiguration, bandwidth requests, and port connect/disconnect.
 * Requests flagged gen_ack are answered with SLIM_USR_MC_GENERIC_ACK.
 * Runtime-PM votes (satv/chv) are taken per message and dropped when the
 * message has been fully handled or acknowledged.
 */
static void slim_sat_rxprocess(struct work_struct *work)
{
	struct msm_slim_sat *sat = container_of(work, struct msm_slim_sat, wd);
	struct msm_slim_ctrl *dev = sat->dev;
	u8 buf[40];

	while ((msm_sat_dequeue(sat, buf)) != -ENODATA) {
		struct slim_msg_txn txn;
		u8 len, mc, mt;
		u32 bw_sl;
		int ret = 0;
		int satv = -1;
		bool gen_ack = false;
		u8 tid;
		u8 wbuf[8];
		int i, retries = 0;

		/* Defaults for the reply transaction; cases override fields */
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		txn.dt = SLIM_MSG_DEST_LOGICALADDR;
		txn.ec = 0;
		txn.rbuf = NULL;
		txn.la = sat->satcl.laddr;
		/* satellite handling */
		len = buf[0] & 0x1F;
		mc = buf[1];
		mt = (buf[0] >> 5) & 0x7;

		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 e_addr[6];

			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			if (pm_runtime_enabled(dev->dev)) {
				satv = msm_slim_get_ctrl(dev);
				if (satv >= 0)
					sat->pending_capability = true;
			}
			/*
			 * Since capability message is already sent, present
			 * message will indicate subsystem hosting this
			 * satellite has restarted.
			 * Remove all active channels of this satellite
			 * when this is detected
			 */
			if (sat->sent_capability) {
				for (i = 0; i < sat->nsatch; i++) {
					if (sat->satch[i].reconf) {
						pr_err("SSR, sat:%d, rm ch:%d",
							sat->satcl.laddr,
							sat->satch[i].chan);
						slim_control_ch(&sat->satcl,
							sat->satch[i].chanh,
							SLIM_CH_REMOVE, true);
						slim_dealloc_ch(&sat->satcl,
							sat->satch[i].chanh);
						sat->satch[i].reconf = false;
					}
				}
			}
		} else if (mt != SLIM_MSG_MT_CORE &&
				mc != SLIM_MSG_MC_REPORT_PRESENT) {
			/* Vote for the duration of handling this message */
			satv = msm_slim_get_ctrl(dev);
		}
		switch (mc) {
		case SLIM_MSG_MC_REPORT_PRESENT:
			/* Remove runtime_pm vote once satellite acks */
			if (mt != SLIM_MSG_MT_CORE) {
				if (pm_runtime_enabled(dev->dev) &&
					sat->pending_capability) {
					msm_slim_put_ctrl(dev);
					sat->pending_capability = false;
				}
				continue;
			}
			/* send a Manager capability msg */
			if (sat->sent_capability) {
				if (mt == SLIM_MSG_MT_CORE)
					goto send_capability;
				else
					continue;
			}
			ret = slim_add_device(&dev->ctrl, &sat->satcl);
			if (ret) {
				dev_err(dev->dev,
					"Satellite-init failed");
				continue;
			}
			/*
			 * Satellite-channels
			 * NOTE(review): this kzalloc result is not checked;
			 * later satch[] accesses would oops on allocation
			 * failure — worth confirming/fixing upstream.
			 */
			sat->satch = kzalloc(MSM_MAX_SATCH *
					sizeof(struct msm_sat_chan),
					GFP_KERNEL);
send_capability:
			txn.mc = SLIM_USR_MC_MASTER_CAPABILITY;
			txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
			txn.la = sat->satcl.laddr;
			txn.rl = 8;
			wbuf[0] = SAT_MAGIC_LSB;
			wbuf[1] = SAT_MAGIC_MSB;
			wbuf[2] = SAT_MSG_VER;
			wbuf[3] = SAT_MSG_PROT;
			txn.wbuf = wbuf;
			txn.len = 4;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			if (ret) {
				pr_err("capability for:0x%x fail:%d, retry:%d",
					sat->satcl.laddr, ret, retries);
				if (retries < INIT_MX_RETRIES) {
					msm_slim_wait_retry(dev);
					retries++;
					goto send_capability;
				} else {
					pr_err("failed after all retries:%d",
							ret);
				}
			} else {
				sat->sent_capability = true;
			}
			break;
		case SLIM_USR_MC_ADDR_QUERY:
			memcpy(&wbuf[1], &buf[4], 6);
			ret = slim_get_logical_addr(&sat->satcl,
					&wbuf[1], 6, &wbuf[7]);
			if (ret)
				memset(&wbuf[1], 0, 6);
			wbuf[0] = buf[3];
			txn.mc = SLIM_USR_MC_ADDR_REPLY;
			txn.rl = 12;
			txn.len = 8;
			txn.wbuf = wbuf;
			msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DEFINE_CHAN:
		case SLIM_USR_MC_DEF_ACT_CHAN:
		case SLIM_USR_MC_CHAN_CTRL:
			/* tid position differs between the two message layouts */
			if (mc != SLIM_USR_MC_CHAN_CTRL)
				tid = buf[7];
			else
				tid = buf[4];
			gen_ack = true;
			ret = msm_sat_define_ch(sat, buf, len, mc);
			if (ret) {
				dev_err(dev->dev,
					"SAT define_ch returned:%d",
					ret);
			}
			/* Hold a data-channel vote until RECONFIG_NOW */
			if (!sat->pending_reconf) {
				int chv = msm_slim_get_ctrl(dev);

				if (chv >= 0)
					sat->pending_reconf = true;
			}
			break;
		case SLIM_USR_MC_RECONFIG_NOW:
			tid = buf[3];
			gen_ack = true;
			ret = slim_reconfigure_now(&sat->satcl);
			/* Commit or roll back the pending channel bookkeeping */
			for (i = 0; i < sat->nsatch; i++) {
				struct msm_sat_chan *sch = &sat->satch[i];

				if (sch->req_rem && sch->reconf) {
					if (!ret) {
						slim_dealloc_ch(&sat->satcl,
								sch->chanh);
						sch->reconf = false;
					}
					sch->req_rem--;
				} else if (sch->req_def) {
					if (ret)
						slim_dealloc_ch(&sat->satcl,
								sch->chanh);
					else
						sch->reconf = true;
					sch->req_def--;
				}
			}
			if (sat->pending_reconf) {
				msm_slim_put_ctrl(dev);
				sat->pending_reconf = false;
			}
			break;
		case SLIM_USR_MC_REQ_BW:
			/* what we get is in SLOTS */
			bw_sl = (u32)buf[4] << 3 |
						((buf[3] & 0xE0) >> 5);
			sat->satcl.pending_msgsl = bw_sl;
			tid = buf[5];
			gen_ack = true;
			break;
		case SLIM_USR_MC_CONNECT_SRC:
		case SLIM_USR_MC_CONNECT_SINK:
			if (mc == SLIM_USR_MC_CONNECT_SRC)
				txn.mc = SLIM_MSG_MC_CONNECT_SOURCE;
			else
				txn.mc = SLIM_MSG_MC_CONNECT_SINK;
			wbuf[0] = buf[4] & 0x1F;
			wbuf[1] = buf[5];
			tid = buf[6];
			txn.la = buf[3];
			txn.mt = SLIM_MSG_MT_CORE;
			txn.rl = 6;
			txn.len = 2;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DISCONNECT_PORT:
			txn.mc = SLIM_MSG_MC_DISCONNECT_PORT;
			wbuf[0] = buf[4] & 0x1F;
			tid = buf[5];
			txn.la = buf[3];
			txn.rl = 5;
			txn.len = 1;
			txn.mt = SLIM_MSG_MT_CORE;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_MSG_MC_REPORT_ABSENT:
			dev_info(dev->dev, "Received Report Absent Message\n");
			break;
		default:
			break;
		}
		if (!gen_ack) {
			/* Drop the per-message vote when no ack is owed */
			if (mc != SLIM_MSG_MC_REPORT_PRESENT && satv >= 0)
				msm_slim_put_ctrl(dev);
			continue;
		}

		/* Generic ack: wbuf[1] nonzero signals success */
		wbuf[0] = tid;
		if (!ret)
			wbuf[1] = MSM_SAT_SUCCSS;
		else
			wbuf[1] = 0;
		txn.mc = SLIM_USR_MC_GENERIC_ACK;
		txn.la = sat->satcl.laddr;
		txn.rl = 6;
		txn.len = 2;
		txn.wbuf = wbuf;
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		msm_xfer_msg(&dev->ctrl, &txn);
		if (satv >= 0)
			msm_slim_put_ctrl(dev);
	}
}
998
999static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev)
1000{
1001 struct msm_slim_sat *sat;
1002 char *name;
1003
1004 if (dev->nsats >= MSM_MAX_NSATS)
1005 return NULL;
1006
1007 sat = kzalloc(sizeof(struct msm_slim_sat), GFP_KERNEL);
1008 if (!sat)
1009 return NULL;
1010 name = kzalloc(SLIMBUS_NAME_SIZE, GFP_KERNEL);
1011 if (!name) {
1012 kfree(sat);
1013 return NULL;
1014 }
1015 dev->satd[dev->nsats] = sat;
1016 sat->dev = dev;
1017 snprintf(name, SLIMBUS_NAME_SIZE, "msm_sat%d", dev->nsats);
1018 sat->satcl.name = name;
1019 spin_lock_init(&sat->lock);
1020 INIT_WORK(&sat->wd, slim_sat_rxprocess);
1021 sat->wq = create_singlethread_workqueue(sat->satcl.name);
1022 if (!sat->wq) {
1023 kfree(name);
1024 kfree(sat);
1025 return NULL;
1026 }
1027 /*
1028 * Both sats will be allocated from RX thread and RX thread will
1029 * process messages sequentially. No synchronization necessary
1030 */
1031 dev->nsats++;
1032 return sat;
1033}
1034
/*
 * msm_slim_rx_msgq_thread() - kthread that drains the RX message queue.
 *
 * Woken via dev->rx_msgq_notify (one completion per message/word). When
 * BAM message queues are disabled it simply services the software RX
 * queue via msm_slim_rxwq(). Otherwise it reassembles a message word by
 * word (index counts 4-byte words), then routes the complete message to
 * either the owning satellite's workqueue or the generic RX path.
 *
 * Return: 0 when the thread is stopped.
 */
static int msm_slim_rx_msgq_thread(void *data)
{
	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
	struct completion *notify = &dev->rx_msgq_notify;
	struct msm_slim_sat *sat = NULL;
	u32 mc = 0;
	u32 mt = 0;
	u32 buffer[10];
	int index = 0;
	u8 msg_len = 0;
	int ret;

	dev_dbg(dev->dev, "rx thread started");

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = wait_for_completion_interruptible(notify);

		if (ret)
			dev_err(dev->dev, "rx thread wait error:%d", ret);

		/* 1 irq notification per message */
		if (dev->use_rx_msgqs != MSM_MSGQ_ENABLED) {
			msm_slim_rxwq(dev);
			continue;
		}

		ret = msm_slim_rx_msgq_get(dev, buffer, index);
		if (ret) {
			dev_err(dev->dev, "rx_msgq_get() failed 0x%x\n", ret);
			continue;
		}

		pr_debug("message[%d] = 0x%x\n", index, *buffer);

		/* Decide if we use generic RX or satellite RX */
		if (index++ == 0) {
			/* First word: parse header (length, type, code) */
			msg_len = *buffer & 0x1F;
			pr_debug("Start of new message, len = %d\n", msg_len);
			mt = (buffer[0] >> 5) & 0x7;
			mc = (buffer[0] >> 8) & 0xff;
			dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
			if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
				u8 laddr;

				laddr = (u8)((buffer[0] >> 16) & 0xff);
				sat = addr_to_sat(dev, laddr);
			}
		}
		/* Message complete once index*4 bytes cover msg_len */
		if ((index * 4) >= msg_len) {
			index = 0;
			if (sat) {
				msm_sat_enqueue(sat, buffer, msg_len);
				queue_work(sat->wq, &sat->wd);
				sat = NULL;
			} else {
				msm_slim_rx_enqueue(dev, buffer, msg_len);
				msm_slim_rxwq(dev);
			}
		}
	}

	return 0;
}
1100
/*
 * msm_slim_prg_slew() - program the SLIMbus pad slew-rate register.
 *
 * Looks up the optional "slimbus_slew_reg" resource, maps it just long
 * enough to write the enable bit, then unmaps it. All failures are
 * non-fatal (the slew register is optional on some targets); dev->slew_mem
 * is reset to NULL so cleanup code knows the region was not kept.
 */
static void msm_slim_prg_slew(struct platform_device *pdev,
				struct msm_slim_ctrl *dev)
{
	struct resource *slew_io;
	void __iomem *slew_reg;
	/* SLEW RATE register for this slimbus */
	dev->slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
			"slimbus_slew_reg");
	if (!dev->slew_mem) {
		dev_dbg(&pdev->dev, "no slimbus slew resource\n");
		return;
	}
	slew_io = request_mem_region(dev->slew_mem->start,
		resource_size(dev->slew_mem), pdev->name);
	if (!slew_io) {
		dev_dbg(&pdev->dev, "slimbus-slew mem claimed\n");
		dev->slew_mem = NULL;
		return;
	}

	slew_reg = ioremap(dev->slew_mem->start, resource_size(dev->slew_mem));
	if (!slew_reg) {
		dev_dbg(dev->dev, "slew register mapping failed");
		release_mem_region(dev->slew_mem->start,
					resource_size(dev->slew_mem));
		dev->slew_mem = NULL;
		return;
	}
	writel_relaxed(1, slew_reg);
	/* Make sure slimbus-slew rate enabling goes through */
	wmb();
	iounmap(slew_reg);
}
1134
1135static int msm_slim_probe(struct platform_device *pdev)
1136{
1137 struct msm_slim_ctrl *dev;
1138 int ret;
1139 enum apr_subsys_state q6_state;
1140 struct resource *bam_mem, *bam_io;
1141 struct resource *slim_mem, *slim_io;
1142 struct resource *irq, *bam_irq;
1143 bool rxreg_access = false;
1144
1145 q6_state = apr_get_q6_state();
1146 if (q6_state == APR_SUBSYS_DOWN) {
1147 dev_dbg(&pdev->dev, "defering %s, adsp_state %d\n", __func__,
1148 q6_state);
1149 return -EPROBE_DEFER;
1150 }
1151 dev_dbg(&pdev->dev, "adsp is ready\n");
1152
1153 slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1154 "slimbus_physical");
1155 if (!slim_mem) {
1156 dev_err(&pdev->dev, "no slimbus physical memory resource\n");
1157 return -ENODEV;
1158 }
1159 slim_io = request_mem_region(slim_mem->start, resource_size(slim_mem),
1160 pdev->name);
1161 if (!slim_io) {
1162 dev_err(&pdev->dev, "slimbus memory already claimed\n");
1163 return -EBUSY;
1164 }
1165
1166 bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1167 "slimbus_bam_physical");
1168 if (!bam_mem) {
1169 dev_err(&pdev->dev, "no slimbus BAM memory resource\n");
1170 ret = -ENODEV;
1171 goto err_get_res_bam_failed;
1172 }
1173 bam_io = request_mem_region(bam_mem->start, resource_size(bam_mem),
1174 pdev->name);
1175 if (!bam_io) {
1176 release_mem_region(slim_mem->start, resource_size(slim_mem));
1177 dev_err(&pdev->dev, "slimbus BAM memory already claimed\n");
1178 ret = -EBUSY;
1179 goto err_get_res_bam_failed;
1180 }
1181 irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1182 "slimbus_irq");
1183 if (!irq) {
1184 dev_err(&pdev->dev, "no slimbus IRQ resource\n");
1185 ret = -ENODEV;
1186 goto err_get_res_failed;
1187 }
1188 bam_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1189 "slimbus_bam_irq");
1190 if (!bam_irq) {
1191 dev_err(&pdev->dev, "no slimbus BAM IRQ resource\n");
1192 ret = -ENODEV;
1193 goto err_get_res_failed;
1194 }
1195
1196 dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL);
1197 if (!dev) {
1198 ret = -ENOMEM;
1199 goto err_get_res_failed;
1200 }
1201 dev->wr_comp = kzalloc(sizeof(struct completion *) * MSM_TX_BUFS,
1202 GFP_KERNEL);
1203 if (!dev->wr_comp)
1204 return -ENOMEM;
1205 dev->dev = &pdev->dev;
1206 platform_set_drvdata(pdev, dev);
1207 slim_set_ctrldata(&dev->ctrl, dev);
1208 dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
1209 if (!dev->base) {
1210 dev_err(&pdev->dev, "IOremap failed\n");
1211 ret = -ENOMEM;
1212 goto err_ioremap_failed;
1213 }
1214 dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem));
1215 if (!dev->bam.base) {
1216 dev_err(&pdev->dev, "BAM IOremap failed\n");
1217 ret = -ENOMEM;
1218 goto err_ioremap_bam_failed;
1219 }
1220 if (pdev->dev.of_node) {
1221
1222 ret = of_property_read_u32(pdev->dev.of_node, "cell-index",
1223 &dev->ctrl.nr);
1224 if (ret) {
1225 dev_err(&pdev->dev, "Cell index not specified:%d", ret);
1226 goto err_of_init_failed;
1227 }
1228 rxreg_access = of_property_read_bool(pdev->dev.of_node,
1229 "qcom,rxreg-access");
1230 /* Optional properties */
1231 ret = of_property_read_u32(pdev->dev.of_node,
1232 "qcom,min-clk-gear", &dev->ctrl.min_cg);
1233 ret = of_property_read_u32(pdev->dev.of_node,
1234 "qcom,max-clk-gear", &dev->ctrl.max_cg);
1235 pr_debug("min_cg:%d, max_cg:%d, rxreg: %d", dev->ctrl.min_cg,
1236 dev->ctrl.max_cg, rxreg_access);
1237 } else {
1238 dev->ctrl.nr = pdev->id;
1239 }
1240 dev->ctrl.nchans = MSM_SLIM_NCHANS;
1241 dev->ctrl.nports = MSM_SLIM_NPORTS;
1242 dev->ctrl.set_laddr = msm_set_laddr;
1243 dev->ctrl.xfer_msg = msm_xfer_msg;
1244 dev->ctrl.wakeup = msm_clk_pause_wakeup;
1245 dev->ctrl.alloc_port = msm_alloc_port;
1246 dev->ctrl.dealloc_port = msm_dealloc_port;
1247 dev->ctrl.port_xfer = msm_slim_port_xfer;
1248 dev->ctrl.port_xfer_status = msm_slim_port_xfer_status;
1249 /* Reserve some messaging BW for satellite-apps driver communication */
1250 dev->ctrl.sched.pending_msgsl = 30;
1251
1252 init_completion(&dev->reconf);
1253 mutex_init(&dev->tx_lock);
1254 spin_lock_init(&dev->rx_lock);
1255 dev->ee = 1;
1256 if (rxreg_access)
1257 dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
1258 else
1259 dev->use_rx_msgqs = MSM_MSGQ_RESET;
1260
1261 dev->irq = irq->start;
1262 dev->bam.irq = bam_irq->start;
1263
1264 dev->hclk = clk_get(dev->dev, "iface_clk");
1265 if (IS_ERR(dev->hclk))
1266 dev->hclk = NULL;
1267 else
1268 clk_prepare_enable(dev->hclk);
1269
1270 ret = msm_slim_sps_init(dev, bam_mem, MGR_STATUS, false);
1271 if (ret != 0) {
1272 dev_err(dev->dev, "error SPS init\n");
1273 goto err_sps_init_failed;
1274 }
1275
1276 /* Fire up the Rx message queue thread */
1277 dev->rx_msgq_thread = kthread_run(msm_slim_rx_msgq_thread, dev,
1278 MSM_SLIM_NAME "_rx_msgq_thread");
1279 if (IS_ERR(dev->rx_msgq_thread)) {
1280 ret = PTR_ERR(dev->rx_msgq_thread);
1281 dev_err(dev->dev, "Failed to start Rx message queue thread\n");
1282 goto err_thread_create_failed;
1283 }
1284
1285 dev->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
1286 dev->framer.superfreq =
1287 dev->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
1288 dev->ctrl.a_framer = &dev->framer;
1289 dev->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
1290 dev->ctrl.dev.parent = &pdev->dev;
1291 dev->ctrl.dev.of_node = pdev->dev.of_node;
1292
1293 ret = request_threaded_irq(dev->irq, NULL, msm_slim_interrupt,
1294 IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
1295 "msm_slim_irq", dev);
1296 if (ret) {
1297 dev_err(&pdev->dev, "request IRQ failed\n");
1298 goto err_request_irq_failed;
1299 }
1300
1301 msm_slim_prg_slew(pdev, dev);
1302
1303 /* Register with framework before enabling frame, clock */
1304 ret = slim_add_numbered_controller(&dev->ctrl);
1305 if (ret) {
1306 dev_err(dev->dev, "error adding controller\n");
1307 goto err_ctrl_failed;
1308 }
1309
1310
1311 dev->rclk = clk_get(dev->dev, "core_clk");
1312 if (!dev->rclk) {
1313 dev_err(dev->dev, "slimbus clock not found");
1314 goto err_clk_get_failed;
1315 }
1316 clk_set_rate(dev->rclk, SLIM_ROOT_FREQ);
1317 clk_prepare_enable(dev->rclk);
1318
1319 dev->ver = readl_relaxed(dev->base);
1320 /* Version info in 16 MSbits */
1321 dev->ver >>= 16;
1322 /* Component register initialization */
1323 writel_relaxed(1, dev->base + CFG_PORT(COMP_CFG, dev->ver));
1324 writel_relaxed((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
1325 dev->base + CFG_PORT(COMP_TRUST_CFG, dev->ver));
1326
1327 /*
1328 * Manager register initialization
1329 * If RX msg Q is used, disable RX_MSG_RCVD interrupt
1330 */
1331 if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
1332 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
1333 MGR_INT_MSG_BUF_CONTE | /* MGR_INT_RX_MSG_RCVD | */
1334 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
1335 else
1336 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
1337 MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
1338 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
1339 writel_relaxed(1, dev->base + MGR_CFG);
1340 /*
1341 * Framer registers are beyond 1K memory region after Manager and/or
1342 * component registers. Make sure those writes are ordered
1343 * before framer register writes
1344 */
1345 wmb();
1346
1347 /* Framer register initialization */
1348 writel_relaxed((1 << INTR_WAKE) | (0xA << REF_CLK_GEAR) |
1349 (0xA << CLK_GEAR) | (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
1350 dev->base + FRM_CFG);
1351 /*
1352 * Make sure that framer wake-up and enabling writes go through
1353 * before any other component is enabled. Framer is responsible for
1354 * clocking the bus and enabling framer first will ensure that other
1355 * devices can report presence when they are enabled
1356 */
1357 mb();
1358
1359 /* Enable RX msg Q */
1360 if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
1361 writel_relaxed(MGR_CFG_ENABLE | MGR_CFG_RX_MSGQ_EN,
1362 dev->base + MGR_CFG);
1363 else
1364 writel_relaxed(MGR_CFG_ENABLE, dev->base + MGR_CFG);
1365 /*
1366 * Make sure that manager-enable is written through before interface
1367 * device is enabled
1368 */
1369 mb();
1370 writel_relaxed(1, dev->base + INTF_CFG);
1371 /*
1372 * Make sure that interface-enable is written through before enabling
1373 * ported generic device inside MSM manager
1374 */
1375 mb();
1376 writel_relaxed(1, dev->base + CFG_PORT(PGD_CFG, dev->ver));
1377 writel_relaxed(0x3F<<17, dev->base + CFG_PORT(PGD_OWN_EEn, dev->ver) +
1378 (4 * dev->ee));
1379 /*
1380 * Make sure that ported generic device is enabled and port-EE settings
1381 * are written through before finally enabling the component
1382 */
1383 mb();
1384
1385 writel_relaxed(1, dev->base + CFG_PORT(COMP_CFG, dev->ver));
1386 /*
1387 * Make sure that all writes have gone through before exiting this
1388 * function
1389 */
1390 mb();
1391
1392 /* Add devices registered with board-info now that controller is up */
1393 slim_ctrl_add_boarddevs(&dev->ctrl);
1394
1395 if (pdev->dev.of_node)
1396 of_register_slim_devices(&dev->ctrl);
1397
1398 pm_runtime_use_autosuspend(&pdev->dev);
1399 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_SLIM_AUTOSUSPEND);
1400 pm_runtime_set_active(&pdev->dev);
1401
1402 dev_dbg(dev->dev, "MSM SB controller is up!\n");
1403 return 0;
1404
1405err_ctrl_failed:
1406 writel_relaxed(0, dev->base + CFG_PORT(COMP_CFG, dev->ver));
1407err_clk_get_failed:
1408 kfree(dev->satd);
1409err_request_irq_failed:
1410 kthread_stop(dev->rx_msgq_thread);
1411err_thread_create_failed:
1412 msm_slim_sps_exit(dev, true);
1413 msm_slim_deinit_ep(dev, &dev->rx_msgq,
1414 &dev->use_rx_msgqs);
1415 msm_slim_deinit_ep(dev, &dev->tx_msgq,
1416 &dev->use_tx_msgqs);
1417err_sps_init_failed:
1418 if (dev->hclk) {
1419 clk_disable_unprepare(dev->hclk);
1420 clk_put(dev->hclk);
1421 }
1422err_of_init_failed:
1423 iounmap(dev->bam.base);
1424err_ioremap_bam_failed:
1425 iounmap(dev->base);
1426err_ioremap_failed:
1427 kfree(dev->wr_comp);
1428 kfree(dev);
1429err_get_res_failed:
1430 release_mem_region(bam_mem->start, resource_size(bam_mem));
1431err_get_res_bam_failed:
1432 release_mem_region(slim_mem->start, resource_size(slim_mem));
1433 return ret;
1434}
1435
/*
 * msm_slim_remove() - tear down the controller, roughly reversing probe.
 *
 * Removes satellite devices first (deallocating their channels and
 * workqueues), then disables runtime PM, frees the IRQ, unregisters the
 * controller, releases clocks, tears down SPS/BAM state, stops the RX
 * message-queue thread, and finally unmaps registers and releases the
 * claimed memory regions.  Always returns 0.
 */
static int msm_slim_remove(struct platform_device *pdev)
{
	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
	struct resource *bam_mem;
	struct resource *slim_mem;
	/* Cache before 'dev' is freed below; needed for region release */
	struct resource *slew_mem = dev->slew_mem;
	int i;

	/* Remove every satellite device and its allocated channels */
	for (i = 0; i < dev->nsats; i++) {
		struct msm_slim_sat *sat = dev->satd[i];
		int j;

		for (j = 0; j < sat->nsatch; j++)
			slim_dealloc_ch(&sat->satcl, sat->satch[j].chanh);
		slim_remove_device(&sat->satcl);
		kfree(sat->satch);
		destroy_workqueue(sat->wq);
		kfree(sat->satcl.name);
		kfree(sat);
	}
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	/* Quiesce interrupts before unregistering the controller */
	free_irq(dev->irq, dev);
	slim_del_controller(&dev->ctrl);
	clk_put(dev->rclk);
	if (dev->hclk)
		clk_put(dev->hclk);
	msm_slim_sps_exit(dev, true);
	msm_slim_deinit_ep(dev, &dev->rx_msgq,
				&dev->use_rx_msgqs);
	msm_slim_deinit_ep(dev, &dev->tx_msgq,
				&dev->use_tx_msgqs);

	kthread_stop(dev->rx_msgq_thread);
	iounmap(dev->bam.base);
	iounmap(dev->base);
	kfree(dev->wr_comp);
	kfree(dev);
	/* Regions were claimed in probe; look them up again to release */
	bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_bam_physical");
	if (bam_mem)
		release_mem_region(bam_mem->start, resource_size(bam_mem));
	if (slew_mem)
		release_mem_region(slew_mem->start, resource_size(slew_mem));
	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_physical");
	if (slim_mem)
		release_mem_region(slim_mem->start, resource_size(slim_mem));
	return 0;
}
1486
1487#ifdef CONFIG_PM
1488static int msm_slim_runtime_idle(struct device *device)
1489{
1490 struct platform_device *pdev = to_platform_device(device);
1491 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
1492
1493 if (dev->state == MSM_CTRL_AWAKE)
1494 dev->state = MSM_CTRL_IDLE;
1495 dev_dbg(device, "pm_runtime: idle...\n");
1496 pm_request_autosuspend(device);
1497 return -EAGAIN;
1498}
1499#endif
1500
1501/*
1502 * If PM_RUNTIME is not defined, these 2 functions become helper
1503 * functions to be called from system suspend/resume. So they are not
1504 * inside ifdef CONFIG_PM_RUNTIME
1505 */
1506#ifdef CONFIG_PM
1507static int msm_slim_runtime_suspend(struct device *device)
1508{
1509 struct platform_device *pdev = to_platform_device(device);
1510 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
1511 int ret;
1512
1513 dev_dbg(device, "pm_runtime: suspending...\n");
1514 ret = slim_ctrl_clk_pause(&dev->ctrl, false, SLIM_CLK_UNSPECIFIED);
1515 if (ret) {
1516 dev_err(device, "clk pause not entered:%d", ret);
1517 dev->state = MSM_CTRL_AWAKE;
1518 } else {
1519 dev->state = MSM_CTRL_ASLEEP;
1520 }
1521 return ret;
1522}
1523
1524static int msm_slim_runtime_resume(struct device *device)
1525{
1526 struct platform_device *pdev = to_platform_device(device);
1527 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
1528 int ret = 0;
1529
1530 dev_dbg(device, "pm_runtime: resuming...\n");
1531 if (dev->state == MSM_CTRL_ASLEEP)
1532 ret = slim_ctrl_clk_pause(&dev->ctrl, true, 0);
1533 if (ret) {
1534 dev_err(device, "clk pause not exited:%d", ret);
1535 dev->state = MSM_CTRL_ASLEEP;
1536 } else {
1537 dev->state = MSM_CTRL_AWAKE;
1538 }
1539 return ret;
1540}
1541#endif
1542
1543#ifdef CONFIG_PM_SLEEP
1544static int msm_slim_suspend(struct device *dev)
1545{
1546 int ret = -EBUSY;
1547 struct platform_device *pdev = to_platform_device(dev);
1548 struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
1549
1550 if (!pm_runtime_enabled(dev) ||
1551 (!pm_runtime_suspended(dev) &&
1552 cdev->state == MSM_CTRL_IDLE)) {
1553 dev_dbg(dev, "system suspend");
1554 ret = msm_slim_runtime_suspend(dev);
1555 if (!ret) {
1556 if (cdev->hclk)
1557 clk_disable_unprepare(cdev->hclk);
1558 }
1559 }
1560 if (ret == -EBUSY) {
1561 /*
1562 * If the clock pause failed due to active channels, there is
1563 * a possibility that some audio stream is active during suspend
1564 * We dont want to return suspend failure in that case so that
1565 * display and relevant components can still go to suspend.
1566 * If there is some other error, then it should be passed-on
1567 * to system level suspend
1568 */
1569 ret = 0;
1570 }
1571 return ret;
1572}
1573
1574static int msm_slim_resume(struct device *dev)
1575{
1576 /* If runtime_pm is enabled, this resume shouldn't do anything */
1577 if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
1578 struct platform_device *pdev = to_platform_device(dev);
1579 struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
1580 int ret;
1581
1582 dev_dbg(dev, "system resume");
1583 if (cdev->hclk)
1584 clk_prepare_enable(cdev->hclk);
1585 ret = msm_slim_runtime_resume(dev);
1586 if (!ret) {
1587 pm_runtime_mark_last_busy(dev);
1588 pm_request_autosuspend(dev);
1589 }
1590 return ret;
1591
1592 }
1593 return 0;
1594}
1595#endif /* CONFIG_PM_SLEEP */
1596
/* PM callbacks: system sleep uses the suspend/resume wrappers above,
 * while normal operation is driven by the runtime-PM handlers. */
static const struct dev_pm_ops msm_slim_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(
		msm_slim_suspend,
		msm_slim_resume
	)
	SET_RUNTIME_PM_OPS(
		msm_slim_runtime_suspend,
		msm_slim_runtime_resume,
		msm_slim_runtime_idle
	)
};
1608
/* Device-tree match table; terminated by the empty sentinel entry */
static const struct of_device_id msm_slim_dt_match[] = {
	{
		.compatible = "qcom,slim-msm",
	},
	{}
};
1615
/* Platform driver glue binding probe/remove, PM ops and DT matching */
static struct platform_driver msm_slim_driver = {
	.probe = msm_slim_probe,
	.remove = msm_slim_remove,
	.driver	= {
		.name = MSM_SLIM_NAME,
		.owner = THIS_MODULE,
		.pm = &msm_slim_dev_pm_ops,
		.of_match_table = msm_slim_dt_match,
	},
};
1626
/* Register the driver early (subsys_initcall) so dependent slimbus
 * client drivers can find the controller at their own init time. */
static int msm_slim_init(void)
{
	return platform_driver_register(&msm_slim_driver);
}
subsys_initcall(msm_slim_init);
1632
/* Module unload: unregister the platform driver */
static void msm_slim_exit(void)
{
	platform_driver_unregister(&msm_slim_driver);
}
module_exit(msm_slim_exit);
1638
/* Module metadata */
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM Slimbus controller");
MODULE_ALIAS("platform:msm-slim");