blob: 7b293629a4fa94964e8956170126fcc9d7e8a0ea [file] [log] [blame]
/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
12#include <linux/irq.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/slab.h>
16#include <linux/io.h>
17#include <linux/interrupt.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/slimbus/slimbus.h>
21#include <linux/delay.h>
22#include <linux/kthread.h>
23#include <linux/clk.h>
Sagar Dharia45ee38a2011-08-03 17:01:31 -060024#include <linux/pm_runtime.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070025#include <mach/sps.h>
26
27/* Per spec.max 40 bytes per received message */
28#define SLIM_RX_MSGQ_BUF_LEN 40
29
30#define SLIM_USR_MC_GENERIC_ACK 0x25
31#define SLIM_USR_MC_MASTER_CAPABILITY 0x0
32#define SLIM_USR_MC_REPORT_SATELLITE 0x1
33#define SLIM_USR_MC_ADDR_QUERY 0xD
34#define SLIM_USR_MC_ADDR_REPLY 0xE
35#define SLIM_USR_MC_DEFINE_CHAN 0x20
36#define SLIM_USR_MC_DEF_ACT_CHAN 0x21
37#define SLIM_USR_MC_CHAN_CTRL 0x23
38#define SLIM_USR_MC_RECONFIG_NOW 0x24
39#define SLIM_USR_MC_REQ_BW 0x28
40#define SLIM_USR_MC_CONNECT_SRC 0x2C
41#define SLIM_USR_MC_CONNECT_SINK 0x2D
42#define SLIM_USR_MC_DISCONNECT_PORT 0x2E
43
44/* MSM Slimbus peripheral settings */
45#define MSM_SLIM_PERF_SUMM_THRESHOLD 0x8000
46#define MSM_SLIM_NCHANS 32
47#define MSM_SLIM_NPORTS 24
Sagar Dharia45ee38a2011-08-03 17:01:31 -060048#define MSM_SLIM_AUTOSUSPEND MSEC_PER_SEC
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049
50/*
51 * Need enough descriptors to receive present messages from slaves
52 * if received simultaneously. Present message needs 3 descriptors
53 * and this size will ensure around 10 simultaneous reports.
54 */
55#define MSM_SLIM_DESC_NUM 32
56
57#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
58 ((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))
59
60#define MSM_SLIM_NAME "msm_slim_ctrl"
61#define SLIM_ROOT_FREQ 24576000
62
63#define MSM_CONCUR_MSG 8
64#define SAT_CONCUR_MSG 8
65#define DEF_WATERMARK (8 << 1)
66#define DEF_ALIGN 0
67#define DEF_PACK (1 << 6)
68#define ENABLE_PORT 1
69
70#define DEF_BLKSZ 0
71#define DEF_TRANSZ 0
72
73#define SAT_MAGIC_LSB 0xD9
74#define SAT_MAGIC_MSB 0xC5
75#define SAT_MSG_VER 0x1
76#define SAT_MSG_PROT 0x1
77#define MSM_SAT_SUCCSS 0x20
Sagar Dharia790cfd02011-09-25 17:56:24 -060078#define MSM_MAX_NSATS 2
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070079
80#define QC_MFGID_LSB 0x2
81#define QC_MFGID_MSB 0x17
82#define QC_CHIPID_SL 0x10
83#define QC_DEVID_SAT1 0x3
84#define QC_DEVID_SAT2 0x4
85#define QC_DEVID_PGD 0x5
Sagar Dharia45ee38a2011-08-03 17:01:31 -060086#define QC_MSM_DEVS 5
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070087
88/* Component registers */
89enum comp_reg {
90 COMP_CFG = 0,
91 COMP_TRUST_CFG = 0x14,
92};
93
94/* Manager registers */
95enum mgr_reg {
96 MGR_CFG = 0x200,
97 MGR_STATUS = 0x204,
98 MGR_RX_MSGQ_CFG = 0x208,
99 MGR_INT_EN = 0x210,
100 MGR_INT_STAT = 0x214,
101 MGR_INT_CLR = 0x218,
102 MGR_TX_MSG = 0x230,
103 MGR_RX_MSG = 0x270,
104 MGR_VE_STAT = 0x300,
105};
106
107enum msg_cfg {
108 MGR_CFG_ENABLE = 1,
109 MGR_CFG_RX_MSGQ_EN = 1 << 1,
110 MGR_CFG_TX_MSGQ_EN_HIGH = 1 << 2,
111 MGR_CFG_TX_MSGQ_EN_LOW = 1 << 3,
112};
113/* Message queue types */
114enum msm_slim_msgq_type {
115 MSGQ_RX = 0,
116 MSGQ_TX_LOW = 1,
117 MSGQ_TX_HIGH = 2,
118};
119/* Framer registers */
120enum frm_reg {
121 FRM_CFG = 0x400,
122 FRM_STAT = 0x404,
123 FRM_INT_EN = 0x410,
124 FRM_INT_STAT = 0x414,
125 FRM_INT_CLR = 0x418,
126 FRM_WAKEUP = 0x41C,
127 FRM_CLKCTL_DONE = 0x420,
128 FRM_IE_STAT = 0x430,
129 FRM_VE_STAT = 0x440,
130};
131
132/* Interface registers */
133enum intf_reg {
134 INTF_CFG = 0x600,
135 INTF_STAT = 0x604,
136 INTF_INT_EN = 0x610,
137 INTF_INT_STAT = 0x614,
138 INTF_INT_CLR = 0x618,
139 INTF_IE_STAT = 0x630,
140 INTF_VE_STAT = 0x640,
141};
142
143/* Manager PGD registers */
144enum pgd_reg {
145 PGD_CFG = 0x1000,
146 PGD_STAT = 0x1004,
147 PGD_INT_EN = 0x1010,
148 PGD_INT_STAT = 0x1014,
149 PGD_INT_CLR = 0x1018,
150 PGD_OWN_EEn = 0x1020,
151 PGD_PORT_INT_EN_EEn = 0x1030,
152 PGD_PORT_INT_ST_EEn = 0x1034,
153 PGD_PORT_INT_CL_EEn = 0x1038,
154 PGD_PORT_CFGn = 0x1080,
155 PGD_PORT_STATn = 0x1084,
156 PGD_PORT_PARAMn = 0x1088,
157 PGD_PORT_BLKn = 0x108C,
158 PGD_PORT_TRANn = 0x1090,
159 PGD_PORT_MCHANn = 0x1094,
160 PGD_PORT_PSHPLLn = 0x1098,
161 PGD_PORT_PC_CFGn = 0x1600,
162 PGD_PORT_PC_VALn = 0x1604,
163 PGD_PORT_PC_VFR_TSn = 0x1608,
164 PGD_PORT_PC_VFR_STn = 0x160C,
165 PGD_PORT_PC_VFR_CLn = 0x1610,
166 PGD_IE_STAT = 0x1700,
167 PGD_VE_STAT = 0x1710,
168};
169
170enum rsc_grp {
171 EE_MGR_RSC_GRP = 1 << 10,
172 EE_NGD_2 = 2 << 6,
173 EE_NGD_1 = 0,
174};
175
176enum mgr_intr {
177 MGR_INT_RECFG_DONE = 1 << 24,
178 MGR_INT_TX_NACKED_2 = 1 << 25,
179 MGR_INT_MSG_BUF_CONTE = 1 << 26,
180 MGR_INT_RX_MSG_RCVD = 1 << 30,
181 MGR_INT_TX_MSG_SENT = 1 << 31,
182};
183
184enum frm_cfg {
185 FRM_ACTIVE = 1,
186 CLK_GEAR = 7,
187 ROOT_FREQ = 11,
188 REF_CLK_GEAR = 15,
189};
190
Sagar Dharia45ee38a2011-08-03 17:01:31 -0600191enum msm_ctrl_state {
192 MSM_CTRL_AWAKE,
193 MSM_CTRL_SLEEPING,
194 MSM_CTRL_ASLEEP,
195};
196
/* State for the BAM (SPS DMA engine) that backs the slimbus data pipes */
struct msm_slim_sps_bam {
	u32 hdl;		/* BAM device handle obtained from the SPS driver */
	void __iomem *base;	/* mapped BAM register space */
	int irq;		/* BAM interrupt line */
};
202
/* One SPS pipe endpoint used either for a data port or the RX message queue */
struct msm_slim_endp {
	struct sps_pipe *sps;			/* SPS pipe handle (NULL when freed) */
	struct sps_connect config;		/* cached SPS connection parameters */
	struct sps_register_event event;	/* registered SPS event descriptor */
	struct sps_mem_buffer buf;		/* DMA-coherent buffer for this pipe */
	struct completion *xcomp;		/* completion signalled on transfer done */
	bool connected;				/* true after a successful sps_connect() */
};
211
/* Per-controller state for the MSM slimbus manager */
struct msm_slim_ctrl {
	struct slim_controller ctrl;	/* embedded framework controller */
	struct slim_framer framer;	/* framer this controller provides */
	struct device *dev;		/* underlying platform device */
	void __iomem *base;		/* mapped controller register space */
	struct resource *slew_mem;	/* slew-rate control register resource */
	u32 curr_bw;			/* currently reserved bandwidth */
	u8 msg_cnt;			/* outstanding message counter */
	u32 tx_buf[10];			/* single TX staging buffer (40 bytes max) */
	/* software RX ring of raw messages captured in the ISR */
	u8 rx_msgs[MSM_CONCUR_MSG][SLIM_RX_MSGQ_BUF_LEN];
	spinlock_t rx_lock;		/* protects head/tail of rx_msgs ring */
	int head;			/* rx_msgs consumer index */
	int tail;			/* rx_msgs producer index */
	int irq;			/* manager interrupt line */
	int err;			/* result of the last TX (set in ISR) */
	int ee;				/* execution environment index for PGD regs */
	struct completion *wr_comp;	/* completion for the in-flight TX, if any */
	struct msm_slim_sat *satd[MSM_MAX_NSATS];	/* known satellites */
	struct msm_slim_endp pipes[7];	/* SPS endpoints for data ports */
	struct msm_slim_sps_bam bam;	/* BAM backing the pipes */
	struct msm_slim_endp rx_msgq;	/* HW RX message queue endpoint */
	struct completion rx_msgq_notify;	/* wakes the RX thread */
	struct task_struct *rx_msgq_thread;	/* consumer of rx_msgq */
	struct clk *rclk;		/* root clock; gated during clock pause */
	struct mutex tx_lock;		/* serializes TX/reconfiguration */
	u8 pgdla;			/* logical address of the ported generic device */
	bool use_rx_msgqs;		/* true when HW RX message queues are in use */
	int pipe_b;			/* HW port number of the first usable pipe */
	struct completion reconf;	/* signalled on RECONFIG_DONE interrupt */
	bool reconf_busy;		/* a reconfiguration is in flight */
	bool chan_active;		/* data channels hold a runtime-PM vote */
	enum msm_ctrl_state state;	/* runtime/system PM state */
	int nsats;			/* number of enumerated satellites */
};
246
/* Per-satellite state: messages from a satellite are queued in the ISR
 * and processed from a dedicated workqueue. */
struct msm_slim_sat {
	struct slim_device satcl;	/* framework client for this satellite */
	struct msm_slim_ctrl *dev;	/* owning controller */
	struct workqueue_struct *wq;	/* processes queued satellite messages */
	struct work_struct wd;		/* work item queued from the ISR */
	u8 sat_msgs[SAT_CONCUR_MSG][40];	/* ring of raw satellite messages */
	u16 *satch;			/* channel handles indexed by satellite chan # */
	u8 nsatch;			/* number of entries in satch */
	bool sent_capability;		/* master capability already advertised */
	bool pending_reconf;		/* reconfiguration requested but not done */
	bool pending_capability;	/* capability advertisement still owed */
	int shead;			/* sat_msgs consumer index */
	int stail;			/* sat_msgs producer index */
	spinlock_t lock;		/* protects shead/stail */
};
262
Sagar Dharia790cfd02011-09-25 17:56:24 -0600263static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev);
264
/*
 * Push a raw received message onto the controller's RX ring.
 * Called from interrupt context (msm_slim_interrupt), hence the plain
 * spin_lock (no irqsave needed while already in the ISR).
 * Returns -EXFULL when the ring is full, 0 on success.
 */
static int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
{
	spin_lock(&dev->rx_lock);
	/* one slot is sacrificed to distinguish full from empty */
	if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
		spin_unlock(&dev->rx_lock);
		dev_err(dev->dev, "RX QUEUE full!");
		return -EXFULL;
	}
	memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
	dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
	spin_unlock(&dev->rx_lock);
	return 0;
}
278
/*
 * Pop the oldest message from the RX ring into buf (always copies the
 * full 40-byte slot; actual length is encoded in the message header).
 * Called from thread context, so interrupts are masked around the lock.
 * Returns -ENODATA when the ring is empty, 0 on success.
 */
static int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->rx_lock, flags);
	if (dev->tail == dev->head) {
		spin_unlock_irqrestore(&dev->rx_lock, flags);
		return -ENODATA;
	}
	memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40);
	dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
	spin_unlock_irqrestore(&dev->rx_lock, flags);
	return 0;
}
292
/*
 * Push a satellite-directed message onto the satellite's ring.
 * Called from interrupt context (hence plain spin_lock).
 * Returns -EXFULL when the ring is full, 0 on success.
 */
static int msm_sat_enqueue(struct msm_slim_sat *sat, u32 *buf, u8 len)
{
	struct msm_slim_ctrl *dev = sat->dev;
	spin_lock(&sat->lock);
	if ((sat->stail + 1) % SAT_CONCUR_MSG == sat->shead) {
		spin_unlock(&sat->lock);
		dev_err(dev->dev, "SAT QUEUE full!");
		return -EXFULL;
	}
	memcpy(sat->sat_msgs[sat->stail], (u8 *)buf, len);
	sat->stail = (sat->stail + 1) % SAT_CONCUR_MSG;
	spin_unlock(&sat->lock);
	return 0;
}
307
/*
 * Pop the oldest satellite message into buf (full 40-byte slot copy).
 * Called from workqueue (thread) context, so irqsave locking is used.
 * Returns -ENODATA when the ring is empty, 0 on success.
 */
static int msm_sat_dequeue(struct msm_slim_sat *sat, u8 *buf)
{
	unsigned long flags;
	spin_lock_irqsave(&sat->lock, flags);
	if (sat->stail == sat->shead) {
		spin_unlock_irqrestore(&sat->lock, flags);
		return -ENODATA;
	}
	memcpy(buf, sat->sat_msgs[sat->shead], 40);
	sat->shead = (sat->shead + 1) % SAT_CONCUR_MSG;
	spin_unlock_irqrestore(&sat->lock, flags);
	return 0;
}
321
322static void msm_get_eaddr(u8 *e_addr, u32 *buffer)
323{
324 e_addr[0] = (buffer[1] >> 24) & 0xff;
325 e_addr[1] = (buffer[1] >> 16) & 0xff;
326 e_addr[2] = (buffer[1] >> 8) & 0xff;
327 e_addr[3] = buffer[1] & 0xff;
328 e_addr[4] = (buffer[0] >> 24) & 0xff;
329 e_addr[5] = (buffer[0] >> 16) & 0xff;
330}
331
332static bool msm_is_sat_dev(u8 *e_addr)
333{
334 if (e_addr[5] == QC_MFGID_LSB && e_addr[4] == QC_MFGID_MSB &&
335 e_addr[2] != QC_CHIPID_SL &&
336 (e_addr[1] == QC_DEVID_SAT1 || e_addr[1] == QC_DEVID_SAT2))
337 return true;
338 return false;
339}
340
/*
 * Take a runtime-PM vote on the controller device (resumes it if needed).
 * Returns the pm_runtime_get_sync() result (>= 0 on success), -ENODEV if
 * the usage count looks corrupted or runtime PM is compiled out.
 *
 * NOTE(review): on the ref <= 0 error path the reference taken by
 * pm_runtime_get_sync() is not dropped; presumably intentional since the
 * count is already inconsistent — confirm against the callers.
 */
static int msm_slim_get_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref = 0;
	int ret = pm_runtime_get_sync(dev->dev);
	if (ret >= 0) {
		ref = atomic_read(&dev->dev->power.usage_count);
		if (ref <= 0) {
			dev_err(dev->dev, "reference count -ve:%d", ref);
			ret = -ENODEV;
		}
	}
	return ret;
#else
	return -ENODEV;
#endif
}
/*
 * Drop a runtime-PM vote taken by msm_slim_get_ctrl(). Refreshes the
 * last-busy timestamp for autosuspend and refuses to put if the usage
 * count is already non-positive (guards against unbalanced puts).
 * No-op when runtime PM is compiled out.
 */
static void msm_slim_put_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref;
	pm_runtime_mark_last_busy(dev->dev);
	ref = atomic_read(&dev->dev->power.usage_count);
	if (ref <= 0)
		dev_err(dev->dev, "reference count mismatch:%d", ref);
	else
		pm_runtime_put(dev->dev);
#endif
}
370
Sagar Dharia790cfd02011-09-25 17:56:24 -0600371static struct msm_slim_sat *addr_to_sat(struct msm_slim_ctrl *dev, u8 laddr)
372{
373 struct msm_slim_sat *sat = NULL;
374 int i = 0;
375 while (!sat && i < dev->nsats) {
376 if (laddr == dev->satd[i]->satcl.laddr)
377 sat = dev->satd[i];
378 i++;
379 }
380 return sat;
381}
382
/*
 * Manager interrupt handler. Services, in order:
 *  - TX completion / NACK (signals the waiter registered in dev->wr_comp),
 *  - received messages (routed to a satellite queue, the RX ring, or
 *    logged, depending on message type/code),
 *  - reconfiguration-done,
 *  - data-port status (disconnect/overflow/underflow per port).
 * Each interrupt source is acknowledged by writing its bit to MGR_INT_CLR,
 * followed by mb() so the clear is posted before any waiter is woken.
 */
static irqreturn_t msm_slim_interrupt(int irq, void *d)
{
	struct msm_slim_ctrl *dev = d;
	u32 pstat;
	u32 stat = readl_relaxed(dev->base + MGR_INT_STAT);

	if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2) {
		if (stat & MGR_INT_TX_MSG_SENT)
			writel_relaxed(MGR_INT_TX_MSG_SENT,
					dev->base + MGR_INT_CLR);
		else {
			/* NACKed TX: record the error for the waiter */
			writel_relaxed(MGR_INT_TX_NACKED_2,
					dev->base + MGR_INT_CLR);
			dev->err = -EIO;
		}
		/*
		 * Guarantee that interrupt clear bit write goes through before
		 * signalling completion/exiting ISR
		 */
		mb();
		if (dev->wr_comp)
			complete(dev->wr_comp);
	}
	if (stat & MGR_INT_RX_MSG_RCVD) {
		u32 rx_buf[10];
		u32 mc, mt;
		u8 len, i;
		/* first word carries length (bits 0-4), MT and MC */
		rx_buf[0] = readl_relaxed(dev->base + MGR_RX_MSG);
		len = rx_buf[0] & 0x1F;
		for (i = 1; i < ((len + 3) >> 2); i++) {
			rx_buf[i] = readl_relaxed(dev->base + MGR_RX_MSG +
						(4 * i));
			dev_dbg(dev->dev, "reading data: %x\n", rx_buf[i]);
		}
		mt = (rx_buf[0] >> 5) & 0x7;
		mc = (rx_buf[0] >> 8) & 0xff;
		dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
		if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
			/* user message: hand off to the owning satellite */
			u8 laddr = (u8)((rx_buf[0] >> 16) & 0xFF);
			struct msm_slim_sat *sat = addr_to_sat(dev, laddr);
			if (sat)
				msm_sat_enqueue(sat, rx_buf, len);
			else
				dev_err(dev->dev, "unknown sat:%d message",
						laddr);
			writel_relaxed(MGR_INT_RX_MSG_RCVD,
					dev->base + MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through before
			 * queuing work
			 */
			mb();
			if (sat)
				queue_work(sat->wq, &sat->wd);
		} else if (mt == SLIM_MSG_MT_CORE &&
				mc == SLIM_MSG_MC_REPORT_PRESENT) {
			/* device announcement: queue for the RX thread */
			u8 e_addr[6];
			msm_get_eaddr(e_addr, rx_buf);
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			/* reply to an earlier request: queue for RX thread */
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			/* information-element report: log and acknowledge */
			u8 *buf = (u8 *)rx_buf;
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
					l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		} else {
			/* anything else is unexpected: dump and acknowledge */
			dev_err(dev->dev, "Unexpected MC,%x MT:%x, len:%d",
					mc, mt, len);
			for (i = 0; i < ((len + 3) >> 2); i++)
				dev_err(dev->dev, "error msg: %x", rx_buf[i]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		}
	}
	if (stat & MGR_INT_RECFG_DONE) {
		writel_relaxed(MGR_INT_RECFG_DONE, dev->base + MGR_INT_CLR);
		/*
		 * Guarantee that CLR bit write goes through
		 * before exiting ISR
		 */
		mb();
		complete(&dev->reconf);
	}
	/* data-port interrupts for this execution environment */
	pstat = readl_relaxed(dev->base + PGD_PORT_INT_ST_EEn + (16 * dev->ee));
	if (pstat != 0) {
		int i = 0;
		for (i = dev->pipe_b; i < MSM_SLIM_NPORTS; i++) {
			if (pstat & 1 << i) {
				u32 val = readl_relaxed(dev->base +
						PGD_PORT_STATn + (i * 32));
				if (val & (1 << 19)) {
					dev->ctrl.ports[i].err =
						SLIM_P_DISCONNECT;
					dev->pipes[i-dev->pipe_b].connected =
						false;
					/*
					 * SPS will call completion since
					 * ERROR flags are registered
					 */
				} else if (val & (1 << 2))
					dev->ctrl.ports[i].err =
							SLIM_P_OVERFLOW;
				else if (val & (1 << 3))
					dev->ctrl.ports[i].err =
							SLIM_P_UNDERFLOW;
			}
			writel_relaxed(1, dev->base + PGD_PORT_INT_CL_EEn +
				(dev->ee * 16));
		}
		/*
		 * Guarantee that port interrupt bit(s) clearing writes go
		 * through before exiting ISR
		 */
		mb();
	}

	return IRQ_HANDLED;
}
537
538static int
539msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
540{
541 int ret;
542 struct sps_pipe *endpoint;
543 struct sps_connect *config = &ep->config;
544
545 /* Allocate the endpoint */
546 endpoint = sps_alloc_endpoint();
547 if (!endpoint) {
548 dev_err(dev->dev, "sps_alloc_endpoint failed\n");
549 return -ENOMEM;
550 }
551
552 /* Get default connection configuration for an endpoint */
553 ret = sps_get_config(endpoint, config);
554 if (ret) {
555 dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
556 goto sps_config_failed;
557 }
558
559 ep->sps = endpoint;
560 return 0;
561
562sps_config_failed:
563 sps_free_endpoint(endpoint);
564 return ret;
565}
566
/* Release the SPS endpoint and mark the slot empty. */
static void
msm_slim_free_endpoint(struct msm_slim_endp *ep)
{
	sps_free_endpoint(ep->sps);
	ep->sps = NULL;
}
573
574static int msm_slim_sps_mem_alloc(
575 struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
576{
577 dma_addr_t phys;
578
579 mem->size = len;
580 mem->min_size = 0;
581 mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);
582
583 if (!mem->base) {
584 dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
585 return -ENOMEM;
586 }
587
588 mem->phys_base = phys;
589 memset(mem->base, 0x00, mem->size);
590 return 0;
591}
592
/* Free a buffer obtained from msm_slim_sps_mem_alloc() and reset *mem. */
static void
msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
{
	dma_free_coherent(dev->dev, mem->size, mem->base, mem->phys_base);
	mem->size = 0;
	mem->base = NULL;
	mem->phys_base = 0;
}
601
/*
 * Program default watermark/align/pack settings for HW port `pn`, reset
 * its block/transfer sizes, and enable its interrupt for this EE.
 */
static void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pn)
{
	u32 set_cfg = DEF_WATERMARK | DEF_ALIGN | DEF_PACK | ENABLE_PORT;
	/* read-modify-write: preserve interrupt enables of other ports */
	u32 int_port = readl_relaxed(dev->base + PGD_PORT_INT_EN_EEn +
					(dev->ee * 16));
	writel_relaxed(set_cfg, dev->base + PGD_PORT_CFGn + (pn * 32));
	writel_relaxed(DEF_BLKSZ, dev->base + PGD_PORT_BLKn + (pn * 32));
	writel_relaxed(DEF_TRANSZ, dev->base + PGD_PORT_TRANn + (pn * 32));
	writel_relaxed((int_port | 1 << pn) , dev->base + PGD_PORT_INT_EN_EEn +
			(dev->ee * 16));
	/* Make sure that port registers are updated before returning */
	mb();
}
615
/*
 * Connect logical pipe `pn` to its BAM pipe. Reads the port's HW status
 * to find the BAM pipe index, sets up the SPS connection in the direction
 * implied by the port's flow (SLIM_SRC feeds the bus, otherwise reads
 * from it), allocates descriptor FIFO space, and on success enables the
 * HW port. Returns 0 on success or the SPS error code.
 */
static int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
	struct msm_slim_endp *endpoint = &dev->pipes[pn];
	struct sps_connect *cfg = &endpoint->config;
	u32 stat;
	int ret = sps_get_config(dev->pipes[pn].sps, cfg);
	if (ret) {
		dev_err(dev->dev, "sps pipe-port get config error%x\n", ret);
		return ret;
	}
	cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
			SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* already connected: just refresh options, don't reconnect */
	if (dev->pipes[pn].connected) {
		ret = sps_set_config(dev->pipes[pn].sps, cfg);
		if (ret) {
			dev_err(dev->dev, "sps pipe-port set config erro:%x\n",
				ret);
			return ret;
		}
	}

	/* bits 4-11 of the port status register hold the BAM pipe index */
	stat = readl_relaxed(dev->base + PGD_PORT_STATn +
				(32 * (pn + dev->pipe_b)));
	if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
		cfg->destination = dev->bam.hdl;
		cfg->source = SPS_DEV_HANDLE_MEM;
		cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->src_pipe_index = 0;
		dev_dbg(dev->dev, "flow src:pipe num:%d",
					cfg->dest_pipe_index);
		cfg->mode = SPS_MODE_DEST;
	} else {
		cfg->source = dev->bam.hdl;
		cfg->destination = SPS_DEV_HANDLE_MEM;
		cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->dest_pipe_index = 0;
		dev_dbg(dev->dev, "flow dest:pipe num:%d",
					cfg->src_pipe_index);
		cfg->mode = SPS_MODE_SRC;
	}
	/* Space for desciptor FIFOs */
	cfg->desc.size = MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec);
	cfg->config = SPS_CONFIG_DEFAULT;
	ret = sps_connect(dev->pipes[pn].sps, cfg);
	if (!ret) {
		dev->pipes[pn].connected = true;
		msm_hw_set_port(dev, pn + dev->pipe_b);
	}
	return ret;
}
667
/*
 * Return the TX staging buffer for a message of `len` bytes.
 * There is a single shared buffer; callers serialize on dev->tx_lock.
 */
static u32 *msm_get_msg_buf(struct slim_controller *ctrl, int len)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	/*
	 * Currently we block a transaction until the current one completes.
	 * In case we need multiple transactions, use message Q
	 */
	return dev->tx_buf;
}
677
678static int msm_send_msg_buf(struct slim_controller *ctrl, u32 *buf, u8 len)
679{
680 int i;
681 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
682 for (i = 0; i < (len + 3) >> 2; i++) {
683 dev_dbg(dev->dev, "TX data:0x%x\n", buf[i]);
684 writel_relaxed(buf[i], dev->base + MGR_TX_MSG + (i * 4));
685 }
686 /* Guarantee that message is sent before returning */
687 mb();
688 return 0;
689}
690
/*
 * Assemble and transmit one slimbus transaction, then wait (1s timeout)
 * for the TX-done/NACK interrupt. Handles runtime-PM voting, the
 * port-connect/disconnect side effects of CONNECT_SOURCE/SINK and
 * DISCONNECT_PORT, and the clock-pause variant of RECONFIGURE_NOW.
 * Returns 0/dev->err on success, -EBUSY if suspended, -ETIMEDOUT on
 * timeout, -EPROTONOSUPPORT for enumeration-address destinations.
 */
static int msm_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	u32 *pbuf;
	u8 *puc;
	int timeout;
	int msgv = -1;	/* >= 0 when this txn holds a messaging PM vote */
	u8 la = txn->la;
	u8 mc = (u8)(txn->mc & 0xFF);
	/*
	 * Voting for runtime PM: Slimbus has 2 possible use cases:
	 * 1. messaging
	 * 2. Data channels
	 * Messaging case goes through messaging slots and data channels
	 * use their own slots
	 * This "get" votes for messaging bandwidth
	 */
	if (!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG))
		msgv = msm_slim_get_ctrl(dev);
	mutex_lock(&dev->tx_lock);
	/* refuse non-clock-pause traffic while suspended/suspending */
	if (dev->state == MSM_CTRL_ASLEEP ||
		((!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
		dev->state == MSM_CTRL_SLEEPING)) {
		dev_err(dev->dev, "runtime or system PM suspended state");
		mutex_unlock(&dev->tx_lock);
		if (msgv >= 0)
			msm_slim_put_ctrl(dev);
		return -EBUSY;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION) {
		/* let a previous reconfiguration finish first */
		if (dev->reconf_busy) {
			wait_for_completion(&dev->reconf);
			dev->reconf_busy = false;
		}
		/* This "get" votes for data channels */
		if (dev->ctrl.sched.usedslots != 0 &&
			!dev->chan_active) {
			int chv = msm_slim_get_ctrl(dev);
			if (chv >= 0)
				dev->chan_active = true;
		}
	}
	txn->rl--;
	pbuf = msm_get_msg_buf(ctrl, txn->rl);
	dev->wr_comp = NULL;
	dev->err = 0;

	if (txn->dt == SLIM_MSG_DEST_ENUMADDR) {
		mutex_unlock(&dev->tx_lock);
		if (msgv >= 0)
			msm_slim_put_ctrl(dev);
		return -EPROTONOSUPPORT;
	}
	/* port messages addressed to 0xFF go to the ported generic device */
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		mc == SLIM_MSG_MC_CONNECT_SINK ||
		mc == SLIM_MSG_MC_DISCONNECT_PORT))
		la = dev->pgdla;
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 0, la);
	else
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 1, la);
	/* payload starts after the 3- or 2-byte header */
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		puc = ((u8 *)pbuf) + 3;
	else
		puc = ((u8 *)pbuf) + 2;
	if (txn->rbuf)
		*(puc++) = txn->tid;
	/* value/information-element messages carry a 2-byte element code */
	if ((txn->mt == SLIM_MSG_MT_CORE) &&
		((mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
		mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
		(mc >= SLIM_MSG_MC_REQUEST_VALUE &&
		mc <= SLIM_MSG_MC_CHANGE_VALUE))) {
		*(puc++) = (txn->ec & 0xFF);
		*(puc++) = (txn->ec >> 8)&0xFF;
	}
	if (txn->wbuf)
		memcpy(puc, txn->wbuf, txn->len);
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		mc == SLIM_MSG_MC_CONNECT_SINK ||
		mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
		if (mc != SLIM_MSG_MC_DISCONNECT_PORT)
			dev->err = msm_slim_connect_pipe_port(dev, *puc);
		else {
			struct msm_slim_endp *endpoint = &dev->pipes[*puc];
			struct sps_register_event sps_event;
			memset(&sps_event, 0, sizeof(sps_event));
			sps_register_event(endpoint->sps, &sps_event);
			sps_disconnect(endpoint->sps);
			/*
			 * Remove channel disconnects master-side ports from
			 * channel. No need to send that again on the bus
			 */
			dev->pipes[*puc].connected = false;
			mutex_unlock(&dev->tx_lock);
			if (msgv >= 0)
				msm_slim_put_ctrl(dev);
			return 0;
		}
		if (dev->err) {
			dev_err(dev->dev, "pipe-port connect err:%d", dev->err);
			mutex_unlock(&dev->tx_lock);
			if (msgv >= 0)
				msm_slim_put_ctrl(dev);
			return dev->err;
		}
		/* translate logical pipe number to HW port number */
		*(puc) = *(puc) + dev->pipe_b;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION)
		dev->reconf_busy = true;
	dev->wr_comp = &done;
	msm_send_msg_buf(ctrl, pbuf, txn->rl);
	timeout = wait_for_completion_timeout(&done, HZ);

	if (mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
		/* clock-pause reconfigure: also wait for RECONFIG_DONE,
		 * then gate the clock and IRQ */
		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
					SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
				timeout) {
			timeout = wait_for_completion_timeout(&dev->reconf, HZ);
			dev->reconf_busy = false;
			if (timeout) {
				clk_disable(dev->rclk);
				disable_irq(dev->irq);
			}
		}
		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
					SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
				!timeout) {
			dev->reconf_busy = false;
			dev_err(dev->dev, "clock pause failed");
			mutex_unlock(&dev->tx_lock);
			return -ETIMEDOUT;
		}
		/* channels were torn down: drop the data-channel PM vote */
		if (txn->mt == SLIM_MSG_MT_CORE &&
			txn->mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
			if (dev->ctrl.sched.usedslots == 0 &&
					dev->chan_active) {
				dev->chan_active = false;
				msm_slim_put_ctrl(dev);
			}
		}
	}
	mutex_unlock(&dev->tx_lock);
	if (msgv >= 0)
		msm_slim_put_ctrl(dev);

	if (!timeout)
		dev_err(dev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
				txn->mt);

	return timeout ? dev->err : -ETIMEDOUT;
}
847
/*
 * Assign logical address `laddr` to the device with enumeration address
 * `ea` (6 bytes, MSB first). Builds the 9-byte ASSIGN_LOGICAL_ADDRESS
 * message directly and waits (1s) for TX completion.
 * Returns dev->err on completion or -ETIMEDOUT.
 */
static int msm_set_laddr(struct slim_controller *ctrl, const u8 *ea,
				u8 elen, u8 laddr)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	DECLARE_COMPLETION_ONSTACK(done);
	int timeout;
	u32 *buf;
	mutex_lock(&dev->tx_lock);
	buf = msm_get_msg_buf(ctrl, 9);
	buf[0] = SLIM_MSG_ASM_FIRST_WORD(9, SLIM_MSG_MT_CORE,
					SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
					SLIM_MSG_DEST_LOGICALADDR,
					ea[5] | ea[4] << 8);
	buf[1] = ea[3] | (ea[2] << 8) | (ea[1] << 16) | (ea[0] << 24);
	buf[2] = laddr;

	dev->wr_comp = &done;
	msm_send_msg_buf(ctrl, buf, 9);
	timeout = wait_for_completion_timeout(&done, HZ);
	mutex_unlock(&dev->tx_lock);
	return timeout ? dev->err : -ETIMEDOUT;
}
870
Sagar Dharia144e5e02011-08-08 17:30:11 -0600871static int msm_clk_pause_wakeup(struct slim_controller *ctrl)
872{
873 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
Sagar Dharia45ee38a2011-08-03 17:01:31 -0600874 enable_irq(dev->irq);
Sagar Dharia144e5e02011-08-08 17:30:11 -0600875 clk_enable(dev->rclk);
876 writel_relaxed(1, dev->base + FRM_WAKEUP);
877 /* Make sure framer wakeup write goes through before exiting function */
878 mb();
879 /*
880 * Workaround: Currently, slave is reporting lost-sync messages
881 * after slimbus comes out of clock pause.
882 * Transaction with slave fail before slave reports that message
883 * Give some time for that report to come
884 * Slimbus wakes up in clock gear 10 at 24.576MHz. With each superframe
885 * being 250 usecs, we wait for 20 superframes here to ensure
886 * we get the message
887 */
888 usleep_range(5000, 5000);
889 return 0;
890}
891
/*
 * Framework callback to set up port `pn`: rejects half-duplex and
 * multi-channel requests (unsupported by this controller), bounds-checks
 * the port number against the available pipes, and allocates/configures
 * the backing SPS endpoint. Returns 0 or a negative error code.
 */
static int msm_config_port(struct slim_controller *ctrl, u8 pn)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	struct msm_slim_endp *endpoint;
	int ret = 0;
	if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
		ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
		return -EPROTONOSUPPORT;
	if (pn >= (MSM_SLIM_NPORTS - dev->pipe_b))
		return -ENODEV;

	endpoint = &dev->pipes[pn];
	ret = msm_slim_init_endpoint(dev, endpoint);
	dev_dbg(dev->dev, "sps register bam error code:%x\n", ret);
	return ret;
}
908
909static enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
910 u8 pn, u8 **done_buf, u32 *done_len)
911{
912 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
913 struct sps_iovec sio;
914 int ret;
915 if (done_len)
916 *done_len = 0;
917 if (done_buf)
918 *done_buf = NULL;
919 if (!dev->pipes[pn].connected)
920 return SLIM_P_DISCONNECT;
921 ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
922 if (!ret) {
923 if (done_len)
924 *done_len = sio.size;
925 if (done_buf)
926 *done_buf = (u8 *)sio.addr;
927 }
928 dev_dbg(dev->dev, "get iovec returned %d\n", ret);
929 return SLIM_P_INPROGRESS;
930}
931
932static int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf,
933 u32 len, struct completion *comp)
934{
935 struct sps_register_event sreg;
936 int ret;
937 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
Sagar Dhariae77961f2011-09-27 14:03:50 -0600938 if (pn >= 7)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700939 return -ENODEV;
940
941
942 ctrl->ports[pn].xcomp = comp;
943 sreg.options = (SPS_EVENT_DESC_DONE|SPS_EVENT_ERROR);
944 sreg.mode = SPS_TRIGGER_WAIT;
945 sreg.xfer_done = comp;
946 sreg.callback = NULL;
947 sreg.user = &ctrl->ports[pn];
948 ret = sps_register_event(dev->pipes[pn].sps, &sreg);
949 if (ret) {
950 dev_dbg(dev->dev, "sps register event error:%x\n", ret);
951 return ret;
952 }
953 ret = sps_transfer_one(dev->pipes[pn].sps, (u32)iobuf, len, NULL,
954 SPS_IOVEC_FLAG_INT);
955 dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);
956
957 return ret;
958}
959
/*
 * Handle a satellite channel request. For SLIM_USR_MC_CHAN_CTRL the
 * operation (activate/suspend/remove) is taken from bits 7:6 of buf[3]
 * and applied to the channel handle indexed by buf[5]. Otherwise the
 * message defines one channel (or a group when more than one channel
 * number follows) with properties decoded from buf[3..6], and for
 * SLIM_USR_MC_DEF_ACT_CHAN also activates it.
 * Returns the framework call's result.
 */
static int msm_sat_define_ch(struct msm_slim_sat *sat, u8 *buf, u8 len, u8 mc)
{
	struct msm_slim_ctrl *dev = sat->dev;
	enum slim_ch_control oper;
	int i;
	int ret = 0;
	if (mc == SLIM_USR_MC_CHAN_CTRL) {
		u16 chanh = sat->satch[buf[5]];
		oper = ((buf[3] & 0xC0) >> 6);
		/* part of grp. activating/removing 1 will take care of rest */
		ret = slim_control_ch(&sat->satcl, chanh, oper, false);
	} else {
		u16 chh[40];
		struct slim_ch prop;
		u32 exp;
		u8 coeff, cc;
		u8 prrate = buf[6];
		/* channel numbers start at offset 8 of the message */
		for (i = 8; i < len; i++)
			chh[i-8] = sat->satch[buf[i]];
		prop.dataf = (enum slim_ch_dataf)((buf[3] & 0xE0) >> 5);
		/*
		 * NOTE(review): mask 0xC0 shifted right by 5 leaves the low
		 * bit clear (values 0/2/4/6); a 2-bit field would normally
		 * shift by 6 — confirm intended slim_ch_auxf encoding.
		 */
		prop.auxf = (enum slim_ch_auxf)((buf[4] & 0xC0) >> 5);
		prop.baser = SLIM_RATE_4000HZ;
		if (prrate & 0x8)
			prop.baser = SLIM_RATE_11025HZ;
		else
			prop.baser = SLIM_RATE_4000HZ;
		prop.prot = (enum slim_ch_proto)(buf[5] & 0x0F);
		prop.sampleszbits = (buf[4] & 0x1F)*SLIM_CL_PER_SL;
		exp = (u32)((buf[5] & 0xF0) >> 4);
		coeff = (buf[4] & 0x20) >> 5;
		cc = (coeff ? 3 : 1);
		/* rate multiplier = (1 or 3) * 2^exp */
		prop.ratem = cc * (1 << exp);
		/* i > 9 here means more than one channel number was listed */
		if (i > 9)
			ret = slim_define_ch(&sat->satcl, &prop, chh, len - 8,
					true, &sat->satch[buf[8]]);
		else
			ret = slim_define_ch(&sat->satcl, &prop,
					&sat->satch[buf[8]], 1, false,
					NULL);
		dev_dbg(dev->dev, "define sat grp returned:%d", ret);

		/* part of group so activating 1 will take care of rest */
		if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
			ret = slim_control_ch(&sat->satcl,
					sat->satch[buf[8]],
					SLIM_CH_ACTIVATE, false);
	}
	return ret;
}
1009
/*
 * Process one message previously queued into the controller's Rx FIFO.
 *
 * Runs in the Rx thread (called from msm_slim_rx_msgq_thread). Decodes
 * the 5-bit length / 3-bit message-type header and dispatches:
 *  - REPORT_PRESENT: assign a logical address, remember the Qualcomm
 *    ported-generic-device address, and hand satellite devices off to
 *    the satellite workqueue.
 *  - REPLY_INFORMATION / REPLY_VALUE: complete the pending transaction
 *    identified by the tid byte.
 *  - REPORT_INFORMATION: log the reported information element.
 * Anything else is logged as unexpected.
 */
static void msm_slim_rxwq(struct msm_slim_ctrl *dev)
{
	u8 buf[40];
	u8 mc, mt, len;
	int i, ret;
	if ((msm_slim_rx_dequeue(dev, (u8 *)buf)) != -ENODATA) {
		/* Byte 0 packs 5-bit remaining length and 3-bit msg type */
		len = buf[0] & 0x1F;
		mt = (buf[0] >> 5) & 0x7;
		mc = buf[1];
		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 laddr;
			u8 e_addr[6];
			/* Enumeration address arrives in reverse byte order */
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			ret = slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
			/* Is this Qualcomm ported generic device? */
			if (!ret && e_addr[5] == QC_MFGID_LSB &&
				e_addr[4] == QC_MFGID_MSB &&
				e_addr[1] == QC_DEVID_PGD &&
				e_addr[2] != QC_CHIPID_SL)
				dev->pgdla = laddr;
			/*
			 * Enable runtime PM only once the last expected MSM
			 * device (laddr QC_MSM_DEVS-1) has reported present
			 */
			if (!ret && !pm_runtime_enabled(dev->dev) &&
				laddr == (QC_MSM_DEVS - 1))
				pm_runtime_enable(dev->dev);

			/* Satellite devices get their own workqueue context */
			if (!ret && msm_is_sat_dev(e_addr)) {
				struct msm_slim_sat *sat = addr_to_sat(dev,
								laddr);
				if (!sat)
					sat = msm_slim_alloc_sat(dev);
				if (!sat)
					return;

				sat->satcl.laddr = laddr;
				msm_sat_enqueue(sat, (u32 *)buf, len);
				queue_work(sat->wq, &sat->wd);
			}
		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			/* Payload follows the 4-byte header; tid at buf[3] */
			u8 tid = buf[3];
			dev_dbg(dev->dev, "tid:%d, len:%d\n", tid, len - 4);
			slim_msg_response(&dev->ctrl, &buf[4], tid,
						len - 4);
			pm_runtime_mark_last_busy(dev->dev);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			u8 l_addr = buf[2];
			/* 12-bit element code split across buf[4]/buf[3] */
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
					l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
		} else {
			dev_err(dev->dev, "unexpected message:mc:%x, mt:%x",
					mc, mt);
			for (i = 0; i < len; i++)
				dev_err(dev->dev, "error msg: %x", buf[i]);

		}
	} else
		dev_err(dev->dev, "rxwq called and no dequeue");
}
1075
/*
 * Workqueue handler: drain and process all messages queued for one
 * satellite device.
 *
 * Each loop iteration may take a runtime-PM vote (msm_slim_get_ctrl);
 * 'satv' records whether this iteration holds such a vote so it can be
 * dropped exactly once on every exit path. REPORT_PRESENT handling
 * additionally keeps a vote pending (sat->pending_capability) until the
 * satellite acknowledges the capability exchange, and channel definition
 * keeps a vote (sat->pending_reconf) until RECONFIG_NOW completes.
 * Most requests are answered with a GENERIC_ACK carrying the request tid.
 */
static void slim_sat_rxprocess(struct work_struct *work)
{
	struct msm_slim_sat *sat = container_of(work, struct msm_slim_sat, wd);
	struct msm_slim_ctrl *dev = sat->dev;
	u8 buf[40];

	while ((msm_sat_dequeue(sat, buf)) != -ENODATA) {
		struct slim_msg_txn txn;
		int i;
		u8 len, mc, mt;
		u32 bw_sl;
		int ret = 0;
		/* >= 0 when this iteration holds a runtime-PM vote */
		int satv = -1;
		bool gen_ack = false;
		u8 tid;
		u8 wbuf[8];
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		txn.dt = SLIM_MSG_DEST_LOGICALADDR;
		txn.ec = 0;
		txn.rbuf = NULL;
		txn.la = sat->satcl.laddr;
		/* satellite handling */
		len = buf[0] & 0x1F;
		mc = buf[1];
		mt = (buf[0] >> 5) & 0x7;

		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 laddr;
			u8 e_addr[6];
			/* Enumeration address arrives in reverse byte order */
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			/*
			 * Hold a PM vote across the capability exchange;
			 * released when the satellite acks (below)
			 */
			if (pm_runtime_enabled(dev->dev)) {
				satv = msm_slim_get_ctrl(dev);
				if (satv >= 0)
					sat->pending_capability = true;
			}
			slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
			sat->satcl.laddr = laddr;
		} else if (mt != SLIM_MSG_MT_CORE &&
				mc != SLIM_MSG_MC_REPORT_PRESENT) {
			/* Every other user message holds a vote while active */
			satv = msm_slim_get_ctrl(dev);
		}
		switch (mc) {
		case SLIM_MSG_MC_REPORT_PRESENT:
			/* Remove runtime_pm vote once satellite acks */
			if (mt != SLIM_MSG_MT_CORE) {
				if (pm_runtime_enabled(dev->dev) &&
					sat->pending_capability) {
					msm_slim_put_ctrl(dev);
					sat->pending_capability = false;
				}
				continue;
			}
			/* send a Manager capability msg */
			if (sat->sent_capability) {
				if (mt == SLIM_MSG_MT_CORE)
					goto send_capability;
				else
					continue;
			}
			/* First announcement: register the satellite device */
			ret = slim_add_device(&dev->ctrl, &sat->satcl);
			if (ret) {
				dev_err(dev->dev,
					"Satellite-init failed");
				continue;
			}
			/* Satellite owns first 21 channels */
			sat->satch = kzalloc(21 * sizeof(u16), GFP_KERNEL);
			sat->nsatch = 20;
			/* alloc all sat chans */
			for (i = 0; i < 21; i++)
				slim_alloc_ch(&sat->satcl, &sat->satch[i]);
send_capability:
			/* Announce manager capability (magic + version) */
			txn.mc = SLIM_USR_MC_MASTER_CAPABILITY;
			txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
			txn.la = sat->satcl.laddr;
			txn.rl = 8;
			wbuf[0] = SAT_MAGIC_LSB;
			wbuf[1] = SAT_MAGIC_MSB;
			wbuf[2] = SAT_MSG_VER;
			wbuf[3] = SAT_MSG_PROT;
			txn.wbuf = wbuf;
			txn.len = 4;
			sat->sent_capability = true;
			msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_ADDR_QUERY:
			/* Look up logical address for the queried e-addr;
			 * zeroed address in the reply signals "not found" */
			memcpy(&wbuf[1], &buf[4], 6);
			ret = slim_get_logical_addr(&sat->satcl,
					&wbuf[1], 6, &wbuf[7]);
			if (ret)
				memset(&wbuf[1], 0, 6);
			wbuf[0] = buf[3];
			txn.mc = SLIM_USR_MC_ADDR_REPLY;
			txn.rl = 12;
			txn.len = 8;
			txn.wbuf = wbuf;
			msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DEFINE_CHAN:
		case SLIM_USR_MC_DEF_ACT_CHAN:
		case SLIM_USR_MC_CHAN_CTRL:
			/* tid position differs between define and control */
			if (mc != SLIM_USR_MC_CHAN_CTRL)
				tid = buf[7];
			else
				tid = buf[4];
			gen_ack = true;
			ret = msm_sat_define_ch(sat, buf, len, mc);
			if (ret) {
				dev_err(dev->dev,
					"SAT define_ch returned:%d",
					ret);
			}
			/* Hold a vote until the matching RECONFIG_NOW */
			if (!sat->pending_reconf) {
				int chv = msm_slim_get_ctrl(dev);
				if (chv >= 0)
					sat->pending_reconf = true;
			}
			break;
		case SLIM_USR_MC_RECONFIG_NOW:
			tid = buf[3];
			gen_ack = true;
			ret = slim_reconfigure_now(&sat->satcl);
			/* Drop the vote taken when channels were defined */
			if (sat->pending_reconf) {
				msm_slim_put_ctrl(dev);
				sat->pending_reconf = false;
			}
			break;
		case SLIM_USR_MC_REQ_BW:
			/* what we get is in SLOTS */
			bw_sl = (u32)buf[4] << 3 |
						((buf[3] & 0xE0) >> 5);
			sat->satcl.pending_msgsl = bw_sl;
			tid = buf[5];
			gen_ack = true;
			break;
		case SLIM_USR_MC_CONNECT_SRC:
		case SLIM_USR_MC_CONNECT_SINK:
			/* Forward the connect request as a core message */
			if (mc == SLIM_USR_MC_CONNECT_SRC)
				txn.mc = SLIM_MSG_MC_CONNECT_SOURCE;
			else
				txn.mc = SLIM_MSG_MC_CONNECT_SINK;
			wbuf[0] = buf[4] & 0x1F;
			wbuf[1] = buf[5];
			tid = buf[6];
			txn.la = buf[3];
			txn.mt = SLIM_MSG_MT_CORE;
			txn.rl = 6;
			txn.len = 2;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DISCONNECT_PORT:
			txn.mc = SLIM_MSG_MC_DISCONNECT_PORT;
			wbuf[0] = buf[4] & 0x1F;
			tid = buf[5];
			txn.la = buf[3];
			txn.rl = 5;
			txn.len = 1;
			txn.mt = SLIM_MSG_MT_CORE;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			/* fallthrough */
		default:
			break;
		}
		if (!gen_ack) {
			/* No ack owed: drop any vote taken for this message */
			if (mc != SLIM_MSG_MC_REPORT_PRESENT && satv >= 0)
				msm_slim_put_ctrl(dev);
			continue;
		}

		/* Generic ack: wbuf[1] == MSM_SAT_SUCCSS means success */
		wbuf[0] = tid;
		if (!ret)
			wbuf[1] = MSM_SAT_SUCCSS;
		else
			wbuf[1] = 0;
		txn.mc = SLIM_USR_MC_GENERIC_ACK;
		txn.la = sat->satcl.laddr;
		txn.rl = 6;
		txn.len = 2;
		txn.wbuf = wbuf;
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		msm_xfer_msg(&dev->ctrl, &txn);
		if (satv >= 0)
			msm_slim_put_ctrl(dev);
	}
}
1267
Sagar Dharia790cfd02011-09-25 17:56:24 -06001268static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev)
1269{
1270 struct msm_slim_sat *sat;
1271 char *name;
1272 if (dev->nsats >= MSM_MAX_NSATS)
1273 return NULL;
1274
1275 sat = kzalloc(sizeof(struct msm_slim_sat), GFP_KERNEL);
1276 if (!sat) {
1277 dev_err(dev->dev, "no memory for satellite");
1278 return NULL;
1279 }
1280 name = kzalloc(SLIMBUS_NAME_SIZE, GFP_KERNEL);
1281 if (!name) {
1282 dev_err(dev->dev, "no memory for satellite name");
1283 kfree(sat);
1284 return NULL;
1285 }
1286 dev->satd[dev->nsats] = sat;
1287 sat->dev = dev;
1288 snprintf(name, SLIMBUS_NAME_SIZE, "msm_sat%d", dev->nsats);
1289 sat->satcl.name = name;
1290 spin_lock_init(&sat->lock);
1291 INIT_WORK(&sat->wd, slim_sat_rxprocess);
1292 sat->wq = create_singlethread_workqueue(sat->satcl.name);
1293 if (!sat->wq) {
1294 kfree(name);
1295 kfree(sat);
1296 return NULL;
1297 }
1298 /*
1299 * Both sats will be allocated from RX thread and RX thread will
1300 * process messages sequentially. No synchronization necessary
1301 */
1302 dev->nsats++;
1303 return sat;
1304}
1305
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001306static void
1307msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
1308{
1309 u32 *buf = ev->data.transfer.user;
1310 struct sps_iovec *iovec = &ev->data.transfer.iovec;
1311
1312 /*
1313 * Note the virtual address needs to be offset by the same index
1314 * as the physical address or just pass in the actual virtual address
1315 * if the sps_mem_buffer is not needed. Note that if completion is
1316 * used, the virtual address won't be available and will need to be
1317 * calculated based on the offset of the physical address
1318 */
1319 if (ev->event_id == SPS_EVENT_DESC_DONE) {
1320
1321 pr_debug("buf = 0x%p, data = 0x%x\n", buf, *buf);
1322
1323 pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
1324 iovec->addr, iovec->size, iovec->flags);
1325
1326 } else {
1327 dev_err(dev->dev, "%s: unknown event %d\n",
1328 __func__, ev->event_id);
1329 }
1330}
1331
1332static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
1333{
1334 struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;
1335 msm_slim_rx_msgq_event(dev, notify);
1336}
1337
1338/* Queue up Rx message buffer */
1339static inline int
1340msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
1341{
1342 int ret;
1343 u32 flags = SPS_IOVEC_FLAG_INT;
1344 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1345 struct sps_mem_buffer *mem = &endpoint->buf;
1346 struct sps_pipe *pipe = endpoint->sps;
1347
1348 /* Rx message queue buffers are 4 bytes in length */
1349 u8 *virt_addr = mem->base + (4 * ix);
1350 u32 phys_addr = mem->phys_base + (4 * ix);
1351
1352 pr_debug("index:%d, phys:0x%x, virt:0x%p\n", ix, phys_addr, virt_addr);
1353
1354 ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, flags);
1355 if (ret)
1356 dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);
1357
1358 return ret;
1359}
1360
1361static inline int
1362msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
1363{
1364 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1365 struct sps_mem_buffer *mem = &endpoint->buf;
1366 struct sps_pipe *pipe = endpoint->sps;
1367 struct sps_iovec iovec;
1368 int index;
1369 int ret;
1370
1371 ret = sps_get_iovec(pipe, &iovec);
1372 if (ret) {
1373 dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
1374 goto err_exit;
1375 }
1376
1377 pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
1378 iovec.addr, iovec.size, iovec.flags);
1379 BUG_ON(iovec.addr < mem->phys_base);
1380 BUG_ON(iovec.addr >= mem->phys_base + mem->size);
1381
1382 /* Calculate buffer index */
1383 index = (iovec.addr - mem->phys_base) / 4;
1384 *(data + offset) = *((u32 *)mem->base + index);
1385
1386 pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index, *data);
1387
1388 /* Add buffer back to the queue */
1389 (void)msm_slim_post_rx_msgq(dev, index);
1390
1391err_exit:
1392 return ret;
1393}
1394
/*
 * Rx thread: woken once per received 4-byte word (or once per message
 * when message queues are disabled). Words are accumulated into
 * 'buffer' until 'msg_len' bytes have arrived, then the complete
 * message is dispatched either to the owning satellite's workqueue or
 * to the generic Rx path.
 */
static int msm_slim_rx_msgq_thread(void *data)
{
	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
	struct completion *notify = &dev->rx_msgq_notify;
	struct msm_slim_sat *sat = NULL;
	u32 mc = 0;
	u32 mt = 0;
	u32 buffer[10];
	int index = 0;
	u8 msg_len = 0;
	int ret;

	dev_dbg(dev->dev, "rx thread started");

	while (!kthread_should_stop()) {
		/*
		 * NOTE(review): set_current_state() before
		 * wait_for_completion_interruptible() looks redundant —
		 * the wait primitive manages task state itself. Confirm
		 * before removing.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		ret = wait_for_completion_interruptible(notify);

		if (ret)
			dev_err(dev->dev, "rx thread wait error:%d", ret);

		/* 1 irq notification per message */
		if (!dev->use_rx_msgqs) {
			msm_slim_rxwq(dev);
			continue;
		}

		ret = msm_slim_rx_msgq_get(dev, buffer, index);
		if (ret) {
			dev_err(dev->dev, "rx_msgq_get() failed 0x%x\n", ret);
			continue;
		}

		pr_debug("message[%d] = 0x%x\n", index, *buffer);

		/* Decide if we use generic RX or satellite RX */
		if (index++ == 0) {
			/* First word carries length, type, code and laddr */
			msg_len = *buffer & 0x1F;
			pr_debug("Start of new message, len = %d\n", msg_len);
			mt = (buffer[0] >> 5) & 0x7;
			mc = (buffer[0] >> 8) & 0xff;
			dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
			if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
				u8 laddr;
				laddr = (u8)((buffer[0] >> 16) & 0xff);
				sat = addr_to_sat(dev, laddr);
			}
		/*
		 * Dispatch once msg_len bytes have been collected.
		 * NOTE(review): for messages of 4 bytes or less this branch
		 * is only reached on the *next* word — verify whether such
		 * short messages can occur on this path.
		 */
		} else if ((index * 4) >= msg_len) {
			index = 0;
			if (sat) {
				msm_sat_enqueue(sat, buffer, msg_len);
				queue_work(sat->wq, &sat->wd);
				sat = NULL;
			} else {
				msm_slim_rx_enqueue(dev, buffer, msg_len);
				msm_slim_rxwq(dev);
			}
		}
	}

	return 0;
}
1458
1459static int __devinit msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev)
1460{
1461 int i, ret;
1462 u32 pipe_offset;
1463 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1464 struct sps_connect *config = &endpoint->config;
1465 struct sps_mem_buffer *descr = &config->desc;
1466 struct sps_mem_buffer *mem = &endpoint->buf;
1467 struct completion *notify = &dev->rx_msgq_notify;
1468
1469 struct sps_register_event sps_error_event; /* SPS_ERROR */
1470 struct sps_register_event sps_descr_event; /* DESCR_DONE */
1471
1472 /* Allocate the endpoint */
1473 ret = msm_slim_init_endpoint(dev, endpoint);
1474 if (ret) {
1475 dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
1476 goto sps_init_endpoint_failed;
1477 }
1478
1479 /* Get the pipe indices for the message queues */
1480 pipe_offset = (readl_relaxed(dev->base + MGR_STATUS) & 0xfc) >> 2;
1481 dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);
1482
1483 config->mode = SPS_MODE_SRC;
1484 config->source = dev->bam.hdl;
1485 config->destination = SPS_DEV_HANDLE_MEM;
1486 config->src_pipe_index = pipe_offset;
1487 config->options = SPS_O_DESC_DONE | SPS_O_ERROR |
1488 SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
1489
1490 /* Allocate memory for the FIFO descriptors */
1491 ret = msm_slim_sps_mem_alloc(dev, descr,
1492 MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
1493 if (ret) {
1494 dev_err(dev->dev, "unable to allocate SPS descriptors\n");
1495 goto alloc_descr_failed;
1496 }
1497
1498 ret = sps_connect(endpoint->sps, config);
1499 if (ret) {
1500 dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
1501 goto sps_connect_failed;
1502 }
1503
1504 /* Register completion for DESC_DONE */
1505 init_completion(notify);
1506 memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));
1507
1508 sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
1509 sps_descr_event.options = SPS_O_DESC_DONE;
1510 sps_descr_event.user = (void *)dev;
1511 sps_descr_event.xfer_done = notify;
1512
1513 ret = sps_register_event(endpoint->sps, &sps_descr_event);
1514 if (ret) {
1515 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1516 goto sps_reg_event_failed;
1517 }
1518
1519 /* Register callback for errors */
1520 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1521 sps_error_event.mode = SPS_TRIGGER_CALLBACK;
1522 sps_error_event.options = SPS_O_ERROR;
1523 sps_error_event.user = (void *)dev;
1524 sps_error_event.callback = msm_slim_rx_msgq_cb;
1525
1526 ret = sps_register_event(endpoint->sps, &sps_error_event);
1527 if (ret) {
1528 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1529 goto sps_reg_event_failed;
1530 }
1531
1532 /* Allocate memory for the message buffer(s), N descrs, 4-byte mesg */
1533 ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
1534 if (ret) {
1535 dev_err(dev->dev, "dma_alloc_coherent failed\n");
1536 goto alloc_buffer_failed;
1537 }
1538
1539 /*
1540 * Call transfer_one for each 4-byte buffer
1541 * Use (buf->size/4) - 1 for the number of buffer to post
1542 */
1543
1544 /* Setup the transfer */
1545 for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
1546 ret = msm_slim_post_rx_msgq(dev, i);
1547 if (ret) {
1548 dev_err(dev->dev, "post_rx_msgq() failed 0x%x\n", ret);
1549 goto sps_transfer_failed;
1550 }
1551 }
1552
1553 /* Fire up the Rx message queue thread */
1554 dev->rx_msgq_thread = kthread_run(msm_slim_rx_msgq_thread, dev,
1555 MSM_SLIM_NAME "_rx_msgq_thread");
1556 if (!dev->rx_msgq_thread) {
1557 dev_err(dev->dev, "Failed to start Rx message queue thread\n");
1558 ret = -EIO;
1559 } else
1560 return 0;
1561
1562sps_transfer_failed:
1563 msm_slim_sps_mem_free(dev, mem);
1564alloc_buffer_failed:
1565 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1566 sps_register_event(endpoint->sps, &sps_error_event);
1567sps_reg_event_failed:
1568 sps_disconnect(endpoint->sps);
1569sps_connect_failed:
1570 msm_slim_sps_mem_free(dev, descr);
1571alloc_descr_failed:
1572 msm_slim_free_endpoint(endpoint);
1573sps_init_endpoint_failed:
1574 return ret;
1575}
1576
/* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */
static int __devinit
msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem)
{
	int i, ret;
	u32 bam_handle;
	struct sps_bam_props bam_props = {0};

	/*
	 * Static pipe-ownership/security table: which BAM pipes each
	 * execution environment (EE) owns and under which VMID
	 */
	static struct sps_bam_sec_config_props sec_props = {
		.ees = {
			[0] = { /* LPASS */
				.vmid = 0,
				.pipe_mask = 0xFFFF98,
			},
			[1] = { /* Krait Apps */
				.vmid = 1,
				.pipe_mask = 0x3F000007,
			},
			[2] = { /* Modem */
				.vmid = 2,
				.pipe_mask = 0x00000060,
			},
		},
	};

	bam_props.ee = dev->ee;
	bam_props.virt_addr = dev->bam.base;
	bam_props.phys_addr = bam_mem->start;
	bam_props.irq = dev->bam.irq;
	bam_props.manage = SPS_BAM_MGR_LOCAL;
	bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;

	bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
	bam_props.p_sec_config_props = &sec_props;

	bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* First 7 bits are for message Qs */
	for (i = 7; i < 32; i++) {
		/* Check what pipes are owned by Apps. */
		if ((sec_props.ees[dev->ee].pipe_mask >> i) & 0x1)
			break;
	}
	/* pipe_b: first Apps-owned data pipe, relative to the 7 msgq pipes */
	dev->pipe_b = i - 7;

	/* Register the BAM device with the SPS driver */
	ret = sps_register_bam_device(&bam_props, &bam_handle);
	if (ret) {
		dev_err(dev->dev, "sps_register_bam_device failed 0x%x\n", ret);
		return ret;
	}
	dev->bam.hdl = bam_handle;
	dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%x\n", bam_handle);

	ret = msm_slim_init_rx_msgq(dev);
	if (ret) {
		dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
		goto rx_msgq_init_failed;
	}

	return 0;
rx_msgq_init_failed:
	/* Undo BAM registration; leave handle cleared for sps_exit */
	sps_deregister_bam_device(bam_handle);
	dev->bam.hdl = 0L;
	return ret;
}
1644
1645static void msm_slim_sps_exit(struct msm_slim_ctrl *dev)
1646{
1647 if (dev->use_rx_msgqs) {
1648 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1649 struct sps_connect *config = &endpoint->config;
1650 struct sps_mem_buffer *descr = &config->desc;
1651 struct sps_mem_buffer *mem = &endpoint->buf;
1652 struct sps_register_event sps_event;
1653 memset(&sps_event, 0x00, sizeof(sps_event));
1654 msm_slim_sps_mem_free(dev, mem);
1655 sps_register_event(endpoint->sps, &sps_event);
1656 sps_disconnect(endpoint->sps);
1657 msm_slim_sps_mem_free(dev, descr);
1658 msm_slim_free_endpoint(endpoint);
1659 }
1660 sps_deregister_bam_device(dev->bam.hdl);
1661}
1662
Sagar Dhariacc969452011-09-19 10:34:30 -06001663static void msm_slim_prg_slew(struct platform_device *pdev,
1664 struct msm_slim_ctrl *dev)
1665{
1666 struct resource *slew_io;
1667 void __iomem *slew_reg;
1668 /* SLEW RATE register for this slimbus */
1669 dev->slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1670 "slimbus_slew_reg");
1671 if (!dev->slew_mem) {
1672 dev_dbg(&pdev->dev, "no slimbus slew resource\n");
1673 return;
1674 }
1675 slew_io = request_mem_region(dev->slew_mem->start,
1676 resource_size(dev->slew_mem), pdev->name);
1677 if (!slew_io) {
1678 dev_dbg(&pdev->dev, "slimbus-slew mem claimed\n");
1679 dev->slew_mem = NULL;
1680 return;
1681 }
1682
1683 slew_reg = ioremap(dev->slew_mem->start, resource_size(dev->slew_mem));
1684 if (!slew_reg) {
1685 dev_dbg(dev->dev, "slew register mapping failed");
1686 release_mem_region(dev->slew_mem->start,
1687 resource_size(dev->slew_mem));
1688 dev->slew_mem = NULL;
1689 return;
1690 }
1691 writel_relaxed(1, slew_reg);
1692 /* Make sure slimbus-slew rate enabling goes through */
1693 wmb();
1694 iounmap(slew_reg);
1695}
1696
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001697static int __devinit msm_slim_probe(struct platform_device *pdev)
1698{
1699 struct msm_slim_ctrl *dev;
1700 int ret;
1701 struct resource *bam_mem, *bam_io;
1702 struct resource *slim_mem, *slim_io;
1703 struct resource *irq, *bam_irq;
1704 slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1705 "slimbus_physical");
1706 if (!slim_mem) {
1707 dev_err(&pdev->dev, "no slimbus physical memory resource\n");
1708 return -ENODEV;
1709 }
1710 slim_io = request_mem_region(slim_mem->start, resource_size(slim_mem),
1711 pdev->name);
1712 if (!slim_io) {
1713 dev_err(&pdev->dev, "slimbus memory already claimed\n");
1714 return -EBUSY;
1715 }
1716
1717 bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1718 "slimbus_bam_physical");
1719 if (!bam_mem) {
1720 dev_err(&pdev->dev, "no slimbus BAM memory resource\n");
1721 ret = -ENODEV;
1722 goto err_get_res_bam_failed;
1723 }
1724 bam_io = request_mem_region(bam_mem->start, resource_size(bam_mem),
1725 pdev->name);
1726 if (!bam_io) {
1727 release_mem_region(slim_mem->start, resource_size(slim_mem));
1728 dev_err(&pdev->dev, "slimbus BAM memory already claimed\n");
1729 ret = -EBUSY;
1730 goto err_get_res_bam_failed;
1731 }
1732 irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1733 "slimbus_irq");
1734 if (!irq) {
1735 dev_err(&pdev->dev, "no slimbus IRQ resource\n");
1736 ret = -ENODEV;
1737 goto err_get_res_failed;
1738 }
1739 bam_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1740 "slimbus_bam_irq");
1741 if (!bam_irq) {
1742 dev_err(&pdev->dev, "no slimbus BAM IRQ resource\n");
1743 ret = -ENODEV;
1744 goto err_get_res_failed;
1745 }
1746
1747 dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL);
1748 if (!dev) {
1749 dev_err(&pdev->dev, "no memory for MSM slimbus controller\n");
1750 ret = -ENOMEM;
1751 goto err_get_res_failed;
1752 }
1753 dev->dev = &pdev->dev;
1754 platform_set_drvdata(pdev, dev);
1755 slim_set_ctrldata(&dev->ctrl, dev);
1756 dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
1757 if (!dev->base) {
1758 dev_err(&pdev->dev, "IOremap failed\n");
1759 ret = -ENOMEM;
1760 goto err_ioremap_failed;
1761 }
1762 dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem));
1763 if (!dev->bam.base) {
1764 dev_err(&pdev->dev, "BAM IOremap failed\n");
1765 ret = -ENOMEM;
1766 goto err_ioremap_bam_failed;
1767 }
1768 dev->ctrl.nr = pdev->id;
1769 dev->ctrl.nchans = MSM_SLIM_NCHANS;
1770 dev->ctrl.nports = MSM_SLIM_NPORTS;
1771 dev->ctrl.set_laddr = msm_set_laddr;
1772 dev->ctrl.xfer_msg = msm_xfer_msg;
Sagar Dharia144e5e02011-08-08 17:30:11 -06001773 dev->ctrl.wakeup = msm_clk_pause_wakeup;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001774 dev->ctrl.config_port = msm_config_port;
1775 dev->ctrl.port_xfer = msm_slim_port_xfer;
1776 dev->ctrl.port_xfer_status = msm_slim_port_xfer_status;
1777 /* Reserve some messaging BW for satellite-apps driver communication */
1778 dev->ctrl.sched.pending_msgsl = 30;
1779
1780 init_completion(&dev->reconf);
1781 mutex_init(&dev->tx_lock);
1782 spin_lock_init(&dev->rx_lock);
1783 dev->ee = 1;
1784 dev->use_rx_msgqs = 1;
1785 dev->irq = irq->start;
1786 dev->bam.irq = bam_irq->start;
1787
1788 ret = msm_slim_sps_init(dev, bam_mem);
1789 if (ret != 0) {
1790 dev_err(dev->dev, "error SPS init\n");
1791 goto err_sps_init_failed;
1792 }
1793
1794
1795 dev->rclk = clk_get(dev->dev, "audio_slimbus_clk");
Sagar Dhariacc969452011-09-19 10:34:30 -06001796 if (!dev->rclk) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001797 dev_err(dev->dev, "slimbus clock not found");
1798 goto err_clk_get_failed;
1799 }
1800 dev->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
1801 dev->framer.superfreq =
1802 dev->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
1803 dev->ctrl.a_framer = &dev->framer;
1804 dev->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001805 dev->ctrl.dev.parent = &pdev->dev;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001806
1807 ret = request_irq(dev->irq, msm_slim_interrupt, IRQF_TRIGGER_HIGH,
1808 "msm_slim_irq", dev);
1809 if (ret) {
1810 dev_err(&pdev->dev, "request IRQ failed\n");
1811 goto err_request_irq_failed;
1812 }
1813
Sagar Dhariacc969452011-09-19 10:34:30 -06001814 msm_slim_prg_slew(pdev, dev);
1815 clk_set_rate(dev->rclk, SLIM_ROOT_FREQ);
1816 clk_enable(dev->rclk);
1817
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001818 /* Component register initialization */
1819 writel_relaxed(1, dev->base + COMP_CFG);
1820 writel_relaxed((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
1821 dev->base + COMP_TRUST_CFG);
1822
1823 /*
1824 * Manager register initialization
1825 * If RX msg Q is used, disable RX_MSG_RCVD interrupt
1826 */
1827 if (dev->use_rx_msgqs)
1828 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
1829 MGR_INT_MSG_BUF_CONTE | /* MGR_INT_RX_MSG_RCVD | */
1830 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
1831 else
1832 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
1833 MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
1834 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
1835 writel_relaxed(1, dev->base + MGR_CFG);
1836 /*
1837 * Framer registers are beyond 1K memory region after Manager and/or
1838 * component registers. Make sure those writes are ordered
1839 * before framer register writes
1840 */
1841 wmb();
1842
Sagar Dharia72007922011-12-13 21:14:26 -07001843 /* Register with framework before enabling frame, clock */
1844 ret = slim_add_numbered_controller(&dev->ctrl);
1845 if (ret) {
1846 dev_err(dev->dev, "error adding controller\n");
1847 goto err_ctrl_failed;
1848 }
1849
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001850 /* Framer register initialization */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001851 writel_relaxed((0xA << REF_CLK_GEAR) | (0xA << CLK_GEAR) |
1852 (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
1853 dev->base + FRM_CFG);
1854 /*
1855 * Make sure that framer wake-up and enabling writes go through
1856 * before any other component is enabled. Framer is responsible for
1857 * clocking the bus and enabling framer first will ensure that other
1858 * devices can report presence when they are enabled
1859 */
1860 mb();
1861
1862 /* Enable RX msg Q */
1863 if (dev->use_rx_msgqs)
1864 writel_relaxed(MGR_CFG_ENABLE | MGR_CFG_RX_MSGQ_EN,
1865 dev->base + MGR_CFG);
1866 else
1867 writel_relaxed(MGR_CFG_ENABLE, dev->base + MGR_CFG);
1868 /*
1869 * Make sure that manager-enable is written through before interface
1870 * device is enabled
1871 */
1872 mb();
1873 writel_relaxed(1, dev->base + INTF_CFG);
1874 /*
1875 * Make sure that interface-enable is written through before enabling
1876 * ported generic device inside MSM manager
1877 */
1878 mb();
1879 writel_relaxed(1, dev->base + PGD_CFG);
1880 writel_relaxed(0x3F<<17, dev->base + (PGD_OWN_EEn + (4 * dev->ee)));
1881 /*
1882 * Make sure that ported generic device is enabled and port-EE settings
1883 * are written through before finally enabling the component
1884 */
1885 mb();
1886
1887 writel_relaxed(1, dev->base + COMP_CFG);
1888 /*
1889 * Make sure that all writes have gone through before exiting this
1890 * function
1891 */
1892 mb();
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001893 pm_runtime_use_autosuspend(&pdev->dev);
1894 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_SLIM_AUTOSUSPEND);
1895 pm_runtime_set_active(&pdev->dev);
1896
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001897 dev_dbg(dev->dev, "MSM SB controller is up!\n");
1898 return 0;
1899
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001900err_ctrl_failed:
1901 writel_relaxed(0, dev->base + COMP_CFG);
1902 kfree(dev->satd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001903err_request_irq_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001904 clk_disable(dev->rclk);
1905 clk_put(dev->rclk);
1906err_clk_get_failed:
1907 msm_slim_sps_exit(dev);
1908err_sps_init_failed:
1909 iounmap(dev->bam.base);
1910err_ioremap_bam_failed:
1911 iounmap(dev->base);
1912err_ioremap_failed:
1913 kfree(dev);
1914err_get_res_failed:
1915 release_mem_region(bam_mem->start, resource_size(bam_mem));
1916err_get_res_bam_failed:
1917 release_mem_region(slim_mem->start, resource_size(slim_mem));
1918 return ret;
1919}
1920
/*
 * Tear down the controller: remove all satellite devices and their
 * workqueues, disable runtime PM, release the IRQ, unregister from the
 * slimbus framework, drop the clock, shut down SPS, stop the Rx thread,
 * unmap registers and release all memory regions claimed in probe.
 */
static int __devexit msm_slim_remove(struct platform_device *pdev)
{
	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
	struct resource *bam_mem;
	struct resource *slim_mem;
	struct resource *slew_mem = dev->slew_mem;
	int i;
	for (i = 0; i < dev->nsats; i++) {
		struct msm_slim_sat *sat = dev->satd[i];
		slim_remove_device(&sat->satcl);
		kfree(sat->satch);
		destroy_workqueue(sat->wq);
		kfree(sat->satcl.name);
		kfree(sat);
	}
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	free_irq(dev->irq, dev);
	slim_del_controller(&dev->ctrl);
	clk_put(dev->rclk);
	/*
	 * NOTE(review): SPS teardown happens before stopping the Rx thread
	 * that consumes SPS events — confirm this ordering is intentional.
	 */
	msm_slim_sps_exit(dev);
	kthread_stop(dev->rx_msgq_thread);
	iounmap(dev->bam.base);
	iounmap(dev->base);
	kfree(dev);
	bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						"slimbus_bam_physical");
	if (bam_mem)
		release_mem_region(bam_mem->start, resource_size(bam_mem));
	if (slew_mem)
		release_mem_region(slew_mem->start, resource_size(slew_mem));
	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						"slimbus_physical");
	if (slim_mem)
		release_mem_region(slim_mem->start, resource_size(slim_mem));
	return 0;
}
1958
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001959#ifdef CONFIG_PM_RUNTIME
1960static int msm_slim_runtime_idle(struct device *device)
1961{
1962 dev_dbg(device, "pm_runtime: idle...\n");
1963 pm_request_autosuspend(device);
1964 return -EAGAIN;
1965}
1966#endif
1967
/*
 * When CONFIG_PM_RUNTIME is not defined, these two functions act as
 * helpers called from the system suspend/resume path, so they must not
 * be guarded by CONFIG_PM_RUNTIME alone.  They are instead guarded by
 * CONFIG_PM_SLEEP below.
 */
Sagar Dharia45e77912012-01-10 09:55:18 -07001973#ifdef CONFIG_PM_SLEEP
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001974static int msm_slim_runtime_suspend(struct device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001975{
1976 struct platform_device *pdev = to_platform_device(device);
1977 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001978 int ret;
1979 dev_dbg(device, "pm_runtime: suspending...\n");
1980 dev->state = MSM_CTRL_SLEEPING;
1981 ret = slim_ctrl_clk_pause(&dev->ctrl, false, SLIM_CLK_UNSPECIFIED);
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07001982 if (ret) {
1983 dev_err(device, "clk pause not entered:%d", ret);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001984 dev->state = MSM_CTRL_AWAKE;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07001985 } else {
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001986 dev->state = MSM_CTRL_ASLEEP;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07001987 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001988 return ret;
1989}
1990
1991static int msm_slim_runtime_resume(struct device *device)
1992{
1993 struct platform_device *pdev = to_platform_device(device);
1994 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
1995 int ret = 0;
1996 dev_dbg(device, "pm_runtime: resuming...\n");
1997 if (dev->state == MSM_CTRL_ASLEEP)
1998 ret = slim_ctrl_clk_pause(&dev->ctrl, true, 0);
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07001999 if (ret) {
2000 dev_err(device, "clk pause not exited:%d", ret);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002001 dev->state = MSM_CTRL_ASLEEP;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002002 } else {
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002003 dev->state = MSM_CTRL_AWAKE;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002004 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002005 return ret;
2006}
2007
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002008static int msm_slim_suspend(struct device *dev)
2009{
2010 int ret = 0;
2011 if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
2012 dev_dbg(dev, "system suspend");
2013 ret = msm_slim_runtime_suspend(dev);
Sagar Dharia6b559e02011-08-03 17:01:31 -06002014 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002015 if (ret == -EBUSY) {
Sagar Dharia144e5e02011-08-08 17:30:11 -06002016 /*
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002017 * If the clock pause failed due to active channels, there is
2018 * a possibility that some audio stream is active during suspend
2019 * We dont want to return suspend failure in that case so that
2020 * display and relevant components can still go to suspend.
2021 * If there is some other error, then it should be passed-on
2022 * to system level suspend
2023 */
Sagar Dharia144e5e02011-08-08 17:30:11 -06002024 ret = 0;
2025 }
2026 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002027}
2028
/*
 * System-resume callback.  When runtime PM still shows the device
 * suspended, it will handle wakeup on demand, so nothing is done here.
 * Otherwise resume now and re-arm the autosuspend timer.
 */
static int msm_slim_resume(struct device *dev)
{
	int ret;

	/* Runtime PM owns the device state: leave resume to it */
	if (pm_runtime_enabled(dev) && pm_runtime_suspended(dev))
		return 0;

	dev_dbg(dev, "system resume");
	ret = msm_slim_runtime_resume(dev);
	if (!ret) {
		pm_runtime_mark_last_busy(dev);
		pm_request_autosuspend(dev);
	}
	return ret;
}
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002045#endif /* CONFIG_PM_SLEEP */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002046
/*
 * Power-management operations for the SLIMbus controller.
 *
 * NOTE(review): SET_RUNTIME_PM_OPS references msm_slim_runtime_suspend
 * and msm_slim_runtime_resume, which are compiled only under
 * CONFIG_PM_SLEEP.  A config with CONFIG_PM_RUNTIME=y and
 * CONFIG_PM_SLEEP=n would fail to link — confirm the guard choice.
 */
static const struct dev_pm_ops msm_slim_dev_pm_ops = {
	/* System sleep hooks (active only with CONFIG_PM_SLEEP) */
	SET_SYSTEM_SLEEP_PM_OPS(
		msm_slim_suspend,
		msm_slim_resume
	)
	/* Runtime-PM hooks (active only with CONFIG_PM_RUNTIME) */
	SET_RUNTIME_PM_OPS(
		msm_slim_runtime_suspend,
		msm_slim_runtime_resume,
		msm_slim_runtime_idle
	)
};
2058
2059static struct platform_driver msm_slim_driver = {
2060 .probe = msm_slim_probe,
2061 .remove = msm_slim_remove,
2062 .driver = {
2063 .name = MSM_SLIM_NAME,
2064 .owner = THIS_MODULE,
2065 .pm = &msm_slim_dev_pm_ops,
2066 },
2067};
2068
2069static int msm_slim_init(void)
2070{
2071 return platform_driver_register(&msm_slim_driver);
2072}
2073subsys_initcall(msm_slim_init);
2074
2075static void msm_slim_exit(void)
2076{
2077 platform_driver_unregister(&msm_slim_driver);
2078}
2079module_exit(msm_slim_exit);
2080
2081MODULE_LICENSE("GPL v2");
2082MODULE_VERSION("0.1");
2083MODULE_DESCRIPTION("MSM Slimbus controller");
2084MODULE_ALIAS("platform:msm-slim");