blob: 3512fa8c37e7df574d4bca97834fe3775ba94d93 [file] [log] [blame]
Sagar Dharia790cfd02011-09-25 17:56:24 -06001/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12#include <linux/irq.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/slab.h>
16#include <linux/io.h>
17#include <linux/interrupt.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/slimbus/slimbus.h>
21#include <linux/delay.h>
22#include <linux/kthread.h>
23#include <linux/clk.h>
Sagar Dharia45ee38a2011-08-03 17:01:31 -060024#include <linux/pm_runtime.h>
Sagar Dhariaf8f603b2012-03-21 15:25:17 -060025#include <linux/of.h>
26#include <linux/of_slimbus.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070027#include <mach/sps.h>
Joonwoo Parkf69f77a2012-08-28 15:26:11 -070028#include <mach/qdsp6v2/apr.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029
30/* Per spec.max 40 bytes per received message */
31#define SLIM_RX_MSGQ_BUF_LEN 40
32
33#define SLIM_USR_MC_GENERIC_ACK 0x25
34#define SLIM_USR_MC_MASTER_CAPABILITY 0x0
35#define SLIM_USR_MC_REPORT_SATELLITE 0x1
36#define SLIM_USR_MC_ADDR_QUERY 0xD
37#define SLIM_USR_MC_ADDR_REPLY 0xE
38#define SLIM_USR_MC_DEFINE_CHAN 0x20
39#define SLIM_USR_MC_DEF_ACT_CHAN 0x21
40#define SLIM_USR_MC_CHAN_CTRL 0x23
41#define SLIM_USR_MC_RECONFIG_NOW 0x24
42#define SLIM_USR_MC_REQ_BW 0x28
43#define SLIM_USR_MC_CONNECT_SRC 0x2C
44#define SLIM_USR_MC_CONNECT_SINK 0x2D
45#define SLIM_USR_MC_DISCONNECT_PORT 0x2E
46
47/* MSM Slimbus peripheral settings */
48#define MSM_SLIM_PERF_SUMM_THRESHOLD 0x8000
49#define MSM_SLIM_NCHANS 32
50#define MSM_SLIM_NPORTS 24
Sagar Dharia45ee38a2011-08-03 17:01:31 -060051#define MSM_SLIM_AUTOSUSPEND MSEC_PER_SEC
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070052
53/*
54 * Need enough descriptors to receive present messages from slaves
55 * if received simultaneously. Present message needs 3 descriptors
56 * and this size will ensure around 10 simultaneous reports.
57 */
58#define MSM_SLIM_DESC_NUM 32
59
60#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
61 ((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))
62
63#define MSM_SLIM_NAME "msm_slim_ctrl"
64#define SLIM_ROOT_FREQ 24576000
65
66#define MSM_CONCUR_MSG 8
67#define SAT_CONCUR_MSG 8
68#define DEF_WATERMARK (8 << 1)
69#define DEF_ALIGN 0
70#define DEF_PACK (1 << 6)
71#define ENABLE_PORT 1
72
73#define DEF_BLKSZ 0
74#define DEF_TRANSZ 0
75
76#define SAT_MAGIC_LSB 0xD9
77#define SAT_MAGIC_MSB 0xC5
78#define SAT_MSG_VER 0x1
79#define SAT_MSG_PROT 0x1
80#define MSM_SAT_SUCCSS 0x20
Sagar Dharia790cfd02011-09-25 17:56:24 -060081#define MSM_MAX_NSATS 2
Sagar Dharia0ffdca12011-09-25 18:55:53 -060082#define MSM_MAX_SATCH 32
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070083
84#define QC_MFGID_LSB 0x2
85#define QC_MFGID_MSB 0x17
86#define QC_CHIPID_SL 0x10
87#define QC_DEVID_SAT1 0x3
88#define QC_DEVID_SAT2 0x4
89#define QC_DEVID_PGD 0x5
Sagar Dharia45ee38a2011-08-03 17:01:31 -060090#define QC_MSM_DEVS 5
Sagar Dhariaac913452012-09-04 11:27:26 -060091#define INIT_MX_RETRIES 10
92#define DEF_RETRY_MS 10
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070093
Sagar Dharia82e516f2012-03-16 16:01:23 -060094#define PGD_THIS_EE(r, v) ((v) ? PGD_THIS_EE_V2(r) : PGD_THIS_EE_V1(r))
95#define PGD_PORT(r, p, v) ((v) ? PGD_PORT_V2(r, p) : PGD_PORT_V1(r, p))
96#define CFG_PORT(r, v) ((v) ? CFG_PORT_V2(r) : CFG_PORT_V1(r))
97
98#define PGD_THIS_EE_V2(r) (dev->base + (r ## _V2) + (dev->ee * 0x1000))
99#define PGD_PORT_V2(r, p) (dev->base + (r ## _V2) + ((p) * 0x1000))
100#define CFG_PORT_V2(r) ((r ## _V2))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700101/* Component registers */
Sagar Dharia82e516f2012-03-16 16:01:23 -0600102enum comp_reg_v2 {
103 COMP_CFG_V2 = 4,
104 COMP_TRUST_CFG_V2 = 0x3000,
105};
106
107/* Manager PGD registers */
108enum pgd_reg_v2 {
109 PGD_CFG_V2 = 0x800,
110 PGD_STAT_V2 = 0x804,
111 PGD_INT_EN_V2 = 0x810,
112 PGD_INT_STAT_V2 = 0x814,
113 PGD_INT_CLR_V2 = 0x818,
114 PGD_OWN_EEn_V2 = 0x300C,
115 PGD_PORT_INT_EN_EEn_V2 = 0x5000,
116 PGD_PORT_INT_ST_EEn_V2 = 0x5004,
117 PGD_PORT_INT_CL_EEn_V2 = 0x5008,
118 PGD_PORT_CFGn_V2 = 0x14000,
119 PGD_PORT_STATn_V2 = 0x14004,
120 PGD_PORT_PARAMn_V2 = 0x14008,
121 PGD_PORT_BLKn_V2 = 0x1400C,
122 PGD_PORT_TRANn_V2 = 0x14010,
123 PGD_PORT_MCHANn_V2 = 0x14014,
124 PGD_PORT_PSHPLLn_V2 = 0x14018,
125 PGD_PORT_PC_CFGn_V2 = 0x8000,
126 PGD_PORT_PC_VALn_V2 = 0x8004,
127 PGD_PORT_PC_VFR_TSn_V2 = 0x8008,
128 PGD_PORT_PC_VFR_STn_V2 = 0x800C,
129 PGD_PORT_PC_VFR_CLn_V2 = 0x8010,
130 PGD_IE_STAT_V2 = 0x820,
131 PGD_VE_STAT_V2 = 0x830,
132};
133
134#define PGD_THIS_EE_V1(r) (dev->base + (r ## _V1) + (dev->ee * 16))
135#define PGD_PORT_V1(r, p) (dev->base + (r ## _V1) + ((p) * 32))
136#define CFG_PORT_V1(r) ((r ## _V1))
137/* Component registers */
138enum comp_reg_v1 {
139 COMP_CFG_V1 = 0,
140 COMP_TRUST_CFG_V1 = 0x14,
141};
142
143/* Manager PGD registers */
144enum pgd_reg_v1 {
145 PGD_CFG_V1 = 0x1000,
146 PGD_STAT_V1 = 0x1004,
147 PGD_INT_EN_V1 = 0x1010,
148 PGD_INT_STAT_V1 = 0x1014,
149 PGD_INT_CLR_V1 = 0x1018,
150 PGD_OWN_EEn_V1 = 0x1020,
151 PGD_PORT_INT_EN_EEn_V1 = 0x1030,
152 PGD_PORT_INT_ST_EEn_V1 = 0x1034,
153 PGD_PORT_INT_CL_EEn_V1 = 0x1038,
154 PGD_PORT_CFGn_V1 = 0x1080,
155 PGD_PORT_STATn_V1 = 0x1084,
156 PGD_PORT_PARAMn_V1 = 0x1088,
157 PGD_PORT_BLKn_V1 = 0x108C,
158 PGD_PORT_TRANn_V1 = 0x1090,
159 PGD_PORT_MCHANn_V1 = 0x1094,
160 PGD_PORT_PSHPLLn_V1 = 0x1098,
161 PGD_PORT_PC_CFGn_V1 = 0x1600,
162 PGD_PORT_PC_VALn_V1 = 0x1604,
163 PGD_PORT_PC_VFR_TSn_V1 = 0x1608,
164 PGD_PORT_PC_VFR_STn_V1 = 0x160C,
165 PGD_PORT_PC_VFR_CLn_V1 = 0x1610,
166 PGD_IE_STAT_V1 = 0x1700,
167 PGD_VE_STAT_V1 = 0x1710,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700168};
169
170/* Manager registers */
171enum mgr_reg {
172 MGR_CFG = 0x200,
173 MGR_STATUS = 0x204,
174 MGR_RX_MSGQ_CFG = 0x208,
175 MGR_INT_EN = 0x210,
176 MGR_INT_STAT = 0x214,
177 MGR_INT_CLR = 0x218,
178 MGR_TX_MSG = 0x230,
179 MGR_RX_MSG = 0x270,
Sagar Dhariaac913452012-09-04 11:27:26 -0600180 MGR_IE_STAT = 0x2F0,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700181 MGR_VE_STAT = 0x300,
182};
183
184enum msg_cfg {
185 MGR_CFG_ENABLE = 1,
186 MGR_CFG_RX_MSGQ_EN = 1 << 1,
187 MGR_CFG_TX_MSGQ_EN_HIGH = 1 << 2,
188 MGR_CFG_TX_MSGQ_EN_LOW = 1 << 3,
189};
190/* Message queue types */
191enum msm_slim_msgq_type {
192 MSGQ_RX = 0,
193 MSGQ_TX_LOW = 1,
194 MSGQ_TX_HIGH = 2,
195};
196/* Framer registers */
197enum frm_reg {
198 FRM_CFG = 0x400,
199 FRM_STAT = 0x404,
200 FRM_INT_EN = 0x410,
201 FRM_INT_STAT = 0x414,
202 FRM_INT_CLR = 0x418,
203 FRM_WAKEUP = 0x41C,
204 FRM_CLKCTL_DONE = 0x420,
205 FRM_IE_STAT = 0x430,
206 FRM_VE_STAT = 0x440,
207};
208
209/* Interface registers */
210enum intf_reg {
211 INTF_CFG = 0x600,
212 INTF_STAT = 0x604,
213 INTF_INT_EN = 0x610,
214 INTF_INT_STAT = 0x614,
215 INTF_INT_CLR = 0x618,
216 INTF_IE_STAT = 0x630,
217 INTF_VE_STAT = 0x640,
218};
219
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700220enum rsc_grp {
221 EE_MGR_RSC_GRP = 1 << 10,
222 EE_NGD_2 = 2 << 6,
223 EE_NGD_1 = 0,
224};
225
226enum mgr_intr {
227 MGR_INT_RECFG_DONE = 1 << 24,
228 MGR_INT_TX_NACKED_2 = 1 << 25,
229 MGR_INT_MSG_BUF_CONTE = 1 << 26,
230 MGR_INT_RX_MSG_RCVD = 1 << 30,
231 MGR_INT_TX_MSG_SENT = 1 << 31,
232};
233
234enum frm_cfg {
235 FRM_ACTIVE = 1,
236 CLK_GEAR = 7,
237 ROOT_FREQ = 11,
238 REF_CLK_GEAR = 15,
Sagar Dhariadebc8b72012-08-11 15:02:12 -0600239 INTR_WAKE = 19,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700240};
241
/*
 * Controller power state, consulted by the TX path (see msm_xfer_msg):
 * ASLEEP rejects all transfers; SLEEPING rejects everything except
 * clock-pause sequence messages.
 */
enum msm_ctrl_state {
	MSM_CTRL_AWAKE,
	MSM_CTRL_SLEEPING,
	MSM_CTRL_ASLEEP,
};
247
/* BAM (SPS DMA engine) context used for pipe/message-queue transfers. */
struct msm_slim_sps_bam {
	u32 hdl;		/* BAM device handle (used as SPS src/dest) */
	void __iomem *base;	/* mapped BAM register space */
	int irq;
};
253
/* One SPS pipe endpoint with its connection config and buffer. */
struct msm_slim_endp {
	struct sps_pipe *sps;		/* handle from sps_alloc_endpoint() */
	struct sps_connect config;	/* connection parameters for sps_connect() */
	struct sps_register_event event;
	struct sps_mem_buffer buf;	/* coherent DMA buffer (descriptors/data) */
	struct completion *xcomp;	/* completion for in-flight transfer, if any */
	bool connected;			/* true while pipe is sps_connect()-ed */
};
262
/*
 * Per-controller state for the MSM SlimBus master: wraps the generic
 * slim_controller and adds register base, RX message ring, BAM pipes,
 * clocks, and runtime-PM bookkeeping.
 */
struct msm_slim_ctrl {
	struct slim_controller ctrl;	/* generic slimbus controller core */
	struct slim_framer framer;
	struct device *dev;
	void __iomem *base;		/* mapped controller register space */
	struct resource *slew_mem;	/* slew-rate register resource (NOTE: usage not in this chunk) */
	u32 curr_bw;
	u8 msg_cnt;
	u32 tx_buf[10];			/* single TX message buffer; transfers serialized by tx_lock */
	u8 rx_msgs[MSM_CONCUR_MSG][SLIM_RX_MSGQ_BUF_LEN];	/* RX ring storage */
	spinlock_t rx_lock;		/* protects head/tail/rx_msgs */
	int head;			/* RX ring consumer index */
	int tail;			/* RX ring producer index */
	int irq;
	int err;			/* last TX error, set by ISR on NACK */
	int ee;				/* execution environment index */
	struct completion *wr_comp;	/* completion of in-flight TX, if any */
	struct msm_slim_sat *satd[MSM_MAX_NSATS];	/* enumerated satellites */
	struct msm_slim_endp pipes[7];
	struct msm_slim_sps_bam bam;
	struct msm_slim_endp rx_msgq;	/* BAM-backed RX message queue endpoint */
	struct completion rx_msgq_notify;
	struct task_struct *rx_msgq_thread;
	struct clk *rclk;		/* root clock, gated on clock pause */
	struct clk *hclk;		/* secondary clock (presumably AHB — confirm) */
	struct mutex tx_lock;		/* serializes TX transactions */
	u8 pgdla;			/* PGD logical address for port messages */
	bool use_rx_msgqs;		/* RX via BAM message queue vs. ISR reads */
	int pipe_b;			/* offset from pipe index to HW port number */
	struct completion reconf;	/* signalled by ISR on RECFG_DONE */
	bool reconf_busy;
	bool chan_active;		/* data-channel runtime-PM vote held */
	enum msm_ctrl_state state;	/* PM state, gates TX (see msm_xfer_msg) */
	int nsats;			/* valid entries in satd[] */
	u32 ver;			/* HW version; selects V1/V2 register map */
};
299
/* Per-channel bookkeeping for a satellite-owned data channel. */
struct msm_sat_chan {
	u8 chan;	/* channel number on the bus */
	u16 chanh;	/* channel handle from the slimbus core */
	int req_rem;	/* presumably pending remove requests — confirm vs. sat worker */
	int req_def;	/* presumably pending define requests — confirm vs. sat worker */
	bool reconf;	/* awaiting reconfiguration */
};
307
/*
 * Per-satellite state: message ring filled by the ISR (msm_sat_enqueue)
 * and drained by a dedicated workqueue, plus channel bookkeeping.
 */
struct msm_slim_sat {
	struct slim_device satcl;	/* slimbus client handle for this satellite */
	struct msm_slim_ctrl *dev;	/* owning controller */
	struct workqueue_struct *wq;	/* worker draining sat_msgs */
	struct work_struct wd;
	u8 sat_msgs[SAT_CONCUR_MSG][40];	/* raw message ring, 40-byte slots */
	struct msm_sat_chan *satch;	/* channels requested by this satellite */
	u8 nsatch;
	bool sent_capability;
	bool pending_reconf;
	bool pending_capability;
	int shead;	/* ring consumer index */
	int stail;	/* ring producer index */
	spinlock_t lock;	/* protects shead/stail/sat_msgs */
};
323
Sagar Dharia790cfd02011-09-25 17:56:24 -0600324static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev);
325
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700326static int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
327{
328 spin_lock(&dev->rx_lock);
329 if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
330 spin_unlock(&dev->rx_lock);
331 dev_err(dev->dev, "RX QUEUE full!");
332 return -EXFULL;
333 }
334 memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
335 dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
336 spin_unlock(&dev->rx_lock);
337 return 0;
338}
339
340static int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
341{
342 unsigned long flags;
343 spin_lock_irqsave(&dev->rx_lock, flags);
344 if (dev->tail == dev->head) {
345 spin_unlock_irqrestore(&dev->rx_lock, flags);
346 return -ENODATA;
347 }
348 memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40);
349 dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
350 spin_unlock_irqrestore(&dev->rx_lock, flags);
351 return 0;
352}
353
354static int msm_sat_enqueue(struct msm_slim_sat *sat, u32 *buf, u8 len)
355{
356 struct msm_slim_ctrl *dev = sat->dev;
357 spin_lock(&sat->lock);
358 if ((sat->stail + 1) % SAT_CONCUR_MSG == sat->shead) {
359 spin_unlock(&sat->lock);
360 dev_err(dev->dev, "SAT QUEUE full!");
361 return -EXFULL;
362 }
363 memcpy(sat->sat_msgs[sat->stail], (u8 *)buf, len);
364 sat->stail = (sat->stail + 1) % SAT_CONCUR_MSG;
365 spin_unlock(&sat->lock);
366 return 0;
367}
368
369static int msm_sat_dequeue(struct msm_slim_sat *sat, u8 *buf)
370{
371 unsigned long flags;
372 spin_lock_irqsave(&sat->lock, flags);
373 if (sat->stail == sat->shead) {
374 spin_unlock_irqrestore(&sat->lock, flags);
375 return -ENODATA;
376 }
377 memcpy(buf, sat->sat_msgs[sat->shead], 40);
378 sat->shead = (sat->shead + 1) % SAT_CONCUR_MSG;
379 spin_unlock_irqrestore(&sat->lock, flags);
380 return 0;
381}
382
383static void msm_get_eaddr(u8 *e_addr, u32 *buffer)
384{
385 e_addr[0] = (buffer[1] >> 24) & 0xff;
386 e_addr[1] = (buffer[1] >> 16) & 0xff;
387 e_addr[2] = (buffer[1] >> 8) & 0xff;
388 e_addr[3] = buffer[1] & 0xff;
389 e_addr[4] = (buffer[0] >> 24) & 0xff;
390 e_addr[5] = (buffer[0] >> 16) & 0xff;
391}
392
393static bool msm_is_sat_dev(u8 *e_addr)
394{
395 if (e_addr[5] == QC_MFGID_LSB && e_addr[4] == QC_MFGID_MSB &&
396 e_addr[2] != QC_CHIPID_SL &&
397 (e_addr[1] == QC_DEVID_SAT1 || e_addr[1] == QC_DEVID_SAT2))
398 return true;
399 return false;
400}
401
/*
 * Take a runtime-PM "active" vote on the controller device.
 * Returns the pm_runtime_get_sync() result (>= 0 on success), or
 * -ENODEV when runtime PM is compiled out, or when the usage count is
 * non-positive right after a successful get (inconsistent state).
 * Pair each successful (>= 0) call with msm_slim_put_ctrl().
 */
static int msm_slim_get_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref = 0;
	int ret = pm_runtime_get_sync(dev->dev);
	if (ret >= 0) {
		ref = atomic_read(&dev->dev->power.usage_count);
		if (ref <= 0) {
			/* get succeeded but count looks wrong: report it */
			dev_err(dev->dev, "reference count -ve:%d", ref);
			ret = -ENODEV;
		}
	}
	return ret;
#else
	return -ENODEV;
#endif
}
/*
 * Drop a runtime-PM vote taken by msm_slim_get_ctrl(). Marks the device
 * busy first so autosuspend is pushed out, and refuses to drop below a
 * zero usage count (logs a mismatch instead of underflowing).
 * No-op when runtime PM is compiled out.
 */
static void msm_slim_put_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref;
	pm_runtime_mark_last_busy(dev->dev);
	ref = atomic_read(&dev->dev->power.usage_count);
	if (ref <= 0)
		dev_err(dev->dev, "reference count mismatch:%d", ref);
	else
		pm_runtime_put(dev->dev);
#endif
}
431
Sagar Dharia790cfd02011-09-25 17:56:24 -0600432static struct msm_slim_sat *addr_to_sat(struct msm_slim_ctrl *dev, u8 laddr)
433{
434 struct msm_slim_sat *sat = NULL;
435 int i = 0;
436 while (!sat && i < dev->nsats) {
437 if (laddr == dev->satd[i]->satcl.laddr)
438 sat = dev->satd[i];
439 i++;
440 }
441 return sat;
442}
443
/*
 * Main controller ISR. Handles, in order:
 *  - TX done / TX NACK: on NACK, dumps MGR/FRM/INTF state and sets
 *    dev->err = -EIO; either way completes dev->wr_comp if armed.
 *  - RX message received: dispatches by message type/code to the
 *    satellite workqueue, the RX ring (+ rx_msgq_notify), report-info
 *    logging, or an "unexpected message" dump.
 *  - Reconfiguration done: completes dev->reconf.
 *  - Per-port status: latches disconnect/overflow/underflow errors.
 * Every interrupt source is cleared and ordered with mb() before the
 * completion/queueing it triggers, so consumers observe the clear.
 */
static irqreturn_t msm_slim_interrupt(int irq, void *d)
{
	struct msm_slim_ctrl *dev = d;
	u32 pstat;
	u32 stat = readl_relaxed(dev->base + MGR_INT_STAT);

	if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2) {
		if (stat & MGR_INT_TX_MSG_SENT)
			writel_relaxed(MGR_INT_TX_MSG_SENT,
					dev->base + MGR_INT_CLR);
		else {
			/* NACK path: snapshot all status regs for the dump */
			u32 mgr_stat = readl_relaxed(dev->base + MGR_STATUS);
			u32 mgr_ie_stat = readl_relaxed(dev->base +
						MGR_IE_STAT);
			u32 frm_stat = readl_relaxed(dev->base + FRM_STAT);
			u32 frm_cfg = readl_relaxed(dev->base + FRM_CFG);
			u32 frm_intr_stat = readl_relaxed(dev->base +
						FRM_INT_STAT);
			u32 frm_ie_stat = readl_relaxed(dev->base +
						FRM_IE_STAT);
			u32 intf_stat = readl_relaxed(dev->base + INTF_STAT);
			u32 intf_intr_stat = readl_relaxed(dev->base +
						INTF_INT_STAT);
			u32 intf_ie_stat = readl_relaxed(dev->base +
						INTF_IE_STAT);

			writel_relaxed(MGR_INT_TX_NACKED_2,
					dev->base + MGR_INT_CLR);
			pr_err("TX Nack MGR dump:int_stat:0x%x, mgr_stat:0x%x",
					stat, mgr_stat);
			pr_err("TX Nack MGR dump:ie_stat:0x%x", mgr_ie_stat);
			pr_err("TX Nack FRM dump:int_stat:0x%x, frm_stat:0x%x",
					frm_intr_stat, frm_stat);
			pr_err("TX Nack FRM dump:frm_cfg:0x%x, ie_stat:0x%x",
					frm_cfg, frm_ie_stat);
			pr_err("TX Nack INTF dump:intr_st:0x%x, intf_stat:0x%x",
					intf_intr_stat, intf_stat);
			pr_err("TX Nack INTF dump:ie_stat:0x%x", intf_ie_stat);

			dev->err = -EIO;
		}
		/*
		 * Guarantee that interrupt clear bit write goes through before
		 * signalling completion/exiting ISR
		 */
		mb();
		if (dev->wr_comp)
			complete(dev->wr_comp);
	}
	if (stat & MGR_INT_RX_MSG_RCVD) {
		u32 rx_buf[10];
		u32 mc, mt;
		u8 len, i;
		/* First word carries the length in its low 5 bits */
		rx_buf[0] = readl_relaxed(dev->base + MGR_RX_MSG);
		len = rx_buf[0] & 0x1F;
		for (i = 1; i < ((len + 3) >> 2); i++) {
			rx_buf[i] = readl_relaxed(dev->base + MGR_RX_MSG +
						(4 * i));
			dev_dbg(dev->dev, "reading data: %x\n", rx_buf[i]);
		}
		mt = (rx_buf[0] >> 5) & 0x7;
		mc = (rx_buf[0] >> 8) & 0xff;
		dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
		if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
			/* User-type message: route to the owning satellite */
			u8 laddr = (u8)((rx_buf[0] >> 16) & 0xFF);
			struct msm_slim_sat *sat = addr_to_sat(dev, laddr);
			if (sat)
				msm_sat_enqueue(sat, rx_buf, len);
			else
				dev_err(dev->dev, "unknown sat:%d message",
						laddr);
			writel_relaxed(MGR_INT_RX_MSG_RCVD,
					dev->base + MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through before
			 * queuing work
			 */
			mb();
			if (sat)
				queue_work(sat->wq, &sat->wd);
		} else if (mt == SLIM_MSG_MT_CORE &&
				mc == SLIM_MSG_MC_REPORT_PRESENT) {
			/* New device announcing itself: queue for the RX thread */
			u8 e_addr[6];
			msm_get_eaddr(e_addr, rx_buf);
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			/* Reply to an earlier request: queue and notify waiter */
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			/* Information report: log element code and bitmasks */
			u8 *buf = (u8 *)rx_buf;
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
					l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		} else {
			dev_err(dev->dev, "Unexpected MC,%x MT:%x, len:%d",
					mc, mt, len);
			for (i = 0; i < ((len + 3) >> 2); i++)
				dev_err(dev->dev, "error msg: %x", rx_buf[i]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		}
	}
	if (stat & MGR_INT_RECFG_DONE) {
		writel_relaxed(MGR_INT_RECFG_DONE, dev->base + MGR_INT_CLR);
		/*
		 * Guarantee that CLR bit write goes through
		 * before exiting ISR
		 */
		mb();
		complete(&dev->reconf);
	}
	/* Per-port interrupt status: map HW flags to port error codes */
	pstat = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_ST_EEn, dev->ver));
	if (pstat != 0) {
		int i = 0;
		for (i = dev->pipe_b; i < MSM_SLIM_NPORTS; i++) {
			if (pstat & 1 << i) {
				u32 val = readl_relaxed(PGD_PORT(PGD_PORT_STATn,
							i, dev->ver));
				if (val & (1 << 19)) {
					dev->ctrl.ports[i].err =
						SLIM_P_DISCONNECT;
					dev->pipes[i-dev->pipe_b].connected =
						false;
					/*
					 * SPS will call completion since
					 * ERROR flags are registered
					 */
				} else if (val & (1 << 2))
					dev->ctrl.ports[i].err =
							SLIM_P_OVERFLOW;
				else if (val & (1 << 3))
					dev->ctrl.ports[i].err =
							SLIM_P_UNDERFLOW;
			}
			writel_relaxed(1, PGD_THIS_EE(PGD_PORT_INT_CL_EEn,
							dev->ver));
		}
		/*
		 * Guarantee that port interrupt bit(s) clearing writes go
		 * through before exiting ISR
		 */
		mb();
	}

	return IRQ_HANDLED;
}
624
625static int
626msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
627{
628 int ret;
629 struct sps_pipe *endpoint;
630 struct sps_connect *config = &ep->config;
631
632 /* Allocate the endpoint */
633 endpoint = sps_alloc_endpoint();
634 if (!endpoint) {
635 dev_err(dev->dev, "sps_alloc_endpoint failed\n");
636 return -ENOMEM;
637 }
638
639 /* Get default connection configuration for an endpoint */
640 ret = sps_get_config(endpoint, config);
641 if (ret) {
642 dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
643 goto sps_config_failed;
644 }
645
646 ep->sps = endpoint;
647 return 0;
648
649sps_config_failed:
650 sps_free_endpoint(endpoint);
651 return ret;
652}
653
654static void
655msm_slim_free_endpoint(struct msm_slim_endp *ep)
656{
657 sps_free_endpoint(ep->sps);
658 ep->sps = NULL;
659}
660
661static int msm_slim_sps_mem_alloc(
662 struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
663{
664 dma_addr_t phys;
665
666 mem->size = len;
667 mem->min_size = 0;
668 mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);
669
670 if (!mem->base) {
671 dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
672 return -ENOMEM;
673 }
674
675 mem->phys_base = phys;
676 memset(mem->base, 0x00, mem->size);
677 return 0;
678}
679
680static void
681msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
682{
683 dma_free_coherent(dev->dev, mem->size, mem->base, mem->phys_base);
684 mem->size = 0;
685 mem->base = NULL;
686 mem->phys_base = 0;
687}
688
/*
 * Program default configuration (watermark/align/pack + enable) for
 * PGD port @pn, reset its block/transfer sizes, and enable its
 * interrupt for this EE. Register addresses are version-selected
 * (V1/V2) via dev->ver.
 */
static void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pn)
{
	u32 set_cfg = DEF_WATERMARK | DEF_ALIGN | DEF_PACK | ENABLE_PORT;
	/* read-modify-write: preserve interrupt enables of other ports */
	u32 int_port = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
					dev->ver));
	writel_relaxed(set_cfg, PGD_PORT(PGD_PORT_CFGn, pn, dev->ver));
	writel_relaxed(DEF_BLKSZ, PGD_PORT(PGD_PORT_BLKn, pn, dev->ver));
	writel_relaxed(DEF_TRANSZ, PGD_PORT(PGD_PORT_TRANn, pn, dev->ver));
	writel_relaxed((int_port | 1 << pn) , PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
					dev->ver));
	/* Make sure that port registers are updated before returning */
	mb();
}
702
/*
 * Connect pipe @pn to its BAM pipe and enable the corresponding HW
 * port. The BAM pipe index is read out of the port status register
 * (bits 4-11). Flow direction (SLIM_SRC vs sink) decides whether the
 * BAM is the SPS destination or source. On successful sps_connect the
 * pipe is marked connected and the HW port is configured/enabled.
 * Returns 0 or a negative/SPS error code.
 */
static int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
	struct msm_slim_endp *endpoint = &dev->pipes[pn];
	struct sps_connect *cfg = &endpoint->config;
	u32 stat;
	int ret = sps_get_config(dev->pipes[pn].sps, cfg);
	if (ret) {
		dev_err(dev->dev, "sps pipe-port get config error%x\n", ret);
		return ret;
	}
	cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* Already-connected pipe: push the refreshed options only */
	if (dev->pipes[pn].connected) {
		ret = sps_set_config(dev->pipes[pn].sps, cfg);
		if (ret) {
			dev_err(dev->dev, "sps pipe-port set config erro:%x\n",
				ret);
			return ret;
		}
	}

	/* Pipe index assigned by HW lives in port status bits 4-11 */
	stat = readl_relaxed(PGD_PORT(PGD_PORT_STATn, (pn + dev->pipe_b),
				dev->ver));
	if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
		cfg->destination = dev->bam.hdl;
		cfg->source = SPS_DEV_HANDLE_MEM;
		cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->src_pipe_index = 0;
		dev_dbg(dev->dev, "flow src:pipe num:%d",
					cfg->dest_pipe_index);
		cfg->mode = SPS_MODE_DEST;
	} else {
		cfg->source = dev->bam.hdl;
		cfg->destination = SPS_DEV_HANDLE_MEM;
		cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->dest_pipe_index = 0;
		dev_dbg(dev->dev, "flow dest:pipe num:%d",
					cfg->src_pipe_index);
		cfg->mode = SPS_MODE_SRC;
	}
	/* Space for descriptor FIFOs */
	cfg->desc.size = MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec);
	cfg->config = SPS_CONFIG_DEFAULT;
	ret = sps_connect(dev->pipes[pn].sps, cfg);
	if (!ret) {
		dev->pipes[pn].connected = true;
		msm_hw_set_port(dev, pn + dev->pipe_b);
	}
	return ret;
}
754
755static u32 *msm_get_msg_buf(struct slim_controller *ctrl, int len)
756{
757 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
758 /*
759 * Currently we block a transaction until the current one completes.
760 * In case we need multiple transactions, use message Q
761 */
762 return dev->tx_buf;
763}
764
765static int msm_send_msg_buf(struct slim_controller *ctrl, u32 *buf, u8 len)
766{
767 int i;
768 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
769 for (i = 0; i < (len + 3) >> 2; i++) {
770 dev_dbg(dev->dev, "TX data:0x%x\n", buf[i]);
771 writel_relaxed(buf[i], dev->base + MGR_TX_MSG + (i * 4));
772 }
773 /* Guarantee that message is sent before returning */
774 mb();
775 return 0;
776}
777
/*
 * Send one slimbus transaction and wait (up to 1s) for the ISR to
 * complete it. Sequence:
 *  1. Vote runtime-PM active for messaging (unless clock-pause msg).
 *  2. Reject if the controller is runtime-suspended, or suspending and
 *     this is not a clock-pause message.
 *  3. BEGIN_RECONFIGURATION: drain a pending reconfiguration and take
 *     the data-channel PM vote if channels are scheduled.
 *  4. Assemble header + tid/element-code/payload into the TX buffer;
 *     CONNECT_SOURCE/SINK/DISCONNECT_PORT aimed at la 0xFF are
 *     redirected to the PGD and wired to/from the matching BAM pipe.
 *  5. Send, wait on wr_comp; for RECONFIGURE_NOW with the clock-pause
 *     flag, also wait for reconfiguration-done and then gate the root
 *     clock and IRQ; drop the data-channel vote when no slots remain.
 * Returns 0, dev->err from the ISR, or a negative error code.
 */
static int msm_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	u32 *pbuf;
	u8 *puc;
	int timeout;
	int msgv = -1;	/* >= 0 only when the messaging PM vote was taken */
	u8 la = txn->la;
	u8 mc = (u8)(txn->mc & 0xFF);
	/*
	 * Voting for runtime PM: Slimbus has 2 possible use cases:
	 * 1. messaging
	 * 2. Data channels
	 * Messaging case goes through messaging slots and data channels
	 * use their own slots
	 * This "get" votes for messaging bandwidth
	 */
	if (!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG))
		msgv = msm_slim_get_ctrl(dev);
	mutex_lock(&dev->tx_lock);
	if (dev->state == MSM_CTRL_ASLEEP ||
		((!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
		dev->state == MSM_CTRL_SLEEPING)) {
		dev_err(dev->dev, "runtime or system PM suspended state");
		mutex_unlock(&dev->tx_lock);
		if (msgv >= 0)
			msm_slim_put_ctrl(dev);
		return -EBUSY;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION) {
		if (dev->reconf_busy) {
			wait_for_completion(&dev->reconf);
			dev->reconf_busy = false;
		}
		/* This "get" votes for data channels */
		if (dev->ctrl.sched.usedslots != 0 &&
			!dev->chan_active) {
			int chv = msm_slim_get_ctrl(dev);
			if (chv >= 0)
				dev->chan_active = true;
		}
	}
	txn->rl--;
	pbuf = msm_get_msg_buf(ctrl, txn->rl);
	dev->wr_comp = NULL;
	dev->err = 0;

	if (txn->dt == SLIM_MSG_DEST_ENUMADDR) {
		mutex_unlock(&dev->tx_lock);
		if (msgv >= 0)
			msm_slim_put_ctrl(dev);
		return -EPROTONOSUPPORT;
	}
	/* Port-connection messages for la 0xFF are aimed at our PGD */
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		mc == SLIM_MSG_MC_CONNECT_SINK ||
		mc == SLIM_MSG_MC_DISCONNECT_PORT))
		la = dev->pgdla;
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 0, la);
	else
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 1, la);
	/* Payload starts after 3-byte (logical) or 2-byte (other) header */
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		puc = ((u8 *)pbuf) + 3;
	else
		puc = ((u8 *)pbuf) + 2;
	if (txn->rbuf)
		*(puc++) = txn->tid;
	/* Value/information-element messages carry a 2-byte element code */
	if ((txn->mt == SLIM_MSG_MT_CORE) &&
		((mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
		mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
		(mc >= SLIM_MSG_MC_REQUEST_VALUE &&
		mc <= SLIM_MSG_MC_CHANGE_VALUE))) {
		*(puc++) = (txn->ec & 0xFF);
		*(puc++) = (txn->ec >> 8)&0xFF;
	}
	if (txn->wbuf)
		memcpy(puc, txn->wbuf, txn->len);
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		mc == SLIM_MSG_MC_CONNECT_SINK ||
		mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
		if (mc != SLIM_MSG_MC_DISCONNECT_PORT)
			dev->err = msm_slim_connect_pipe_port(dev, *puc);
		else {
			struct msm_slim_endp *endpoint = &dev->pipes[*puc];
			struct sps_register_event sps_event;
			memset(&sps_event, 0, sizeof(sps_event));
			sps_register_event(endpoint->sps, &sps_event);
			sps_disconnect(endpoint->sps);
			/*
			 * Remove channel disconnects master-side ports from
			 * channel. No need to send that again on the bus
			 */
			dev->pipes[*puc].connected = false;
			mutex_unlock(&dev->tx_lock);
			if (msgv >= 0)
				msm_slim_put_ctrl(dev);
			return 0;
		}
		if (dev->err) {
			dev_err(dev->dev, "pipe-port connect err:%d", dev->err);
			mutex_unlock(&dev->tx_lock);
			if (msgv >= 0)
				msm_slim_put_ctrl(dev);
			return dev->err;
		}
		/* Translate pipe index to HW port number on the wire */
		*(puc) = *(puc) + dev->pipe_b;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION)
		dev->reconf_busy = true;
	dev->wr_comp = &done;
	msm_send_msg_buf(ctrl, pbuf, txn->rl);
	timeout = wait_for_completion_timeout(&done, HZ);
	/* Disarm so a late ISR cannot complete a stack completion */
	if (!timeout)
		dev->wr_comp = NULL;
	if (mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
					SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
				timeout) {
			timeout = wait_for_completion_timeout(&dev->reconf, HZ);
			dev->reconf_busy = false;
			if (timeout) {
				/* Clock pause took effect: gate clock + IRQ */
				clk_disable_unprepare(dev->rclk);
				disable_irq(dev->irq);
			}
		}
		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
					SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
				!timeout) {
			dev->reconf_busy = false;
			dev_err(dev->dev, "clock pause failed");
			mutex_unlock(&dev->tx_lock);
			return -ETIMEDOUT;
		}
		if (txn->mt == SLIM_MSG_MT_CORE &&
			txn->mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
			if (dev->ctrl.sched.usedslots == 0 &&
					dev->chan_active) {
				dev->chan_active = false;
				msm_slim_put_ctrl(dev);
			}
		}
	}
	mutex_unlock(&dev->tx_lock);
	if (msgv >= 0)
		msm_slim_put_ctrl(dev);

	if (!timeout)
		dev_err(dev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
				txn->mt);

	return timeout ? dev->err : -ETIMEDOUT;
}
935
Sagar Dhariaac913452012-09-04 11:27:26 -0600936static void msm_slim_wait_retry(struct msm_slim_ctrl *dev)
937{
938 int msec_per_frm = 0;
939 int sfr_per_sec;
940 /* Wait for 1 superframe, or default time and then retry */
941 sfr_per_sec = dev->framer.superfreq /
942 (1 << (SLIM_MAX_CLK_GEAR - dev->ctrl.clkgear));
943 if (sfr_per_sec)
944 msec_per_frm = MSEC_PER_SEC / sfr_per_sec;
945 if (msec_per_frm < DEF_RETRY_MS)
946 msec_per_frm = DEF_RETRY_MS;
947 msleep(msec_per_frm);
948}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700949static int msm_set_laddr(struct slim_controller *ctrl, const u8 *ea,
950 u8 elen, u8 laddr)
951{
952 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
Sagar Dhariaac913452012-09-04 11:27:26 -0600953 struct completion done;
954 int timeout, ret, retries = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700955 u32 *buf;
Sagar Dhariaac913452012-09-04 11:27:26 -0600956retry_laddr:
957 init_completion(&done);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700958 mutex_lock(&dev->tx_lock);
959 buf = msm_get_msg_buf(ctrl, 9);
960 buf[0] = SLIM_MSG_ASM_FIRST_WORD(9, SLIM_MSG_MT_CORE,
961 SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
962 SLIM_MSG_DEST_LOGICALADDR,
963 ea[5] | ea[4] << 8);
964 buf[1] = ea[3] | (ea[2] << 8) | (ea[1] << 16) | (ea[0] << 24);
965 buf[2] = laddr;
966
967 dev->wr_comp = &done;
Sagar Dhariaac913452012-09-04 11:27:26 -0600968 ret = msm_send_msg_buf(ctrl, buf, 9);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700969 timeout = wait_for_completion_timeout(&done, HZ);
Sagar Dhariaac913452012-09-04 11:27:26 -0600970 if (!timeout)
971 dev->err = -ETIMEDOUT;
972 if (dev->err) {
973 ret = dev->err;
974 dev->err = 0;
975 dev->wr_comp = NULL;
976 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700977 mutex_unlock(&dev->tx_lock);
Sagar Dhariaac913452012-09-04 11:27:26 -0600978 if (ret) {
979 pr_err("set LADDR:0x%x failed:ret:%d, retrying", laddr, ret);
980 if (retries < INIT_MX_RETRIES) {
981 msm_slim_wait_retry(dev);
982 retries++;
983 goto retry_laddr;
984 } else {
985 pr_err("set LADDR failed after retrying:ret:%d", ret);
986 }
987 }
988 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700989}
990
/*
 * Wake the controller out of clock pause: re-arm the interrupt, turn the
 * reference clock back on and poke the framer wakeup register.
 * Always returns 0.
 */
static int msm_clk_pause_wakeup(struct slim_controller *ctrl)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	/* IRQ was disabled when the clock-pause sequence completed */
	enable_irq(dev->irq);
	clk_prepare_enable(dev->rclk);
	writel_relaxed(1, dev->base + FRM_WAKEUP);
	/* Make sure framer wakeup write goes through before exiting function */
	mb();
	/*
	 * Workaround: Currently, slave is reporting lost-sync messages
	 * after slimbus comes out of clock pause.
	 * Transaction with slave fail before slave reports that message
	 * Give some time for that report to come
	 * Slimbus wakes up in clock gear 10 at 24.576MHz. With each superframe
	 * being 250 usecs, we wait for 20 superframes here to ensure
	 * we get the message
	 */
	usleep_range(5000, 5000);
	return 0;
}
1011
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001012static int msm_config_port(struct slim_controller *ctrl, u8 pn)
1013{
1014 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
1015 struct msm_slim_endp *endpoint;
1016 int ret = 0;
1017 if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
1018 ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
1019 return -EPROTONOSUPPORT;
1020 if (pn >= (MSM_SLIM_NPORTS - dev->pipe_b))
1021 return -ENODEV;
1022
1023 endpoint = &dev->pipes[pn];
1024 ret = msm_slim_init_endpoint(dev, endpoint);
1025 dev_dbg(dev->dev, "sps register bam error code:%x\n", ret);
1026 return ret;
1027}
1028
1029static enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
1030 u8 pn, u8 **done_buf, u32 *done_len)
1031{
1032 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
1033 struct sps_iovec sio;
1034 int ret;
1035 if (done_len)
1036 *done_len = 0;
1037 if (done_buf)
1038 *done_buf = NULL;
1039 if (!dev->pipes[pn].connected)
1040 return SLIM_P_DISCONNECT;
1041 ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
1042 if (!ret) {
1043 if (done_len)
1044 *done_len = sio.size;
1045 if (done_buf)
1046 *done_buf = (u8 *)sio.addr;
1047 }
1048 dev_dbg(dev->dev, "get iovec returned %d\n", ret);
1049 return SLIM_P_INPROGRESS;
1050}
1051
/*
 * Queue a single data transfer of 'len' bytes at 'iobuf' on port 'pn'.
 * A DESC_DONE/ERROR wait event is registered first so that 'comp' is
 * completed when the descriptor finishes. Returns 0 or an error code
 * from the SPS layer.
 */
static int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf,
			u32 len, struct completion *comp)
{
	struct sps_register_event sreg;
	int ret;
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	/*
	 * NOTE(review): 7 appears to be the number of ports usable here
	 * (pipes below that are message queues per msm_slim_sps_init) —
	 * confirm and consider a named constant.
	 */
	if (pn >= 7)
		return -ENODEV;


	ctrl->ports[pn].xcomp = comp;
	sreg.options = (SPS_EVENT_DESC_DONE|SPS_EVENT_ERROR);
	sreg.mode = SPS_TRIGGER_WAIT;
	sreg.xfer_done = comp;
	sreg.callback = NULL;
	sreg.user = &ctrl->ports[pn];
	ret = sps_register_event(dev->pipes[pn].sps, &sreg);
	if (ret) {
		dev_dbg(dev->dev, "sps register event error:%x\n", ret);
		return ret;
	}
	/* One descriptor, interrupt raised on completion */
	ret = sps_transfer_one(dev->pipes[pn].sps, (u32)iobuf, len, NULL,
				SPS_IOVEC_FLAG_INT);
	dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);

	return ret;
}
1079
/*
 * Handle a satellite channel message: SLIM_USR_MC_CHAN_CTRL
 * (activate/deactivate/remove existing channels) or
 * SLIM_USR_MC_DEFINE_CHAN / SLIM_USR_MC_DEF_ACT_CHAN (define, and
 * optionally activate, a new channel or group).
 * 'buf' is the raw satellite message of length 'len'.
 * The req_def/req_rem counters recorded here are settled later when
 * SLIM_USR_MC_RECONFIG_NOW is processed in slim_sat_rxprocess().
 */
static int msm_sat_define_ch(struct msm_slim_sat *sat, u8 *buf, u8 len, u8 mc)
{
	struct msm_slim_ctrl *dev = sat->dev;
	enum slim_ch_control oper;
	int i;
	int ret = 0;
	if (mc == SLIM_USR_MC_CHAN_CTRL) {
		/* First referenced channel must already be known to us */
		for (i = 0; i < sat->nsatch; i++) {
			if (buf[5] == sat->satch[i].chan)
				break;
		}
		if (i >= sat->nsatch)
			return -ENOTCONN;
		oper = ((buf[3] & 0xC0) >> 6);
		/* part of grp. activating/removing 1 will take care of rest */
		ret = slim_control_ch(&sat->satcl, sat->satch[i].chanh, oper,
					false);
		if (!ret) {
			/* Tally pending remove/define for each listed chan */
			for (i = 5; i < len; i++) {
				int j;
				for (j = 0; j < sat->nsatch; j++) {
					if (buf[i] == sat->satch[j].chan) {
						if (oper == SLIM_CH_REMOVE)
							sat->satch[j].req_rem++;
						else
							sat->satch[j].req_def++;
						break;
					}
				}
			}
		}
	} else {
		u16 chh[40];
		struct slim_ch prop;
		u32 exp;
		u8 coeff, cc;
		u8 prrate = buf[6];
		if (len <= 8)
			return -EINVAL;
		/* Channel numbers start at byte 8 of the message */
		for (i = 8; i < len; i++) {
			int j = 0;
			for (j = 0; j < sat->nsatch; j++) {
				if (sat->satch[j].chan == buf[i]) {
					chh[i - 8] = sat->satch[j].chanh;
					break;
				}
			}
			if (j < sat->nsatch) {
				/* Known channel: just re-query and count */
				u16 dummy;
				ret = slim_query_ch(&sat->satcl, buf[i],
							&dummy);
				if (ret)
					return ret;
				if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
					sat->satch[j].req_def++;
				continue;
			}
			/* New channel: allocate a slot (j == sat->nsatch) */
			if (sat->nsatch >= MSM_MAX_SATCH)
				return -EXFULL;
			ret = slim_query_ch(&sat->satcl, buf[i], &chh[i - 8]);
			if (ret)
				return ret;
			sat->satch[j].chan = buf[i];
			sat->satch[j].chanh = chh[i - 8];
			if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
				sat->satch[j].req_def++;
			sat->nsatch++;
		}
		/* Decode channel properties from the message header bytes */
		prop.dataf = (enum slim_ch_dataf)((buf[3] & 0xE0) >> 5);
		/*
		 * NOTE(review): masking 0xC0 then shifting by 5 yields only
		 * even values (0,2,4,6) — verify the aux-format field should
		 * not be shifted by 6 instead.
		 */
		prop.auxf = (enum slim_ch_auxf)((buf[4] & 0xC0) >> 5);
		prop.baser = SLIM_RATE_4000HZ;
		if (prrate & 0x8)
			prop.baser = SLIM_RATE_11025HZ;
		else
			prop.baser = SLIM_RATE_4000HZ;
		prop.prot = (enum slim_ch_proto)(buf[5] & 0x0F);
		prop.sampleszbits = (buf[4] & 0x1F)*SLIM_CL_PER_SL;
		exp = (u32)((buf[5] & 0xF0) >> 4);
		coeff = (buf[4] & 0x20) >> 5;
		cc = (coeff ? 3 : 1);
		prop.ratem = cc * (1 << exp);
		/* More than one channel listed => define them as a group */
		if (i > 9)
			ret = slim_define_ch(&sat->satcl, &prop, chh, len - 8,
						true, &chh[0]);
		else
			ret = slim_define_ch(&sat->satcl, &prop,
						&chh[0], 1, false, NULL);
		dev_dbg(dev->dev, "define sat grp returned:%d", ret);
		if (ret)
			return ret;

		/* part of group so activating 1 will take care of rest */
		if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
			ret = slim_control_ch(&sat->satcl,
						chh[0],
						SLIM_CH_ACTIVATE, false);
	}
	return ret;
}
1179
/*
 * Drain one message from the controller RX queue and dispatch it:
 * - REPORT_PRESENT: assign a logical address; satellite devices are
 *   handed off to their workqueue for further processing.
 * - REPLY_INFORMATION/REPLY_VALUE: route to slim_msg_response().
 * - REPORT_INFORMATION: log the reported information element.
 * Anything else is logged as unexpected.
 */
static void msm_slim_rxwq(struct msm_slim_ctrl *dev)
{
	u8 buf[40];
	u8 mc, mt, len;
	int i, ret;
	if ((msm_slim_rx_dequeue(dev, (u8 *)buf)) != -ENODATA) {
		/* First word: 5-bit length, 3-bit message type, then MC */
		len = buf[0] & 0x1F;
		mt = (buf[0] >> 5) & 0x7;
		mc = buf[1];
		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 laddr;
			u8 e_addr[6];
			/* Enumeration address arrives in reverse byte order */
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			ret = slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
			/* Is this Qualcomm ported generic device? */
			if (!ret && e_addr[5] == QC_MFGID_LSB &&
				e_addr[4] == QC_MFGID_MSB &&
				e_addr[1] == QC_DEVID_PGD &&
				e_addr[2] != QC_CHIPID_SL)
				dev->pgdla = laddr;
			/* Enable runtime PM once the last expected MSM
			 * device has enumerated */
			if (!ret && !pm_runtime_enabled(dev->dev) &&
				laddr == (QC_MSM_DEVS - 1))
				pm_runtime_enable(dev->dev);

			if (!ret && msm_is_sat_dev(e_addr)) {
				struct msm_slim_sat *sat = addr_to_sat(dev,
								laddr);
				if (!sat)
					sat = msm_slim_alloc_sat(dev);
				if (!sat)
					return;

				sat->satcl.laddr = laddr;
				msm_sat_enqueue(sat, (u32 *)buf, len);
				queue_work(sat->wq, &sat->wd);
			}
			if (ret)
				pr_err("assign laddr failed, error:%d", ret);
		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			u8 tid = buf[3];
			dev_dbg(dev->dev, "tid:%d, len:%d\n", tid, len - 4);
			slim_msg_response(&dev->ctrl, &buf[4], tid,
						len - 4);
			pm_runtime_mark_last_busy(dev->dev);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			u8 l_addr = buf[2];
			/* 12-bit information element: buf[4]:buf[3][7:4] */
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
				l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
					i, buf[i+5]);
		} else {
			dev_err(dev->dev, "unexpected message:mc:%x, mt:%x",
				mc, mt);
			for (i = 0; i < len; i++)
				dev_err(dev->dev, "error msg: %x", buf[i]);

		}
	} else
		dev_err(dev->dev, "rxwq called and no dequeue");
}
1247
/*
 * Workqueue handler for one satellite: drains the satellite's RX queue
 * and services each message — enumeration/capability handshake, address
 * queries, channel define/control, reconfiguration and port connect /
 * disconnect requests. Most requests are acknowledged back to the
 * satellite with SLIM_USR_MC_GENERIC_ACK. Runtime-PM votes taken while
 * processing a message (satv/chv) are dropped before moving on.
 */
static void slim_sat_rxprocess(struct work_struct *work)
{
	struct msm_slim_sat *sat = container_of(work, struct msm_slim_sat, wd);
	struct msm_slim_ctrl *dev = sat->dev;
	u8 buf[40];

	while ((msm_sat_dequeue(sat, buf)) != -ENODATA) {
		struct slim_msg_txn txn;
		u8 len, mc, mt;
		u32 bw_sl;
		int ret = 0;
		/* >= 0 iff we hold a runtime-PM vote for this message */
		int satv = -1;
		bool gen_ack = false;
		u8 tid;
		u8 wbuf[8];
		int i, retries = 0;
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		txn.dt = SLIM_MSG_DEST_LOGICALADDR;
		txn.ec = 0;
		txn.rbuf = NULL;
		txn.la = sat->satcl.laddr;
		/* satellite handling */
		len = buf[0] & 0x1F;
		mc = buf[1];
		mt = (buf[0] >> 5) & 0x7;

		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 e_addr[6];
			/* Enumeration address is byte-reversed in the msg */
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			if (pm_runtime_enabled(dev->dev)) {
				satv = msm_slim_get_ctrl(dev);
				if (satv >= 0)
					sat->pending_capability = true;
			}
			/*
			 * Since capability message is already sent, present
			 * message will indicate subsystem hosting this
			 * satellite has restarted.
			 * Remove all active channels of this satellite
			 * when this is detected
			 */
			if (sat->sent_capability) {
				for (i = 0; i < sat->nsatch; i++) {
					if (sat->satch[i].reconf) {
						pr_err("SSR, sat:%d, rm ch:%d",
							sat->satcl.laddr,
							sat->satch[i].chan);
						slim_control_ch(&sat->satcl,
							sat->satch[i].chanh,
							SLIM_CH_REMOVE, true);
						sat->satch[i].reconf = false;
					}
				}
			}
		} else if (mt != SLIM_MSG_MT_CORE &&
				mc != SLIM_MSG_MC_REPORT_PRESENT) {
			/* Vote for the controller while we service this */
			satv = msm_slim_get_ctrl(dev);
		}
		switch (mc) {
		case SLIM_MSG_MC_REPORT_PRESENT:
			/* Remove runtime_pm vote once satellite acks */
			if (mt != SLIM_MSG_MT_CORE) {
				if (pm_runtime_enabled(dev->dev) &&
					sat->pending_capability) {
					msm_slim_put_ctrl(dev);
					sat->pending_capability = false;
				}
				continue;
			}
			/* send a Manager capability msg */
			if (sat->sent_capability) {
				if (mt == SLIM_MSG_MT_CORE)
					goto send_capability;
				else
					continue;
			}
			ret = slim_add_device(&dev->ctrl, &sat->satcl);
			if (ret) {
				dev_err(dev->dev,
					"Satellite-init failed");
				continue;
			}
			/* Satellite-channels */
			sat->satch = kzalloc(MSM_MAX_SATCH *
					sizeof(struct msm_sat_chan),
					GFP_KERNEL);
send_capability:
			txn.mc = SLIM_USR_MC_MASTER_CAPABILITY;
			txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
			txn.la = sat->satcl.laddr;
			txn.rl = 8;
			wbuf[0] = SAT_MAGIC_LSB;
			wbuf[1] = SAT_MAGIC_MSB;
			wbuf[2] = SAT_MSG_VER;
			wbuf[3] = SAT_MSG_PROT;
			txn.wbuf = wbuf;
			txn.len = 4;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			if (ret) {
				pr_err("capability for:0x%x fail:%d, retry:%d",
					sat->satcl.laddr, ret, retries);
				if (retries < INIT_MX_RETRIES) {
					msm_slim_wait_retry(dev);
					retries++;
					goto send_capability;
				} else {
					pr_err("failed after all retries:%d",
							ret);
				}
			} else {
				sat->sent_capability = true;
			}
			break;
		case SLIM_USR_MC_ADDR_QUERY:
			/* Look up a logical address for the queried EA */
			memcpy(&wbuf[1], &buf[4], 6);
			ret = slim_get_logical_addr(&sat->satcl,
					&wbuf[1], 6, &wbuf[7]);
			if (ret)
				memset(&wbuf[1], 0, 6);
			wbuf[0] = buf[3];
			txn.mc = SLIM_USR_MC_ADDR_REPLY;
			txn.rl = 12;
			txn.len = 8;
			txn.wbuf = wbuf;
			msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DEFINE_CHAN:
		case SLIM_USR_MC_DEF_ACT_CHAN:
		case SLIM_USR_MC_CHAN_CTRL:
			if (mc != SLIM_USR_MC_CHAN_CTRL)
				tid = buf[7];
			else
				tid = buf[4];
			gen_ack = true;
			ret = msm_sat_define_ch(sat, buf, len, mc);
			if (ret) {
				dev_err(dev->dev,
					"SAT define_ch returned:%d",
					ret);
			}
			/* Keep the controller voted until RECONFIG_NOW */
			if (!sat->pending_reconf) {
				int chv = msm_slim_get_ctrl(dev);
				if (chv >= 0)
					sat->pending_reconf = true;
			}
			break;
		case SLIM_USR_MC_RECONFIG_NOW:
			tid = buf[3];
			gen_ack = true;
			ret = slim_reconfigure_now(&sat->satcl);
			/* Settle pending define/remove bookkeeping */
			for (i = 0; i < sat->nsatch; i++) {
				struct msm_sat_chan *sch = &sat->satch[i];
				if (sch->req_rem) {
					if (!ret) {
						slim_dealloc_ch(&sat->satcl,
								sch->chanh);
						sch->reconf = false;
					}
					sch->req_rem--;
				} else if (sch->req_def) {
					if (ret)
						slim_dealloc_ch(&sat->satcl,
								sch->chanh);
					else
						sch->reconf = true;
					sch->req_def--;
				}
			}
			if (sat->pending_reconf) {
				msm_slim_put_ctrl(dev);
				sat->pending_reconf = false;
			}
			break;
		case SLIM_USR_MC_REQ_BW:
			/* what we get is in SLOTS */
			bw_sl = (u32)buf[4] << 3 |
						((buf[3] & 0xE0) >> 5);
			sat->satcl.pending_msgsl = bw_sl;
			tid = buf[5];
			gen_ack = true;
			break;
		case SLIM_USR_MC_CONNECT_SRC:
		case SLIM_USR_MC_CONNECT_SINK:
			if (mc == SLIM_USR_MC_CONNECT_SRC)
				txn.mc = SLIM_MSG_MC_CONNECT_SOURCE;
			else
				txn.mc = SLIM_MSG_MC_CONNECT_SINK;
			wbuf[0] = buf[4] & 0x1F;
			wbuf[1] = buf[5];
			tid = buf[6];
			txn.la = buf[3];
			txn.mt = SLIM_MSG_MT_CORE;
			txn.rl = 6;
			txn.len = 2;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DISCONNECT_PORT:
			txn.mc = SLIM_MSG_MC_DISCONNECT_PORT;
			wbuf[0] = buf[4] & 0x1F;
			tid = buf[5];
			txn.la = buf[3];
			txn.rl = 5;
			txn.len = 1;
			txn.mt = SLIM_MSG_MT_CORE;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			/* no break: falls into default, which only breaks —
			 * harmless, but an explicit break would be clearer */
		default:
			break;
		}
		if (!gen_ack) {
			if (mc != SLIM_MSG_MC_REPORT_PRESENT && satv >= 0)
				msm_slim_put_ctrl(dev);
			continue;
		}

		/* Ack the request back to the satellite */
		wbuf[0] = tid;
		if (!ret)
			wbuf[1] = MSM_SAT_SUCCSS;
		else
			wbuf[1] = 0;
		txn.mc = SLIM_USR_MC_GENERIC_ACK;
		txn.la = sat->satcl.laddr;
		txn.rl = 6;
		txn.len = 2;
		txn.wbuf = wbuf;
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		msm_xfer_msg(&dev->ctrl, &txn);
		if (satv >= 0)
			msm_slim_put_ctrl(dev);
	}
}
1485
Sagar Dharia790cfd02011-09-25 17:56:24 -06001486static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev)
1487{
1488 struct msm_slim_sat *sat;
1489 char *name;
1490 if (dev->nsats >= MSM_MAX_NSATS)
1491 return NULL;
1492
1493 sat = kzalloc(sizeof(struct msm_slim_sat), GFP_KERNEL);
1494 if (!sat) {
1495 dev_err(dev->dev, "no memory for satellite");
1496 return NULL;
1497 }
1498 name = kzalloc(SLIMBUS_NAME_SIZE, GFP_KERNEL);
1499 if (!name) {
1500 dev_err(dev->dev, "no memory for satellite name");
1501 kfree(sat);
1502 return NULL;
1503 }
1504 dev->satd[dev->nsats] = sat;
1505 sat->dev = dev;
1506 snprintf(name, SLIMBUS_NAME_SIZE, "msm_sat%d", dev->nsats);
1507 sat->satcl.name = name;
1508 spin_lock_init(&sat->lock);
1509 INIT_WORK(&sat->wd, slim_sat_rxprocess);
1510 sat->wq = create_singlethread_workqueue(sat->satcl.name);
1511 if (!sat->wq) {
1512 kfree(name);
1513 kfree(sat);
1514 return NULL;
1515 }
1516 /*
1517 * Both sats will be allocated from RX thread and RX thread will
1518 * process messages sequentially. No synchronization necessary
1519 */
1520 dev->nsats++;
1521 return sat;
1522}
1523
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001524static void
1525msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
1526{
1527 u32 *buf = ev->data.transfer.user;
1528 struct sps_iovec *iovec = &ev->data.transfer.iovec;
1529
1530 /*
1531 * Note the virtual address needs to be offset by the same index
1532 * as the physical address or just pass in the actual virtual address
1533 * if the sps_mem_buffer is not needed. Note that if completion is
1534 * used, the virtual address won't be available and will need to be
1535 * calculated based on the offset of the physical address
1536 */
1537 if (ev->event_id == SPS_EVENT_DESC_DONE) {
1538
1539 pr_debug("buf = 0x%p, data = 0x%x\n", buf, *buf);
1540
1541 pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
1542 iovec->addr, iovec->size, iovec->flags);
1543
1544 } else {
1545 dev_err(dev->dev, "%s: unknown event %d\n",
1546 __func__, ev->event_id);
1547 }
1548}
1549
1550static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
1551{
1552 struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;
1553 msm_slim_rx_msgq_event(dev, notify);
1554}
1555
1556/* Queue up Rx message buffer */
1557static inline int
1558msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
1559{
1560 int ret;
1561 u32 flags = SPS_IOVEC_FLAG_INT;
1562 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1563 struct sps_mem_buffer *mem = &endpoint->buf;
1564 struct sps_pipe *pipe = endpoint->sps;
1565
1566 /* Rx message queue buffers are 4 bytes in length */
1567 u8 *virt_addr = mem->base + (4 * ix);
1568 u32 phys_addr = mem->phys_base + (4 * ix);
1569
1570 pr_debug("index:%d, phys:0x%x, virt:0x%p\n", ix, phys_addr, virt_addr);
1571
1572 ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, flags);
1573 if (ret)
1574 dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);
1575
1576 return ret;
1577}
1578
/*
 * Pop one completed 4-byte buffer from the RX message-queue pipe, copy
 * its payload into data[offset], then immediately hand the buffer back
 * to the pipe. Returns 0 on success or the sps_get_iovec() error.
 */
static inline int
msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
{
	struct msm_slim_endp *endpoint = &dev->rx_msgq;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_pipe *pipe = endpoint->sps;
	struct sps_iovec iovec;
	int index;
	int ret;

	ret = sps_get_iovec(pipe, &iovec);
	if (ret) {
		dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
		goto err_exit;
	}

	pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
		iovec.addr, iovec.size, iovec.flags);
	/* The descriptor must point inside our message-queue buffer */
	BUG_ON(iovec.addr < mem->phys_base);
	BUG_ON(iovec.addr >= mem->phys_base + mem->size);

	/* Calculate buffer index */
	index = (iovec.addr - mem->phys_base) / 4;
	*(data + offset) = *((u32 *)mem->base + index);

	pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index, *data);

	/* Add buffer back to the queue */
	(void)msm_slim_post_rx_msgq(dev, index);

err_exit:
	return ret;
}
1612
/*
 * RX thread: woken via dev->rx_msgq_notify for each received 4-byte word
 * (or per full message in non-msgq mode). Words are accumulated into
 * 'buffer' until 'msg_len' bytes have arrived; the complete message is
 * then routed either to the owning satellite's workqueue or to the
 * generic RX path (msm_slim_rxwq).
 */
static int msm_slim_rx_msgq_thread(void *data)
{
	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
	struct completion *notify = &dev->rx_msgq_notify;
	struct msm_slim_sat *sat = NULL;
	u32 mc = 0;
	u32 mt = 0;
	u32 buffer[10];
	int index = 0;
	u8 msg_len = 0;
	int ret;

	dev_dbg(dev->dev, "rx thread started");

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = wait_for_completion_interruptible(notify);

		if (ret)
			dev_err(dev->dev, "rx thread wait error:%d", ret);

		/* 1 irq notification per message */
		if (!dev->use_rx_msgqs) {
			msm_slim_rxwq(dev);
			continue;
		}

		ret = msm_slim_rx_msgq_get(dev, buffer, index);
		if (ret) {
			dev_err(dev->dev, "rx_msgq_get() failed 0x%x\n", ret);
			continue;
		}

		pr_debug("message[%d] = 0x%x\n", index, *buffer);

		/* Decide if we use generic RX or satellite RX */
		if (index++ == 0) {
			/* First word carries length, MT and MC fields */
			msg_len = *buffer & 0x1F;
			pr_debug("Start of new message, len = %d\n", msg_len);
			mt = (buffer[0] >> 5) & 0x7;
			mc = (buffer[0] >> 8) & 0xff;
			dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
			if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
				u8 laddr;
				laddr = (u8)((buffer[0] >> 16) & 0xff);
				sat = addr_to_sat(dev, laddr);
			}
			/*
			 * NOTE(review): a message of <= 4 bytes would never
			 * hit the completion branch below on its first word —
			 * confirm minimum message length makes this moot.
			 */
		} else if ((index * 4) >= msg_len) {
			/* Last word received: dispatch the whole message */
			index = 0;
			if (sat) {
				msm_sat_enqueue(sat, buffer, msg_len);
				queue_work(sat->wq, &sat->wd);
				sat = NULL;
			} else {
				msm_slim_rx_enqueue(dev, buffer, msg_len);
				msm_slim_rxwq(dev);
			}
		}
	}

	return 0;
}
1676
1677static int __devinit msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev)
1678{
1679 int i, ret;
1680 u32 pipe_offset;
1681 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1682 struct sps_connect *config = &endpoint->config;
1683 struct sps_mem_buffer *descr = &config->desc;
1684 struct sps_mem_buffer *mem = &endpoint->buf;
1685 struct completion *notify = &dev->rx_msgq_notify;
1686
1687 struct sps_register_event sps_error_event; /* SPS_ERROR */
1688 struct sps_register_event sps_descr_event; /* DESCR_DONE */
1689
Sagar Dharia31ac5812012-01-04 11:38:59 -07001690 init_completion(notify);
1691 if (!dev->use_rx_msgqs)
1692 goto rx_thread_create;
1693
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001694 /* Allocate the endpoint */
1695 ret = msm_slim_init_endpoint(dev, endpoint);
1696 if (ret) {
1697 dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
1698 goto sps_init_endpoint_failed;
1699 }
1700
1701 /* Get the pipe indices for the message queues */
1702 pipe_offset = (readl_relaxed(dev->base + MGR_STATUS) & 0xfc) >> 2;
1703 dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);
1704
1705 config->mode = SPS_MODE_SRC;
1706 config->source = dev->bam.hdl;
1707 config->destination = SPS_DEV_HANDLE_MEM;
1708 config->src_pipe_index = pipe_offset;
1709 config->options = SPS_O_DESC_DONE | SPS_O_ERROR |
1710 SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
1711
1712 /* Allocate memory for the FIFO descriptors */
1713 ret = msm_slim_sps_mem_alloc(dev, descr,
1714 MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
1715 if (ret) {
1716 dev_err(dev->dev, "unable to allocate SPS descriptors\n");
1717 goto alloc_descr_failed;
1718 }
1719
1720 ret = sps_connect(endpoint->sps, config);
1721 if (ret) {
1722 dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
1723 goto sps_connect_failed;
1724 }
1725
1726 /* Register completion for DESC_DONE */
1727 init_completion(notify);
1728 memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));
1729
1730 sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
1731 sps_descr_event.options = SPS_O_DESC_DONE;
1732 sps_descr_event.user = (void *)dev;
1733 sps_descr_event.xfer_done = notify;
1734
1735 ret = sps_register_event(endpoint->sps, &sps_descr_event);
1736 if (ret) {
1737 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1738 goto sps_reg_event_failed;
1739 }
1740
1741 /* Register callback for errors */
1742 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1743 sps_error_event.mode = SPS_TRIGGER_CALLBACK;
1744 sps_error_event.options = SPS_O_ERROR;
1745 sps_error_event.user = (void *)dev;
1746 sps_error_event.callback = msm_slim_rx_msgq_cb;
1747
1748 ret = sps_register_event(endpoint->sps, &sps_error_event);
1749 if (ret) {
1750 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1751 goto sps_reg_event_failed;
1752 }
1753
1754 /* Allocate memory for the message buffer(s), N descrs, 4-byte mesg */
1755 ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
1756 if (ret) {
1757 dev_err(dev->dev, "dma_alloc_coherent failed\n");
1758 goto alloc_buffer_failed;
1759 }
1760
1761 /*
1762 * Call transfer_one for each 4-byte buffer
1763 * Use (buf->size/4) - 1 for the number of buffer to post
1764 */
1765
1766 /* Setup the transfer */
1767 for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
1768 ret = msm_slim_post_rx_msgq(dev, i);
1769 if (ret) {
1770 dev_err(dev->dev, "post_rx_msgq() failed 0x%x\n", ret);
1771 goto sps_transfer_failed;
1772 }
1773 }
1774
Sagar Dharia31ac5812012-01-04 11:38:59 -07001775rx_thread_create:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001776 /* Fire up the Rx message queue thread */
1777 dev->rx_msgq_thread = kthread_run(msm_slim_rx_msgq_thread, dev,
1778 MSM_SLIM_NAME "_rx_msgq_thread");
1779 if (!dev->rx_msgq_thread) {
1780 dev_err(dev->dev, "Failed to start Rx message queue thread\n");
Sagar Dharia31ac5812012-01-04 11:38:59 -07001781 /* Tear-down BAMs or return? */
1782 if (!dev->use_rx_msgqs)
1783 return -EIO;
1784 else
1785 ret = -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001786 } else
1787 return 0;
1788
1789sps_transfer_failed:
1790 msm_slim_sps_mem_free(dev, mem);
1791alloc_buffer_failed:
1792 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1793 sps_register_event(endpoint->sps, &sps_error_event);
1794sps_reg_event_failed:
1795 sps_disconnect(endpoint->sps);
1796sps_connect_failed:
1797 msm_slim_sps_mem_free(dev, descr);
1798alloc_descr_failed:
1799 msm_slim_free_endpoint(endpoint);
1800sps_init_endpoint_failed:
Sagar Dharia31ac5812012-01-04 11:38:59 -07001801 dev->use_rx_msgqs = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001802 return ret;
1803}
1804
/* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */
static int __devinit
msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem)
{
	int i, ret;
	u32 bam_handle;
	struct sps_bam_props bam_props = {0};

	/* Per-EE pipe ownership/VMID assignments for this BAM */
	static struct sps_bam_sec_config_props sec_props = {
		.ees = {
			[0] = {		/* LPASS */
				.vmid = 0,
				.pipe_mask = 0xFFFF98,
			},
			[1] = {		/* Krait Apps */
				.vmid = 1,
				.pipe_mask = 0x3F000007,
			},
			[2] = {		/* Modem */
				.vmid = 2,
				.pipe_mask = 0x00000060,
			},
		},
	};

	bam_props.ee = dev->ee;
	bam_props.virt_addr = dev->bam.base;
	bam_props.phys_addr = bam_mem->start;
	bam_props.irq = dev->bam.irq;
	bam_props.manage = SPS_BAM_MGR_LOCAL;
	bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;

	bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
	bam_props.p_sec_config_props = &sec_props;

	bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* First 7 bits are for message Qs */
	for (i = 7; i < 32; i++) {
		/* Check what pipes are owned by Apps. */
		if ((sec_props.ees[dev->ee].pipe_mask >> i) & 0x1)
			break;
	}
	/* pipe_b: offset of the first data pipe owned by this EE */
	dev->pipe_b = i - 7;

	/* Register the BAM device with the SPS driver */
	ret = sps_register_bam_device(&bam_props, &bam_handle);
	if (ret) {
		/* Fall back to non-msgq operation rather than failing */
		dev_err(dev->dev, "disabling BAM: reg-bam failed 0x%x\n", ret);
		dev->use_rx_msgqs = 0;
		goto init_rx_msgq;
	}
	dev->bam.hdl = bam_handle;
	dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%x\n", bam_handle);

init_rx_msgq:
	ret = msm_slim_init_rx_msgq(dev);
	if (ret)
		dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
	/* Undo BAM registration if msgq init failed after registering */
	if (ret && bam_handle) {
		sps_deregister_bam_device(bam_handle);
		dev->bam.hdl = 0L;
	}
	return ret;
}
1871
1872static void msm_slim_sps_exit(struct msm_slim_ctrl *dev)
1873{
1874 if (dev->use_rx_msgqs) {
1875 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1876 struct sps_connect *config = &endpoint->config;
1877 struct sps_mem_buffer *descr = &config->desc;
1878 struct sps_mem_buffer *mem = &endpoint->buf;
1879 struct sps_register_event sps_event;
1880 memset(&sps_event, 0x00, sizeof(sps_event));
1881 msm_slim_sps_mem_free(dev, mem);
1882 sps_register_event(endpoint->sps, &sps_event);
1883 sps_disconnect(endpoint->sps);
1884 msm_slim_sps_mem_free(dev, descr);
1885 msm_slim_free_endpoint(endpoint);
Sagar Dharia31ac5812012-01-04 11:38:59 -07001886 sps_deregister_bam_device(dev->bam.hdl);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001887 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001888}
1889
Sagar Dhariacc969452011-09-19 10:34:30 -06001890static void msm_slim_prg_slew(struct platform_device *pdev,
1891 struct msm_slim_ctrl *dev)
1892{
1893 struct resource *slew_io;
1894 void __iomem *slew_reg;
1895 /* SLEW RATE register for this slimbus */
1896 dev->slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1897 "slimbus_slew_reg");
1898 if (!dev->slew_mem) {
1899 dev_dbg(&pdev->dev, "no slimbus slew resource\n");
1900 return;
1901 }
1902 slew_io = request_mem_region(dev->slew_mem->start,
1903 resource_size(dev->slew_mem), pdev->name);
1904 if (!slew_io) {
1905 dev_dbg(&pdev->dev, "slimbus-slew mem claimed\n");
1906 dev->slew_mem = NULL;
1907 return;
1908 }
1909
1910 slew_reg = ioremap(dev->slew_mem->start, resource_size(dev->slew_mem));
1911 if (!slew_reg) {
1912 dev_dbg(dev->dev, "slew register mapping failed");
1913 release_mem_region(dev->slew_mem->start,
1914 resource_size(dev->slew_mem));
1915 dev->slew_mem = NULL;
1916 return;
1917 }
1918 writel_relaxed(1, slew_reg);
1919 /* Make sure slimbus-slew rate enabling goes through */
1920 wmb();
1921 iounmap(slew_reg);
1922}
1923
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001924static int __devinit msm_slim_probe(struct platform_device *pdev)
1925{
1926 struct msm_slim_ctrl *dev;
1927 int ret;
Joonwoo Parkf69f77a2012-08-28 15:26:11 -07001928 enum apr_subsys_state q6_state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001929 struct resource *bam_mem, *bam_io;
1930 struct resource *slim_mem, *slim_io;
1931 struct resource *irq, *bam_irq;
Sagar Dharia1beb2202012-07-31 19:06:21 -06001932 bool rxreg_access = false;
Joonwoo Parkf69f77a2012-08-28 15:26:11 -07001933
1934 q6_state = apr_get_q6_state();
1935 if (q6_state == APR_SUBSYS_DOWN) {
1936 dev_dbg(&pdev->dev, "defering %s, adsp_state %d\n", __func__,
1937 q6_state);
1938 return -EPROBE_DEFER;
1939 } else
1940 dev_dbg(&pdev->dev, "adsp is ready\n");
1941
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001942 slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1943 "slimbus_physical");
1944 if (!slim_mem) {
1945 dev_err(&pdev->dev, "no slimbus physical memory resource\n");
1946 return -ENODEV;
1947 }
1948 slim_io = request_mem_region(slim_mem->start, resource_size(slim_mem),
1949 pdev->name);
1950 if (!slim_io) {
1951 dev_err(&pdev->dev, "slimbus memory already claimed\n");
1952 return -EBUSY;
1953 }
1954
1955 bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1956 "slimbus_bam_physical");
1957 if (!bam_mem) {
1958 dev_err(&pdev->dev, "no slimbus BAM memory resource\n");
1959 ret = -ENODEV;
1960 goto err_get_res_bam_failed;
1961 }
1962 bam_io = request_mem_region(bam_mem->start, resource_size(bam_mem),
1963 pdev->name);
1964 if (!bam_io) {
1965 release_mem_region(slim_mem->start, resource_size(slim_mem));
1966 dev_err(&pdev->dev, "slimbus BAM memory already claimed\n");
1967 ret = -EBUSY;
1968 goto err_get_res_bam_failed;
1969 }
1970 irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1971 "slimbus_irq");
1972 if (!irq) {
1973 dev_err(&pdev->dev, "no slimbus IRQ resource\n");
1974 ret = -ENODEV;
1975 goto err_get_res_failed;
1976 }
1977 bam_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1978 "slimbus_bam_irq");
1979 if (!bam_irq) {
1980 dev_err(&pdev->dev, "no slimbus BAM IRQ resource\n");
1981 ret = -ENODEV;
1982 goto err_get_res_failed;
1983 }
1984
1985 dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL);
1986 if (!dev) {
1987 dev_err(&pdev->dev, "no memory for MSM slimbus controller\n");
1988 ret = -ENOMEM;
1989 goto err_get_res_failed;
1990 }
1991 dev->dev = &pdev->dev;
1992 platform_set_drvdata(pdev, dev);
1993 slim_set_ctrldata(&dev->ctrl, dev);
1994 dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
1995 if (!dev->base) {
1996 dev_err(&pdev->dev, "IOremap failed\n");
1997 ret = -ENOMEM;
1998 goto err_ioremap_failed;
1999 }
2000 dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem));
2001 if (!dev->bam.base) {
2002 dev_err(&pdev->dev, "BAM IOremap failed\n");
2003 ret = -ENOMEM;
2004 goto err_ioremap_bam_failed;
2005 }
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002006 if (pdev->dev.of_node) {
2007
2008 ret = of_property_read_u32(pdev->dev.of_node, "cell-index",
2009 &dev->ctrl.nr);
2010 if (ret) {
2011 dev_err(&pdev->dev, "Cell index not specified:%d", ret);
2012 goto err_of_init_failed;
2013 }
Sagar Dharia1beb2202012-07-31 19:06:21 -06002014 rxreg_access = of_property_read_bool(pdev->dev.of_node,
2015 "qcom,rxreg-access");
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002016 /* Optional properties */
2017 ret = of_property_read_u32(pdev->dev.of_node,
2018 "qcom,min-clk-gear", &dev->ctrl.min_cg);
2019 ret = of_property_read_u32(pdev->dev.of_node,
2020 "qcom,max-clk-gear", &dev->ctrl.max_cg);
Sagar Dharia1beb2202012-07-31 19:06:21 -06002021 pr_debug("min_cg:%d, max_cg:%d, rxreg: %d", dev->ctrl.min_cg,
2022 dev->ctrl.max_cg, rxreg_access);
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002023 } else {
2024 dev->ctrl.nr = pdev->id;
2025 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002026 dev->ctrl.nchans = MSM_SLIM_NCHANS;
2027 dev->ctrl.nports = MSM_SLIM_NPORTS;
2028 dev->ctrl.set_laddr = msm_set_laddr;
2029 dev->ctrl.xfer_msg = msm_xfer_msg;
Sagar Dharia144e5e02011-08-08 17:30:11 -06002030 dev->ctrl.wakeup = msm_clk_pause_wakeup;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002031 dev->ctrl.config_port = msm_config_port;
2032 dev->ctrl.port_xfer = msm_slim_port_xfer;
2033 dev->ctrl.port_xfer_status = msm_slim_port_xfer_status;
2034 /* Reserve some messaging BW for satellite-apps driver communication */
2035 dev->ctrl.sched.pending_msgsl = 30;
2036
2037 init_completion(&dev->reconf);
2038 mutex_init(&dev->tx_lock);
2039 spin_lock_init(&dev->rx_lock);
2040 dev->ee = 1;
Sagar Dharia1beb2202012-07-31 19:06:21 -06002041 if (rxreg_access)
2042 dev->use_rx_msgqs = 0;
2043 else
2044 dev->use_rx_msgqs = 1;
2045
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002046 dev->irq = irq->start;
2047 dev->bam.irq = bam_irq->start;
2048
Sagar Dhariadebc8b72012-08-11 15:02:12 -06002049 dev->hclk = clk_get(dev->dev, "iface_clk");
2050 if (IS_ERR(dev->hclk))
2051 dev->hclk = NULL;
2052 else
2053 clk_prepare_enable(dev->hclk);
2054
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002055 ret = msm_slim_sps_init(dev, bam_mem);
2056 if (ret != 0) {
2057 dev_err(dev->dev, "error SPS init\n");
2058 goto err_sps_init_failed;
2059 }
2060
2061
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002062 dev->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
2063 dev->framer.superfreq =
2064 dev->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
2065 dev->ctrl.a_framer = &dev->framer;
2066 dev->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002067 dev->ctrl.dev.parent = &pdev->dev;
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002068 dev->ctrl.dev.of_node = pdev->dev.of_node;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002069
2070 ret = request_irq(dev->irq, msm_slim_interrupt, IRQF_TRIGGER_HIGH,
2071 "msm_slim_irq", dev);
2072 if (ret) {
2073 dev_err(&pdev->dev, "request IRQ failed\n");
2074 goto err_request_irq_failed;
2075 }
2076
Sagar Dhariacc969452011-09-19 10:34:30 -06002077 msm_slim_prg_slew(pdev, dev);
Sagar Dhariab1c0acf2012-02-06 18:16:58 -07002078
2079 /* Register with framework before enabling frame, clock */
2080 ret = slim_add_numbered_controller(&dev->ctrl);
2081 if (ret) {
2082 dev_err(dev->dev, "error adding controller\n");
2083 goto err_ctrl_failed;
2084 }
2085
2086
Tianyi Gou44a81b02012-02-06 17:49:07 -08002087 dev->rclk = clk_get(dev->dev, "core_clk");
Sagar Dhariab1c0acf2012-02-06 18:16:58 -07002088 if (!dev->rclk) {
2089 dev_err(dev->dev, "slimbus clock not found");
2090 goto err_clk_get_failed;
2091 }
Sagar Dhariacc969452011-09-19 10:34:30 -06002092 clk_set_rate(dev->rclk, SLIM_ROOT_FREQ);
Sagar Dharia9acf7f42012-03-08 09:45:30 -07002093 clk_prepare_enable(dev->rclk);
Sagar Dhariacc969452011-09-19 10:34:30 -06002094
Sagar Dharia82e516f2012-03-16 16:01:23 -06002095 dev->ver = readl_relaxed(dev->base);
2096 /* Version info in 16 MSbits */
2097 dev->ver >>= 16;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002098 /* Component register initialization */
Sagar Dharia82e516f2012-03-16 16:01:23 -06002099 writel_relaxed(1, dev->base + CFG_PORT(COMP_CFG, dev->ver));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002100 writel_relaxed((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
Sagar Dharia82e516f2012-03-16 16:01:23 -06002101 dev->base + CFG_PORT(COMP_TRUST_CFG, dev->ver));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002102
2103 /*
2104 * Manager register initialization
2105 * If RX msg Q is used, disable RX_MSG_RCVD interrupt
2106 */
2107 if (dev->use_rx_msgqs)
2108 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
2109 MGR_INT_MSG_BUF_CONTE | /* MGR_INT_RX_MSG_RCVD | */
2110 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
2111 else
2112 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
2113 MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
2114 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
2115 writel_relaxed(1, dev->base + MGR_CFG);
2116 /*
2117 * Framer registers are beyond 1K memory region after Manager and/or
2118 * component registers. Make sure those writes are ordered
2119 * before framer register writes
2120 */
2121 wmb();
2122
2123 /* Framer register initialization */
Sagar Dhariadebc8b72012-08-11 15:02:12 -06002124 writel_relaxed((1 << INTR_WAKE) | (0xA << REF_CLK_GEAR) |
2125 (0xA << CLK_GEAR) | (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002126 dev->base + FRM_CFG);
2127 /*
2128 * Make sure that framer wake-up and enabling writes go through
2129 * before any other component is enabled. Framer is responsible for
2130 * clocking the bus and enabling framer first will ensure that other
2131 * devices can report presence when they are enabled
2132 */
2133 mb();
2134
2135 /* Enable RX msg Q */
2136 if (dev->use_rx_msgqs)
2137 writel_relaxed(MGR_CFG_ENABLE | MGR_CFG_RX_MSGQ_EN,
2138 dev->base + MGR_CFG);
2139 else
2140 writel_relaxed(MGR_CFG_ENABLE, dev->base + MGR_CFG);
2141 /*
2142 * Make sure that manager-enable is written through before interface
2143 * device is enabled
2144 */
2145 mb();
2146 writel_relaxed(1, dev->base + INTF_CFG);
2147 /*
2148 * Make sure that interface-enable is written through before enabling
2149 * ported generic device inside MSM manager
2150 */
2151 mb();
Sagar Dharia82e516f2012-03-16 16:01:23 -06002152 writel_relaxed(1, dev->base + CFG_PORT(PGD_CFG, dev->ver));
2153 writel_relaxed(0x3F<<17, dev->base + CFG_PORT(PGD_OWN_EEn, dev->ver) +
2154 (4 * dev->ee));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002155 /*
2156 * Make sure that ported generic device is enabled and port-EE settings
2157 * are written through before finally enabling the component
2158 */
2159 mb();
2160
Sagar Dharia82e516f2012-03-16 16:01:23 -06002161 writel_relaxed(1, dev->base + CFG_PORT(COMP_CFG, dev->ver));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002162 /*
2163 * Make sure that all writes have gone through before exiting this
2164 * function
2165 */
2166 mb();
Sagar Dhariaa6627e02012-08-28 12:20:49 -06002167
2168 /* Add devices registered with board-info now that controller is up */
2169 slim_ctrl_add_boarddevs(&dev->ctrl);
2170
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002171 if (pdev->dev.of_node)
2172 of_register_slim_devices(&dev->ctrl);
2173
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002174 pm_runtime_use_autosuspend(&pdev->dev);
2175 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_SLIM_AUTOSUSPEND);
2176 pm_runtime_set_active(&pdev->dev);
2177
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002178 dev_dbg(dev->dev, "MSM SB controller is up!\n");
2179 return 0;
2180
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002181err_ctrl_failed:
Sagar Dharia82e516f2012-03-16 16:01:23 -06002182 writel_relaxed(0, dev->base + CFG_PORT(COMP_CFG, dev->ver));
Sagar Dhariab1c0acf2012-02-06 18:16:58 -07002183err_clk_get_failed:
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002184 kfree(dev->satd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002185err_request_irq_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002186 msm_slim_sps_exit(dev);
2187err_sps_init_failed:
Sagar Dhariadebc8b72012-08-11 15:02:12 -06002188 if (dev->hclk) {
2189 clk_disable_unprepare(dev->hclk);
2190 clk_put(dev->hclk);
2191 }
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002192err_of_init_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002193 iounmap(dev->bam.base);
2194err_ioremap_bam_failed:
2195 iounmap(dev->base);
2196err_ioremap_failed:
2197 kfree(dev);
2198err_get_res_failed:
2199 release_mem_region(bam_mem->start, resource_size(bam_mem));
2200err_get_res_bam_failed:
2201 release_mem_region(slim_mem->start, resource_size(slim_mem));
2202 return ret;
2203}
2204
2205static int __devexit msm_slim_remove(struct platform_device *pdev)
2206{
2207 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
2208 struct resource *bam_mem;
2209 struct resource *slim_mem;
Sagar Dhariacc969452011-09-19 10:34:30 -06002210 struct resource *slew_mem = dev->slew_mem;
Sagar Dharia790cfd02011-09-25 17:56:24 -06002211 int i;
2212 for (i = 0; i < dev->nsats; i++) {
2213 struct msm_slim_sat *sat = dev->satd[i];
Sagar Dharia0ffdca12011-09-25 18:55:53 -06002214 int j;
2215 for (j = 0; j < sat->nsatch; j++)
2216 slim_dealloc_ch(&sat->satcl, sat->satch[j].chanh);
Sagar Dharia790cfd02011-09-25 17:56:24 -06002217 slim_remove_device(&sat->satcl);
2218 kfree(sat->satch);
2219 destroy_workqueue(sat->wq);
2220 kfree(sat->satcl.name);
2221 kfree(sat);
2222 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002223 pm_runtime_disable(&pdev->dev);
2224 pm_runtime_set_suspended(&pdev->dev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002225 free_irq(dev->irq, dev);
2226 slim_del_controller(&dev->ctrl);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002227 clk_put(dev->rclk);
Sagar Dhariadebc8b72012-08-11 15:02:12 -06002228 if (dev->hclk)
2229 clk_put(dev->hclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002230 msm_slim_sps_exit(dev);
2231 kthread_stop(dev->rx_msgq_thread);
2232 iounmap(dev->bam.base);
2233 iounmap(dev->base);
2234 kfree(dev);
2235 bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2236 "slimbus_bam_physical");
Sagar Dhariae77961f2011-09-27 14:03:50 -06002237 if (bam_mem)
2238 release_mem_region(bam_mem->start, resource_size(bam_mem));
Sagar Dhariacc969452011-09-19 10:34:30 -06002239 if (slew_mem)
2240 release_mem_region(slew_mem->start, resource_size(slew_mem));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002241 slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2242 "slimbus_physical");
Sagar Dhariae77961f2011-09-27 14:03:50 -06002243 if (slim_mem)
2244 release_mem_region(slim_mem->start, resource_size(slim_mem));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002245 return 0;
2246}
2247
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002248#ifdef CONFIG_PM_RUNTIME
2249static int msm_slim_runtime_idle(struct device *device)
2250{
2251 dev_dbg(device, "pm_runtime: idle...\n");
2252 pm_request_autosuspend(device);
2253 return -EAGAIN;
2254}
2255#endif
2256
2257/*
2258 * If PM_RUNTIME is not defined, these 2 functions become helper
2259 * functions to be called from system suspend/resume. So they are not
2260 * inside ifdef CONFIG_PM_RUNTIME
2261 */
Sagar Dharia45e77912012-01-10 09:55:18 -07002262#ifdef CONFIG_PM_SLEEP
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002263static int msm_slim_runtime_suspend(struct device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002264{
2265 struct platform_device *pdev = to_platform_device(device);
2266 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002267 int ret;
2268 dev_dbg(device, "pm_runtime: suspending...\n");
2269 dev->state = MSM_CTRL_SLEEPING;
2270 ret = slim_ctrl_clk_pause(&dev->ctrl, false, SLIM_CLK_UNSPECIFIED);
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002271 if (ret) {
2272 dev_err(device, "clk pause not entered:%d", ret);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002273 dev->state = MSM_CTRL_AWAKE;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002274 } else {
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002275 dev->state = MSM_CTRL_ASLEEP;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002276 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002277 return ret;
2278}
2279
2280static int msm_slim_runtime_resume(struct device *device)
2281{
2282 struct platform_device *pdev = to_platform_device(device);
2283 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
2284 int ret = 0;
2285 dev_dbg(device, "pm_runtime: resuming...\n");
2286 if (dev->state == MSM_CTRL_ASLEEP)
2287 ret = slim_ctrl_clk_pause(&dev->ctrl, true, 0);
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002288 if (ret) {
2289 dev_err(device, "clk pause not exited:%d", ret);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002290 dev->state = MSM_CTRL_ASLEEP;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002291 } else {
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002292 dev->state = MSM_CTRL_AWAKE;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002293 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002294 return ret;
2295}
2296
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002297static int msm_slim_suspend(struct device *dev)
2298{
2299 int ret = 0;
2300 if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
Sagar Dhariadebc8b72012-08-11 15:02:12 -06002301 struct platform_device *pdev = to_platform_device(dev);
2302 struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002303 dev_dbg(dev, "system suspend");
2304 ret = msm_slim_runtime_suspend(dev);
Sagar Dhariadebc8b72012-08-11 15:02:12 -06002305 if (!ret) {
2306 if (cdev->hclk)
2307 clk_disable_unprepare(cdev->hclk);
2308 }
Sagar Dharia6b559e02011-08-03 17:01:31 -06002309 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002310 if (ret == -EBUSY) {
Sagar Dharia144e5e02011-08-08 17:30:11 -06002311 /*
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002312 * If the clock pause failed due to active channels, there is
2313 * a possibility that some audio stream is active during suspend
2314 * We dont want to return suspend failure in that case so that
2315 * display and relevant components can still go to suspend.
2316 * If there is some other error, then it should be passed-on
2317 * to system level suspend
2318 */
Sagar Dharia144e5e02011-08-08 17:30:11 -06002319 ret = 0;
2320 }
2321 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002322}
2323
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002324static int msm_slim_resume(struct device *dev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002325{
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002326 /* If runtime_pm is enabled, this resume shouldn't do anything */
2327 if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
Sagar Dhariadebc8b72012-08-11 15:02:12 -06002328 struct platform_device *pdev = to_platform_device(dev);
2329 struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002330 int ret;
2331 dev_dbg(dev, "system resume");
Sagar Dhariadebc8b72012-08-11 15:02:12 -06002332 if (cdev->hclk)
2333 clk_prepare_enable(cdev->hclk);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002334 ret = msm_slim_runtime_resume(dev);
2335 if (!ret) {
2336 pm_runtime_mark_last_busy(dev);
2337 pm_request_autosuspend(dev);
2338 }
2339 return ret;
2340
Sagar Dharia144e5e02011-08-08 17:30:11 -06002341 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002342 return 0;
2343}
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002344#endif /* CONFIG_PM_SLEEP */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002345
2346static const struct dev_pm_ops msm_slim_dev_pm_ops = {
2347 SET_SYSTEM_SLEEP_PM_OPS(
2348 msm_slim_suspend,
2349 msm_slim_resume
2350 )
2351 SET_RUNTIME_PM_OPS(
2352 msm_slim_runtime_suspend,
2353 msm_slim_runtime_resume,
2354 msm_slim_runtime_idle
2355 )
2356};
2357
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002358static struct of_device_id msm_slim_dt_match[] = {
2359 {
2360 .compatible = "qcom,slim-msm",
2361 },
2362 {}
2363};
2364
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002365static struct platform_driver msm_slim_driver = {
2366 .probe = msm_slim_probe,
2367 .remove = msm_slim_remove,
2368 .driver = {
2369 .name = MSM_SLIM_NAME,
2370 .owner = THIS_MODULE,
2371 .pm = &msm_slim_dev_pm_ops,
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002372 .of_match_table = msm_slim_dt_match,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002373 },
2374};
2375
2376static int msm_slim_init(void)
2377{
2378 return platform_driver_register(&msm_slim_driver);
2379}
2380subsys_initcall(msm_slim_init);
2381
2382static void msm_slim_exit(void)
2383{
2384 platform_driver_unregister(&msm_slim_driver);
2385}
2386module_exit(msm_slim_exit);
2387
2388MODULE_LICENSE("GPL v2");
2389MODULE_VERSION("0.1");
2390MODULE_DESCRIPTION("MSM Slimbus controller");
2391MODULE_ALIAS("platform:msm-slim");