blob: f3a0d984adc37e76428c656e570589c0c3513f43 [file] [log] [blame]
Sagar Dharia790cfd02011-09-25 17:56:24 -06001/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12#include <linux/irq.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/slab.h>
16#include <linux/io.h>
17#include <linux/interrupt.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/slimbus/slimbus.h>
21#include <linux/delay.h>
22#include <linux/kthread.h>
23#include <linux/clk.h>
Sagar Dharia45ee38a2011-08-03 17:01:31 -060024#include <linux/pm_runtime.h>
Sagar Dhariaf8f603b2012-03-21 15:25:17 -060025#include <linux/of.h>
26#include <linux/of_slimbus.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070027#include <mach/sps.h>
Joonwoo Parkf69f77a2012-08-28 15:26:11 -070028#include <mach/qdsp6v2/apr.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029
30/* Per spec.max 40 bytes per received message */
31#define SLIM_RX_MSGQ_BUF_LEN 40
32
33#define SLIM_USR_MC_GENERIC_ACK 0x25
34#define SLIM_USR_MC_MASTER_CAPABILITY 0x0
35#define SLIM_USR_MC_REPORT_SATELLITE 0x1
36#define SLIM_USR_MC_ADDR_QUERY 0xD
37#define SLIM_USR_MC_ADDR_REPLY 0xE
38#define SLIM_USR_MC_DEFINE_CHAN 0x20
39#define SLIM_USR_MC_DEF_ACT_CHAN 0x21
40#define SLIM_USR_MC_CHAN_CTRL 0x23
41#define SLIM_USR_MC_RECONFIG_NOW 0x24
42#define SLIM_USR_MC_REQ_BW 0x28
43#define SLIM_USR_MC_CONNECT_SRC 0x2C
44#define SLIM_USR_MC_CONNECT_SINK 0x2D
45#define SLIM_USR_MC_DISCONNECT_PORT 0x2E
46
47/* MSM Slimbus peripheral settings */
48#define MSM_SLIM_PERF_SUMM_THRESHOLD 0x8000
49#define MSM_SLIM_NCHANS 32
50#define MSM_SLIM_NPORTS 24
Sagar Dharia45ee38a2011-08-03 17:01:31 -060051#define MSM_SLIM_AUTOSUSPEND MSEC_PER_SEC
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070052
53/*
54 * Need enough descriptors to receive present messages from slaves
55 * if received simultaneously. Present message needs 3 descriptors
56 * and this size will ensure around 10 simultaneous reports.
57 */
58#define MSM_SLIM_DESC_NUM 32
59
60#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
61 ((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))
62
63#define MSM_SLIM_NAME "msm_slim_ctrl"
64#define SLIM_ROOT_FREQ 24576000
65
66#define MSM_CONCUR_MSG 8
67#define SAT_CONCUR_MSG 8
68#define DEF_WATERMARK (8 << 1)
69#define DEF_ALIGN 0
70#define DEF_PACK (1 << 6)
71#define ENABLE_PORT 1
72
73#define DEF_BLKSZ 0
74#define DEF_TRANSZ 0
75
76#define SAT_MAGIC_LSB 0xD9
77#define SAT_MAGIC_MSB 0xC5
78#define SAT_MSG_VER 0x1
79#define SAT_MSG_PROT 0x1
80#define MSM_SAT_SUCCSS 0x20
Sagar Dharia790cfd02011-09-25 17:56:24 -060081#define MSM_MAX_NSATS 2
Sagar Dharia0ffdca12011-09-25 18:55:53 -060082#define MSM_MAX_SATCH 32
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070083
84#define QC_MFGID_LSB 0x2
85#define QC_MFGID_MSB 0x17
86#define QC_CHIPID_SL 0x10
87#define QC_DEVID_SAT1 0x3
88#define QC_DEVID_SAT2 0x4
89#define QC_DEVID_PGD 0x5
Sagar Dharia45ee38a2011-08-03 17:01:31 -060090#define QC_MSM_DEVS 5
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070091
Sagar Dharia82e516f2012-03-16 16:01:23 -060092#define PGD_THIS_EE(r, v) ((v) ? PGD_THIS_EE_V2(r) : PGD_THIS_EE_V1(r))
93#define PGD_PORT(r, p, v) ((v) ? PGD_PORT_V2(r, p) : PGD_PORT_V1(r, p))
94#define CFG_PORT(r, v) ((v) ? CFG_PORT_V2(r) : CFG_PORT_V1(r))
95
96#define PGD_THIS_EE_V2(r) (dev->base + (r ## _V2) + (dev->ee * 0x1000))
97#define PGD_PORT_V2(r, p) (dev->base + (r ## _V2) + ((p) * 0x1000))
98#define CFG_PORT_V2(r) ((r ## _V2))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070099/* Component registers */
Sagar Dharia82e516f2012-03-16 16:01:23 -0600100enum comp_reg_v2 {
101 COMP_CFG_V2 = 4,
102 COMP_TRUST_CFG_V2 = 0x3000,
103};
104
105/* Manager PGD registers */
106enum pgd_reg_v2 {
107 PGD_CFG_V2 = 0x800,
108 PGD_STAT_V2 = 0x804,
109 PGD_INT_EN_V2 = 0x810,
110 PGD_INT_STAT_V2 = 0x814,
111 PGD_INT_CLR_V2 = 0x818,
112 PGD_OWN_EEn_V2 = 0x300C,
113 PGD_PORT_INT_EN_EEn_V2 = 0x5000,
114 PGD_PORT_INT_ST_EEn_V2 = 0x5004,
115 PGD_PORT_INT_CL_EEn_V2 = 0x5008,
116 PGD_PORT_CFGn_V2 = 0x14000,
117 PGD_PORT_STATn_V2 = 0x14004,
118 PGD_PORT_PARAMn_V2 = 0x14008,
119 PGD_PORT_BLKn_V2 = 0x1400C,
120 PGD_PORT_TRANn_V2 = 0x14010,
121 PGD_PORT_MCHANn_V2 = 0x14014,
122 PGD_PORT_PSHPLLn_V2 = 0x14018,
123 PGD_PORT_PC_CFGn_V2 = 0x8000,
124 PGD_PORT_PC_VALn_V2 = 0x8004,
125 PGD_PORT_PC_VFR_TSn_V2 = 0x8008,
126 PGD_PORT_PC_VFR_STn_V2 = 0x800C,
127 PGD_PORT_PC_VFR_CLn_V2 = 0x8010,
128 PGD_IE_STAT_V2 = 0x820,
129 PGD_VE_STAT_V2 = 0x830,
130};
131
132#define PGD_THIS_EE_V1(r) (dev->base + (r ## _V1) + (dev->ee * 16))
133#define PGD_PORT_V1(r, p) (dev->base + (r ## _V1) + ((p) * 32))
134#define CFG_PORT_V1(r) ((r ## _V1))
135/* Component registers */
136enum comp_reg_v1 {
137 COMP_CFG_V1 = 0,
138 COMP_TRUST_CFG_V1 = 0x14,
139};
140
141/* Manager PGD registers */
142enum pgd_reg_v1 {
143 PGD_CFG_V1 = 0x1000,
144 PGD_STAT_V1 = 0x1004,
145 PGD_INT_EN_V1 = 0x1010,
146 PGD_INT_STAT_V1 = 0x1014,
147 PGD_INT_CLR_V1 = 0x1018,
148 PGD_OWN_EEn_V1 = 0x1020,
149 PGD_PORT_INT_EN_EEn_V1 = 0x1030,
150 PGD_PORT_INT_ST_EEn_V1 = 0x1034,
151 PGD_PORT_INT_CL_EEn_V1 = 0x1038,
152 PGD_PORT_CFGn_V1 = 0x1080,
153 PGD_PORT_STATn_V1 = 0x1084,
154 PGD_PORT_PARAMn_V1 = 0x1088,
155 PGD_PORT_BLKn_V1 = 0x108C,
156 PGD_PORT_TRANn_V1 = 0x1090,
157 PGD_PORT_MCHANn_V1 = 0x1094,
158 PGD_PORT_PSHPLLn_V1 = 0x1098,
159 PGD_PORT_PC_CFGn_V1 = 0x1600,
160 PGD_PORT_PC_VALn_V1 = 0x1604,
161 PGD_PORT_PC_VFR_TSn_V1 = 0x1608,
162 PGD_PORT_PC_VFR_STn_V1 = 0x160C,
163 PGD_PORT_PC_VFR_CLn_V1 = 0x1610,
164 PGD_IE_STAT_V1 = 0x1700,
165 PGD_VE_STAT_V1 = 0x1710,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700166};
167
168/* Manager registers */
169enum mgr_reg {
170 MGR_CFG = 0x200,
171 MGR_STATUS = 0x204,
172 MGR_RX_MSGQ_CFG = 0x208,
173 MGR_INT_EN = 0x210,
174 MGR_INT_STAT = 0x214,
175 MGR_INT_CLR = 0x218,
176 MGR_TX_MSG = 0x230,
177 MGR_RX_MSG = 0x270,
178 MGR_VE_STAT = 0x300,
179};
180
181enum msg_cfg {
182 MGR_CFG_ENABLE = 1,
183 MGR_CFG_RX_MSGQ_EN = 1 << 1,
184 MGR_CFG_TX_MSGQ_EN_HIGH = 1 << 2,
185 MGR_CFG_TX_MSGQ_EN_LOW = 1 << 3,
186};
187/* Message queue types */
188enum msm_slim_msgq_type {
189 MSGQ_RX = 0,
190 MSGQ_TX_LOW = 1,
191 MSGQ_TX_HIGH = 2,
192};
193/* Framer registers */
194enum frm_reg {
195 FRM_CFG = 0x400,
196 FRM_STAT = 0x404,
197 FRM_INT_EN = 0x410,
198 FRM_INT_STAT = 0x414,
199 FRM_INT_CLR = 0x418,
200 FRM_WAKEUP = 0x41C,
201 FRM_CLKCTL_DONE = 0x420,
202 FRM_IE_STAT = 0x430,
203 FRM_VE_STAT = 0x440,
204};
205
206/* Interface registers */
207enum intf_reg {
208 INTF_CFG = 0x600,
209 INTF_STAT = 0x604,
210 INTF_INT_EN = 0x610,
211 INTF_INT_STAT = 0x614,
212 INTF_INT_CLR = 0x618,
213 INTF_IE_STAT = 0x630,
214 INTF_VE_STAT = 0x640,
215};
216
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700217enum rsc_grp {
218 EE_MGR_RSC_GRP = 1 << 10,
219 EE_NGD_2 = 2 << 6,
220 EE_NGD_1 = 0,
221};
222
223enum mgr_intr {
224 MGR_INT_RECFG_DONE = 1 << 24,
225 MGR_INT_TX_NACKED_2 = 1 << 25,
226 MGR_INT_MSG_BUF_CONTE = 1 << 26,
227 MGR_INT_RX_MSG_RCVD = 1 << 30,
228 MGR_INT_TX_MSG_SENT = 1 << 31,
229};
230
231enum frm_cfg {
232 FRM_ACTIVE = 1,
233 CLK_GEAR = 7,
234 ROOT_FREQ = 11,
235 REF_CLK_GEAR = 15,
Sagar Dhariadebc8b72012-08-11 15:02:12 -0600236 INTR_WAKE = 19,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700237};
238
Sagar Dharia45ee38a2011-08-03 17:01:31 -0600239enum msm_ctrl_state {
240 MSM_CTRL_AWAKE,
241 MSM_CTRL_SLEEPING,
242 MSM_CTRL_ASLEEP,
243};
244
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700245struct msm_slim_sps_bam {
246 u32 hdl;
247 void __iomem *base;
248 int irq;
249};
250
251struct msm_slim_endp {
252 struct sps_pipe *sps;
253 struct sps_connect config;
254 struct sps_register_event event;
255 struct sps_mem_buffer buf;
256 struct completion *xcomp;
257 bool connected;
258};
259
260struct msm_slim_ctrl {
261 struct slim_controller ctrl;
262 struct slim_framer framer;
263 struct device *dev;
264 void __iomem *base;
Sagar Dhariacc969452011-09-19 10:34:30 -0600265 struct resource *slew_mem;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700266 u32 curr_bw;
267 u8 msg_cnt;
268 u32 tx_buf[10];
269 u8 rx_msgs[MSM_CONCUR_MSG][SLIM_RX_MSGQ_BUF_LEN];
270 spinlock_t rx_lock;
271 int head;
272 int tail;
273 int irq;
274 int err;
275 int ee;
276 struct completion *wr_comp;
Sagar Dharia790cfd02011-09-25 17:56:24 -0600277 struct msm_slim_sat *satd[MSM_MAX_NSATS];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700278 struct msm_slim_endp pipes[7];
279 struct msm_slim_sps_bam bam;
280 struct msm_slim_endp rx_msgq;
281 struct completion rx_msgq_notify;
282 struct task_struct *rx_msgq_thread;
283 struct clk *rclk;
Sagar Dhariadebc8b72012-08-11 15:02:12 -0600284 struct clk *hclk;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700285 struct mutex tx_lock;
286 u8 pgdla;
287 bool use_rx_msgqs;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700288 int pipe_b;
289 struct completion reconf;
290 bool reconf_busy;
Sagar Dharia45ee38a2011-08-03 17:01:31 -0600291 bool chan_active;
292 enum msm_ctrl_state state;
Sagar Dharia790cfd02011-09-25 17:56:24 -0600293 int nsats;
Sagar Dharia82e516f2012-03-16 16:01:23 -0600294 u32 ver;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700295};
296
Sagar Dharia0ffdca12011-09-25 18:55:53 -0600297struct msm_sat_chan {
298 u8 chan;
299 u16 chanh;
300 int req_rem;
301 int req_def;
302};
303
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700304struct msm_slim_sat {
305 struct slim_device satcl;
306 struct msm_slim_ctrl *dev;
307 struct workqueue_struct *wq;
308 struct work_struct wd;
309 u8 sat_msgs[SAT_CONCUR_MSG][40];
Sagar Dharia0ffdca12011-09-25 18:55:53 -0600310 struct msm_sat_chan *satch;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700311 u8 nsatch;
312 bool sent_capability;
Sagar Dharia45ee38a2011-08-03 17:01:31 -0600313 bool pending_reconf;
314 bool pending_capability;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700315 int shead;
316 int stail;
317 spinlock_t lock;
318};
319
Sagar Dharia790cfd02011-09-25 17:56:24 -0600320static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev);
321
/*
 * Push one received message into the controller's RX ring buffer.
 * @buf: raw message words read from the manager RX FIFO.
 * @len: message length in bytes (copied verbatim into the slot).
 * Returns 0 on success, -EXFULL if the ring is full.
 *
 * Uses plain spin_lock (no irqsave) -- NOTE(review): this assumes the
 * function is only ever called from the interrupt handler; the dequeue
 * side uses spin_lock_irqsave. Verify no process-context caller exists.
 */
static int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
{
	spin_lock(&dev->rx_lock);
	/* Ring is full when advancing tail would collide with head */
	if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
		spin_unlock(&dev->rx_lock);
		dev_err(dev->dev, "RX QUEUE full!");
		return -EXFULL;
	}
	memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
	dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
	spin_unlock(&dev->rx_lock);
	return 0;
}
335
336static int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
337{
338 unsigned long flags;
339 spin_lock_irqsave(&dev->rx_lock, flags);
340 if (dev->tail == dev->head) {
341 spin_unlock_irqrestore(&dev->rx_lock, flags);
342 return -ENODATA;
343 }
344 memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40);
345 dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
346 spin_unlock_irqrestore(&dev->rx_lock, flags);
347 return 0;
348}
349
/*
 * Push one satellite-bound message into the satellite's ring buffer.
 * @buf: raw message words read from the manager RX FIFO.
 * @len: message length in bytes.
 * Returns 0 on success, -EXFULL if the ring is full.
 *
 * Uses plain spin_lock (no irqsave) -- NOTE(review): this assumes the
 * function is only called from the interrupt handler; the dequeue side
 * (workqueue context) uses spin_lock_irqsave. Verify before adding
 * other callers.
 */
static int msm_sat_enqueue(struct msm_slim_sat *sat, u32 *buf, u8 len)
{
	struct msm_slim_ctrl *dev = sat->dev;
	spin_lock(&sat->lock);
	/* Ring is full when advancing stail would collide with shead */
	if ((sat->stail + 1) % SAT_CONCUR_MSG == sat->shead) {
		spin_unlock(&sat->lock);
		dev_err(dev->dev, "SAT QUEUE full!");
		return -EXFULL;
	}
	memcpy(sat->sat_msgs[sat->stail], (u8 *)buf, len);
	sat->stail = (sat->stail + 1) % SAT_CONCUR_MSG;
	spin_unlock(&sat->lock);
	return 0;
}
364
365static int msm_sat_dequeue(struct msm_slim_sat *sat, u8 *buf)
366{
367 unsigned long flags;
368 spin_lock_irqsave(&sat->lock, flags);
369 if (sat->stail == sat->shead) {
370 spin_unlock_irqrestore(&sat->lock, flags);
371 return -ENODATA;
372 }
373 memcpy(buf, sat->sat_msgs[sat->shead], 40);
374 sat->shead = (sat->shead + 1) % SAT_CONCUR_MSG;
375 spin_unlock_irqrestore(&sat->lock, flags);
376 return 0;
377}
378
379static void msm_get_eaddr(u8 *e_addr, u32 *buffer)
380{
381 e_addr[0] = (buffer[1] >> 24) & 0xff;
382 e_addr[1] = (buffer[1] >> 16) & 0xff;
383 e_addr[2] = (buffer[1] >> 8) & 0xff;
384 e_addr[3] = buffer[1] & 0xff;
385 e_addr[4] = (buffer[0] >> 24) & 0xff;
386 e_addr[5] = (buffer[0] >> 16) & 0xff;
387}
388
389static bool msm_is_sat_dev(u8 *e_addr)
390{
391 if (e_addr[5] == QC_MFGID_LSB && e_addr[4] == QC_MFGID_MSB &&
392 e_addr[2] != QC_CHIPID_SL &&
393 (e_addr[1] == QC_DEVID_SAT1 || e_addr[1] == QC_DEVID_SAT2))
394 return true;
395 return false;
396}
397
Sagar Dhariad3ef30a2011-12-09 14:30:45 -0700398static int msm_slim_get_ctrl(struct msm_slim_ctrl *dev)
Sagar Dharia45ee38a2011-08-03 17:01:31 -0600399{
Sagar Dharia45e77912012-01-10 09:55:18 -0700400#ifdef CONFIG_PM_RUNTIME
401 int ref = 0;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -0700402 int ret = pm_runtime_get_sync(dev->dev);
403 if (ret >= 0) {
404 ref = atomic_read(&dev->dev->power.usage_count);
405 if (ref <= 0) {
406 dev_err(dev->dev, "reference count -ve:%d", ref);
407 ret = -ENODEV;
408 }
409 }
410 return ret;
Sagar Dharia45e77912012-01-10 09:55:18 -0700411#else
412 return -ENODEV;
413#endif
Sagar Dharia45ee38a2011-08-03 17:01:31 -0600414}
415static void msm_slim_put_ctrl(struct msm_slim_ctrl *dev)
416{
Sagar Dharia45e77912012-01-10 09:55:18 -0700417#ifdef CONFIG_PM_RUNTIME
Sagar Dharia38fd1872012-02-06 18:36:38 -0700418 int ref;
Sagar Dharia45ee38a2011-08-03 17:01:31 -0600419 pm_runtime_mark_last_busy(dev->dev);
Sagar Dharia38fd1872012-02-06 18:36:38 -0700420 ref = atomic_read(&dev->dev->power.usage_count);
421 if (ref <= 0)
422 dev_err(dev->dev, "reference count mismatch:%d", ref);
423 else
424 pm_runtime_put(dev->dev);
Sagar Dharia45e77912012-01-10 09:55:18 -0700425#endif
Sagar Dharia45ee38a2011-08-03 17:01:31 -0600426}
427
Sagar Dharia790cfd02011-09-25 17:56:24 -0600428static struct msm_slim_sat *addr_to_sat(struct msm_slim_ctrl *dev, u8 laddr)
429{
430 struct msm_slim_sat *sat = NULL;
431 int i = 0;
432 while (!sat && i < dev->nsats) {
433 if (laddr == dev->satd[i]->satcl.laddr)
434 sat = dev->satd[i];
435 i++;
436 }
437 return sat;
438}
439
/*
 * Top-level interrupt handler for the SLIMbus manager.
 * Handles, in order: TX done/NACK, RX message arrival (dispatched by
 * message type/code), reconfiguration-done, and per-port status events.
 * Each interrupt source is acknowledged by writing its bit to
 * MGR_INT_CLR, followed by mb() so the clear is posted before any
 * follow-up action (completion signalling / work queuing / ISR exit).
 */
static irqreturn_t msm_slim_interrupt(int irq, void *d)
{
	struct msm_slim_ctrl *dev = d;
	u32 pstat;
	u32 stat = readl_relaxed(dev->base + MGR_INT_STAT);

	/* TX path: message sent, or NACKed (reported via dev->err) */
	if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2) {
		if (stat & MGR_INT_TX_MSG_SENT)
			writel_relaxed(MGR_INT_TX_MSG_SENT,
					dev->base + MGR_INT_CLR);
		else {
			writel_relaxed(MGR_INT_TX_NACKED_2,
					dev->base + MGR_INT_CLR);
			dev->err = -EIO;
		}
		/*
		 * Guarantee that interrupt clear bit write goes through before
		 * signalling completion/exiting ISR
		 */
		mb();
		/* wr_comp is armed by the transmitting thread before send */
		if (dev->wr_comp)
			complete(dev->wr_comp);
	}
	if (stat & MGR_INT_RX_MSG_RCVD) {
		u32 rx_buf[10];
		u32 mc, mt;
		u8 len, i;
		/* First word carries length (bits 0-4), MT (5-7), MC (8-15) */
		rx_buf[0] = readl_relaxed(dev->base + MGR_RX_MSG);
		len = rx_buf[0] & 0x1F;
		for (i = 1; i < ((len + 3) >> 2); i++) {
			rx_buf[i] = readl_relaxed(dev->base + MGR_RX_MSG +
						(4 * i));
			dev_dbg(dev->dev, "reading data: %x\n", rx_buf[i]);
		}
		mt = (rx_buf[0] >> 5) & 0x7;
		mc = (rx_buf[0] >> 8) & 0xff;
		dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
		if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
			/* User-referred messages go to the satellite's queue */
			u8 laddr = (u8)((rx_buf[0] >> 16) & 0xFF);
			struct msm_slim_sat *sat = addr_to_sat(dev, laddr);
			if (sat)
				msm_sat_enqueue(sat, rx_buf, len);
			else
				dev_err(dev->dev, "unknown sat:%d message",
						laddr);
			writel_relaxed(MGR_INT_RX_MSG_RCVD,
					dev->base + MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through before
			 * queuing work
			 */
			mb();
			if (sat)
				queue_work(sat->wq, &sat->wd);
		} else if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			/* New device announcing itself: hand to RX thread */
			u8 e_addr[6];
			msm_get_eaddr(e_addr, rx_buf);
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			/* Replies to our requests: hand to RX thread */
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			/* Slave information report: log element and bitmasks */
			u8 *buf = (u8 *)rx_buf;
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
					l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		} else {
			/* Unhandled message: dump it and acknowledge anyway */
			dev_err(dev->dev, "Unexpected MC,%x MT:%x, len:%d",
					mc, mt, len);
			for (i = 0; i < ((len + 3) >> 2); i++)
				dev_err(dev->dev, "error msg: %x", rx_buf[i]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		}
	}
	if (stat & MGR_INT_RECFG_DONE) {
		writel_relaxed(MGR_INT_RECFG_DONE, dev->base + MGR_INT_CLR);
		/*
		 * Guarantee that CLR bit write goes through
		 * before exiting ISR
		 */
		mb();
		complete(&dev->reconf);
	}
	/* Per-port interrupts: record error causes on affected ports */
	pstat = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_ST_EEn, dev->ver));
	if (pstat != 0) {
		int i = 0;
		for (i = dev->pipe_b; i < MSM_SLIM_NPORTS; i++) {
			if (pstat & 1 << i) {
				u32 val = readl_relaxed(PGD_PORT(PGD_PORT_STATn,
							i, dev->ver));
				if (val & (1 << 19)) {
					dev->ctrl.ports[i].err =
						SLIM_P_DISCONNECT;
					dev->pipes[i-dev->pipe_b].connected =
						false;
					/*
					 * SPS will call completion since
					 * ERROR flags are registered
					 */
				} else if (val & (1 << 2))
					dev->ctrl.ports[i].err =
							SLIM_P_OVERFLOW;
				else if (val & (1 << 3))
					dev->ctrl.ports[i].err =
							SLIM_P_UNDERFLOW;
			}
			writel_relaxed(1, PGD_THIS_EE(PGD_PORT_INT_CL_EEn,
							dev->ver));
		}
		/*
		 * Guarantee that port interrupt bit(s) clearing writes go
		 * through before exiting ISR
		 */
		mb();
	}

	return IRQ_HANDLED;
}
594
595static int
596msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
597{
598 int ret;
599 struct sps_pipe *endpoint;
600 struct sps_connect *config = &ep->config;
601
602 /* Allocate the endpoint */
603 endpoint = sps_alloc_endpoint();
604 if (!endpoint) {
605 dev_err(dev->dev, "sps_alloc_endpoint failed\n");
606 return -ENOMEM;
607 }
608
609 /* Get default connection configuration for an endpoint */
610 ret = sps_get_config(endpoint, config);
611 if (ret) {
612 dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
613 goto sps_config_failed;
614 }
615
616 ep->sps = endpoint;
617 return 0;
618
619sps_config_failed:
620 sps_free_endpoint(endpoint);
621 return ret;
622}
623
624static void
625msm_slim_free_endpoint(struct msm_slim_endp *ep)
626{
627 sps_free_endpoint(ep->sps);
628 ep->sps = NULL;
629}
630
631static int msm_slim_sps_mem_alloc(
632 struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
633{
634 dma_addr_t phys;
635
636 mem->size = len;
637 mem->min_size = 0;
638 mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);
639
640 if (!mem->base) {
641 dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
642 return -ENOMEM;
643 }
644
645 mem->phys_base = phys;
646 memset(mem->base, 0x00, mem->size);
647 return 0;
648}
649
650static void
651msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
652{
653 dma_free_coherent(dev->dev, mem->size, mem->base, mem->phys_base);
654 mem->size = 0;
655 mem->base = NULL;
656 mem->phys_base = 0;
657}
658
659static void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pn)
660{
661 u32 set_cfg = DEF_WATERMARK | DEF_ALIGN | DEF_PACK | ENABLE_PORT;
Sagar Dharia82e516f2012-03-16 16:01:23 -0600662 u32 int_port = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
663 dev->ver));
664 writel_relaxed(set_cfg, PGD_PORT(PGD_PORT_CFGn, pn, dev->ver));
665 writel_relaxed(DEF_BLKSZ, PGD_PORT(PGD_PORT_BLKn, pn, dev->ver));
666 writel_relaxed(DEF_TRANSZ, PGD_PORT(PGD_PORT_TRANn, pn, dev->ver));
667 writel_relaxed((int_port | 1 << pn) , PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
668 dev->ver));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700669 /* Make sure that port registers are updated before returning */
670 mb();
671}
672
/*
 * Connect the BAM pipe backing port @pn to system memory and enable
 * the hardware port. The BAM-side pipe index is read back from the
 * port's status register; transfer direction (SPS_MODE_DEST/SRC)
 * follows the port's SLIM flow direction.
 * Returns 0 on success or the SPS error code.
 */
static int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
	struct msm_slim_endp *endpoint = &dev->pipes[pn];
	struct sps_connect *cfg = &endpoint->config;
	u32 stat;
	int ret = sps_get_config(dev->pipes[pn].sps, cfg);
	if (ret) {
		dev_err(dev->dev, "sps pipe-port get config error%x\n", ret);
		return ret;
	}
	cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* Already-connected pipe: just refresh its event options */
	if (dev->pipes[pn].connected) {
		ret = sps_set_config(dev->pipes[pn].sps, cfg);
		if (ret) {
			dev_err(dev->dev, "sps pipe-port set config erro:%x\n",
				ret);
			return ret;
		}
	}

	/* Port status carries the BAM pipe index in bits 4-11 */
	stat = readl_relaxed(PGD_PORT(PGD_PORT_STATn, (pn + dev->pipe_b),
					dev->ver));
	if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
		/* Port sources data onto the bus: memory -> BAM */
		cfg->destination = dev->bam.hdl;
		cfg->source = SPS_DEV_HANDLE_MEM;
		cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->src_pipe_index = 0;
		dev_dbg(dev->dev, "flow src:pipe num:%d",
					cfg->dest_pipe_index);
		cfg->mode = SPS_MODE_DEST;
	} else {
		/* Port sinks data from the bus: BAM -> memory */
		cfg->source = dev->bam.hdl;
		cfg->destination = SPS_DEV_HANDLE_MEM;
		cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->dest_pipe_index = 0;
		dev_dbg(dev->dev, "flow dest:pipe num:%d",
					cfg->src_pipe_index);
		cfg->mode = SPS_MODE_SRC;
	}
	/* Space for desciptor FIFOs */
	cfg->desc.size = MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec);
	cfg->config = SPS_CONFIG_DEFAULT;
	ret = sps_connect(dev->pipes[pn].sps, cfg);
	if (!ret) {
		dev->pipes[pn].connected = true;
		msm_hw_set_port(dev, pn + dev->pipe_b);
	}
	return ret;
}
724
725static u32 *msm_get_msg_buf(struct slim_controller *ctrl, int len)
726{
727 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
728 /*
729 * Currently we block a transaction until the current one completes.
730 * In case we need multiple transactions, use message Q
731 */
732 return dev->tx_buf;
733}
734
735static int msm_send_msg_buf(struct slim_controller *ctrl, u32 *buf, u8 len)
736{
737 int i;
738 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
739 for (i = 0; i < (len + 3) >> 2; i++) {
740 dev_dbg(dev->dev, "TX data:0x%x\n", buf[i]);
741 writel_relaxed(buf[i], dev->base + MGR_TX_MSG + (i * 4));
742 }
743 /* Guarantee that message is sent before returning */
744 mb();
745 return 0;
746}
747
/*
 * Transmit one SLIMbus message transaction and wait for its TX
 * completion (1s timeout). Also drives the runtime-PM voting and the
 * reconfiguration/clock-pause state machine:
 *  - a "messaging" PM vote is taken for any non-clock-pause message;
 *  - a "data channel" PM vote is taken on BEGIN_RECONFIGURATION when
 *    slots are in use, and dropped on RECONFIGURE_NOW when none are;
 *  - a successful clock-pause RECONFIGURE_NOW gates the root clock and
 *    disables the controller IRQ.
 * Returns 0/dev->err on success, -EBUSY when suspended, -ETIMEDOUT on
 * TX or reconfiguration timeout, -EPROTONOSUPPORT for enumeration-
 * address destinations.
 */
static int msm_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	u32 *pbuf;
	u8 *puc;
	int timeout;
	int msgv = -1;	/* >= 0 means we hold a messaging PM vote */
	u8 la = txn->la;
	u8 mc = (u8)(txn->mc & 0xFF);
	/*
	 * Voting for runtime PM: Slimbus has 2 possible use cases:
	 * 1. messaging
	 * 2. Data channels
	 * Messaging case goes through messaging slots and data channels
	 * use their own slots
	 * This "get" votes for messaging bandwidth
	 */
	if (!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG))
		msgv = msm_slim_get_ctrl(dev);
	mutex_lock(&dev->tx_lock);
	/* Only clock-pause-sequence messages may go out while SLEEPING */
	if (dev->state == MSM_CTRL_ASLEEP ||
		((!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
		dev->state == MSM_CTRL_SLEEPING)) {
		dev_err(dev->dev, "runtime or system PM suspended state");
		mutex_unlock(&dev->tx_lock);
		if (msgv >= 0)
			msm_slim_put_ctrl(dev);
		return -EBUSY;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION) {
		/* Serialize with a previously started reconfiguration */
		if (dev->reconf_busy) {
			wait_for_completion(&dev->reconf);
			dev->reconf_busy = false;
		}
		/* This "get" votes for data channels */
		if (dev->ctrl.sched.usedslots != 0 &&
			!dev->chan_active) {
			int chv = msm_slim_get_ctrl(dev);
			if (chv >= 0)
				dev->chan_active = true;
		}
	}
	txn->rl--;
	pbuf = msm_get_msg_buf(ctrl, txn->rl);
	dev->wr_comp = NULL;
	dev->err = 0;

	if (txn->dt == SLIM_MSG_DEST_ENUMADDR) {
		mutex_unlock(&dev->tx_lock);
		if (msgv >= 0)
			msm_slim_put_ctrl(dev);
		return -EPROTONOSUPPORT;
	}
	/* la == 0xFF on port-connect messages means "the PGD itself" */
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		mc == SLIM_MSG_MC_CONNECT_SINK ||
		mc == SLIM_MSG_MC_DISCONNECT_PORT))
		la = dev->pgdla;
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 0, la);
	else
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 1, la);
	/* Payload starts after the 3- or 2-byte header */
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		puc = ((u8 *)pbuf) + 3;
	else
		puc = ((u8 *)pbuf) + 2;
	if (txn->rbuf)
		*(puc++) = txn->tid;
	/* Information/value-element messages carry a 2-byte element code */
	if ((txn->mt == SLIM_MSG_MT_CORE) &&
		((mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
		mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
		(mc >= SLIM_MSG_MC_REQUEST_VALUE &&
		mc <= SLIM_MSG_MC_CHANGE_VALUE))) {
		*(puc++) = (txn->ec & 0xFF);
		*(puc++) = (txn->ec >> 8)&0xFF;
	}
	if (txn->wbuf)
		memcpy(puc, txn->wbuf, txn->len);
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		mc == SLIM_MSG_MC_CONNECT_SINK ||
		mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
		if (mc != SLIM_MSG_MC_DISCONNECT_PORT)
			dev->err = msm_slim_connect_pipe_port(dev, *puc);
		else {
			/* Tear down the local pipe; nothing goes on the bus */
			struct msm_slim_endp *endpoint = &dev->pipes[*puc];
			struct sps_register_event sps_event;
			memset(&sps_event, 0, sizeof(sps_event));
			sps_register_event(endpoint->sps, &sps_event);
			sps_disconnect(endpoint->sps);
			/*
			 * Remove channel disconnects master-side ports from
			 * channel. No need to send that again on the bus
			 */
			dev->pipes[*puc].connected = false;
			mutex_unlock(&dev->tx_lock);
			if (msgv >= 0)
				msm_slim_put_ctrl(dev);
			return 0;
		}
		if (dev->err) {
			dev_err(dev->dev, "pipe-port connect err:%d", dev->err);
			mutex_unlock(&dev->tx_lock);
			if (msgv >= 0)
				msm_slim_put_ctrl(dev);
			return dev->err;
		}
		/* Translate the SW port number to the HW port number */
		*(puc) = *(puc) + dev->pipe_b;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION)
		dev->reconf_busy = true;
	dev->wr_comp = &done;
	msm_send_msg_buf(ctrl, pbuf, txn->rl);
	timeout = wait_for_completion_timeout(&done, HZ);

	if (mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
		/* Clock-pause sequence: wait for reconfig done, then gate */
		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
					SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
				timeout) {
			timeout = wait_for_completion_timeout(&dev->reconf, HZ);
			dev->reconf_busy = false;
			if (timeout) {
				clk_disable_unprepare(dev->rclk);
				disable_irq(dev->irq);
			}
		}
		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
					SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
				!timeout) {
			dev->reconf_busy = false;
			dev_err(dev->dev, "clock pause failed");
			mutex_unlock(&dev->tx_lock);
			return -ETIMEDOUT;
		}
		if (txn->mt == SLIM_MSG_MT_CORE &&
			txn->mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
			/* Last data channel removed: drop the channel vote */
			if (dev->ctrl.sched.usedslots == 0 &&
					dev->chan_active) {
				dev->chan_active = false;
				msm_slim_put_ctrl(dev);
			}
		}
	}
	mutex_unlock(&dev->tx_lock);
	if (msgv >= 0)
		msm_slim_put_ctrl(dev);

	if (!timeout)
		dev_err(dev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
				txn->mt);

	return timeout ? dev->err : -ETIMEDOUT;
}
904
905static int msm_set_laddr(struct slim_controller *ctrl, const u8 *ea,
906 u8 elen, u8 laddr)
907{
908 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
909 DECLARE_COMPLETION_ONSTACK(done);
910 int timeout;
911 u32 *buf;
912 mutex_lock(&dev->tx_lock);
913 buf = msm_get_msg_buf(ctrl, 9);
914 buf[0] = SLIM_MSG_ASM_FIRST_WORD(9, SLIM_MSG_MT_CORE,
915 SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
916 SLIM_MSG_DEST_LOGICALADDR,
917 ea[5] | ea[4] << 8);
918 buf[1] = ea[3] | (ea[2] << 8) | (ea[1] << 16) | (ea[0] << 24);
919 buf[2] = laddr;
920
921 dev->wr_comp = &done;
922 msm_send_msg_buf(ctrl, buf, 9);
923 timeout = wait_for_completion_timeout(&done, HZ);
924 mutex_unlock(&dev->tx_lock);
925 return timeout ? dev->err : -ETIMEDOUT;
926}
927
/*
 * Bring the controller out of SLIMbus clock-pause.
 * The sequence is order-sensitive: re-enable the controller IRQ, ungate
 * the reference clock, then hit FRM_WAKEUP so the framer restarts the
 * bus clock. Always returns 0.
 */
static int msm_clk_pause_wakeup(struct slim_controller *ctrl)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	enable_irq(dev->irq);
	clk_prepare_enable(dev->rclk);
	writel_relaxed(1, dev->base + FRM_WAKEUP);
	/* Make sure framer wakeup write goes through before exiting function */
	mb();
	/*
	 * Workaround: Currently, slave is reporting lost-sync messages
	 * after slimbus comes out of clock pause.
	 * Transaction with slave fail before slave reports that message
	 * Give some time for that report to come
	 * Slimbus wakes up in clock gear 10 at 24.576MHz. With each superframe
	 * being 250 usecs, we wait for 20 superframes here to ensure
	 * we get the message
	 */
	usleep_range(5000, 5000);
	return 0;
}
948
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700949static int msm_config_port(struct slim_controller *ctrl, u8 pn)
950{
951 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
952 struct msm_slim_endp *endpoint;
953 int ret = 0;
954 if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
955 ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
956 return -EPROTONOSUPPORT;
957 if (pn >= (MSM_SLIM_NPORTS - dev->pipe_b))
958 return -ENODEV;
959
960 endpoint = &dev->pipes[pn];
961 ret = msm_slim_init_endpoint(dev, endpoint);
962 dev_dbg(dev->dev, "sps register bam error code:%x\n", ret);
963 return ret;
964}
965
966static enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
967 u8 pn, u8 **done_buf, u32 *done_len)
968{
969 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
970 struct sps_iovec sio;
971 int ret;
972 if (done_len)
973 *done_len = 0;
974 if (done_buf)
975 *done_buf = NULL;
976 if (!dev->pipes[pn].connected)
977 return SLIM_P_DISCONNECT;
978 ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
979 if (!ret) {
980 if (done_len)
981 *done_len = sio.size;
982 if (done_buf)
983 *done_buf = (u8 *)sio.addr;
984 }
985 dev_dbg(dev->dev, "get iovec returned %d\n", ret);
986 return SLIM_P_INPROGRESS;
987}
988
989static int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf,
990 u32 len, struct completion *comp)
991{
992 struct sps_register_event sreg;
993 int ret;
994 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
Sagar Dhariae77961f2011-09-27 14:03:50 -0600995 if (pn >= 7)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700996 return -ENODEV;
997
998
999 ctrl->ports[pn].xcomp = comp;
1000 sreg.options = (SPS_EVENT_DESC_DONE|SPS_EVENT_ERROR);
1001 sreg.mode = SPS_TRIGGER_WAIT;
1002 sreg.xfer_done = comp;
1003 sreg.callback = NULL;
1004 sreg.user = &ctrl->ports[pn];
1005 ret = sps_register_event(dev->pipes[pn].sps, &sreg);
1006 if (ret) {
1007 dev_dbg(dev->dev, "sps register event error:%x\n", ret);
1008 return ret;
1009 }
1010 ret = sps_transfer_one(dev->pipes[pn].sps, (u32)iobuf, len, NULL,
1011 SPS_IOVEC_FLAG_INT);
1012 dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);
1013
1014 return ret;
1015}
1016
/*
 * Handle a satellite channel request.
 *
 * For SLIM_USR_MC_CHAN_CTRL: buf[5] is the (first) channel number and
 * buf[3] bits 7:6 encode the operation (activate/suspend/remove); the
 * operation is applied to the first channel (the group follows), then the
 * per-channel req_rem/req_def counters are bumped for every channel listed
 * in buf[5..len).
 *
 * For DEFINE_CHAN/DEF_ACT_CHAN: buf[8..len) lists channel numbers;
 * buf[3..6] carry packed data-format, aux-format, sample size, protocol
 * and rate-multiplier fields which are unpacked into a slim_ch property
 * and passed to slim_define_ch() (as a group when more than one channel).
 *
 * Returns 0 on success or a negative errno.
 */
static int msm_sat_define_ch(struct msm_slim_sat *sat, u8 *buf, u8 len, u8 mc)
{
	struct msm_slim_ctrl *dev = sat->dev;
	enum slim_ch_control oper;
	int i;
	int ret = 0;
	if (mc == SLIM_USR_MC_CHAN_CTRL) {
		/* Look up the first referenced channel in the sat table */
		for (i = 0; i < sat->nsatch; i++) {
			if (buf[5] == sat->satch[i].chan)
				break;
		}
		if (i >= sat->nsatch)
			return -ENOTCONN;
		oper = ((buf[3] & 0xC0) >> 6);
		/* part of grp. activating/removing 1 will take care of rest */
		ret = slim_control_ch(&sat->satcl, sat->satch[i].chanh, oper,
					false);
		if (!ret) {
			/* Count pending remove/define per listed channel */
			for (i = 5; i < len; i++) {
				int j;
				for (j = 0; j < sat->nsatch; j++) {
					if (buf[i] == sat->satch[j].chan) {
						if (oper == SLIM_CH_REMOVE)
							sat->satch[j].req_rem++;
						else
							sat->satch[j].req_def++;
						break;
					}
				}
			}
		}
	} else {
		u16 chh[40];
		struct slim_ch prop;
		u32 exp;
		u8 coeff, cc;
		u8 prrate = buf[6];
		/* Need at least one channel number past the 8-byte header */
		if (len <= 8)
			return -EINVAL;
		for (i = 8; i < len; i++) {
			int j = 0;
			/* Already known channel? reuse its handle */
			for (j = 0; j < sat->nsatch; j++) {
				if (sat->satch[j].chan == buf[i]) {
					chh[i - 8] = sat->satch[j].chanh;
					break;
				}
			}
			if (j < sat->nsatch) {
				u16 dummy;
				ret = slim_query_ch(&sat->satcl, buf[i],
							&dummy);
				if (ret)
					return ret;
				if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
					sat->satch[j].req_def++;
				continue;
			}
			/* New channel: allocate a slot (j == nsatch here) */
			if (sat->nsatch >= MSM_MAX_SATCH)
				return -EXFULL;
			ret = slim_query_ch(&sat->satcl, buf[i], &chh[i - 8]);
			if (ret)
				return ret;
			sat->satch[j].chan = buf[i];
			sat->satch[j].chanh = chh[i - 8];
			if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
				sat->satch[j].req_def++;
			sat->nsatch++;
		}
		/* Unpack packed channel properties from the message header */
		prop.dataf = (enum slim_ch_dataf)((buf[3] & 0xE0) >> 5);
		/*
		 * NOTE(review): mask 0xC0 with shift >> 5 yields values
		 * 0/2/4/6 rather than 0..3 — looks like it should be >> 6;
		 * confirm against the SLIMbus message format.
		 */
		prop.auxf = (enum slim_ch_auxf)((buf[4] & 0xC0) >> 5);
		prop.baser = SLIM_RATE_4000HZ;
		if (prrate & 0x8)
			prop.baser = SLIM_RATE_11025HZ;
		else
			prop.baser = SLIM_RATE_4000HZ;
		prop.prot = (enum slim_ch_proto)(buf[5] & 0x0F);
		prop.sampleszbits = (buf[4] & 0x1F)*SLIM_CL_PER_SL;
		exp = (u32)((buf[5] & 0xF0) >> 4);
		coeff = (buf[4] & 0x20) >> 5;
		cc = (coeff ? 3 : 1);
		prop.ratem = cc * (1 << exp);
		/* More than one channel listed => define as a group */
		if (i > 9)
			ret = slim_define_ch(&sat->satcl, &prop, chh, len - 8,
					true, &chh[0]);
		else
			ret = slim_define_ch(&sat->satcl, &prop,
					&chh[0], 1, false, NULL);
		dev_dbg(dev->dev, "define sat grp returned:%d", ret);
		if (ret)
			return ret;

		/* part of group so activating 1 will take care of rest */
		if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
			ret = slim_control_ch(&sat->satcl,
					chh[0],
					SLIM_CH_ACTIVATE, false);
	}
	return ret;
}
1116
/*
 * Drain one message from the RX queue and dispatch it:
 * - REPORT_PRESENT: assign a logical address; remember the ported-generic
 *   device address; enable runtime PM once the last expected MSM device
 *   enumerates; hand satellite devices off to their workqueue.
 * - REPLY_INFORMATION/REPLY_VALUE: complete the pending transaction.
 * - REPORT_INFORMATION: log the reported information element.
 * Anything else is logged as unexpected.
 */
static void msm_slim_rxwq(struct msm_slim_ctrl *dev)
{
	u8 buf[40];
	u8 mc, mt, len;
	int i, ret;
	if ((msm_slim_rx_dequeue(dev, (u8 *)buf)) != -ENODATA) {
		/* First word: 5-bit length, 3-bit message type, then MC */
		len = buf[0] & 0x1F;
		mt = (buf[0] >> 5) & 0x7;
		mc = buf[1];
		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 laddr;
			u8 e_addr[6];
			/* Enumeration address arrives LSB-first; reverse it */
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			ret = slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
			/* Is this Qualcomm ported generic device? */
			if (!ret && e_addr[5] == QC_MFGID_LSB &&
				e_addr[4] == QC_MFGID_MSB &&
				e_addr[1] == QC_DEVID_PGD &&
				e_addr[2] != QC_CHIPID_SL)
				dev->pgdla = laddr;
			/* Last expected MSM device: runtime PM can start */
			if (!ret && !pm_runtime_enabled(dev->dev) &&
				laddr == (QC_MSM_DEVS - 1))
				pm_runtime_enable(dev->dev);

			if (!ret && msm_is_sat_dev(e_addr)) {
				struct msm_slim_sat *sat = addr_to_sat(dev,
								laddr);
				if (!sat)
					sat = msm_slim_alloc_sat(dev);
				if (!sat)
					return;

				/* Satellite messages are handled off-thread */
				sat->satcl.laddr = laddr;
				msm_sat_enqueue(sat, (u32 *)buf, len);
				queue_work(sat->wq, &sat->wd);
			}
		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			/* buf[3] is the transaction ID; payload follows */
			u8 tid = buf[3];
			dev_dbg(dev->dev, "tid:%d, len:%d\n", tid, len - 4);
			slim_msg_response(&dev->ctrl, &buf[4], tid,
						len - 4);
			pm_runtime_mark_last_busy(dev->dev);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			/* Log the 12-bit information element and bitmasks */
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
				l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
					i, buf[i+5]);
		} else {
			dev_err(dev->dev, "unexpected message:mc:%x, mt:%x",
				mc, mt);
			for (i = 0; i < len; i++)
				dev_err(dev->dev, "error msg: %x", buf[i]);

		}
	} else
		dev_err(dev->dev, "rxwq called and no dequeue");
}
1182
/*
 * Satellite RX worker: drains the satellite's message queue and services
 * each user message (capability exchange, address query, channel
 * define/control, reconfigure, bandwidth request, port connect/disconnect).
 *
 * Runtime-PM bookkeeping: a controller vote (msm_slim_get_ctrl) is taken
 * when a satellite announces itself or sends a non-core message, tracked
 * via satv/pending_capability/pending_reconf, and dropped again once the
 * message (or its generic ack) has been handled. Votes must stay balanced
 * on every path — take care when modifying the continue/break paths.
 */
static void slim_sat_rxprocess(struct work_struct *work)
{
	struct msm_slim_sat *sat = container_of(work, struct msm_slim_sat, wd);
	struct msm_slim_ctrl *dev = sat->dev;
	u8 buf[40];

	while ((msm_sat_dequeue(sat, buf)) != -ENODATA) {
		struct slim_msg_txn txn;
		u8 len, mc, mt;
		u32 bw_sl;
		int ret = 0;
		/* satv >= 0 means this iteration holds a runtime-PM vote */
		int satv = -1;
		bool gen_ack = false;
		u8 tid;
		u8 wbuf[8];
		int i;
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		txn.dt = SLIM_MSG_DEST_LOGICALADDR;
		txn.ec = 0;
		txn.rbuf = NULL;
		txn.la = sat->satcl.laddr;
		/* satellite handling */
		len = buf[0] & 0x1F;
		mc = buf[1];
		mt = (buf[0] >> 5) & 0x7;

		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 laddr;
			u8 e_addr[6];
			/* Enumeration address arrives LSB-first; reverse */
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			if (pm_runtime_enabled(dev->dev)) {
				satv = msm_slim_get_ctrl(dev);
				if (satv >= 0)
					sat->pending_capability = true;
			}
			slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
			sat->satcl.laddr = laddr;
			/*
			 * Since capability message is already sent, present
			 * message will indicate subsystem hosting this
			 * satellite has restarted.
			 * Remove all active channels of this satellite
			 * when this is detected
			 */
			if (sat->sent_capability) {
				for (i = 0; i < sat->nsatch; i++) {
					enum slim_ch_state chs =
						slim_get_ch_state(&sat->satcl,
							sat->satch[i].chanh);
					pr_err("Slim-SSR, sat:%d, rm chan:%d",
						laddr,
						sat->satch[i].chan);
					if (chs == SLIM_CH_ACTIVE)
						slim_control_ch(&sat->satcl,
							sat->satch[i].chanh,
							SLIM_CH_REMOVE, true);
				}
			}
		} else if (mt != SLIM_MSG_MT_CORE &&
				mc != SLIM_MSG_MC_REPORT_PRESENT) {
			/* Any other user message: vote while we service it */
			satv = msm_slim_get_ctrl(dev);
		}
		switch (mc) {
		case SLIM_MSG_MC_REPORT_PRESENT:
			/* Remove runtime_pm vote once satellite acks */
			if (mt != SLIM_MSG_MT_CORE) {
				if (pm_runtime_enabled(dev->dev) &&
					sat->pending_capability) {
					msm_slim_put_ctrl(dev);
					sat->pending_capability = false;
				}
				continue;
			}
			/* send a Manager capability msg */
			if (sat->sent_capability) {
				if (mt == SLIM_MSG_MT_CORE)
					goto send_capability;
				else
					continue;
			}
			ret = slim_add_device(&dev->ctrl, &sat->satcl);
			if (ret) {
				dev_err(dev->dev,
					"Satellite-init failed");
				continue;
			}
			/* Satellite-channels */
			/*
			 * NOTE(review): kzalloc result is not checked here;
			 * later satch dereferences assume it succeeded.
			 */
			sat->satch = kzalloc(MSM_MAX_SATCH *
					sizeof(struct msm_sat_chan),
					GFP_KERNEL);
send_capability:
			txn.mc = SLIM_USR_MC_MASTER_CAPABILITY;
			txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
			txn.la = sat->satcl.laddr;
			txn.rl = 8;
			wbuf[0] = SAT_MAGIC_LSB;
			wbuf[1] = SAT_MAGIC_MSB;
			wbuf[2] = SAT_MSG_VER;
			wbuf[3] = SAT_MSG_PROT;
			txn.wbuf = wbuf;
			txn.len = 4;
			sat->sent_capability = true;
			msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_ADDR_QUERY:
			/* Reply with the logical address for the queried EA */
			memcpy(&wbuf[1], &buf[4], 6);
			ret = slim_get_logical_addr(&sat->satcl,
					&wbuf[1], 6, &wbuf[7]);
			if (ret)
				memset(&wbuf[1], 0, 6);
			wbuf[0] = buf[3];
			txn.mc = SLIM_USR_MC_ADDR_REPLY;
			txn.rl = 12;
			txn.len = 8;
			txn.wbuf = wbuf;
			msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DEFINE_CHAN:
		case SLIM_USR_MC_DEF_ACT_CHAN:
		case SLIM_USR_MC_CHAN_CTRL:
			/* TID position differs between ctrl and define msgs */
			if (mc != SLIM_USR_MC_CHAN_CTRL)
				tid = buf[7];
			else
				tid = buf[4];
			gen_ack = true;
			ret = msm_sat_define_ch(sat, buf, len, mc);
			if (ret) {
				dev_err(dev->dev,
					"SAT define_ch returned:%d",
					ret);
			}
			/* Hold a vote until RECONFIG_NOW completes */
			if (!sat->pending_reconf) {
				int chv = msm_slim_get_ctrl(dev);
				if (chv >= 0)
					sat->pending_reconf = true;
			}
			break;
		case SLIM_USR_MC_RECONFIG_NOW:
			tid = buf[3];
			gen_ack = true;
			ret = slim_reconfigure_now(&sat->satcl);
			/*
			 * Settle pending per-channel bookkeeping:
			 * removals are deallocated on success, pending
			 * definitions are rolled back on failure.
			 */
			for (i = 0; i < sat->nsatch; i++) {
				struct msm_sat_chan *sch = &sat->satch[i];
				if (sch->req_rem) {
					if (!ret)
						slim_dealloc_ch(&sat->satcl,
								sch->chanh);
					sch->req_rem--;
				} else if (sch->req_def) {
					if (ret)
						slim_dealloc_ch(&sat->satcl,
								sch->chanh);
					sch->req_def--;
				}
			}
			if (sat->pending_reconf) {
				msm_slim_put_ctrl(dev);
				sat->pending_reconf = false;
			}
			break;
		case SLIM_USR_MC_REQ_BW:
			/* what we get is in SLOTS */
			bw_sl = (u32)buf[4] << 3 |
						((buf[3] & 0xE0) >> 5);
			sat->satcl.pending_msgsl = bw_sl;
			tid = buf[5];
			gen_ack = true;
			break;
		case SLIM_USR_MC_CONNECT_SRC:
		case SLIM_USR_MC_CONNECT_SINK:
			/* Forward as a core connect message to the device */
			if (mc == SLIM_USR_MC_CONNECT_SRC)
				txn.mc = SLIM_MSG_MC_CONNECT_SOURCE;
			else
				txn.mc = SLIM_MSG_MC_CONNECT_SINK;
			wbuf[0] = buf[4] & 0x1F;
			wbuf[1] = buf[5];
			tid = buf[6];
			txn.la = buf[3];
			txn.mt = SLIM_MSG_MT_CORE;
			txn.rl = 6;
			txn.len = 2;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DISCONNECT_PORT:
			txn.mc = SLIM_MSG_MC_DISCONNECT_PORT;
			wbuf[0] = buf[4] & 0x1F;
			tid = buf[5];
			txn.la = buf[3];
			txn.rl = 5;
			txn.len = 1;
			txn.mt = SLIM_MSG_MT_CORE;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			/* fallthrough into default: break (harmless) */
		default:
			break;
		}
		if (!gen_ack) {
			/* No ack owed; release the vote taken above */
			if (mc != SLIM_MSG_MC_REPORT_PRESENT && satv >= 0)
				msm_slim_put_ctrl(dev);
			continue;
		}

		/* Send the generic ack: wbuf[1] nonzero means success */
		wbuf[0] = tid;
		if (!ret)
			wbuf[1] = MSM_SAT_SUCCSS;
		else
			wbuf[1] = 0;
		txn.mc = SLIM_USR_MC_GENERIC_ACK;
		txn.la = sat->satcl.laddr;
		txn.rl = 6;
		txn.len = 2;
		txn.wbuf = wbuf;
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		msm_xfer_msg(&dev->ctrl, &txn);
		if (satv >= 0)
			msm_slim_put_ctrl(dev);
	}
}
1407
Sagar Dharia790cfd02011-09-25 17:56:24 -06001408static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev)
1409{
1410 struct msm_slim_sat *sat;
1411 char *name;
1412 if (dev->nsats >= MSM_MAX_NSATS)
1413 return NULL;
1414
1415 sat = kzalloc(sizeof(struct msm_slim_sat), GFP_KERNEL);
1416 if (!sat) {
1417 dev_err(dev->dev, "no memory for satellite");
1418 return NULL;
1419 }
1420 name = kzalloc(SLIMBUS_NAME_SIZE, GFP_KERNEL);
1421 if (!name) {
1422 dev_err(dev->dev, "no memory for satellite name");
1423 kfree(sat);
1424 return NULL;
1425 }
1426 dev->satd[dev->nsats] = sat;
1427 sat->dev = dev;
1428 snprintf(name, SLIMBUS_NAME_SIZE, "msm_sat%d", dev->nsats);
1429 sat->satcl.name = name;
1430 spin_lock_init(&sat->lock);
1431 INIT_WORK(&sat->wd, slim_sat_rxprocess);
1432 sat->wq = create_singlethread_workqueue(sat->satcl.name);
1433 if (!sat->wq) {
1434 kfree(name);
1435 kfree(sat);
1436 return NULL;
1437 }
1438 /*
1439 * Both sats will be allocated from RX thread and RX thread will
1440 * process messages sequentially. No synchronization necessary
1441 */
1442 dev->nsats++;
1443 return sat;
1444}
1445
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001446static void
1447msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
1448{
1449 u32 *buf = ev->data.transfer.user;
1450 struct sps_iovec *iovec = &ev->data.transfer.iovec;
1451
1452 /*
1453 * Note the virtual address needs to be offset by the same index
1454 * as the physical address or just pass in the actual virtual address
1455 * if the sps_mem_buffer is not needed. Note that if completion is
1456 * used, the virtual address won't be available and will need to be
1457 * calculated based on the offset of the physical address
1458 */
1459 if (ev->event_id == SPS_EVENT_DESC_DONE) {
1460
1461 pr_debug("buf = 0x%p, data = 0x%x\n", buf, *buf);
1462
1463 pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
1464 iovec->addr, iovec->size, iovec->flags);
1465
1466 } else {
1467 dev_err(dev->dev, "%s: unknown event %d\n",
1468 __func__, ev->event_id);
1469 }
1470}
1471
1472static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
1473{
1474 struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;
1475 msm_slim_rx_msgq_event(dev, notify);
1476}
1477
1478/* Queue up Rx message buffer */
1479static inline int
1480msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
1481{
1482 int ret;
1483 u32 flags = SPS_IOVEC_FLAG_INT;
1484 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1485 struct sps_mem_buffer *mem = &endpoint->buf;
1486 struct sps_pipe *pipe = endpoint->sps;
1487
1488 /* Rx message queue buffers are 4 bytes in length */
1489 u8 *virt_addr = mem->base + (4 * ix);
1490 u32 phys_addr = mem->phys_base + (4 * ix);
1491
1492 pr_debug("index:%d, phys:0x%x, virt:0x%p\n", ix, phys_addr, virt_addr);
1493
1494 ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, flags);
1495 if (ret)
1496 dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);
1497
1498 return ret;
1499}
1500
1501static inline int
1502msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
1503{
1504 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1505 struct sps_mem_buffer *mem = &endpoint->buf;
1506 struct sps_pipe *pipe = endpoint->sps;
1507 struct sps_iovec iovec;
1508 int index;
1509 int ret;
1510
1511 ret = sps_get_iovec(pipe, &iovec);
1512 if (ret) {
1513 dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
1514 goto err_exit;
1515 }
1516
1517 pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
1518 iovec.addr, iovec.size, iovec.flags);
1519 BUG_ON(iovec.addr < mem->phys_base);
1520 BUG_ON(iovec.addr >= mem->phys_base + mem->size);
1521
1522 /* Calculate buffer index */
1523 index = (iovec.addr - mem->phys_base) / 4;
1524 *(data + offset) = *((u32 *)mem->base + index);
1525
1526 pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index, *data);
1527
1528 /* Add buffer back to the queue */
1529 (void)msm_slim_post_rx_msgq(dev, index);
1530
1531err_exit:
1532 return ret;
1533}
1534
/*
 * RX thread: blocks on rx_msgq_notify, then either services the software
 * RX queue directly (no BAM message queues) or assembles a message word
 * by word from the BAM queue and dispatches it — to the satellite's
 * workqueue when the logical address maps to a satellite, otherwise to
 * the generic RX path.
 */
static int msm_slim_rx_msgq_thread(void *data)
{
	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
	struct completion *notify = &dev->rx_msgq_notify;
	struct msm_slim_sat *sat = NULL;
	u32 mc = 0;
	u32 mt = 0;
	u32 buffer[10];
	int index = 0;	/* next 4-byte word slot within the current message */
	u8 msg_len = 0;
	int ret;

	dev_dbg(dev->dev, "rx thread started");

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = wait_for_completion_interruptible(notify);

		if (ret)
			dev_err(dev->dev, "rx thread wait error:%d", ret);

		/* 1 irq notification per message */
		if (!dev->use_rx_msgqs) {
			msm_slim_rxwq(dev);
			continue;
		}

		ret = msm_slim_rx_msgq_get(dev, buffer, index);
		if (ret) {
			dev_err(dev->dev, "rx_msgq_get() failed 0x%x\n", ret);
			continue;
		}

		pr_debug("message[%d] = 0x%x\n", index, *buffer);

		/* Decide if we use generic RX or satellite RX */
		if (index++ == 0) {
			/* First word carries length, MT and MC fields */
			msg_len = *buffer & 0x1F;
			pr_debug("Start of new message, len = %d\n", msg_len);
			mt = (buffer[0] >> 5) & 0x7;
			mc = (buffer[0] >> 8) & 0xff;
			dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
			if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
				u8 laddr;
				laddr = (u8)((buffer[0] >> 16) & 0xff);
				sat = addr_to_sat(dev, laddr);
			}
			/*
			 * NOTE(review): a message that fits entirely in the
			 * first word (msg_len <= 4) is never dispatched by
			 * the branch below — confirm whether such messages
			 * can occur on this queue.
			 */
		} else if ((index * 4) >= msg_len) {
			/* Message complete: dispatch and reset assembly */
			index = 0;
			if (sat) {
				msm_sat_enqueue(sat, buffer, msg_len);
				queue_work(sat->wq, &sat->wd);
				sat = NULL;
			} else {
				msm_slim_rx_enqueue(dev, buffer, msg_len);
				msm_slim_rxwq(dev);
			}
		}
	}

	return 0;
}
1598
1599static int __devinit msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev)
1600{
1601 int i, ret;
1602 u32 pipe_offset;
1603 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1604 struct sps_connect *config = &endpoint->config;
1605 struct sps_mem_buffer *descr = &config->desc;
1606 struct sps_mem_buffer *mem = &endpoint->buf;
1607 struct completion *notify = &dev->rx_msgq_notify;
1608
1609 struct sps_register_event sps_error_event; /* SPS_ERROR */
1610 struct sps_register_event sps_descr_event; /* DESCR_DONE */
1611
Sagar Dharia31ac5812012-01-04 11:38:59 -07001612 init_completion(notify);
1613 if (!dev->use_rx_msgqs)
1614 goto rx_thread_create;
1615
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001616 /* Allocate the endpoint */
1617 ret = msm_slim_init_endpoint(dev, endpoint);
1618 if (ret) {
1619 dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
1620 goto sps_init_endpoint_failed;
1621 }
1622
1623 /* Get the pipe indices for the message queues */
1624 pipe_offset = (readl_relaxed(dev->base + MGR_STATUS) & 0xfc) >> 2;
1625 dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);
1626
1627 config->mode = SPS_MODE_SRC;
1628 config->source = dev->bam.hdl;
1629 config->destination = SPS_DEV_HANDLE_MEM;
1630 config->src_pipe_index = pipe_offset;
1631 config->options = SPS_O_DESC_DONE | SPS_O_ERROR |
1632 SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
1633
1634 /* Allocate memory for the FIFO descriptors */
1635 ret = msm_slim_sps_mem_alloc(dev, descr,
1636 MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
1637 if (ret) {
1638 dev_err(dev->dev, "unable to allocate SPS descriptors\n");
1639 goto alloc_descr_failed;
1640 }
1641
1642 ret = sps_connect(endpoint->sps, config);
1643 if (ret) {
1644 dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
1645 goto sps_connect_failed;
1646 }
1647
1648 /* Register completion for DESC_DONE */
1649 init_completion(notify);
1650 memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));
1651
1652 sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
1653 sps_descr_event.options = SPS_O_DESC_DONE;
1654 sps_descr_event.user = (void *)dev;
1655 sps_descr_event.xfer_done = notify;
1656
1657 ret = sps_register_event(endpoint->sps, &sps_descr_event);
1658 if (ret) {
1659 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1660 goto sps_reg_event_failed;
1661 }
1662
1663 /* Register callback for errors */
1664 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1665 sps_error_event.mode = SPS_TRIGGER_CALLBACK;
1666 sps_error_event.options = SPS_O_ERROR;
1667 sps_error_event.user = (void *)dev;
1668 sps_error_event.callback = msm_slim_rx_msgq_cb;
1669
1670 ret = sps_register_event(endpoint->sps, &sps_error_event);
1671 if (ret) {
1672 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1673 goto sps_reg_event_failed;
1674 }
1675
1676 /* Allocate memory for the message buffer(s), N descrs, 4-byte mesg */
1677 ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
1678 if (ret) {
1679 dev_err(dev->dev, "dma_alloc_coherent failed\n");
1680 goto alloc_buffer_failed;
1681 }
1682
1683 /*
1684 * Call transfer_one for each 4-byte buffer
1685 * Use (buf->size/4) - 1 for the number of buffer to post
1686 */
1687
1688 /* Setup the transfer */
1689 for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
1690 ret = msm_slim_post_rx_msgq(dev, i);
1691 if (ret) {
1692 dev_err(dev->dev, "post_rx_msgq() failed 0x%x\n", ret);
1693 goto sps_transfer_failed;
1694 }
1695 }
1696
Sagar Dharia31ac5812012-01-04 11:38:59 -07001697rx_thread_create:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001698 /* Fire up the Rx message queue thread */
1699 dev->rx_msgq_thread = kthread_run(msm_slim_rx_msgq_thread, dev,
1700 MSM_SLIM_NAME "_rx_msgq_thread");
1701 if (!dev->rx_msgq_thread) {
1702 dev_err(dev->dev, "Failed to start Rx message queue thread\n");
Sagar Dharia31ac5812012-01-04 11:38:59 -07001703 /* Tear-down BAMs or return? */
1704 if (!dev->use_rx_msgqs)
1705 return -EIO;
1706 else
1707 ret = -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001708 } else
1709 return 0;
1710
1711sps_transfer_failed:
1712 msm_slim_sps_mem_free(dev, mem);
1713alloc_buffer_failed:
1714 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1715 sps_register_event(endpoint->sps, &sps_error_event);
1716sps_reg_event_failed:
1717 sps_disconnect(endpoint->sps);
1718sps_connect_failed:
1719 msm_slim_sps_mem_free(dev, descr);
1720alloc_descr_failed:
1721 msm_slim_free_endpoint(endpoint);
1722sps_init_endpoint_failed:
Sagar Dharia31ac5812012-01-04 11:38:59 -07001723 dev->use_rx_msgqs = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001724 return ret;
1725}
1726
1727/* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */
1728static int __devinit
1729msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem)
1730{
1731 int i, ret;
1732 u32 bam_handle;
1733 struct sps_bam_props bam_props = {0};
1734
1735 static struct sps_bam_sec_config_props sec_props = {
1736 .ees = {
1737 [0] = { /* LPASS */
1738 .vmid = 0,
1739 .pipe_mask = 0xFFFF98,
1740 },
1741 [1] = { /* Krait Apps */
1742 .vmid = 1,
1743 .pipe_mask = 0x3F000007,
1744 },
1745 [2] = { /* Modem */
1746 .vmid = 2,
1747 .pipe_mask = 0x00000060,
1748 },
1749 },
1750 };
1751
1752 bam_props.ee = dev->ee;
1753 bam_props.virt_addr = dev->bam.base;
1754 bam_props.phys_addr = bam_mem->start;
1755 bam_props.irq = dev->bam.irq;
1756 bam_props.manage = SPS_BAM_MGR_LOCAL;
1757 bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;
1758
1759 bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
1760 bam_props.p_sec_config_props = &sec_props;
1761
1762 bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
1763 SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
1764
1765 /* First 7 bits are for message Qs */
1766 for (i = 7; i < 32; i++) {
1767 /* Check what pipes are owned by Apps. */
1768 if ((sec_props.ees[dev->ee].pipe_mask >> i) & 0x1)
1769 break;
1770 }
1771 dev->pipe_b = i - 7;
1772
1773 /* Register the BAM device with the SPS driver */
1774 ret = sps_register_bam_device(&bam_props, &bam_handle);
1775 if (ret) {
Sagar Dharia31ac5812012-01-04 11:38:59 -07001776 dev_err(dev->dev, "disabling BAM: reg-bam failed 0x%x\n", ret);
1777 dev->use_rx_msgqs = 0;
1778 goto init_rx_msgq;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001779 }
1780 dev->bam.hdl = bam_handle;
1781 dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%x\n", bam_handle);
1782
Sagar Dharia31ac5812012-01-04 11:38:59 -07001783init_rx_msgq:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001784 ret = msm_slim_init_rx_msgq(dev);
Sagar Dharia31ac5812012-01-04 11:38:59 -07001785 if (ret)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001786 dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
Sagar Dharia1beb2202012-07-31 19:06:21 -06001787 if (ret && bam_handle) {
Sagar Dharia31ac5812012-01-04 11:38:59 -07001788 sps_deregister_bam_device(bam_handle);
1789 dev->bam.hdl = 0L;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001790 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001791 return ret;
1792}
1793
1794static void msm_slim_sps_exit(struct msm_slim_ctrl *dev)
1795{
1796 if (dev->use_rx_msgqs) {
1797 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1798 struct sps_connect *config = &endpoint->config;
1799 struct sps_mem_buffer *descr = &config->desc;
1800 struct sps_mem_buffer *mem = &endpoint->buf;
1801 struct sps_register_event sps_event;
1802 memset(&sps_event, 0x00, sizeof(sps_event));
1803 msm_slim_sps_mem_free(dev, mem);
1804 sps_register_event(endpoint->sps, &sps_event);
1805 sps_disconnect(endpoint->sps);
1806 msm_slim_sps_mem_free(dev, descr);
1807 msm_slim_free_endpoint(endpoint);
Sagar Dharia31ac5812012-01-04 11:38:59 -07001808 sps_deregister_bam_device(dev->bam.hdl);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001809 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001810}
1811
Sagar Dhariacc969452011-09-19 10:34:30 -06001812static void msm_slim_prg_slew(struct platform_device *pdev,
1813 struct msm_slim_ctrl *dev)
1814{
1815 struct resource *slew_io;
1816 void __iomem *slew_reg;
1817 /* SLEW RATE register for this slimbus */
1818 dev->slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1819 "slimbus_slew_reg");
1820 if (!dev->slew_mem) {
1821 dev_dbg(&pdev->dev, "no slimbus slew resource\n");
1822 return;
1823 }
1824 slew_io = request_mem_region(dev->slew_mem->start,
1825 resource_size(dev->slew_mem), pdev->name);
1826 if (!slew_io) {
1827 dev_dbg(&pdev->dev, "slimbus-slew mem claimed\n");
1828 dev->slew_mem = NULL;
1829 return;
1830 }
1831
1832 slew_reg = ioremap(dev->slew_mem->start, resource_size(dev->slew_mem));
1833 if (!slew_reg) {
1834 dev_dbg(dev->dev, "slew register mapping failed");
1835 release_mem_region(dev->slew_mem->start,
1836 resource_size(dev->slew_mem));
1837 dev->slew_mem = NULL;
1838 return;
1839 }
1840 writel_relaxed(1, slew_reg);
1841 /* Make sure slimbus-slew rate enabling goes through */
1842 wmb();
1843 iounmap(slew_reg);
1844}
1845
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001846static int __devinit msm_slim_probe(struct platform_device *pdev)
1847{
1848 struct msm_slim_ctrl *dev;
1849 int ret;
Joonwoo Parkf69f77a2012-08-28 15:26:11 -07001850 enum apr_subsys_state q6_state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001851 struct resource *bam_mem, *bam_io;
1852 struct resource *slim_mem, *slim_io;
1853 struct resource *irq, *bam_irq;
Sagar Dharia1beb2202012-07-31 19:06:21 -06001854 bool rxreg_access = false;
Joonwoo Parkf69f77a2012-08-28 15:26:11 -07001855
1856 q6_state = apr_get_q6_state();
1857 if (q6_state == APR_SUBSYS_DOWN) {
1858 dev_dbg(&pdev->dev, "defering %s, adsp_state %d\n", __func__,
1859 q6_state);
1860 return -EPROBE_DEFER;
1861 } else
1862 dev_dbg(&pdev->dev, "adsp is ready\n");
1863
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001864 slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1865 "slimbus_physical");
1866 if (!slim_mem) {
1867 dev_err(&pdev->dev, "no slimbus physical memory resource\n");
1868 return -ENODEV;
1869 }
1870 slim_io = request_mem_region(slim_mem->start, resource_size(slim_mem),
1871 pdev->name);
1872 if (!slim_io) {
1873 dev_err(&pdev->dev, "slimbus memory already claimed\n");
1874 return -EBUSY;
1875 }
1876
1877 bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1878 "slimbus_bam_physical");
1879 if (!bam_mem) {
1880 dev_err(&pdev->dev, "no slimbus BAM memory resource\n");
1881 ret = -ENODEV;
1882 goto err_get_res_bam_failed;
1883 }
1884 bam_io = request_mem_region(bam_mem->start, resource_size(bam_mem),
1885 pdev->name);
1886 if (!bam_io) {
1887 release_mem_region(slim_mem->start, resource_size(slim_mem));
1888 dev_err(&pdev->dev, "slimbus BAM memory already claimed\n");
1889 ret = -EBUSY;
1890 goto err_get_res_bam_failed;
1891 }
1892 irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1893 "slimbus_irq");
1894 if (!irq) {
1895 dev_err(&pdev->dev, "no slimbus IRQ resource\n");
1896 ret = -ENODEV;
1897 goto err_get_res_failed;
1898 }
1899 bam_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1900 "slimbus_bam_irq");
1901 if (!bam_irq) {
1902 dev_err(&pdev->dev, "no slimbus BAM IRQ resource\n");
1903 ret = -ENODEV;
1904 goto err_get_res_failed;
1905 }
1906
1907 dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL);
1908 if (!dev) {
1909 dev_err(&pdev->dev, "no memory for MSM slimbus controller\n");
1910 ret = -ENOMEM;
1911 goto err_get_res_failed;
1912 }
1913 dev->dev = &pdev->dev;
1914 platform_set_drvdata(pdev, dev);
1915 slim_set_ctrldata(&dev->ctrl, dev);
1916 dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
1917 if (!dev->base) {
1918 dev_err(&pdev->dev, "IOremap failed\n");
1919 ret = -ENOMEM;
1920 goto err_ioremap_failed;
1921 }
1922 dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem));
1923 if (!dev->bam.base) {
1924 dev_err(&pdev->dev, "BAM IOremap failed\n");
1925 ret = -ENOMEM;
1926 goto err_ioremap_bam_failed;
1927 }
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06001928 if (pdev->dev.of_node) {
1929
1930 ret = of_property_read_u32(pdev->dev.of_node, "cell-index",
1931 &dev->ctrl.nr);
1932 if (ret) {
1933 dev_err(&pdev->dev, "Cell index not specified:%d", ret);
1934 goto err_of_init_failed;
1935 }
Sagar Dharia1beb2202012-07-31 19:06:21 -06001936 rxreg_access = of_property_read_bool(pdev->dev.of_node,
1937 "qcom,rxreg-access");
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06001938 /* Optional properties */
1939 ret = of_property_read_u32(pdev->dev.of_node,
1940 "qcom,min-clk-gear", &dev->ctrl.min_cg);
1941 ret = of_property_read_u32(pdev->dev.of_node,
1942 "qcom,max-clk-gear", &dev->ctrl.max_cg);
Sagar Dharia1beb2202012-07-31 19:06:21 -06001943 pr_debug("min_cg:%d, max_cg:%d, rxreg: %d", dev->ctrl.min_cg,
1944 dev->ctrl.max_cg, rxreg_access);
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06001945 } else {
1946 dev->ctrl.nr = pdev->id;
1947 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001948 dev->ctrl.nchans = MSM_SLIM_NCHANS;
1949 dev->ctrl.nports = MSM_SLIM_NPORTS;
1950 dev->ctrl.set_laddr = msm_set_laddr;
1951 dev->ctrl.xfer_msg = msm_xfer_msg;
Sagar Dharia144e5e02011-08-08 17:30:11 -06001952 dev->ctrl.wakeup = msm_clk_pause_wakeup;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001953 dev->ctrl.config_port = msm_config_port;
1954 dev->ctrl.port_xfer = msm_slim_port_xfer;
1955 dev->ctrl.port_xfer_status = msm_slim_port_xfer_status;
1956 /* Reserve some messaging BW for satellite-apps driver communication */
1957 dev->ctrl.sched.pending_msgsl = 30;
1958
1959 init_completion(&dev->reconf);
1960 mutex_init(&dev->tx_lock);
1961 spin_lock_init(&dev->rx_lock);
1962 dev->ee = 1;
Sagar Dharia1beb2202012-07-31 19:06:21 -06001963 if (rxreg_access)
1964 dev->use_rx_msgqs = 0;
1965 else
1966 dev->use_rx_msgqs = 1;
1967
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001968 dev->irq = irq->start;
1969 dev->bam.irq = bam_irq->start;
1970
Sagar Dhariadebc8b72012-08-11 15:02:12 -06001971 dev->hclk = clk_get(dev->dev, "iface_clk");
1972 if (IS_ERR(dev->hclk))
1973 dev->hclk = NULL;
1974 else
1975 clk_prepare_enable(dev->hclk);
1976
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001977 ret = msm_slim_sps_init(dev, bam_mem);
1978 if (ret != 0) {
1979 dev_err(dev->dev, "error SPS init\n");
1980 goto err_sps_init_failed;
1981 }
1982
1983
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001984 dev->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
1985 dev->framer.superfreq =
1986 dev->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
1987 dev->ctrl.a_framer = &dev->framer;
1988 dev->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001989 dev->ctrl.dev.parent = &pdev->dev;
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06001990 dev->ctrl.dev.of_node = pdev->dev.of_node;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001991
1992 ret = request_irq(dev->irq, msm_slim_interrupt, IRQF_TRIGGER_HIGH,
1993 "msm_slim_irq", dev);
1994 if (ret) {
1995 dev_err(&pdev->dev, "request IRQ failed\n");
1996 goto err_request_irq_failed;
1997 }
1998
Sagar Dhariacc969452011-09-19 10:34:30 -06001999 msm_slim_prg_slew(pdev, dev);
Sagar Dhariab1c0acf2012-02-06 18:16:58 -07002000
2001 /* Register with framework before enabling frame, clock */
2002 ret = slim_add_numbered_controller(&dev->ctrl);
2003 if (ret) {
2004 dev_err(dev->dev, "error adding controller\n");
2005 goto err_ctrl_failed;
2006 }
2007
2008
Tianyi Gou44a81b02012-02-06 17:49:07 -08002009 dev->rclk = clk_get(dev->dev, "core_clk");
Sagar Dhariab1c0acf2012-02-06 18:16:58 -07002010 if (!dev->rclk) {
2011 dev_err(dev->dev, "slimbus clock not found");
2012 goto err_clk_get_failed;
2013 }
Sagar Dhariacc969452011-09-19 10:34:30 -06002014 clk_set_rate(dev->rclk, SLIM_ROOT_FREQ);
Sagar Dharia9acf7f42012-03-08 09:45:30 -07002015 clk_prepare_enable(dev->rclk);
Sagar Dhariacc969452011-09-19 10:34:30 -06002016
Sagar Dharia82e516f2012-03-16 16:01:23 -06002017 dev->ver = readl_relaxed(dev->base);
2018 /* Version info in 16 MSbits */
2019 dev->ver >>= 16;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002020 /* Component register initialization */
Sagar Dharia82e516f2012-03-16 16:01:23 -06002021 writel_relaxed(1, dev->base + CFG_PORT(COMP_CFG, dev->ver));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002022 writel_relaxed((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
Sagar Dharia82e516f2012-03-16 16:01:23 -06002023 dev->base + CFG_PORT(COMP_TRUST_CFG, dev->ver));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002024
2025 /*
2026 * Manager register initialization
2027 * If RX msg Q is used, disable RX_MSG_RCVD interrupt
2028 */
2029 if (dev->use_rx_msgqs)
2030 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
2031 MGR_INT_MSG_BUF_CONTE | /* MGR_INT_RX_MSG_RCVD | */
2032 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
2033 else
2034 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
2035 MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
2036 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
2037 writel_relaxed(1, dev->base + MGR_CFG);
2038 /*
2039 * Framer registers are beyond 1K memory region after Manager and/or
2040 * component registers. Make sure those writes are ordered
2041 * before framer register writes
2042 */
2043 wmb();
2044
2045 /* Framer register initialization */
Sagar Dhariadebc8b72012-08-11 15:02:12 -06002046 writel_relaxed((1 << INTR_WAKE) | (0xA << REF_CLK_GEAR) |
2047 (0xA << CLK_GEAR) | (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002048 dev->base + FRM_CFG);
2049 /*
2050 * Make sure that framer wake-up and enabling writes go through
2051 * before any other component is enabled. Framer is responsible for
2052 * clocking the bus and enabling framer first will ensure that other
2053 * devices can report presence when they are enabled
2054 */
2055 mb();
2056
2057 /* Enable RX msg Q */
2058 if (dev->use_rx_msgqs)
2059 writel_relaxed(MGR_CFG_ENABLE | MGR_CFG_RX_MSGQ_EN,
2060 dev->base + MGR_CFG);
2061 else
2062 writel_relaxed(MGR_CFG_ENABLE, dev->base + MGR_CFG);
2063 /*
2064 * Make sure that manager-enable is written through before interface
2065 * device is enabled
2066 */
2067 mb();
2068 writel_relaxed(1, dev->base + INTF_CFG);
2069 /*
2070 * Make sure that interface-enable is written through before enabling
2071 * ported generic device inside MSM manager
2072 */
2073 mb();
Sagar Dharia82e516f2012-03-16 16:01:23 -06002074 writel_relaxed(1, dev->base + CFG_PORT(PGD_CFG, dev->ver));
2075 writel_relaxed(0x3F<<17, dev->base + CFG_PORT(PGD_OWN_EEn, dev->ver) +
2076 (4 * dev->ee));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002077 /*
2078 * Make sure that ported generic device is enabled and port-EE settings
2079 * are written through before finally enabling the component
2080 */
2081 mb();
2082
Sagar Dharia82e516f2012-03-16 16:01:23 -06002083 writel_relaxed(1, dev->base + CFG_PORT(COMP_CFG, dev->ver));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002084 /*
2085 * Make sure that all writes have gone through before exiting this
2086 * function
2087 */
2088 mb();
Sagar Dhariaa6627e02012-08-28 12:20:49 -06002089
2090 /* Add devices registered with board-info now that controller is up */
2091 slim_ctrl_add_boarddevs(&dev->ctrl);
2092
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002093 if (pdev->dev.of_node)
2094 of_register_slim_devices(&dev->ctrl);
2095
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002096 pm_runtime_use_autosuspend(&pdev->dev);
2097 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_SLIM_AUTOSUSPEND);
2098 pm_runtime_set_active(&pdev->dev);
2099
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002100 dev_dbg(dev->dev, "MSM SB controller is up!\n");
2101 return 0;
2102
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002103err_ctrl_failed:
Sagar Dharia82e516f2012-03-16 16:01:23 -06002104 writel_relaxed(0, dev->base + CFG_PORT(COMP_CFG, dev->ver));
Sagar Dhariab1c0acf2012-02-06 18:16:58 -07002105err_clk_get_failed:
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002106 kfree(dev->satd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002107err_request_irq_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002108 msm_slim_sps_exit(dev);
2109err_sps_init_failed:
Sagar Dhariadebc8b72012-08-11 15:02:12 -06002110 if (dev->hclk) {
2111 clk_disable_unprepare(dev->hclk);
2112 clk_put(dev->hclk);
2113 }
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002114err_of_init_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002115 iounmap(dev->bam.base);
2116err_ioremap_bam_failed:
2117 iounmap(dev->base);
2118err_ioremap_failed:
2119 kfree(dev);
2120err_get_res_failed:
2121 release_mem_region(bam_mem->start, resource_size(bam_mem));
2122err_get_res_bam_failed:
2123 release_mem_region(slim_mem->start, resource_size(slim_mem));
2124 return ret;
2125}
2126
/*
 * msm_slim_remove() - tear down the controller on driver unbind.
 *
 * Unwinds, roughly in reverse probe order: satellite devices and their
 * channels, runtime-PM state, the controller IRQ and core registration,
 * clock references, SPS/BAM resources, register mappings, the controller
 * structure itself, and finally the claimed memory regions.
 * Always returns 0.
 */
static int __devexit msm_slim_remove(struct platform_device *pdev)
{
	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
	struct resource *bam_mem;
	struct resource *slim_mem;
	/* Cache before dev is kfree()d below; needed for the final release */
	struct resource *slew_mem = dev->slew_mem;
	int i;
	/* Remove every satellite enumerated on this controller */
	for (i = 0; i < dev->nsats; i++) {
		struct msm_slim_sat *sat = dev->satd[i];
		int j;
		/* Drop data channels still held on the satellite's behalf */
		for (j = 0; j < sat->nsatch; j++)
			slim_dealloc_ch(&sat->satcl, sat->satch[j].chanh);
		slim_remove_device(&sat->satcl);
		kfree(sat->satch);
		destroy_workqueue(sat->wq);
		kfree(sat->satcl.name);
		kfree(sat);
	}
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	free_irq(dev->irq, dev);
	slim_del_controller(&dev->ctrl);
	/*
	 * NOTE(review): probe clk_prepare_enable()s rclk (and hclk when
	 * present) but only clk_put() appears here — presumably runtime
	 * suspend already dropped the enable counts; confirm, otherwise
	 * this leaks a prepare/enable reference.
	 */
	clk_put(dev->rclk);
	if (dev->hclk)
		clk_put(dev->hclk);
	msm_slim_sps_exit(dev);
	kthread_stop(dev->rx_msgq_thread);
	iounmap(dev->bam.base);
	iounmap(dev->base);
	kfree(dev);
	bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_bam_physical");
	if (bam_mem)
		release_mem_region(bam_mem->start, resource_size(bam_mem));
	if (slew_mem)
		release_mem_region(slew_mem->start, resource_size(slew_mem));
	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_physical");
	if (slim_mem)
		release_mem_region(slim_mem->start, resource_size(slim_mem));
	return 0;
}
2169
#ifdef CONFIG_PM_RUNTIME
/*
 * Runtime-PM idle callback: rather than suspending synchronously, queue
 * an autosuspend request and return -EAGAIN so the PM core leaves the
 * device active until the autosuspend timer fires.
 */
static int msm_slim_runtime_idle(struct device *device)
{
	int rc = -EAGAIN;

	dev_dbg(device, "pm_runtime: idle...\n");
	pm_request_autosuspend(device);
	return rc;
}
#endif
2178
/*
 * When CONFIG_PM_RUNTIME is not defined, these two functions act as
 * helpers invoked from system suspend/resume, so they live under
 * CONFIG_PM_SLEEP rather than inside #ifdef CONFIG_PM_RUNTIME.
 */
Sagar Dharia45e77912012-01-10 09:55:18 -07002184#ifdef CONFIG_PM_SLEEP
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002185static int msm_slim_runtime_suspend(struct device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002186{
2187 struct platform_device *pdev = to_platform_device(device);
2188 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002189 int ret;
2190 dev_dbg(device, "pm_runtime: suspending...\n");
2191 dev->state = MSM_CTRL_SLEEPING;
2192 ret = slim_ctrl_clk_pause(&dev->ctrl, false, SLIM_CLK_UNSPECIFIED);
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002193 if (ret) {
2194 dev_err(device, "clk pause not entered:%d", ret);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002195 dev->state = MSM_CTRL_AWAKE;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002196 } else {
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002197 dev->state = MSM_CTRL_ASLEEP;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002198 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002199 return ret;
2200}
2201
2202static int msm_slim_runtime_resume(struct device *device)
2203{
2204 struct platform_device *pdev = to_platform_device(device);
2205 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
2206 int ret = 0;
2207 dev_dbg(device, "pm_runtime: resuming...\n");
2208 if (dev->state == MSM_CTRL_ASLEEP)
2209 ret = slim_ctrl_clk_pause(&dev->ctrl, true, 0);
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002210 if (ret) {
2211 dev_err(device, "clk pause not exited:%d", ret);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002212 dev->state = MSM_CTRL_ASLEEP;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002213 } else {
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002214 dev->state = MSM_CTRL_AWAKE;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002215 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002216 return ret;
2217}
2218
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002219static int msm_slim_suspend(struct device *dev)
2220{
2221 int ret = 0;
2222 if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
Sagar Dhariadebc8b72012-08-11 15:02:12 -06002223 struct platform_device *pdev = to_platform_device(dev);
2224 struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002225 dev_dbg(dev, "system suspend");
2226 ret = msm_slim_runtime_suspend(dev);
Sagar Dhariadebc8b72012-08-11 15:02:12 -06002227 if (!ret) {
2228 if (cdev->hclk)
2229 clk_disable_unprepare(cdev->hclk);
2230 }
Sagar Dharia6b559e02011-08-03 17:01:31 -06002231 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002232 if (ret == -EBUSY) {
Sagar Dharia144e5e02011-08-08 17:30:11 -06002233 /*
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002234 * If the clock pause failed due to active channels, there is
2235 * a possibility that some audio stream is active during suspend
2236 * We dont want to return suspend failure in that case so that
2237 * display and relevant components can still go to suspend.
2238 * If there is some other error, then it should be passed-on
2239 * to system level suspend
2240 */
Sagar Dharia144e5e02011-08-08 17:30:11 -06002241 ret = 0;
2242 }
2243 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002244}
2245
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002246static int msm_slim_resume(struct device *dev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002247{
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002248 /* If runtime_pm is enabled, this resume shouldn't do anything */
2249 if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
Sagar Dhariadebc8b72012-08-11 15:02:12 -06002250 struct platform_device *pdev = to_platform_device(dev);
2251 struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002252 int ret;
2253 dev_dbg(dev, "system resume");
Sagar Dhariadebc8b72012-08-11 15:02:12 -06002254 if (cdev->hclk)
2255 clk_prepare_enable(cdev->hclk);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002256 ret = msm_slim_runtime_resume(dev);
2257 if (!ret) {
2258 pm_runtime_mark_last_busy(dev);
2259 pm_request_autosuspend(dev);
2260 }
2261 return ret;
2262
Sagar Dharia144e5e02011-08-08 17:30:11 -06002263 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002264 return 0;
2265}
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002266#endif /* CONFIG_PM_SLEEP */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002267
/*
 * PM callbacks: system sleep is handled by msm_slim_suspend/resume;
 * runtime PM uses the clock-pause based runtime handlers above.
 */
static const struct dev_pm_ops msm_slim_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(
		msm_slim_suspend,
		msm_slim_resume
	)
	SET_RUNTIME_PM_OPS(
		msm_slim_runtime_suspend,
		msm_slim_runtime_resume,
		msm_slim_runtime_idle
	)
};
2279
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002280static struct of_device_id msm_slim_dt_match[] = {
2281 {
2282 .compatible = "qcom,slim-msm",
2283 },
2284 {}
2285};
2286
/* Platform driver glue: probe/remove plus PM ops and DT matching */
static struct platform_driver msm_slim_driver = {
	.probe = msm_slim_probe,
	.remove = msm_slim_remove,
	.driver	= {
		.name = MSM_SLIM_NAME,
		.owner = THIS_MODULE,
		.pm = &msm_slim_dev_pm_ops,
		.of_match_table = msm_slim_dt_match,
	},
};
2297
/*
 * Registered at subsys_initcall level (earlier than module_init) so the
 * bus controller is available before slimbus client drivers probe.
 */
static int msm_slim_init(void)
{
	return platform_driver_register(&msm_slim_driver);
}
subsys_initcall(msm_slim_init);
2303
/* Module unload: unregister the platform driver */
static void msm_slim_exit(void)
{
	platform_driver_unregister(&msm_slim_driver);
}
module_exit(msm_slim_exit);

MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.1");
MODULE_DESCRIPTION("MSM Slimbus controller");
MODULE_ALIAS("platform:msm-slim");