blob: 1e4302bee86ad0788da6827338ecb307f4b7f12f [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12#include <linux/irq.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/slab.h>
16#include <linux/io.h>
17#include <linux/interrupt.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/slimbus/slimbus.h>
21#include <linux/delay.h>
22#include <linux/kthread.h>
23#include <linux/clk.h>
24#include <mach/sps.h>
25
26/* Per spec.max 40 bytes per received message */
27#define SLIM_RX_MSGQ_BUF_LEN 40
28
29#define SLIM_USR_MC_GENERIC_ACK 0x25
30#define SLIM_USR_MC_MASTER_CAPABILITY 0x0
31#define SLIM_USR_MC_REPORT_SATELLITE 0x1
32#define SLIM_USR_MC_ADDR_QUERY 0xD
33#define SLIM_USR_MC_ADDR_REPLY 0xE
34#define SLIM_USR_MC_DEFINE_CHAN 0x20
35#define SLIM_USR_MC_DEF_ACT_CHAN 0x21
36#define SLIM_USR_MC_CHAN_CTRL 0x23
37#define SLIM_USR_MC_RECONFIG_NOW 0x24
38#define SLIM_USR_MC_REQ_BW 0x28
39#define SLIM_USR_MC_CONNECT_SRC 0x2C
40#define SLIM_USR_MC_CONNECT_SINK 0x2D
41#define SLIM_USR_MC_DISCONNECT_PORT 0x2E
42
43/* MSM Slimbus peripheral settings */
44#define MSM_SLIM_PERF_SUMM_THRESHOLD 0x8000
45#define MSM_SLIM_NCHANS 32
46#define MSM_SLIM_NPORTS 24
47
48/*
49 * Need enough descriptors to receive present messages from slaves
50 * if received simultaneously. Present message needs 3 descriptors
51 * and this size will ensure around 10 simultaneous reports.
52 */
53#define MSM_SLIM_DESC_NUM 32
54
55#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
56 ((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))
57
58#define MSM_SLIM_NAME "msm_slim_ctrl"
59#define SLIM_ROOT_FREQ 24576000
60
61#define MSM_CONCUR_MSG 8
62#define SAT_CONCUR_MSG 8
63#define DEF_WATERMARK (8 << 1)
64#define DEF_ALIGN 0
65#define DEF_PACK (1 << 6)
66#define ENABLE_PORT 1
67
68#define DEF_BLKSZ 0
69#define DEF_TRANSZ 0
70
71#define SAT_MAGIC_LSB 0xD9
72#define SAT_MAGIC_MSB 0xC5
73#define SAT_MSG_VER 0x1
74#define SAT_MSG_PROT 0x1
75#define MSM_SAT_SUCCSS 0x20
76
77#define QC_MFGID_LSB 0x2
78#define QC_MFGID_MSB 0x17
79#define QC_CHIPID_SL 0x10
80#define QC_DEVID_SAT1 0x3
81#define QC_DEVID_SAT2 0x4
82#define QC_DEVID_PGD 0x5
83
/* Component registers (offsets from the controller register base) */
enum comp_reg {
	COMP_CFG = 0,
	COMP_TRUST_CFG = 0x14,	/* EE trust/ownership configuration */
};
89
/* Manager registers (offsets from the controller register base) */
enum mgr_reg {
	MGR_CFG = 0x200,
	MGR_STATUS = 0x204,
	MGR_RX_MSGQ_CFG = 0x208,
	MGR_INT_EN = 0x210,
	MGR_INT_STAT = 0x214,	/* see enum mgr_intr for bit meanings */
	MGR_INT_CLR = 0x218,
	MGR_TX_MSG = 0x230,	/* TX message window, written word by word */
	MGR_RX_MSG = 0x270,	/* RX message window, read word by word */
	MGR_VE_STAT = 0x300,
};
102
/* MGR_CFG register bits */
enum msg_cfg {
	MGR_CFG_ENABLE = 1,
	MGR_CFG_RX_MSGQ_EN = 1 << 1,
	MGR_CFG_TX_MSGQ_EN_HIGH = 1 << 2,
	MGR_CFG_TX_MSGQ_EN_LOW = 1 << 3,
};
/* Message queue types (indices into the BAM-backed message queues) */
enum msm_slim_msgq_type {
	MSGQ_RX = 0,
	MSGQ_TX_LOW = 1,
	MSGQ_TX_HIGH = 2,
};
/* Framer registers (offsets from the controller register base) */
enum frm_reg {
	FRM_CFG = 0x400,
	FRM_STAT = 0x404,
	FRM_INT_EN = 0x410,
	FRM_INT_STAT = 0x414,
	FRM_INT_CLR = 0x418,
	FRM_WAKEUP = 0x41C,	/* written to wake framer from clock pause */
	FRM_CLKCTL_DONE = 0x420,
	FRM_IE_STAT = 0x430,
	FRM_VE_STAT = 0x440,
};
127
/* Interface-device registers (offsets from the controller register base) */
enum intf_reg {
	INTF_CFG = 0x600,
	INTF_STAT = 0x604,
	INTF_INT_EN = 0x610,
	INTF_INT_STAT = 0x614,
	INTF_INT_CLR = 0x618,
	INTF_IE_STAT = 0x630,
	INTF_VE_STAT = 0x640,
};
138
/*
 * Manager PGD (ported generic device) registers.
 * Names ending in "EEn" are per-execution-environment arrays with a
 * 16-byte stride (code indexes them as base + 16 * ee); names ending in
 * "n" are per-port arrays with a 32-byte stride (base + 32 * port).
 */
enum pgd_reg {
	PGD_CFG = 0x1000,
	PGD_STAT = 0x1004,
	PGD_INT_EN = 0x1010,
	PGD_INT_STAT = 0x1014,
	PGD_INT_CLR = 0x1018,
	PGD_OWN_EEn = 0x1020,
	PGD_PORT_INT_EN_EEn = 0x1030,
	PGD_PORT_INT_ST_EEn = 0x1034,
	PGD_PORT_INT_CL_EEn = 0x1038,
	PGD_PORT_CFGn = 0x1080,
	PGD_PORT_STATn = 0x1084,
	PGD_PORT_PARAMn = 0x1088,
	PGD_PORT_BLKn = 0x108C,
	PGD_PORT_TRANn = 0x1090,
	PGD_PORT_MCHANn = 0x1094,
	PGD_PORT_PSHPLLn = 0x1098,
	PGD_PORT_PC_CFGn = 0x1600,
	PGD_PORT_PC_VALn = 0x1604,
	PGD_PORT_PC_VFR_TSn = 0x1608,
	PGD_PORT_PC_VFR_STn = 0x160C,
	PGD_PORT_PC_VFR_CLn = 0x1610,
	PGD_IE_STAT = 0x1700,
	PGD_VE_STAT = 0x1710,
};
165
/*
 * Resource-group/EE assignment values — presumably programmed into
 * COMP_TRUST_CFG to assign ownership; TODO confirm against HW docs.
 */
enum rsc_grp {
	EE_MGR_RSC_GRP	= 1 << 10,
	EE_NGD_2	= 2 << 6,
	EE_NGD_1	= 0,
};
171
/* MGR_INT_STAT / MGR_INT_CLR / MGR_INT_EN bit positions */
enum mgr_intr {
	MGR_INT_RECFG_DONE = 1 << 24,
	MGR_INT_TX_NACKED_2 = 1 << 25,
	MGR_INT_MSG_BUF_CONTE = 1 << 26,
	MGR_INT_RX_MSG_RCVD = 1 << 30,
	/*
	 * NOTE(review): 1 << 31 left-shifts into the sign bit of int;
	 * (1u << 31) would be strictly conforming — confirm before changing,
	 * as this pattern is pervasive in kernel code of this era.
	 */
	MGR_INT_TX_MSG_SENT = 1 << 31,
};
179
/* FRM_CFG field values — presumably bit positions/shifts; TODO confirm */
enum frm_cfg {
	FRM_ACTIVE	= 1,
	CLK_GEAR	= 7,
	ROOT_FREQ	= 11,
	REF_CLK_GEAR	= 15,
};
186
/* State for the BAM (data mover) serving the slimbus core */
struct msm_slim_sps_bam {
	u32 hdl;		/* SPS device handle for this BAM */
	void __iomem *base;	/* mapped BAM register space */
	int irq;
};
192
/* One SPS pipe endpoint (used for data ports and the RX message queue) */
struct msm_slim_endp {
	struct sps_pipe *sps;		/* NULL until allocated */
	struct sps_connect config;
	struct sps_register_event event;
	struct sps_mem_buffer buf;	/* DMA-coherent buffer for this pipe */
	struct completion *xcomp;
	bool connected;			/* true after successful sps_connect */
};
201
/* Per-controller driver state */
struct msm_slim_ctrl {
	struct slim_controller ctrl;	/* core slimbus controller */
	struct slim_framer framer;
	struct device *dev;
	void __iomem *base;		/* mapped controller register space */
	struct resource *slew_mem;
	u32 curr_bw;
	u8 msg_cnt;
	u32 tx_buf[10];			/* single TX message assembly buffer */
	u8 rx_msgs[MSM_CONCUR_MSG][SLIM_RX_MSGQ_BUF_LEN]; /* RX ring slots */
	spinlock_t rx_lock;		/* protects head/tail/rx_msgs */
	int head;			/* RX ring: next slot to dequeue */
	int tail;			/* RX ring: next slot to enqueue */
	int irq;
	int err;			/* result of last TX (0 or -EIO on NACK) */
	int ee;				/* this driver's execution environment */
	struct completion *wr_comp;	/* signalled by ISR on TX done/NACK */
	struct msm_slim_sat *satd;
	struct msm_slim_endp pipes[7];	/* data-port pipes */
	struct msm_slim_sps_bam bam;
	struct msm_slim_endp rx_msgq;
	struct completion rx_msgq_notify;
	struct task_struct *rx_msgq_thread;
	struct clk *rclk;
	struct mutex tx_lock;		/* serializes message transactions */
	u8 pgdla;			/* logical address of manager's PGD */
	bool use_rx_msgqs;
	int suspended;
	int pipe_b;			/* hw port number of first usable pipe */
	struct completion reconf;	/* signalled on RECONFIG_DONE intr */
	bool reconf_busy;
};
234
/* Satellite (remote execution environment) bookkeeping */
struct msm_slim_sat {
	struct slim_device satcl;	/* satellite's client device */
	struct msm_slim_ctrl *dev;	/* owning controller */
	struct workqueue_struct *wq;	/* drains queued satellite messages */
	struct work_struct wd;
	u8 sat_msgs[SAT_CONCUR_MSG][40]; /* fixed-size message ring slots */
	u16 *satch;			/* channel handles owned by satellite */
	u8 nsatch;
	bool sent_capability;		/* capability msg sent exactly once */
	int shead;			/* ring: next slot to dequeue */
	int stail;			/* ring: next slot to enqueue */
	spinlock_t lock;		/* protects shead/stail/sat_msgs */
};
248
249static int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
250{
251 spin_lock(&dev->rx_lock);
252 if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
253 spin_unlock(&dev->rx_lock);
254 dev_err(dev->dev, "RX QUEUE full!");
255 return -EXFULL;
256 }
257 memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
258 dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
259 spin_unlock(&dev->rx_lock);
260 return 0;
261}
262
263static int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
264{
265 unsigned long flags;
266 spin_lock_irqsave(&dev->rx_lock, flags);
267 if (dev->tail == dev->head) {
268 spin_unlock_irqrestore(&dev->rx_lock, flags);
269 return -ENODATA;
270 }
271 memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40);
272 dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
273 spin_unlock_irqrestore(&dev->rx_lock, flags);
274 return 0;
275}
276
277static int msm_sat_enqueue(struct msm_slim_sat *sat, u32 *buf, u8 len)
278{
279 struct msm_slim_ctrl *dev = sat->dev;
280 spin_lock(&sat->lock);
281 if ((sat->stail + 1) % SAT_CONCUR_MSG == sat->shead) {
282 spin_unlock(&sat->lock);
283 dev_err(dev->dev, "SAT QUEUE full!");
284 return -EXFULL;
285 }
286 memcpy(sat->sat_msgs[sat->stail], (u8 *)buf, len);
287 sat->stail = (sat->stail + 1) % SAT_CONCUR_MSG;
288 spin_unlock(&sat->lock);
289 return 0;
290}
291
292static int msm_sat_dequeue(struct msm_slim_sat *sat, u8 *buf)
293{
294 unsigned long flags;
295 spin_lock_irqsave(&sat->lock, flags);
296 if (sat->stail == sat->shead) {
297 spin_unlock_irqrestore(&sat->lock, flags);
298 return -ENODATA;
299 }
300 memcpy(buf, sat->sat_msgs[sat->shead], 40);
301 sat->shead = (sat->shead + 1) % SAT_CONCUR_MSG;
302 spin_unlock_irqrestore(&sat->lock, flags);
303 return 0;
304}
305
306static void msm_get_eaddr(u8 *e_addr, u32 *buffer)
307{
308 e_addr[0] = (buffer[1] >> 24) & 0xff;
309 e_addr[1] = (buffer[1] >> 16) & 0xff;
310 e_addr[2] = (buffer[1] >> 8) & 0xff;
311 e_addr[3] = buffer[1] & 0xff;
312 e_addr[4] = (buffer[0] >> 24) & 0xff;
313 e_addr[5] = (buffer[0] >> 16) & 0xff;
314}
315
316static bool msm_is_sat_dev(u8 *e_addr)
317{
318 if (e_addr[5] == QC_MFGID_LSB && e_addr[4] == QC_MFGID_MSB &&
319 e_addr[2] != QC_CHIPID_SL &&
320 (e_addr[1] == QC_DEVID_SAT1 || e_addr[1] == QC_DEVID_SAT2))
321 return true;
322 return false;
323}
324
/*
 * Controller ISR. Handles, in order:
 *  - TX done / TX NACK: records -EIO on NACK and completes dev->wr_comp;
 *  - RX message received: reads the message from the MGR_RX_MSG window
 *    and routes it (user messages -> satellite workqueue, core reports/
 *    replies -> RX ring + rx_msgq_notify, otherwise log and drop);
 *  - reconfiguration done: completes dev->reconf;
 *  - per-port PGD interrupts: records overflow/underflow/disconnect.
 * Every interrupt-clear write is followed by mb() so the clear reaches
 * the device before any dependent action (completion/queue_work/return).
 */
static irqreturn_t msm_slim_interrupt(int irq, void *d)
{
	struct msm_slim_ctrl *dev = d;
	u32 pstat;
	u32 stat = readl_relaxed(dev->base + MGR_INT_STAT);

	if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2) {
		if (stat & MGR_INT_TX_MSG_SENT)
			writel_relaxed(MGR_INT_TX_MSG_SENT,
					dev->base + MGR_INT_CLR);
		else {
			writel_relaxed(MGR_INT_TX_NACKED_2,
					dev->base + MGR_INT_CLR);
			dev->err = -EIO;	/* NACK: surfaced by msm_xfer_msg */
		}
		/*
		 * Guarantee that interrupt clear bit write goes through before
		 * signalling completion/exiting ISR
		 */
		mb();
		if (dev->wr_comp)
			complete(dev->wr_comp);
	}
	if (stat & MGR_INT_RX_MSG_RCVD) {
		u32 rx_buf[10];
		u32 mc, mt;
		u8 len, i;
		/* first word holds length (5 bits), MT and MC fields */
		rx_buf[0] = readl_relaxed(dev->base + MGR_RX_MSG);
		len = rx_buf[0] & 0x1F;
		for (i = 1; i < ((len + 3) >> 2); i++) {
			rx_buf[i] = readl_relaxed(dev->base + MGR_RX_MSG +
						(4 * i));
			dev_dbg(dev->dev, "reading data: %x\n", rx_buf[i]);
		}
		mt = (rx_buf[0] >> 5) & 0x7;
		mc = (rx_buf[0] >> 8) & 0xff;
		dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
		if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
			/* user messages are handled by the satellite worker */
			struct msm_slim_sat *sat = dev->satd;
			msm_sat_enqueue(sat, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD,
					dev->base + MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through before
			 * queuing work
			 */
			mb();
			queue_work(sat->wq, &sat->wd);
		} else if (mt == SLIM_MSG_MT_CORE &&
				mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 e_addr[6];
			msm_get_eaddr(e_addr, rx_buf);
			if (msm_is_sat_dev(e_addr)) {
				/*
				 * Consider possibility that this device may
				 * be reporting more than once?
				 */
				struct msm_slim_sat *sat = dev->satd;
				msm_sat_enqueue(sat, rx_buf, len);
				writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
							MGR_INT_CLR);
				/*
				 * Guarantee that CLR bit write goes through
				 * before queuing work
				 */
				mb();
				queue_work(sat->wq, &sat->wd);
			} else {
				/* non-satellite device: handled by rxwq */
				msm_slim_rx_enqueue(dev, rx_buf, len);
				writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
							MGR_INT_CLR);
				/*
				 * Guarantee that CLR bit write goes through
				 * before signalling completion
				 */
				mb();
				complete(&dev->rx_msgq_notify);
			}
		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			/* log the information report (element code + masks) */
			u8 *buf = (u8 *)rx_buf;
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
					l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		} else {
			dev_err(dev->dev, "Unexpected MC,%x MT:%x, len:%d",
					mc, mt, len);
			for (i = 0; i < ((len + 3) >> 2); i++)
				dev_err(dev->dev, "error msg: %x", rx_buf[i]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		}
	}
	if (stat & MGR_INT_RECFG_DONE) {
		writel_relaxed(MGR_INT_RECFG_DONE, dev->base + MGR_INT_CLR);
		/*
		 * Guarantee that CLR bit write goes through
		 * before exiting ISR
		 */
		mb();
		complete(&dev->reconf);
	}
	/* per-EE port interrupt status (EE register stride is 16 bytes) */
	pstat = readl_relaxed(dev->base + PGD_PORT_INT_ST_EEn + (16 * dev->ee));
	if (pstat != 0) {
		int i = 0;
		for (i = dev->pipe_b; i < MSM_SLIM_NPORTS; i++) {
			if (pstat & 1 << i) {
				u32 val = readl_relaxed(dev->base +
						PGD_PORT_STATn + (i * 32));
				if (val & (1 << 19)) {
					dev->ctrl.ports[i].err =
						SLIM_P_DISCONNECT;
					dev->pipes[i-dev->pipe_b].connected =
						false;
					/*
					 * SPS will call completion since
					 * ERROR flags are registered
					 */
				} else if (val & (1 << 2))
					dev->ctrl.ports[i].err =
							SLIM_P_OVERFLOW;
				else if (val & (1 << 3))
					dev->ctrl.ports[i].err =
							SLIM_P_UNDERFLOW;
			}
			/*
			 * NOTE(review): writes value 1 (bit 0) each loop
			 * iteration rather than (1 << i) — looks like it
			 * only ever clears port 0's interrupt; confirm
			 * against the register definition.
			 */
			writel_relaxed(1, dev->base + PGD_PORT_INT_CL_EEn +
				(dev->ee * 16));
		}
		/*
		 * Guarantee that port interrupt bit(s) clearing writes go
		 * through before exiting ISR
		 */
		mb();
	}

	return IRQ_HANDLED;
}
490
491static int
492msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
493{
494 int ret;
495 struct sps_pipe *endpoint;
496 struct sps_connect *config = &ep->config;
497
498 /* Allocate the endpoint */
499 endpoint = sps_alloc_endpoint();
500 if (!endpoint) {
501 dev_err(dev->dev, "sps_alloc_endpoint failed\n");
502 return -ENOMEM;
503 }
504
505 /* Get default connection configuration for an endpoint */
506 ret = sps_get_config(endpoint, config);
507 if (ret) {
508 dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
509 goto sps_config_failed;
510 }
511
512 ep->sps = endpoint;
513 return 0;
514
515sps_config_failed:
516 sps_free_endpoint(endpoint);
517 return ret;
518}
519
520static void
521msm_slim_free_endpoint(struct msm_slim_endp *ep)
522{
523 sps_free_endpoint(ep->sps);
524 ep->sps = NULL;
525}
526
/*
 * Allocate a DMA-coherent buffer of @len bytes for an SPS mem descriptor.
 * Fills mem->base/phys_base/size and zeroes the buffer.
 * Returns 0 or -ENOMEM (mem->size/min_size are still set on failure).
 */
static int msm_slim_sps_mem_alloc(
		struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
{
	dma_addr_t phys;

	mem->size = len;
	mem->min_size = 0;
	mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);

	if (!mem->base) {
		dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
		return -ENOMEM;
	}

	mem->phys_base = phys;
	memset(mem->base, 0x00, mem->size);
	return 0;
}
545
546static void
547msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
548{
549 dma_free_coherent(dev->dev, mem->size, mem->base, mem->phys_base);
550 mem->size = 0;
551 mem->base = NULL;
552 mem->phys_base = 0;
553}
554
/*
 * Program hardware defaults for port @pn (watermark/align/pack + enable,
 * block size, transaction size) and set its bit in this EE's port
 * interrupt-enable register. Port registers have a 32-byte stride; EE
 * registers a 16-byte stride.
 */
static void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pn)
{
	u32 set_cfg = DEF_WATERMARK | DEF_ALIGN | DEF_PACK | ENABLE_PORT;
	/* read-modify-write: preserve interrupt enables of other ports */
	u32 int_port = readl_relaxed(dev->base + PGD_PORT_INT_EN_EEn +
					(dev->ee * 16));
	writel_relaxed(set_cfg, dev->base + PGD_PORT_CFGn + (pn * 32));
	writel_relaxed(DEF_BLKSZ, dev->base + PGD_PORT_BLKn + (pn * 32));
	writel_relaxed(DEF_TRANSZ, dev->base + PGD_PORT_TRANn + (pn * 32));
	writel_relaxed((int_port | 1 << pn) , dev->base + PGD_PORT_INT_EN_EEn +
			(dev->ee * 16));
	/* Make sure that port registers are updated before returning */
	mb();
}
568
/*
 * Connect the SPS pipe backing pipe index @pn (hw port pn + dev->pipe_b)
 * to the BAM, with direction chosen from the port's flow, then enable
 * the hardware port. Returns 0 or an sps error code.
 */
static int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
	struct msm_slim_endp *endpoint = &dev->pipes[pn];
	struct sps_connect *cfg = &endpoint->config;
	u32 stat;
	int ret = sps_get_config(dev->pipes[pn].sps, cfg);
	if (ret) {
		dev_err(dev->dev, "sps pipe-port get config error%x\n", ret);
		return ret;
	}
	cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* already connected: only refresh the event/enable options */
	if (dev->pipes[pn].connected) {
		ret = sps_set_config(dev->pipes[pn].sps, cfg);
		if (ret) {
			dev_err(dev->dev, "sps pipe-port set config erro:%x\n",
						ret);
			return ret;
		}
	}

	/* the BAM pipe number is reported in bits [11:4] of port status */
	stat = readl_relaxed(dev->base + PGD_PORT_STATn +
				(32 * (pn + dev->pipe_b)));
	if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
		/* port sources data onto the bus: memory -> BAM */
		cfg->destination = dev->bam.hdl;
		cfg->source = SPS_DEV_HANDLE_MEM;
		cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->src_pipe_index = 0;
		dev_dbg(dev->dev, "flow src:pipe num:%d",
					cfg->dest_pipe_index);
		cfg->mode = SPS_MODE_DEST;
	} else {
		/* port sinks data from the bus: BAM -> memory */
		cfg->source = dev->bam.hdl;
		cfg->destination = SPS_DEV_HANDLE_MEM;
		cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->dest_pipe_index = 0;
		dev_dbg(dev->dev, "flow dest:pipe num:%d",
					cfg->src_pipe_index);
		cfg->mode = SPS_MODE_SRC;
	}
	/* Space for descriptor FIFOs */
	cfg->desc.size = MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec);
	cfg->config = SPS_CONFIG_DEFAULT;
	ret = sps_connect(dev->pipes[pn].sps, cfg);
	if (!ret) {
		dev->pipes[pn].connected = true;
		msm_hw_set_port(dev, pn + dev->pipe_b);
	}
	return ret;
}
620
/*
 * Return the buffer used to assemble an outgoing message. @len is
 * currently unused: transactions are fully serialized under tx_lock, so
 * one static buffer suffices.
 */
static u32 *msm_get_msg_buf(struct slim_controller *ctrl, int len)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	/*
	 * Currently we block a transaction until the current one completes.
	 * In case we need multiple transactions, use message Q
	 */
	return dev->tx_buf;
}
630
631static int msm_send_msg_buf(struct slim_controller *ctrl, u32 *buf, u8 len)
632{
633 int i;
634 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
635 for (i = 0; i < (len + 3) >> 2; i++) {
636 dev_dbg(dev->dev, "TX data:0x%x\n", buf[i]);
637 writel_relaxed(buf[i], dev->base + MGR_TX_MSG + (i * 4));
638 }
639 /* Guarantee that message is sent before returning */
640 mb();
641 return 0;
642}
643
/*
 * Controller callback: assemble and send one message transaction, then
 * wait (up to 1s) for the TX-done/NACK interrupt. Serialized by
 * dev->tx_lock. CONNECT_SOURCE/CONNECT_SINK/DISCONNECT_PORT addressed to
 * la 0xFF are redirected to the manager's PGD device and trigger the
 * local SPS pipe connect/disconnect.
 * Returns 0 on ACK, -EIO on NACK, -ETIMEDOUT on no interrupt, -EBUSY
 * when suspended, -EPROTONOSUPPORT for enumeration-address destinations.
 */
static int msm_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	u32 *pbuf;
	u8 *puc;
	int timeout;
	u8 la = txn->la;
	mutex_lock(&dev->tx_lock);
	/* a pending reconfiguration must finish before starting another */
	if (txn->mt == SLIM_MSG_MT_CORE &&
		txn->mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
		dev->reconf_busy) {
		wait_for_completion(&dev->reconf);
		dev->reconf_busy = false;
	}
	if (dev->suspended) {
		dev_err(dev->dev, "No transaction in suspended state");
		mutex_unlock(&dev->tx_lock);
		return -EBUSY;
	}
	txn->rl--;
	pbuf = msm_get_msg_buf(ctrl, txn->rl);
	dev->wr_comp = NULL;
	dev->err = 0;

	if (txn->dt == SLIM_MSG_DEST_ENUMADDR) {
		mutex_unlock(&dev->tx_lock);
		return -EPROTONOSUPPORT;
	}
	/* port plumbing messages for la 0xFF go to the manager's PGD */
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(txn->mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		txn->mc == SLIM_MSG_MC_CONNECT_SINK ||
		txn->mc == SLIM_MSG_MC_DISCONNECT_PORT))
		la = dev->pgdla;
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc,
				0, la);
	else
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc,
				1, la);
	/* payload starts after the 3-byte (logical) or 2-byte header */
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		puc = ((u8 *)pbuf) + 3;
	else
		puc = ((u8 *)pbuf) + 2;
	if (txn->rbuf)
		*(puc++) = txn->tid;
	/* element-access messages carry a 2-byte element code */
	if ((txn->mt == SLIM_MSG_MT_CORE) &&
		((txn->mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
		txn->mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
		(txn->mc >= SLIM_MSG_MC_REQUEST_VALUE &&
		txn->mc <= SLIM_MSG_MC_CHANGE_VALUE))) {
		*(puc++) = (txn->ec & 0xFF);
		*(puc++) = (txn->ec >> 8)&0xFF;
	}
	if (txn->wbuf)
		memcpy(puc, txn->wbuf, txn->len);
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(txn->mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		txn->mc == SLIM_MSG_MC_CONNECT_SINK ||
		txn->mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
		if (txn->mc != SLIM_MSG_MC_DISCONNECT_PORT)
			dev->err = msm_slim_connect_pipe_port(dev, *puc);
		else {
			struct msm_slim_endp *endpoint = &dev->pipes[*puc];
			struct sps_register_event sps_event;
			memset(&sps_event, 0, sizeof(sps_event));
			sps_register_event(endpoint->sps, &sps_event);
			sps_disconnect(endpoint->sps);
			/*
			 * Remove channel disconnects master-side ports from
			 * channel. No need to send that again on the bus
			 */
			dev->pipes[*puc].connected = false;
			mutex_unlock(&dev->tx_lock);
			return 0;
		}
		if (dev->err) {
			dev_err(dev->dev, "pipe-port connect err:%d", dev->err);
			mutex_unlock(&dev->tx_lock);
			return dev->err;
		}
		/* translate pipe index to hardware port number on the wire */
		*(puc) = *(puc) + dev->pipe_b;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		txn->mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION)
		dev->reconf_busy = true;
	dev->wr_comp = &done;
	msm_send_msg_buf(ctrl, pbuf, txn->rl);
	timeout = wait_for_completion_timeout(&done, HZ);
	if (!timeout)
		dev_err(dev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
					txn->mt);
	mutex_unlock(&dev->tx_lock);
	return timeout ? dev->err : -ETIMEDOUT;
}
739
/*
 * Controller callback: assign logical address @laddr to the device with
 * enumeration address @ea by sending a 9-byte ASSIGN_LOGICAL_ADDRESS
 * message and waiting up to 1s for the TX-done interrupt.
 * Returns 0 on ACK, -EIO on NACK, -ETIMEDOUT on timeout.
 */
static int msm_set_laddr(struct slim_controller *ctrl, const u8 *ea,
				u8 elen, u8 laddr)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	DECLARE_COMPLETION_ONSTACK(done);
	int timeout;
	u32 *buf;
	mutex_lock(&dev->tx_lock);
	buf = msm_get_msg_buf(ctrl, 9);
	/* header word: MFG id LSB/MSB form the low 16 address bits */
	buf[0] = SLIM_MSG_ASM_FIRST_WORD(9, SLIM_MSG_MT_CORE,
					SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
					SLIM_MSG_DEST_LOGICALADDR,
					ea[5] | ea[4] << 8);
	/* remaining 4 bytes of the enumeration address */
	buf[1] = ea[3] | (ea[2] << 8) | (ea[1] << 16) | (ea[0] << 24);
	buf[2] = laddr;

	dev->wr_comp = &done;
	msm_send_msg_buf(ctrl, buf, 9);
	timeout = wait_for_completion_timeout(&done, HZ);
	mutex_unlock(&dev->tx_lock);
	return timeout ? dev->err : -ETIMEDOUT;
}
762
/*
 * Controller callback: bring the bus out of clock pause. Re-enables the
 * root clock, toggles the framer wakeup register, then waits ~5ms (20
 * superframes) for slaves to resynchronize.
 */
static int msm_clk_pause_wakeup(struct slim_controller *ctrl)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	clk_enable(dev->rclk);
	writel_relaxed(1, dev->base + FRM_WAKEUP);
	/* Make sure framer wakeup write goes through before exiting function */
	mb();
	/*
	 * Workaround: Currently, slave is reporting lost-sync messages
	 * after slimbus comes out of clock pause.
	 * Transaction with slave fail before slave reports that message
	 * Give some time for that report to come
	 * Slimbus wakes up in clock gear 10 at 24.576MHz. With each superframe
	 * being 250 usecs, we wait for 20 superframes here to ensure
	 * we get the message
	 */
	usleep_range(5000, 5000);
	return 0;
}
782
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700783static int msm_config_port(struct slim_controller *ctrl, u8 pn)
784{
785 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
786 struct msm_slim_endp *endpoint;
787 int ret = 0;
788 if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
789 ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
790 return -EPROTONOSUPPORT;
791 if (pn >= (MSM_SLIM_NPORTS - dev->pipe_b))
792 return -ENODEV;
793
794 endpoint = &dev->pipes[pn];
795 ret = msm_slim_init_endpoint(dev, endpoint);
796 dev_dbg(dev->dev, "sps register bam error code:%x\n", ret);
797 return ret;
798}
799
/*
 * Controller callback: report status of the last transfer on port @pn.
 * On success *done_buf/*done_len describe the completed iovec; both are
 * zeroed first so callers see empty results when nothing completed.
 * Returns SLIM_P_DISCONNECT if the pipe is down, else SLIM_P_INPROGRESS.
 */
static enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
				u8 pn, u8 **done_buf, u32 *done_len)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
	struct sps_iovec sio;
	int ret;
	if (done_len)
		*done_len = 0;
	if (done_buf)
		*done_buf = NULL;
	if (!dev->pipes[pn].connected)
		return SLIM_P_DISCONNECT;
	ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
	if (!ret) {
		if (done_len)
			*done_len = sio.size;
		if (done_buf)
			*done_buf = (u8 *)sio.addr;
	}
	dev_dbg(dev->dev, "get iovec returned %d\n", ret);
	return SLIM_P_INPROGRESS;
}
822
823static int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf,
824 u32 len, struct completion *comp)
825{
826 struct sps_register_event sreg;
827 int ret;
828 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
Sagar Dhariae77961f2011-09-27 14:03:50 -0600829 if (pn >= 7)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700830 return -ENODEV;
831
832
833 ctrl->ports[pn].xcomp = comp;
834 sreg.options = (SPS_EVENT_DESC_DONE|SPS_EVENT_ERROR);
835 sreg.mode = SPS_TRIGGER_WAIT;
836 sreg.xfer_done = comp;
837 sreg.callback = NULL;
838 sreg.user = &ctrl->ports[pn];
839 ret = sps_register_event(dev->pipes[pn].sps, &sreg);
840 if (ret) {
841 dev_dbg(dev->dev, "sps register event error:%x\n", ret);
842 return ret;
843 }
844 ret = sps_transfer_one(dev->pipes[pn].sps, (u32)iobuf, len, NULL,
845 SPS_IOVEC_FLAG_INT);
846 dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);
847
848 return ret;
849}
850
/*
 * Translate a satellite channel message into core slimbus channel calls.
 * @mc selects channel control (SLIM_USR_MC_CHAN_CTRL) versus definition
 * (DEFINE_CHAN / DEF_ACT_CHAN); @buf/@len are the raw user message.
 * Returns the result of the underlying slim_* call.
 */
static int msm_sat_define_ch(struct msm_slim_sat *sat, u8 *buf, u8 len, u8 mc)
{
	struct msm_slim_ctrl *dev = sat->dev;
	enum slim_ch_control oper;
	int i;
	int ret = 0;
	if (mc == SLIM_USR_MC_CHAN_CTRL) {
		u16 chanh = sat->satch[buf[5]];
		oper = ((buf[3] & 0xC0) >> 6);
		/* part of grp. activating/removing 1 will take care of rest */
		ret = slim_control_ch(&sat->satcl, chanh, oper, false);
	} else {
		u16 chh[40];
		struct slim_ch prop;
		u32 exp;
		u8 coeff, cc;
		u8 prrate = buf[6];
		/* channel indices start at buf[8] */
		for (i = 8; i < len; i++)
			chh[i-8] = sat->satch[buf[i]];
		prop.dataf = (enum slim_ch_dataf)((buf[3] & 0xE0) >> 5);
		/*
		 * NOTE(review): mask 0xC0 shifted by 5 yields only even
		 * values (0/2/4/6); a 2-bit field would need >> 6 — confirm
		 * against the message format before "fixing".
		 */
		prop.auxf = (enum slim_ch_auxf)((buf[4] & 0xC0) >> 5);
		prop.baser = SLIM_RATE_4000HZ;
		if (prrate & 0x8)
			prop.baser = SLIM_RATE_11025HZ;
		else
			prop.baser = SLIM_RATE_4000HZ;
		prop.prot = (enum slim_ch_proto)(buf[5] & 0x0F);
		prop.sampleszbits = (buf[4] & 0x1F)*SLIM_CL_PER_SL;
		exp = (u32)((buf[5] & 0xF0) >> 4);
		coeff = (buf[4] & 0x20) >> 5;
		cc = (coeff ? 3 : 1);
		prop.ratem = cc * (1 << exp);
		/*
		 * i ends at len (or stays 8 when len <= 8); i > 9 means at
		 * least two channel entries, i.e. a grouped definition
		 */
		if (i > 9)
			ret = slim_define_ch(&sat->satcl, &prop, chh, len - 8,
					true, &sat->satch[buf[8]]);
		else
			ret = slim_define_ch(&sat->satcl, &prop,
					&sat->satch[buf[8]], 1, false,
					NULL);
		dev_dbg(dev->dev, "define sat grp returned:%d", ret);

		/* part of group so activating 1 will take care of rest */
		if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
			ret = slim_control_ch(&sat->satcl,
					sat->satch[buf[8]],
					SLIM_CH_ACTIVATE, false);
	}
	return ret;
}
900
/*
 * Process one message from the controller RX ring (process context,
 * fed by the ISR). Handles enumeration reports (assigning logical
 * addresses, remembering the PGD's address), routes value/information
 * replies to the core, and logs information reports.
 */
static void msm_slim_rxwq(struct msm_slim_ctrl *dev)
{
	u8 buf[40];
	u8 mc, mt, len;
	int i, ret;
	if ((msm_slim_rx_dequeue(dev, (u8 *)buf)) != -ENODATA) {
		len = buf[0] & 0x1F;
		mt = (buf[0] >> 5) & 0x7;
		mc = buf[1];
		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 laddr;
			u8 e_addr[6];
			/* enumeration address arrives LSB-first in buf[2..7] */
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			ret = slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
			/* Is this Qualcomm ported generic device? */
			if (!ret && e_addr[5] == QC_MFGID_LSB &&
				e_addr[4] == QC_MFGID_MSB &&
				e_addr[1] == QC_DEVID_PGD &&
				e_addr[2] != QC_CHIPID_SL)
				dev->pgdla = laddr;

		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			u8 tid = buf[3];
			dev_dbg(dev->dev, "tid:%d, len:%d\n", tid, len - 4);
			slim_msg_response(&dev->ctrl, &buf[4], tid,
						len - 4);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
					l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
		} else {
			dev_err(dev->dev, "unexpected message:mc:%x, mt:%x",
					mc, mt);
			for (i = 0; i < len; i++)
				dev_err(dev->dev, "error msg: %x", buf[i]);

		}
	} else
		dev_err(dev->dev, "rxwq called and no dequeue");
}
950
/*
 * Workqueue handler: drain the satellite message ring and act on each
 * message (enumeration/capability exchange, channel define/control,
 * bandwidth requests, port connect/disconnect). Most requests are
 * answered with SLIM_USR_MC_GENERIC_ACK carrying the request's tid and
 * MSM_SAT_SUCCSS (or 0 on failure).
 */
static void slim_sat_rxprocess(struct work_struct *work)
{
	struct msm_slim_sat *sat = container_of(work, struct msm_slim_sat, wd);
	struct msm_slim_ctrl *dev = sat->dev;
	u8 buf[40];

	while ((msm_sat_dequeue(sat, buf)) != -ENODATA) {
		struct slim_msg_txn txn;
		int i;
		u8 len, mc, mt;
		u32 bw_sl;
		int ret = 0;
		bool gen_ack = false;
		u8 tid;
		u8 wbuf[8];
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		txn.dt = SLIM_MSG_DEST_LOGICALADDR;
		txn.ec = 0;
		txn.rbuf = NULL;
		txn.la = sat->satcl.laddr;
		/* satellite handling */
		len = buf[0] & 0x1F;
		mc = buf[1];
		mt = (buf[0] >> 5) & 0x7;

		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 laddr;
			u8 e_addr[6];
			/* enumeration address arrives LSB-first in buf[2..7] */
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
			sat->satcl.laddr = laddr;
		}
		switch (mc) {
		case SLIM_MSG_MC_REPORT_PRESENT:
			/* send a Manager capability msg */
			if (sat->sent_capability)
				continue;
			ret = slim_add_device(&dev->ctrl, &sat->satcl);
			if (ret) {
				dev_err(dev->dev,
					"Satellite-init failed");
				continue;
			}
			/* Satellite owns first 21 channels */
			/* NOTE(review): kzalloc result is not NULL-checked */
			sat->satch = kzalloc(21 * sizeof(u16), GFP_KERNEL);
			sat->nsatch = 20;
			/* alloc all sat chans */
			for (i = 0; i < 21; i++)
				slim_alloc_ch(&sat->satcl, &sat->satch[i]);
			txn.mc = SLIM_USR_MC_MASTER_CAPABILITY;
			txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
			txn.la = sat->satcl.laddr;
			txn.rl = 8;
			wbuf[0] = SAT_MAGIC_LSB;
			wbuf[1] = SAT_MAGIC_MSB;
			wbuf[2] = SAT_MSG_VER;
			wbuf[3] = SAT_MSG_PROT;
			txn.wbuf = wbuf;
			txn.len = 4;
			sat->sent_capability = true;
			msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_ADDR_QUERY:
			/* reply with the logical address for the queried EA */
			memcpy(&wbuf[1], &buf[4], 6);
			ret = slim_get_logical_addr(&sat->satcl,
					&wbuf[1], 6, &wbuf[7]);
			if (ret)
				memset(&wbuf[1], 0, 6);
			wbuf[0] = buf[3];
			txn.mc = SLIM_USR_MC_ADDR_REPLY;
			txn.rl = 12;
			txn.len = 8;
			txn.wbuf = wbuf;
			msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DEFINE_CHAN:
		case SLIM_USR_MC_DEF_ACT_CHAN:
		case SLIM_USR_MC_CHAN_CTRL:
			/* tid's position in the message differs per opcode */
			if (mc != SLIM_USR_MC_CHAN_CTRL)
				tid = buf[7];
			else
				tid = buf[4];
			gen_ack = true;
			ret = msm_sat_define_ch(sat, buf, len, mc);
			if (ret) {
				dev_err(dev->dev,
					"SAT define_ch returned:%d",
					ret);
			}
			break;
		case SLIM_USR_MC_RECONFIG_NOW:
			tid = buf[3];
			gen_ack = true;
			ret = slim_reconfigure_now(&sat->satcl);
			break;
		case SLIM_USR_MC_REQ_BW:
			/* what we get is in SLOTS */
			bw_sl = (u32)buf[4] << 3 |
						((buf[3] & 0xE0) >> 5);
			sat->satcl.pending_msgsl = bw_sl;
			tid = buf[5];
			gen_ack = true;
			break;
		case SLIM_USR_MC_CONNECT_SRC:
		case SLIM_USR_MC_CONNECT_SINK:
			if (mc == SLIM_USR_MC_CONNECT_SRC)
				txn.mc = SLIM_MSG_MC_CONNECT_SOURCE;
			else
				txn.mc = SLIM_MSG_MC_CONNECT_SINK;
			wbuf[0] = buf[4] & 0x1F;	/* port number */
			wbuf[1] = buf[5];		/* channel number */
			tid = buf[6];
			txn.la = buf[3];
			txn.mt = SLIM_MSG_MT_CORE;
			txn.rl = 6;
			txn.len = 2;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DISCONNECT_PORT:
			txn.mc = SLIM_MSG_MC_DISCONNECT_PORT;
			wbuf[0] = buf[4] & 0x1F;	/* port number */
			tid = buf[5];
			txn.la = buf[3];
			txn.rl = 5;
			txn.len = 1;
			txn.mt = SLIM_MSG_MT_CORE;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			/*
			 * NOTE(review): missing break — falls through into
			 * default, which only breaks, so behavior is
			 * unaffected; a break here would be cleaner.
			 */
		default:
			break;
		}
		if (!gen_ack)
			continue;
		/* common generic-ACK reply: tid + success/failure code */
		wbuf[0] = tid;
		if (!ret)
			wbuf[1] = MSM_SAT_SUCCSS;
		else
			wbuf[1] = 0;
		txn.mc = SLIM_USR_MC_GENERIC_ACK;
		txn.la = sat->satcl.laddr;
		txn.rl = 6;
		txn.len = 2;
		txn.wbuf = wbuf;
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		msm_xfer_msg(&dev->ctrl, &txn);
	}
}
1104
1105static void
1106msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
1107{
1108 u32 *buf = ev->data.transfer.user;
1109 struct sps_iovec *iovec = &ev->data.transfer.iovec;
1110
1111 /*
1112 * Note the virtual address needs to be offset by the same index
1113 * as the physical address or just pass in the actual virtual address
1114 * if the sps_mem_buffer is not needed. Note that if completion is
1115 * used, the virtual address won't be available and will need to be
1116 * calculated based on the offset of the physical address
1117 */
1118 if (ev->event_id == SPS_EVENT_DESC_DONE) {
1119
1120 pr_debug("buf = 0x%p, data = 0x%x\n", buf, *buf);
1121
1122 pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
1123 iovec->addr, iovec->size, iovec->flags);
1124
1125 } else {
1126 dev_err(dev->dev, "%s: unknown event %d\n",
1127 __func__, ev->event_id);
1128 }
1129}
1130
1131static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
1132{
1133 struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;
1134 msm_slim_rx_msgq_event(dev, notify);
1135}
1136
1137/* Queue up Rx message buffer */
1138static inline int
1139msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
1140{
1141 int ret;
1142 u32 flags = SPS_IOVEC_FLAG_INT;
1143 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1144 struct sps_mem_buffer *mem = &endpoint->buf;
1145 struct sps_pipe *pipe = endpoint->sps;
1146
1147 /* Rx message queue buffers are 4 bytes in length */
1148 u8 *virt_addr = mem->base + (4 * ix);
1149 u32 phys_addr = mem->phys_base + (4 * ix);
1150
1151 pr_debug("index:%d, phys:0x%x, virt:0x%p\n", ix, phys_addr, virt_addr);
1152
1153 ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, flags);
1154 if (ret)
1155 dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);
1156
1157 return ret;
1158}
1159
/*
 * Fetch one completed 4-byte Rx word from the message-queue pipe into
 * data[offset], then immediately re-post the drained buffer slot so the
 * pipe never runs out of descriptors.
 * Returns 0 on success or the sps_get_iovec() error code.
 */
static inline int
msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
{
	struct msm_slim_endp *endpoint = &dev->rx_msgq;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_pipe *pipe = endpoint->sps;
	struct sps_iovec iovec;
	int index;
	int ret;

	ret = sps_get_iovec(pipe, &iovec);
	if (ret) {
		dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
		goto err_exit;
	}

	pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
		iovec.addr, iovec.size, iovec.flags);
	/* Descriptor must point inside our own DMA buffer */
	BUG_ON(iovec.addr < mem->phys_base);
	BUG_ON(iovec.addr >= mem->phys_base + mem->size);

	/* Calculate buffer index (buffers are 4 bytes each) */
	index = (iovec.addr - mem->phys_base) / 4;
	*(data + offset) = *((u32 *)mem->base + index);

	pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index, *data);

	/* Add buffer back to the queue */
	(void)msm_slim_post_rx_msgq(dev, index);

err_exit:
	return ret;
}
1193
/*
 * Rx drain thread. Woken through dev->rx_msgq_notify for every received
 * 4-byte word (or once per message when hardware message queues are
 * disabled). Words are accumulated in buffer[] until msg_len bytes have
 * arrived, then the message is dispatched either to the satellite
 * workqueue or to the generic Rx path.
 */
static int msm_slim_rx_msgq_thread(void *data)
{
	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
	struct completion *notify = &dev->rx_msgq_notify;
	struct msm_slim_sat *sat = NULL;
	u32 mc = 0;		/* message code from the current header */
	u32 mt = 0;		/* message type from the current header */
	u32 buffer[10];		/* reassembly buffer; 40 bytes max per spec */
	int index = 0;		/* next word slot in buffer[] */
	u8 msg_len = 0;		/* total message length (bytes) from header */
	int ret;

	dev_dbg(dev->dev, "rx thread started");

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = wait_for_completion_interruptible(notify);

		if (ret)
			dev_err(dev->dev, "rx thread wait error:%d", ret);

		/* 1 irq notification per message */
		if (!dev->use_rx_msgqs) {
			msm_slim_rxwq(dev);
			continue;
		}

		ret = msm_slim_rx_msgq_get(dev, buffer, index);
		if (ret) {
			dev_err(dev->dev, "rx_msgq_get() failed 0x%x\n", ret);
			continue;
		}

		pr_debug("message[%d] = 0x%x\n", index, *buffer);

		/* Decide if we use generic RX or satellite RX */
		if (index++ == 0) {
			/* First word: decode header fields (len, MT, MC) */
			msg_len = *buffer & 0x1F;
			pr_debug("Start of new message, len = %d\n", msg_len);
			mt = (buffer[0] >> 5) & 0x7;
			mc = (buffer[0] >> 8) & 0xff;
			dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
			/* User-referred messages are owned by the satellite */
			if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER)
				sat = dev->satd;

		} else if ((index * 4) >= msg_len) {
			/*
			 * Final word collected: dispatch the whole message.
			 * NOTE(review): a message with msg_len <= 4 would take
			 * the header branch only and never dispatch here —
			 * presumably the protocol guarantees len > 4; confirm.
			 */
			index = 0;
			if (mt == SLIM_MSG_MT_CORE &&
				mc == SLIM_MSG_MC_REPORT_PRESENT) {
				u8 e_addr[6];
				msm_get_eaddr(e_addr, buffer);
				if (msm_is_sat_dev(e_addr))
					sat = dev->satd;
			}
			if (sat) {
				msm_sat_enqueue(sat, buffer, msg_len);
				queue_work(sat->wq, &sat->wd);
				sat = NULL;
			} else {
				msm_slim_rx_enqueue(dev, buffer, msg_len);
				msm_slim_rxwq(dev);
			}
		}
	}

	return 0;
}
1262
1263static int __devinit msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev)
1264{
1265 int i, ret;
1266 u32 pipe_offset;
1267 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1268 struct sps_connect *config = &endpoint->config;
1269 struct sps_mem_buffer *descr = &config->desc;
1270 struct sps_mem_buffer *mem = &endpoint->buf;
1271 struct completion *notify = &dev->rx_msgq_notify;
1272
1273 struct sps_register_event sps_error_event; /* SPS_ERROR */
1274 struct sps_register_event sps_descr_event; /* DESCR_DONE */
1275
1276 /* Allocate the endpoint */
1277 ret = msm_slim_init_endpoint(dev, endpoint);
1278 if (ret) {
1279 dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
1280 goto sps_init_endpoint_failed;
1281 }
1282
1283 /* Get the pipe indices for the message queues */
1284 pipe_offset = (readl_relaxed(dev->base + MGR_STATUS) & 0xfc) >> 2;
1285 dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);
1286
1287 config->mode = SPS_MODE_SRC;
1288 config->source = dev->bam.hdl;
1289 config->destination = SPS_DEV_HANDLE_MEM;
1290 config->src_pipe_index = pipe_offset;
1291 config->options = SPS_O_DESC_DONE | SPS_O_ERROR |
1292 SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
1293
1294 /* Allocate memory for the FIFO descriptors */
1295 ret = msm_slim_sps_mem_alloc(dev, descr,
1296 MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
1297 if (ret) {
1298 dev_err(dev->dev, "unable to allocate SPS descriptors\n");
1299 goto alloc_descr_failed;
1300 }
1301
1302 ret = sps_connect(endpoint->sps, config);
1303 if (ret) {
1304 dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
1305 goto sps_connect_failed;
1306 }
1307
1308 /* Register completion for DESC_DONE */
1309 init_completion(notify);
1310 memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));
1311
1312 sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
1313 sps_descr_event.options = SPS_O_DESC_DONE;
1314 sps_descr_event.user = (void *)dev;
1315 sps_descr_event.xfer_done = notify;
1316
1317 ret = sps_register_event(endpoint->sps, &sps_descr_event);
1318 if (ret) {
1319 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1320 goto sps_reg_event_failed;
1321 }
1322
1323 /* Register callback for errors */
1324 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1325 sps_error_event.mode = SPS_TRIGGER_CALLBACK;
1326 sps_error_event.options = SPS_O_ERROR;
1327 sps_error_event.user = (void *)dev;
1328 sps_error_event.callback = msm_slim_rx_msgq_cb;
1329
1330 ret = sps_register_event(endpoint->sps, &sps_error_event);
1331 if (ret) {
1332 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1333 goto sps_reg_event_failed;
1334 }
1335
1336 /* Allocate memory for the message buffer(s), N descrs, 4-byte mesg */
1337 ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
1338 if (ret) {
1339 dev_err(dev->dev, "dma_alloc_coherent failed\n");
1340 goto alloc_buffer_failed;
1341 }
1342
1343 /*
1344 * Call transfer_one for each 4-byte buffer
1345 * Use (buf->size/4) - 1 for the number of buffer to post
1346 */
1347
1348 /* Setup the transfer */
1349 for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
1350 ret = msm_slim_post_rx_msgq(dev, i);
1351 if (ret) {
1352 dev_err(dev->dev, "post_rx_msgq() failed 0x%x\n", ret);
1353 goto sps_transfer_failed;
1354 }
1355 }
1356
1357 /* Fire up the Rx message queue thread */
1358 dev->rx_msgq_thread = kthread_run(msm_slim_rx_msgq_thread, dev,
1359 MSM_SLIM_NAME "_rx_msgq_thread");
1360 if (!dev->rx_msgq_thread) {
1361 dev_err(dev->dev, "Failed to start Rx message queue thread\n");
1362 ret = -EIO;
1363 } else
1364 return 0;
1365
1366sps_transfer_failed:
1367 msm_slim_sps_mem_free(dev, mem);
1368alloc_buffer_failed:
1369 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1370 sps_register_event(endpoint->sps, &sps_error_event);
1371sps_reg_event_failed:
1372 sps_disconnect(endpoint->sps);
1373sps_connect_failed:
1374 msm_slim_sps_mem_free(dev, descr);
1375alloc_descr_failed:
1376 msm_slim_free_endpoint(endpoint);
1377sps_init_endpoint_failed:
1378 return ret;
1379}
1380
/* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */
static int __devinit
msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem)
{
	int i, ret;
	u32 bam_handle;
	struct sps_bam_props bam_props = {0};

	/* Pipe ownership per execution environment (VMID -> pipe mask) */
	static struct sps_bam_sec_config_props sec_props = {
		.ees = {
			[0] = {	/* LPASS */
				.vmid = 0,
				.pipe_mask = 0xFFFF98,
			},
			[1] = {	/* Krait Apps */
				.vmid = 1,
				.pipe_mask = 0x3F000007,
			},
			[2] = {	/* Modem */
				.vmid = 2,
				.pipe_mask = 0x00000060,
			},
		},
	};

	bam_props.ee = dev->ee;
	bam_props.virt_addr = dev->bam.base;
	bam_props.phys_addr = bam_mem->start;
	bam_props.irq = dev->bam.irq;
	bam_props.manage = SPS_BAM_MGR_LOCAL;
	bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;

	bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
	bam_props.p_sec_config_props = &sec_props;

	bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* First 7 bits are for message Qs */
	for (i = 7; i < 32; i++) {
		/* Check what pipes are owned by Apps. */
		if ((sec_props.ees[dev->ee].pipe_mask >> i) & 0x1)
			break;
	}
	/*
	 * NOTE(review): if no Apps-owned pipe >= 7 exists, i ends at 32 and
	 * pipe_b becomes 25 — presumably the masks above guarantee a match;
	 * confirm against the board configuration.
	 */
	dev->pipe_b = i - 7;

	/* Register the BAM device with the SPS driver */
	ret = sps_register_bam_device(&bam_props, &bam_handle);
	if (ret) {
		dev_err(dev->dev, "sps_register_bam_device failed 0x%x\n", ret);
		return ret;
	}
	dev->bam.hdl = bam_handle;
	dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%x\n", bam_handle);

	ret = msm_slim_init_rx_msgq(dev);
	if (ret) {
		dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
		goto rx_msgq_init_failed;
	}

	return 0;
rx_msgq_init_failed:
	sps_deregister_bam_device(bam_handle);
	dev->bam.hdl = 0L;
	return ret;
}
1448
/*
 * Tear down SPS/BAM resources in reverse order of msm_slim_sps_init().
 * Registering a zeroed sps_register_event unhooks the previously
 * installed callbacks before the pipe is disconnected.
 */
static void msm_slim_sps_exit(struct msm_slim_ctrl *dev)
{
	if (dev->use_rx_msgqs) {
		struct msm_slim_endp *endpoint = &dev->rx_msgq;
		struct sps_connect *config = &endpoint->config;
		struct sps_mem_buffer *descr = &config->desc;
		struct sps_mem_buffer *mem = &endpoint->buf;
		struct sps_register_event sps_event;
		memset(&sps_event, 0x00, sizeof(sps_event));
		msm_slim_sps_mem_free(dev, mem);
		sps_register_event(endpoint->sps, &sps_event);
		sps_disconnect(endpoint->sps);
		msm_slim_sps_mem_free(dev, descr);
		msm_slim_free_endpoint(endpoint);
	}
	sps_deregister_bam_device(dev->bam.hdl);
}
1466
Sagar Dhariacc969452011-09-19 10:34:30 -06001467static void msm_slim_prg_slew(struct platform_device *pdev,
1468 struct msm_slim_ctrl *dev)
1469{
1470 struct resource *slew_io;
1471 void __iomem *slew_reg;
1472 /* SLEW RATE register for this slimbus */
1473 dev->slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1474 "slimbus_slew_reg");
1475 if (!dev->slew_mem) {
1476 dev_dbg(&pdev->dev, "no slimbus slew resource\n");
1477 return;
1478 }
1479 slew_io = request_mem_region(dev->slew_mem->start,
1480 resource_size(dev->slew_mem), pdev->name);
1481 if (!slew_io) {
1482 dev_dbg(&pdev->dev, "slimbus-slew mem claimed\n");
1483 dev->slew_mem = NULL;
1484 return;
1485 }
1486
1487 slew_reg = ioremap(dev->slew_mem->start, resource_size(dev->slew_mem));
1488 if (!slew_reg) {
1489 dev_dbg(dev->dev, "slew register mapping failed");
1490 release_mem_region(dev->slew_mem->start,
1491 resource_size(dev->slew_mem));
1492 dev->slew_mem = NULL;
1493 return;
1494 }
1495 writel_relaxed(1, slew_reg);
1496 /* Make sure slimbus-slew rate enabling goes through */
1497 wmb();
1498 iounmap(slew_reg);
1499}
1500
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001501static int __devinit msm_slim_probe(struct platform_device *pdev)
1502{
1503 struct msm_slim_ctrl *dev;
1504 int ret;
1505 struct resource *bam_mem, *bam_io;
1506 struct resource *slim_mem, *slim_io;
1507 struct resource *irq, *bam_irq;
1508 slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1509 "slimbus_physical");
1510 if (!slim_mem) {
1511 dev_err(&pdev->dev, "no slimbus physical memory resource\n");
1512 return -ENODEV;
1513 }
1514 slim_io = request_mem_region(slim_mem->start, resource_size(slim_mem),
1515 pdev->name);
1516 if (!slim_io) {
1517 dev_err(&pdev->dev, "slimbus memory already claimed\n");
1518 return -EBUSY;
1519 }
1520
1521 bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1522 "slimbus_bam_physical");
1523 if (!bam_mem) {
1524 dev_err(&pdev->dev, "no slimbus BAM memory resource\n");
1525 ret = -ENODEV;
1526 goto err_get_res_bam_failed;
1527 }
1528 bam_io = request_mem_region(bam_mem->start, resource_size(bam_mem),
1529 pdev->name);
1530 if (!bam_io) {
1531 release_mem_region(slim_mem->start, resource_size(slim_mem));
1532 dev_err(&pdev->dev, "slimbus BAM memory already claimed\n");
1533 ret = -EBUSY;
1534 goto err_get_res_bam_failed;
1535 }
1536 irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1537 "slimbus_irq");
1538 if (!irq) {
1539 dev_err(&pdev->dev, "no slimbus IRQ resource\n");
1540 ret = -ENODEV;
1541 goto err_get_res_failed;
1542 }
1543 bam_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1544 "slimbus_bam_irq");
1545 if (!bam_irq) {
1546 dev_err(&pdev->dev, "no slimbus BAM IRQ resource\n");
1547 ret = -ENODEV;
1548 goto err_get_res_failed;
1549 }
1550
1551 dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL);
1552 if (!dev) {
1553 dev_err(&pdev->dev, "no memory for MSM slimbus controller\n");
1554 ret = -ENOMEM;
1555 goto err_get_res_failed;
1556 }
1557 dev->dev = &pdev->dev;
1558 platform_set_drvdata(pdev, dev);
1559 slim_set_ctrldata(&dev->ctrl, dev);
1560 dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
1561 if (!dev->base) {
1562 dev_err(&pdev->dev, "IOremap failed\n");
1563 ret = -ENOMEM;
1564 goto err_ioremap_failed;
1565 }
1566 dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem));
1567 if (!dev->bam.base) {
1568 dev_err(&pdev->dev, "BAM IOremap failed\n");
1569 ret = -ENOMEM;
1570 goto err_ioremap_bam_failed;
1571 }
1572 dev->ctrl.nr = pdev->id;
1573 dev->ctrl.nchans = MSM_SLIM_NCHANS;
1574 dev->ctrl.nports = MSM_SLIM_NPORTS;
1575 dev->ctrl.set_laddr = msm_set_laddr;
1576 dev->ctrl.xfer_msg = msm_xfer_msg;
Sagar Dharia144e5e02011-08-08 17:30:11 -06001577 dev->ctrl.wakeup = msm_clk_pause_wakeup;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001578 dev->ctrl.config_port = msm_config_port;
1579 dev->ctrl.port_xfer = msm_slim_port_xfer;
1580 dev->ctrl.port_xfer_status = msm_slim_port_xfer_status;
1581 /* Reserve some messaging BW for satellite-apps driver communication */
1582 dev->ctrl.sched.pending_msgsl = 30;
1583
1584 init_completion(&dev->reconf);
1585 mutex_init(&dev->tx_lock);
1586 spin_lock_init(&dev->rx_lock);
1587 dev->ee = 1;
1588 dev->use_rx_msgqs = 1;
1589 dev->irq = irq->start;
1590 dev->bam.irq = bam_irq->start;
1591
1592 ret = msm_slim_sps_init(dev, bam_mem);
1593 if (ret != 0) {
1594 dev_err(dev->dev, "error SPS init\n");
1595 goto err_sps_init_failed;
1596 }
1597
1598
1599 dev->rclk = clk_get(dev->dev, "audio_slimbus_clk");
Sagar Dhariacc969452011-09-19 10:34:30 -06001600 if (!dev->rclk) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001601 dev_err(dev->dev, "slimbus clock not found");
1602 goto err_clk_get_failed;
1603 }
1604 dev->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
1605 dev->framer.superfreq =
1606 dev->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
1607 dev->ctrl.a_framer = &dev->framer;
1608 dev->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
1609 ret = slim_add_numbered_controller(&dev->ctrl);
1610 if (ret) {
1611 dev_err(dev->dev, "error adding controller\n");
1612 goto err_ctrl_failed;
1613 }
1614
1615 ret = request_irq(dev->irq, msm_slim_interrupt, IRQF_TRIGGER_HIGH,
1616 "msm_slim_irq", dev);
1617 if (ret) {
1618 dev_err(&pdev->dev, "request IRQ failed\n");
1619 goto err_request_irq_failed;
1620 }
1621
1622 dev->satd = kzalloc(sizeof(struct msm_slim_sat), GFP_KERNEL);
1623 if (!dev->satd) {
1624 ret = -ENOMEM;
1625 goto err_sat_failed;
1626 }
Sagar Dhariacc969452011-09-19 10:34:30 -06001627
1628 msm_slim_prg_slew(pdev, dev);
1629 clk_set_rate(dev->rclk, SLIM_ROOT_FREQ);
1630 clk_enable(dev->rclk);
1631
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001632 dev->satd->dev = dev;
1633 dev->satd->satcl.name = "msm_sat_dev";
1634 spin_lock_init(&dev->satd->lock);
1635 INIT_WORK(&dev->satd->wd, slim_sat_rxprocess);
1636 dev->satd->wq = create_singlethread_workqueue("msm_slim_sat");
1637 /* Component register initialization */
1638 writel_relaxed(1, dev->base + COMP_CFG);
1639 writel_relaxed((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
1640 dev->base + COMP_TRUST_CFG);
1641
1642 /*
1643 * Manager register initialization
1644 * If RX msg Q is used, disable RX_MSG_RCVD interrupt
1645 */
1646 if (dev->use_rx_msgqs)
1647 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
1648 MGR_INT_MSG_BUF_CONTE | /* MGR_INT_RX_MSG_RCVD | */
1649 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
1650 else
1651 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
1652 MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
1653 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
1654 writel_relaxed(1, dev->base + MGR_CFG);
1655 /*
1656 * Framer registers are beyond 1K memory region after Manager and/or
1657 * component registers. Make sure those writes are ordered
1658 * before framer register writes
1659 */
1660 wmb();
1661
1662 /* Framer register initialization */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001663 writel_relaxed((0xA << REF_CLK_GEAR) | (0xA << CLK_GEAR) |
1664 (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
1665 dev->base + FRM_CFG);
1666 /*
1667 * Make sure that framer wake-up and enabling writes go through
1668 * before any other component is enabled. Framer is responsible for
1669 * clocking the bus and enabling framer first will ensure that other
1670 * devices can report presence when they are enabled
1671 */
1672 mb();
1673
1674 /* Enable RX msg Q */
1675 if (dev->use_rx_msgqs)
1676 writel_relaxed(MGR_CFG_ENABLE | MGR_CFG_RX_MSGQ_EN,
1677 dev->base + MGR_CFG);
1678 else
1679 writel_relaxed(MGR_CFG_ENABLE, dev->base + MGR_CFG);
1680 /*
1681 * Make sure that manager-enable is written through before interface
1682 * device is enabled
1683 */
1684 mb();
1685 writel_relaxed(1, dev->base + INTF_CFG);
1686 /*
1687 * Make sure that interface-enable is written through before enabling
1688 * ported generic device inside MSM manager
1689 */
1690 mb();
1691 writel_relaxed(1, dev->base + PGD_CFG);
1692 writel_relaxed(0x3F<<17, dev->base + (PGD_OWN_EEn + (4 * dev->ee)));
1693 /*
1694 * Make sure that ported generic device is enabled and port-EE settings
1695 * are written through before finally enabling the component
1696 */
1697 mb();
1698
1699 writel_relaxed(1, dev->base + COMP_CFG);
1700 /*
1701 * Make sure that all writes have gone through before exiting this
1702 * function
1703 */
1704 mb();
1705 dev_dbg(dev->dev, "MSM SB controller is up!\n");
1706 return 0;
1707
1708err_sat_failed:
1709 free_irq(dev->irq, dev);
1710err_request_irq_failed:
1711 slim_del_controller(&dev->ctrl);
1712err_ctrl_failed:
1713 clk_disable(dev->rclk);
1714 clk_put(dev->rclk);
1715err_clk_get_failed:
1716 msm_slim_sps_exit(dev);
1717err_sps_init_failed:
1718 iounmap(dev->bam.base);
1719err_ioremap_bam_failed:
1720 iounmap(dev->base);
1721err_ioremap_failed:
1722 kfree(dev);
1723err_get_res_failed:
1724 release_mem_region(bam_mem->start, resource_size(bam_mem));
1725err_get_res_bam_failed:
1726 release_mem_region(slim_mem->start, resource_size(slim_mem));
1727 return ret;
1728}
1729
/*
 * Undo msm_slim_probe(): unregister the satellite device, release IRQ,
 * controller, clock, SPS resources, mappings, and memory regions.
 */
static int __devexit msm_slim_remove(struct platform_device *pdev)
{
	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
	struct resource *bam_mem;
	struct resource *slim_mem;
	/* Cache slew region now: dev (and so dev->slew_mem) is freed below */
	struct resource *slew_mem = dev->slew_mem;
	struct msm_slim_sat *sat = dev->satd;
	slim_remove_device(&sat->satcl);
	kfree(sat->satch);
	destroy_workqueue(sat->wq);
	kfree(sat);
	free_irq(dev->irq, dev);
	slim_del_controller(&dev->ctrl);
	clk_disable(dev->rclk);
	clk_put(dev->rclk);
	msm_slim_sps_exit(dev);
	kthread_stop(dev->rx_msgq_thread);
	iounmap(dev->bam.base);
	iounmap(dev->base);
	kfree(dev);
	bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_bam_physical");
	if (bam_mem)
		release_mem_region(bam_mem->start, resource_size(bam_mem));
	if (slew_mem)
		release_mem_region(slew_mem->start, resource_size(slew_mem));
	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_physical");
	if (slim_mem)
		release_mem_region(slim_mem->start, resource_size(slim_mem));
	return 0;
}
1762
1763#ifdef CONFIG_PM
/*
 * System-suspend hook: request SlimBus clock pause, wait for any
 * in-flight reconfiguration to complete, then gate the clock and IRQ.
 * -EBUSY from clock pause (active channels, e.g. audio streaming) is
 * deliberately swallowed so the rest of the system can still suspend.
 */
static int msm_slim_suspend(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);
	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
	int ret = slim_ctrl_clk_pause(&dev->ctrl, false, SLIM_CLK_UNSPECIFIED);
	/* Make sure clock pause goes through */
	mutex_lock(&dev->tx_lock);
	if (!ret && dev->reconf_busy) {
		wait_for_completion(&dev->reconf);
		dev->reconf_busy = false;
	}
	mutex_unlock(&dev->tx_lock);
	if (!ret) {
		/* Clock pause succeeded: safe to gate clock and mask IRQ */
		clk_disable(dev->rclk);
		disable_irq(dev->irq);
		dev->suspended = 1;
	} else if (ret == -EBUSY) {
		/*
		 * If the clock pause failed due to active channels, there is
		 * a possibility that some audio stream is active during suspend
		 * We dont want to return suspend failure in that case so that
		 * display and relevant components can still go to suspend.
		 * If there is some other error, then it should be passed-on
		 * to system level suspend
		 */
		ret = 0;
	}
	return ret;
}
1793
1794static int msm_slim_resume(struct device *device)
1795{
1796 struct platform_device *pdev = to_platform_device(device);
1797 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
Sagar Dharia144e5e02011-08-08 17:30:11 -06001798 mutex_lock(&dev->tx_lock);
1799 if (dev->suspended) {
1800 dev->suspended = 0;
1801 mutex_unlock(&dev->tx_lock);
1802 enable_irq(dev->irq);
1803 return slim_ctrl_clk_pause(&dev->ctrl, true, 0);
1804 }
1805 mutex_unlock(&dev->tx_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001806 return 0;
1807}
1808#else
1809#define msm_slim_suspend NULL
1810#define msm_slim_resume NULL
1811#endif /* CONFIG_PM */
1812
1813#ifdef CONFIG_PM_RUNTIME
/* Runtime-PM idle: nothing to do; return 0 so runtime suspend may proceed */
static int msm_slim_runtime_idle(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: idle...\n");
	return 0;
}
1819
/* Runtime-PM suspend: currently a trace-only placeholder */
static int msm_slim_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: suspending...\n");
	return 0;
}
1825
/* Runtime-PM resume: currently a trace-only placeholder */
static int msm_slim_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: resuming...\n");
	return 0;
}
1831#else
1832#define msm_slim_runtime_idle NULL
1833#define msm_slim_runtime_suspend NULL
1834#define msm_slim_runtime_resume NULL
1835#endif
1836
/* System-sleep and runtime-PM callback table for the driver model */
static const struct dev_pm_ops msm_slim_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(
		msm_slim_suspend,
		msm_slim_resume
	)
	SET_RUNTIME_PM_OPS(
		msm_slim_runtime_suspend,
		msm_slim_runtime_resume,
		msm_slim_runtime_idle
	)
};
1848
/* Platform-driver binding: matches devices named MSM_SLIM_NAME */
static struct platform_driver msm_slim_driver = {
	.probe = msm_slim_probe,
	.remove = msm_slim_remove,
	.driver	= {
		.name = MSM_SLIM_NAME,
		.owner = THIS_MODULE,
		.pm = &msm_slim_dev_pm_ops,
	},
};
1858
/* Registered via subsys_initcall so the bus is up before client drivers */
static int msm_slim_init(void)
{
	return platform_driver_register(&msm_slim_driver);
}
1863subsys_initcall(msm_slim_init);
1864
/* Module unload: unregister the platform driver */
static void msm_slim_exit(void)
{
	platform_driver_unregister(&msm_slim_driver);
}
1869module_exit(msm_slim_exit);
1870
1871MODULE_LICENSE("GPL v2");
1872MODULE_VERSION("0.1");
1873MODULE_DESCRIPTION("MSM Slimbus controller");
1874MODULE_ALIAS("platform:msm-slim");