 1/*
2 * linux/drivers/video/omap2/dss/dsi.c
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#define DSS_SUBSYS_NAME "DSI"
21
22#include <linux/kernel.h>
23#include <linux/io.h>
24#include <linux/clk.h>
25#include <linux/device.h>
26#include <linux/err.h>
27#include <linux/interrupt.h>
28#include <linux/delay.h>
29#include <linux/mutex.h>
30#include <linux/seq_file.h>
31#include <linux/platform_device.h>
32#include <linux/regulator/consumer.h>
33#include <linux/kthread.h>
34#include <linux/wait.h>
35
36#include <plat/display.h>
37#include <plat/clock.h>
38
39#include "dss.h"
40
41/*#define VERBOSE_IRQ*/
42#define DSI_CATCH_MISSING_TE
43
44#define DSI_BASE 0x4804FC00
45
46struct dsi_reg { u16 idx; };
47
48#define DSI_REG(idx) ((const struct dsi_reg) { idx })
49
50#define DSI_SZ_REGS SZ_1K
51/* DSI Protocol Engine */
52
53#define DSI_REVISION DSI_REG(0x0000)
54#define DSI_SYSCONFIG DSI_REG(0x0010)
55#define DSI_SYSSTATUS DSI_REG(0x0014)
56#define DSI_IRQSTATUS DSI_REG(0x0018)
57#define DSI_IRQENABLE DSI_REG(0x001C)
58#define DSI_CTRL DSI_REG(0x0040)
59#define DSI_COMPLEXIO_CFG1 DSI_REG(0x0048)
60#define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(0x004C)
61#define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(0x0050)
62#define DSI_CLK_CTRL DSI_REG(0x0054)
63#define DSI_TIMING1 DSI_REG(0x0058)
64#define DSI_TIMING2 DSI_REG(0x005C)
65#define DSI_VM_TIMING1 DSI_REG(0x0060)
66#define DSI_VM_TIMING2 DSI_REG(0x0064)
67#define DSI_VM_TIMING3 DSI_REG(0x0068)
68#define DSI_CLK_TIMING DSI_REG(0x006C)
69#define DSI_TX_FIFO_VC_SIZE DSI_REG(0x0070)
70#define DSI_RX_FIFO_VC_SIZE DSI_REG(0x0074)
71#define DSI_COMPLEXIO_CFG2 DSI_REG(0x0078)
72#define DSI_RX_FIFO_VC_FULLNESS DSI_REG(0x007C)
73#define DSI_VM_TIMING4 DSI_REG(0x0080)
74#define DSI_TX_FIFO_VC_EMPTINESS DSI_REG(0x0084)
75#define DSI_VM_TIMING5 DSI_REG(0x0088)
76#define DSI_VM_TIMING6 DSI_REG(0x008C)
77#define DSI_VM_TIMING7 DSI_REG(0x0090)
78#define DSI_STOPCLK_TIMING DSI_REG(0x0094)
79#define DSI_VC_CTRL(n) DSI_REG(0x0100 + (n * 0x20))
80#define DSI_VC_TE(n) DSI_REG(0x0104 + (n * 0x20))
81#define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(0x0108 + (n * 0x20))
82#define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(0x010C + (n * 0x20))
83#define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(0x0110 + (n * 0x20))
84#define DSI_VC_IRQSTATUS(n) DSI_REG(0x0118 + (n * 0x20))
85#define DSI_VC_IRQENABLE(n) DSI_REG(0x011C + (n * 0x20))
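/* Note: each virtual channel owns a 0x20-byte register block, so for
 * example DSI_VC_CTRL(2) resolves to offset 0x0100 + 2 * 0x20 = 0x0140. */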
86
87/* DSIPHY_SCP */
88
89#define DSI_DSIPHY_CFG0 DSI_REG(0x200 + 0x0000)
90#define DSI_DSIPHY_CFG1 DSI_REG(0x200 + 0x0004)
91#define DSI_DSIPHY_CFG2 DSI_REG(0x200 + 0x0008)
92#define DSI_DSIPHY_CFG5 DSI_REG(0x200 + 0x0014)
93
94/* DSI_PLL_CTRL_SCP */
95
96#define DSI_PLL_CONTROL DSI_REG(0x300 + 0x0000)
97#define DSI_PLL_STATUS DSI_REG(0x300 + 0x0004)
98#define DSI_PLL_GO DSI_REG(0x300 + 0x0008)
99#define DSI_PLL_CONFIGURATION1 DSI_REG(0x300 + 0x000C)
100#define DSI_PLL_CONFIGURATION2 DSI_REG(0x300 + 0x0010)
101
102#define REG_GET(idx, start, end) \
103 FLD_GET(dsi_read_reg(idx), start, end)
104
105#define REG_FLD_MOD(idx, val, start, end) \
106 dsi_write_reg(idx, FLD_MOD(dsi_read_reg(idx), val, start, end))
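/* Illustrative use of the helpers above: REG_GET(DSI_CTRL, 0, 0) reads the
 * IF_EN bit, and REG_FLD_MOD(DSI_CTRL, 1, 0, 0) sets it with a
 * read-modify-write that leaves the other fields untouched. */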
107
108/* Global interrupts */
109#define DSI_IRQ_VC0 (1 << 0)
110#define DSI_IRQ_VC1 (1 << 1)
111#define DSI_IRQ_VC2 (1 << 2)
112#define DSI_IRQ_VC3 (1 << 3)
113#define DSI_IRQ_WAKEUP (1 << 4)
114#define DSI_IRQ_RESYNC (1 << 5)
115#define DSI_IRQ_PLL_LOCK (1 << 7)
116#define DSI_IRQ_PLL_UNLOCK (1 << 8)
117#define DSI_IRQ_PLL_RECALL (1 << 9)
118#define DSI_IRQ_COMPLEXIO_ERR (1 << 10)
119#define DSI_IRQ_HS_TX_TIMEOUT (1 << 14)
120#define DSI_IRQ_LP_RX_TIMEOUT (1 << 15)
121#define DSI_IRQ_TE_TRIGGER (1 << 16)
122#define DSI_IRQ_ACK_TRIGGER (1 << 17)
123#define DSI_IRQ_SYNC_LOST (1 << 18)
124#define DSI_IRQ_LDO_POWER_GOOD (1 << 19)
125#define DSI_IRQ_TA_TIMEOUT (1 << 20)
126#define DSI_IRQ_ERROR_MASK \
127 (DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \
128 DSI_IRQ_TA_TIMEOUT)
129#define DSI_IRQ_CHANNEL_MASK 0xf
130
131/* Virtual channel interrupts */
132#define DSI_VC_IRQ_CS (1 << 0)
133#define DSI_VC_IRQ_ECC_CORR (1 << 1)
134#define DSI_VC_IRQ_PACKET_SENT (1 << 2)
135#define DSI_VC_IRQ_FIFO_TX_OVF (1 << 3)
136#define DSI_VC_IRQ_FIFO_RX_OVF (1 << 4)
137#define DSI_VC_IRQ_BTA (1 << 5)
138#define DSI_VC_IRQ_ECC_NO_CORR (1 << 6)
139#define DSI_VC_IRQ_FIFO_TX_UDF (1 << 7)
140#define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8)
141#define DSI_VC_IRQ_ERROR_MASK \
142 (DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \
143 DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \
144 DSI_VC_IRQ_FIFO_TX_UDF)
145
146/* ComplexIO interrupts */
147#define DSI_CIO_IRQ_ERRSYNCESC1 (1 << 0)
148#define DSI_CIO_IRQ_ERRSYNCESC2 (1 << 1)
149#define DSI_CIO_IRQ_ERRSYNCESC3 (1 << 2)
150#define DSI_CIO_IRQ_ERRESC1 (1 << 5)
151#define DSI_CIO_IRQ_ERRESC2 (1 << 6)
152#define DSI_CIO_IRQ_ERRESC3 (1 << 7)
153#define DSI_CIO_IRQ_ERRCONTROL1 (1 << 10)
154#define DSI_CIO_IRQ_ERRCONTROL2 (1 << 11)
155#define DSI_CIO_IRQ_ERRCONTROL3 (1 << 12)
156#define DSI_CIO_IRQ_STATEULPS1 (1 << 15)
157#define DSI_CIO_IRQ_STATEULPS2 (1 << 16)
158#define DSI_CIO_IRQ_STATEULPS3 (1 << 17)
159#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1 (1 << 20)
160#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1 (1 << 21)
161#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2 (1 << 22)
162#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2 (1 << 23)
163#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3 (1 << 24)
164#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25)
165#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30)
166#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31)
167
168#define DSI_DT_DCS_SHORT_WRITE_0 0x05
169#define DSI_DT_DCS_SHORT_WRITE_1 0x15
170#define DSI_DT_DCS_READ 0x06
171#define DSI_DT_SET_MAX_RET_PKG_SIZE 0x37
172#define DSI_DT_NULL_PACKET 0x09
173#define DSI_DT_DCS_LONG_WRITE 0x39
174
175#define DSI_DT_RX_ACK_WITH_ERR 0x02
176#define DSI_DT_RX_DCS_LONG_READ 0x1c
177#define DSI_DT_RX_SHORT_READ_1 0x21
178#define DSI_DT_RX_SHORT_READ_2 0x22
179
180#define FINT_MAX 2100000
181#define FINT_MIN 750000
182#define REGN_MAX (1 << 7)
183#define REGM_MAX ((1 << 11) - 1)
184#define REGM3_MAX (1 << 4)
185#define REGM4_MAX (1 << 4)
186#define LP_DIV_MAX ((1 << 13) - 1)
187
188enum fifo_size {
189 DSI_FIFO_SIZE_0 = 0,
190 DSI_FIFO_SIZE_32 = 1,
191 DSI_FIFO_SIZE_64 = 2,
192 DSI_FIFO_SIZE_96 = 3,
193 DSI_FIFO_SIZE_128 = 4,
194};
195
196enum dsi_vc_mode {
197 DSI_VC_MODE_L4 = 0,
198 DSI_VC_MODE_VP,
199};
200
201struct dsi_update_region {
202 bool dirty;
203 u16 x, y, w, h;
204 struct omap_dss_device *device;
205};
206
 207struct dsi_irq_stats {
208 unsigned long last_reset;
209 unsigned irq_count;
210 unsigned dsi_irqs[32];
211 unsigned vc_irqs[4][32];
212 unsigned cio_irqs[32];
213};
214
 215static struct
216{
217 void __iomem *base;
218
219 struct dsi_clock_info current_cinfo;
220
221 struct regulator *vdds_dsi_reg;
222
223 struct {
224 enum dsi_vc_mode mode;
225 struct omap_dss_device *dssdev;
226 enum fifo_size fifo_size;
227 int dest_per; /* destination peripheral 0-3 */
228 } vc[4];
229
230 struct mutex lock;
231 struct mutex bus_lock;
232
233 unsigned pll_locked;
234
235 struct completion bta_completion;
236
237 struct task_struct *thread;
238 wait_queue_head_t waitqueue;
239
240 spinlock_t update_lock;
241 bool framedone_received;
242 struct dsi_update_region update_region;
243 struct dsi_update_region active_update_region;
244 struct completion update_completion;
245
246 enum omap_dss_update_mode user_update_mode;
247 enum omap_dss_update_mode update_mode;
248 bool te_enabled;
249 bool use_ext_te;
250
251#ifdef DSI_CATCH_MISSING_TE
252 struct timer_list te_timer;
253#endif
254
255 unsigned long cache_req_pck;
256 unsigned long cache_clk_freq;
257 struct dsi_clock_info cache_cinfo;
258
259 u32 errors;
260 spinlock_t errors_lock;
261#ifdef DEBUG
262 ktime_t perf_setup_time;
263 ktime_t perf_start_time;
264 ktime_t perf_start_time_auto;
265 int perf_measure_frames;
266#endif
267 int debug_read;
268 int debug_write;
 269
270#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
271 spinlock_t irq_stats_lock;
272 struct dsi_irq_stats irq_stats;
273#endif
 274} dsi;
275
276#ifdef DEBUG
277static unsigned int dsi_perf;
278module_param_named(dsi_perf, dsi_perf, bool, 0644);
279#endif
280
281static inline void dsi_write_reg(const struct dsi_reg idx, u32 val)
282{
283 __raw_writel(val, dsi.base + idx.idx);
284}
285
286static inline u32 dsi_read_reg(const struct dsi_reg idx)
287{
288 return __raw_readl(dsi.base + idx.idx);
289}
290
291
292void dsi_save_context(void)
293{
294}
295
296void dsi_restore_context(void)
297{
298}
299
300void dsi_bus_lock(void)
301{
302 mutex_lock(&dsi.bus_lock);
303}
304EXPORT_SYMBOL(dsi_bus_lock);
305
306void dsi_bus_unlock(void)
307{
308 mutex_unlock(&dsi.bus_lock);
309}
310EXPORT_SYMBOL(dsi_bus_unlock);
311
312static inline int wait_for_bit_change(const struct dsi_reg idx, int bitnum,
313 int value)
314{
315 int t = 100000;
316
317 while (REG_GET(idx, bitnum, bitnum) != value) {
318 if (--t == 0)
319 return !value;
320 }
321
322 return value;
323}
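/* The poll above is a plain busy-wait (no delay between reads, ~100000
 * iterations). It returns the requested value on success and its logical
 * negation on timeout, so callers compare the result against the value
 * they waited for. */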
324
325#ifdef DEBUG
326static void dsi_perf_mark_setup(void)
327{
328 dsi.perf_setup_time = ktime_get();
329}
330
331static void dsi_perf_mark_start(void)
332{
333 dsi.perf_start_time = ktime_get();
334}
335
336static void dsi_perf_mark_start_auto(void)
337{
338 dsi.perf_measure_frames = 0;
339 dsi.perf_start_time_auto = ktime_get();
340}
341
342static void dsi_perf_show(const char *name)
343{
344 ktime_t t, setup_time, trans_time;
345 u32 total_bytes;
346 u32 setup_us, trans_us, total_us;
347
348 if (!dsi_perf)
349 return;
350
351 if (dsi.update_mode == OMAP_DSS_UPDATE_DISABLED)
352 return;
353
354 t = ktime_get();
355
356 setup_time = ktime_sub(dsi.perf_start_time, dsi.perf_setup_time);
357 setup_us = (u32)ktime_to_us(setup_time);
358 if (setup_us == 0)
359 setup_us = 1;
360
361 trans_time = ktime_sub(t, dsi.perf_start_time);
362 trans_us = (u32)ktime_to_us(trans_time);
363 if (trans_us == 0)
364 trans_us = 1;
365
366 total_us = setup_us + trans_us;
367
368 total_bytes = dsi.active_update_region.w *
369 dsi.active_update_region.h *
370 dsi.active_update_region.device->ctrl.pixel_size / 8;
371
372 if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO) {
373 static u32 s_total_trans_us, s_total_setup_us;
374 static u32 s_min_trans_us = 0xffffffff, s_min_setup_us;
375 static u32 s_max_trans_us, s_max_setup_us;
376 const int numframes = 100;
377 ktime_t total_time_auto;
378 u32 total_time_auto_us;
379
380 dsi.perf_measure_frames++;
381
382 if (setup_us < s_min_setup_us)
383 s_min_setup_us = setup_us;
384
385 if (setup_us > s_max_setup_us)
386 s_max_setup_us = setup_us;
387
388 s_total_setup_us += setup_us;
389
390 if (trans_us < s_min_trans_us)
391 s_min_trans_us = trans_us;
392
393 if (trans_us > s_max_trans_us)
394 s_max_trans_us = trans_us;
395
396 s_total_trans_us += trans_us;
397
398 if (dsi.perf_measure_frames < numframes)
399 return;
400
401 total_time_auto = ktime_sub(t, dsi.perf_start_time_auto);
402 total_time_auto_us = (u32)ktime_to_us(total_time_auto);
403
404 printk(KERN_INFO "DSI(%s): %u fps, setup %u/%u/%u, "
405 "trans %u/%u/%u\n",
406 name,
407 1000 * 1000 * numframes / total_time_auto_us,
408 s_min_setup_us,
409 s_max_setup_us,
410 s_total_setup_us / numframes,
411 s_min_trans_us,
412 s_max_trans_us,
413 s_total_trans_us / numframes);
414
415 s_total_setup_us = 0;
416 s_min_setup_us = 0xffffffff;
417 s_max_setup_us = 0;
418 s_total_trans_us = 0;
419 s_min_trans_us = 0xffffffff;
420 s_max_trans_us = 0;
421 dsi_perf_mark_start_auto();
422 } else {
423 printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), "
424 "%u bytes, %u kbytes/sec\n",
425 name,
426 setup_us,
427 trans_us,
428 total_us,
429 1000*1000 / total_us,
430 total_bytes,
431 total_bytes * 1000 / total_us);
432 }
433}
434#else
435#define dsi_perf_mark_setup()
436#define dsi_perf_mark_start()
437#define dsi_perf_mark_start_auto()
438#define dsi_perf_show(x)
439#endif
440
441static void print_irq_status(u32 status)
442{
443#ifndef VERBOSE_IRQ
444 if ((status & ~DSI_IRQ_CHANNEL_MASK) == 0)
445 return;
446#endif
447 printk(KERN_DEBUG "DSI IRQ: 0x%x: ", status);
448
449#define PIS(x) \
450 if (status & DSI_IRQ_##x) \
451 printk(#x " ");
452#ifdef VERBOSE_IRQ
453 PIS(VC0);
454 PIS(VC1);
455 PIS(VC2);
456 PIS(VC3);
457#endif
458 PIS(WAKEUP);
459 PIS(RESYNC);
460 PIS(PLL_LOCK);
461 PIS(PLL_UNLOCK);
462 PIS(PLL_RECALL);
463 PIS(COMPLEXIO_ERR);
464 PIS(HS_TX_TIMEOUT);
465 PIS(LP_RX_TIMEOUT);
466 PIS(TE_TRIGGER);
467 PIS(ACK_TRIGGER);
468 PIS(SYNC_LOST);
469 PIS(LDO_POWER_GOOD);
470 PIS(TA_TIMEOUT);
471#undef PIS
472
473 printk("\n");
474}
475
476static void print_irq_status_vc(int channel, u32 status)
477{
478#ifndef VERBOSE_IRQ
479 if ((status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
480 return;
481#endif
482 printk(KERN_DEBUG "DSI VC(%d) IRQ 0x%x: ", channel, status);
483
484#define PIS(x) \
485 if (status & DSI_VC_IRQ_##x) \
486 printk(#x " ");
487 PIS(CS);
488 PIS(ECC_CORR);
489#ifdef VERBOSE_IRQ
490 PIS(PACKET_SENT);
491#endif
492 PIS(FIFO_TX_OVF);
493 PIS(FIFO_RX_OVF);
494 PIS(BTA);
495 PIS(ECC_NO_CORR);
496 PIS(FIFO_TX_UDF);
497 PIS(PP_BUSY_CHANGE);
498#undef PIS
499 printk("\n");
500}
501
502static void print_irq_status_cio(u32 status)
503{
504 printk(KERN_DEBUG "DSI CIO IRQ 0x%x: ", status);
505
506#define PIS(x) \
507 if (status & DSI_CIO_IRQ_##x) \
508 printk(#x " ");
509 PIS(ERRSYNCESC1);
510 PIS(ERRSYNCESC2);
511 PIS(ERRSYNCESC3);
512 PIS(ERRESC1);
513 PIS(ERRESC2);
514 PIS(ERRESC3);
515 PIS(ERRCONTROL1);
516 PIS(ERRCONTROL2);
517 PIS(ERRCONTROL3);
518 PIS(STATEULPS1);
519 PIS(STATEULPS2);
520 PIS(STATEULPS3);
521 PIS(ERRCONTENTIONLP0_1);
522 PIS(ERRCONTENTIONLP1_1);
523 PIS(ERRCONTENTIONLP0_2);
524 PIS(ERRCONTENTIONLP1_2);
525 PIS(ERRCONTENTIONLP0_3);
526 PIS(ERRCONTENTIONLP1_3);
527 PIS(ULPSACTIVENOT_ALL0);
528 PIS(ULPSACTIVENOT_ALL1);
529#undef PIS
530
531 printk("\n");
532}
533
534static int debug_irq;
535
536/* called from dss */
537void dsi_irq_handler(void)
538{
539 u32 irqstatus, vcstatus, ciostatus;
540 int i;
541
542 irqstatus = dsi_read_reg(DSI_IRQSTATUS);
543
 544#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
545 spin_lock(&dsi.irq_stats_lock);
546 dsi.irq_stats.irq_count++;
547 dss_collect_irq_stats(irqstatus, dsi.irq_stats.dsi_irqs);
548#endif
549
 550 if (irqstatus & DSI_IRQ_ERROR_MASK) {
551 DSSERR("DSI error, irqstatus %x\n", irqstatus);
552 print_irq_status(irqstatus);
553 spin_lock(&dsi.errors_lock);
554 dsi.errors |= irqstatus & DSI_IRQ_ERROR_MASK;
555 spin_unlock(&dsi.errors_lock);
556 } else if (debug_irq) {
557 print_irq_status(irqstatus);
558 }
559
560#ifdef DSI_CATCH_MISSING_TE
561 if (irqstatus & DSI_IRQ_TE_TRIGGER)
562 del_timer(&dsi.te_timer);
563#endif
564
565 for (i = 0; i < 4; ++i) {
566 if ((irqstatus & (1<<i)) == 0)
567 continue;
568
569 vcstatus = dsi_read_reg(DSI_VC_IRQSTATUS(i));
570
 571#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
572 dss_collect_irq_stats(vcstatus, dsi.irq_stats.vc_irqs[i]);
573#endif
574
 575 if (vcstatus & DSI_VC_IRQ_BTA)
576 complete(&dsi.bta_completion);
577
578 if (vcstatus & DSI_VC_IRQ_ERROR_MASK) {
579 DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
580 i, vcstatus);
581 print_irq_status_vc(i, vcstatus);
582 } else if (debug_irq) {
583 print_irq_status_vc(i, vcstatus);
584 }
585
586 dsi_write_reg(DSI_VC_IRQSTATUS(i), vcstatus);
587 /* flush posted write */
588 dsi_read_reg(DSI_VC_IRQSTATUS(i));
589 }
590
591 if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
592 ciostatus = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
593
 594#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
595 dss_collect_irq_stats(ciostatus, dsi.irq_stats.cio_irqs);
596#endif
597
 598 dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
599 /* flush posted write */
600 dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
601
602 DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
603 print_irq_status_cio(ciostatus);
604 }
605
606 dsi_write_reg(DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
607 /* flush posted write */
608 dsi_read_reg(DSI_IRQSTATUS);
 609
610#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
611 spin_unlock(&dsi.irq_stats_lock);
612#endif
 613}
614
615
616static void _dsi_initialize_irq(void)
617{
618 u32 l;
619 int i;
620
621 /* disable all interrupts */
622 dsi_write_reg(DSI_IRQENABLE, 0);
623 for (i = 0; i < 4; ++i)
624 dsi_write_reg(DSI_VC_IRQENABLE(i), 0);
625 dsi_write_reg(DSI_COMPLEXIO_IRQ_ENABLE, 0);
626
627 /* clear interrupt status */
628 l = dsi_read_reg(DSI_IRQSTATUS);
629 dsi_write_reg(DSI_IRQSTATUS, l & ~DSI_IRQ_CHANNEL_MASK);
630
631 for (i = 0; i < 4; ++i) {
632 l = dsi_read_reg(DSI_VC_IRQSTATUS(i));
633 dsi_write_reg(DSI_VC_IRQSTATUS(i), l);
634 }
635
636 l = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
637 dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, l);
638
639 /* enable error irqs */
640 l = DSI_IRQ_ERROR_MASK;
641#ifdef DSI_CATCH_MISSING_TE
642 l |= DSI_IRQ_TE_TRIGGER;
643#endif
644 dsi_write_reg(DSI_IRQENABLE, l);
645
646 l = DSI_VC_IRQ_ERROR_MASK;
647 for (i = 0; i < 4; ++i)
648 dsi_write_reg(DSI_VC_IRQENABLE(i), l);
649
650 /* XXX zonda responds incorrectly, causing control error:
651 Exit from LP-ESC mode to LP11 uses wrong transition states on the
652 data lines LP0 and LN0. */
653 dsi_write_reg(DSI_COMPLEXIO_IRQ_ENABLE,
654 -1 & (~DSI_CIO_IRQ_ERRCONTROL2));
655}
656
657static u32 dsi_get_errors(void)
658{
659 unsigned long flags;
660 u32 e;
661 spin_lock_irqsave(&dsi.errors_lock, flags);
662 e = dsi.errors;
663 dsi.errors = 0;
664 spin_unlock_irqrestore(&dsi.errors_lock, flags);
665 return e;
666}
667
668static void dsi_vc_enable_bta_irq(int channel)
669{
670 u32 l;
671
672 dsi_write_reg(DSI_VC_IRQSTATUS(channel), DSI_VC_IRQ_BTA);
673
674 l = dsi_read_reg(DSI_VC_IRQENABLE(channel));
675 l |= DSI_VC_IRQ_BTA;
676 dsi_write_reg(DSI_VC_IRQENABLE(channel), l);
677}
678
679static void dsi_vc_disable_bta_irq(int channel)
680{
681 u32 l;
682
683 l = dsi_read_reg(DSI_VC_IRQENABLE(channel));
684 l &= ~DSI_VC_IRQ_BTA;
685 dsi_write_reg(DSI_VC_IRQENABLE(channel), l);
686}
687
688/* DSI func clock. this could also be DSI2_PLL_FCLK */
689static inline void enable_clocks(bool enable)
690{
691 if (enable)
692 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
693 else
694 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
695}
696
697/* source clock for DSI PLL. this could also be PCLKFREE */
698static inline void dsi_enable_pll_clock(bool enable)
699{
700 if (enable)
701 dss_clk_enable(DSS_CLK_FCK2);
702 else
703 dss_clk_disable(DSS_CLK_FCK2);
704
705 if (enable && dsi.pll_locked) {
706 if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1)
707 DSSERR("cannot lock PLL when enabling clocks\n");
708 }
709}
710
711#ifdef DEBUG
712static void _dsi_print_reset_status(void)
713{
714 u32 l;
715
716 if (!dss_debug)
717 return;
718
719 /* A dummy read using the SCP interface to any DSIPHY register is
720 * required after DSIPHY reset to complete the reset of the DSI complex
721 * I/O. */
722 l = dsi_read_reg(DSI_DSIPHY_CFG5);
723
724 printk(KERN_DEBUG "DSI resets: ");
725
726 l = dsi_read_reg(DSI_PLL_STATUS);
727 printk("PLL (%d) ", FLD_GET(l, 0, 0));
728
729 l = dsi_read_reg(DSI_COMPLEXIO_CFG1);
730 printk("CIO (%d) ", FLD_GET(l, 29, 29));
731
732 l = dsi_read_reg(DSI_DSIPHY_CFG5);
733 printk("PHY (%x, %d, %d, %d)\n",
734 FLD_GET(l, 28, 26),
735 FLD_GET(l, 29, 29),
736 FLD_GET(l, 30, 30),
737 FLD_GET(l, 31, 31));
738}
739#else
740#define _dsi_print_reset_status()
741#endif
742
743static inline int dsi_if_enable(bool enable)
744{
745 DSSDBG("dsi_if_enable(%d)\n", enable);
746
747 enable = enable ? 1 : 0;
748 REG_FLD_MOD(DSI_CTRL, enable, 0, 0); /* IF_EN */
749
750 if (wait_for_bit_change(DSI_CTRL, 0, enable) != enable) {
751 DSSERR("Failed to set dsi_if_enable to %d\n", enable);
752 return -EIO;
753 }
754
755 return 0;
756}
757
758unsigned long dsi_get_dsi1_pll_rate(void)
759{
760 return dsi.current_cinfo.dsi1_pll_fclk;
761}
762
763static unsigned long dsi_get_dsi2_pll_rate(void)
764{
765 return dsi.current_cinfo.dsi2_pll_fclk;
766}
767
768static unsigned long dsi_get_txbyteclkhs(void)
769{
770 return dsi.current_cinfo.clkin4ddr / 16;
771}
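/* TxByteClkHS is derived from the 4x DDR clock: one lane carries
 * clkin4ddr / 2 bits per second, so the per-lane byte clock is
 * (clkin4ddr / 2) / 8 = clkin4ddr / 16. */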
772
773static unsigned long dsi_fclk_rate(void)
774{
775 unsigned long r;
776
777 if (dss_get_dsi_clk_source() == 0) {
778 /* DSI FCLK source is DSS1_ALWON_FCK, which is dss1_fck */
779 r = dss_clk_get_rate(DSS_CLK_FCK1);
780 } else {
781 /* DSI FCLK source is DSI2_PLL_FCLK */
782 r = dsi_get_dsi2_pll_rate();
783 }
784
785 return r;
786}
787
788static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev)
789{
790 unsigned long dsi_fclk;
791 unsigned lp_clk_div;
792 unsigned long lp_clk;
793
794 lp_clk_div = dssdev->phy.dsi.div.lp_clk_div;
795
796 if (lp_clk_div == 0 || lp_clk_div > LP_DIV_MAX)
797 return -EINVAL;
798
799 dsi_fclk = dsi_fclk_rate();
800
801 lp_clk = dsi_fclk / 2 / lp_clk_div;
802
803 DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk);
804 dsi.current_cinfo.lp_clk = lp_clk;
805 dsi.current_cinfo.lp_clk_div = lp_clk_div;
806
807 REG_FLD_MOD(DSI_CLK_CTRL, lp_clk_div, 12, 0); /* LP_CLK_DIVISOR */
808
809 REG_FLD_MOD(DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0,
810 21, 21); /* LP_RX_SYNCHRO_ENABLE */
811
812 return 0;
813}
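/* Rough worked example with made-up numbers: for dsi_fclk = 38.4 MHz and
 * lp_clk_div = 6 (taken from dssdev->phy.dsi.div.lp_clk_div), the LP clock
 * is 38400000 / 2 / 6 = 3.2 MHz. */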
814
815
816enum dsi_pll_power_state {
817 DSI_PLL_POWER_OFF = 0x0,
818 DSI_PLL_POWER_ON_HSCLK = 0x1,
819 DSI_PLL_POWER_ON_ALL = 0x2,
820 DSI_PLL_POWER_ON_DIV = 0x3,
821};
822
823static int dsi_pll_power(enum dsi_pll_power_state state)
824{
825 int t = 0;
826
827 REG_FLD_MOD(DSI_CLK_CTRL, state, 31, 30); /* PLL_PWR_CMD */
828
829 /* PLL_PWR_STATUS */
830 while (FLD_GET(dsi_read_reg(DSI_CLK_CTRL), 29, 28) != state) {
 831 if (++t > 1000) {
 832 DSSERR("Failed to set DSI PLL power mode to %d\n",
833 state);
834 return -ENODEV;
835 }
 836 udelay(1);
 837 }
838
839 return 0;
840}
841
842/* calculate clock rates using dividers in cinfo */
843static int dsi_calc_clock_rates(struct dsi_clock_info *cinfo)
844{
845 if (cinfo->regn == 0 || cinfo->regn > REGN_MAX)
846 return -EINVAL;
847
848 if (cinfo->regm == 0 || cinfo->regm > REGM_MAX)
849 return -EINVAL;
850
851 if (cinfo->regm3 > REGM3_MAX)
852 return -EINVAL;
853
854 if (cinfo->regm4 > REGM4_MAX)
855 return -EINVAL;
856
857 if (cinfo->use_dss2_fck) {
858 cinfo->clkin = dss_clk_get_rate(DSS_CLK_FCK2);
859 /* XXX it is unclear if highfreq should be used
860 * with DSS2_FCK source also */
861 cinfo->highfreq = 0;
862 } else {
863 cinfo->clkin = dispc_pclk_rate();
864
865 if (cinfo->clkin < 32000000)
866 cinfo->highfreq = 0;
867 else
868 cinfo->highfreq = 1;
869 }
870
871 cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1));
872
873 if (cinfo->fint > FINT_MAX || cinfo->fint < FINT_MIN)
874 return -EINVAL;
875
876 cinfo->clkin4ddr = 2 * cinfo->regm * cinfo->fint;
877
878 if (cinfo->clkin4ddr > 1800 * 1000 * 1000)
879 return -EINVAL;
880
881 if (cinfo->regm3 > 0)
882 cinfo->dsi1_pll_fclk = cinfo->clkin4ddr / cinfo->regm3;
883 else
884 cinfo->dsi1_pll_fclk = 0;
885
886 if (cinfo->regm4 > 0)
887 cinfo->dsi2_pll_fclk = cinfo->clkin4ddr / cinfo->regm4;
888 else
889 cinfo->dsi2_pll_fclk = 0;
890
891 return 0;
892}
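/* Rough worked example with hypothetical dividers: clkin = 26 MHz,
 * highfreq = 0, regn = 13 gives fint = 2 MHz; regm = 180 then gives
 * clkin4ddr = 2 * 180 * 2 MHz = 720 MHz, and regm3 = 5 yields
 * dsi1_pll_fclk = 720 MHz / 5 = 144 MHz. */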
893
894int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck,
895 struct dsi_clock_info *dsi_cinfo,
896 struct dispc_clock_info *dispc_cinfo)
897{
898 struct dsi_clock_info cur, best;
899 struct dispc_clock_info best_dispc;
900 int min_fck_per_pck;
901 int match = 0;
902 unsigned long dss_clk_fck2;
903
904 dss_clk_fck2 = dss_clk_get_rate(DSS_CLK_FCK2);
905
906 if (req_pck == dsi.cache_req_pck &&
907 dsi.cache_cinfo.clkin == dss_clk_fck2) {
908 DSSDBG("DSI clock info found from cache\n");
909 *dsi_cinfo = dsi.cache_cinfo;
910 dispc_find_clk_divs(is_tft, req_pck, dsi_cinfo->dsi1_pll_fclk,
911 dispc_cinfo);
912 return 0;
913 }
914
915 min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
916
917 if (min_fck_per_pck &&
918 req_pck * min_fck_per_pck > DISPC_MAX_FCK) {
919 DSSERR("Requested pixel clock not possible with the current "
920 "OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
921 "the constraint off.\n");
922 min_fck_per_pck = 0;
923 }
924
925 DSSDBG("dsi_pll_calc\n");
926
927retry:
928 memset(&best, 0, sizeof(best));
929 memset(&best_dispc, 0, sizeof(best_dispc));
930
931 memset(&cur, 0, sizeof(cur));
932 cur.clkin = dss_clk_fck2;
933 cur.use_dss2_fck = 1;
934 cur.highfreq = 0;
935
936 /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */
937 /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */
938 /* To reduce PLL lock time, keep Fint high (around 2 MHz) */
939 for (cur.regn = 1; cur.regn < REGN_MAX; ++cur.regn) {
940 if (cur.highfreq == 0)
941 cur.fint = cur.clkin / cur.regn;
942 else
943 cur.fint = cur.clkin / (2 * cur.regn);
944
945 if (cur.fint > FINT_MAX || cur.fint < FINT_MIN)
946 continue;
947
948 /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */
949 for (cur.regm = 1; cur.regm < REGM_MAX; ++cur.regm) {
950 unsigned long a, b;
951
952 a = 2 * cur.regm * (cur.clkin/1000);
953 b = cur.regn * (cur.highfreq + 1);
954 cur.clkin4ddr = a / b * 1000;
955
956 if (cur.clkin4ddr > 1800 * 1000 * 1000)
957 break;
958
959 /* DSI1_PLL_FCLK(MHz) = DSIPHY(MHz) / regm3 < 173MHz */
960 for (cur.regm3 = 1; cur.regm3 < REGM3_MAX;
961 ++cur.regm3) {
962 struct dispc_clock_info cur_dispc;
963 cur.dsi1_pll_fclk = cur.clkin4ddr / cur.regm3;
964
965 /* this will narrow down the search a bit,
966 * but still give pixclocks below what was
967 * requested */
968 if (cur.dsi1_pll_fclk < req_pck)
969 break;
970
971 if (cur.dsi1_pll_fclk > DISPC_MAX_FCK)
972 continue;
973
974 if (min_fck_per_pck &&
975 cur.dsi1_pll_fclk <
976 req_pck * min_fck_per_pck)
977 continue;
978
979 match = 1;
980
981 dispc_find_clk_divs(is_tft, req_pck,
982 cur.dsi1_pll_fclk,
983 &cur_dispc);
984
985 if (abs(cur_dispc.pck - req_pck) <
986 abs(best_dispc.pck - req_pck)) {
987 best = cur;
988 best_dispc = cur_dispc;
989
990 if (cur_dispc.pck == req_pck)
991 goto found;
992 }
993 }
994 }
995 }
996found:
997 if (!match) {
998 if (min_fck_per_pck) {
999 DSSERR("Could not find suitable clock settings.\n"
1000 "Turning FCK/PCK constraint off and"
1001 "trying again.\n");
1002 min_fck_per_pck = 0;
1003 goto retry;
1004 }
1005
1006 DSSERR("Could not find suitable clock settings.\n");
1007
1008 return -EINVAL;
1009 }
1010
1011 /* DSI2_PLL_FCLK (regm4) is not used */
1012 best.regm4 = 0;
1013 best.dsi2_pll_fclk = 0;
1014
1015 if (dsi_cinfo)
1016 *dsi_cinfo = best;
1017 if (dispc_cinfo)
1018 *dispc_cinfo = best_dispc;
1019
1020 dsi.cache_req_pck = req_pck;
1021 dsi.cache_clk_freq = 0;
1022 dsi.cache_cinfo = best;
1023
1024 return 0;
1025}
1026
1027int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
1028{
1029 int r = 0;
1030 u32 l;
1031 int f;
1032
1033 DSSDBGF();
1034
1035 dsi.current_cinfo.fint = cinfo->fint;
1036 dsi.current_cinfo.clkin4ddr = cinfo->clkin4ddr;
1037 dsi.current_cinfo.dsi1_pll_fclk = cinfo->dsi1_pll_fclk;
1038 dsi.current_cinfo.dsi2_pll_fclk = cinfo->dsi2_pll_fclk;
1039
1040 dsi.current_cinfo.regn = cinfo->regn;
1041 dsi.current_cinfo.regm = cinfo->regm;
1042 dsi.current_cinfo.regm3 = cinfo->regm3;
1043 dsi.current_cinfo.regm4 = cinfo->regm4;
1044
1045 DSSDBG("DSI Fint %ld\n", cinfo->fint);
1046
1047 DSSDBG("clkin (%s) rate %ld, highfreq %d\n",
1048 cinfo->use_dss2_fck ? "dss2_fck" : "pclkfree",
1049 cinfo->clkin,
1050 cinfo->highfreq);
1051
1052 /* DSIPHY == CLKIN4DDR */
1053 DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu / %d = %lu\n",
1054 cinfo->regm,
1055 cinfo->regn,
1056 cinfo->clkin,
1057 cinfo->highfreq + 1,
1058 cinfo->clkin4ddr);
1059
1060 DSSDBG("Data rate on 1 DSI lane %ld Mbps\n",
1061 cinfo->clkin4ddr / 1000 / 1000 / 2);
1062
1063 DSSDBG("Clock lane freq %ld Hz\n", cinfo->clkin4ddr / 4);
1064
1065 DSSDBG("regm3 = %d, dsi1_pll_fclk = %lu\n",
1066 cinfo->regm3, cinfo->dsi1_pll_fclk);
1067 DSSDBG("regm4 = %d, dsi2_pll_fclk = %lu\n",
1068 cinfo->regm4, cinfo->dsi2_pll_fclk);
1069
1070 REG_FLD_MOD(DSI_PLL_CONTROL, 0, 0, 0); /* DSI_PLL_AUTOMODE = manual */
1071
1072 l = dsi_read_reg(DSI_PLL_CONFIGURATION1);
1073 l = FLD_MOD(l, 1, 0, 0); /* DSI_PLL_STOPMODE */
1074 l = FLD_MOD(l, cinfo->regn - 1, 7, 1); /* DSI_PLL_REGN */
1075 l = FLD_MOD(l, cinfo->regm, 18, 8); /* DSI_PLL_REGM */
1076 l = FLD_MOD(l, cinfo->regm3 > 0 ? cinfo->regm3 - 1 : 0,
1077 22, 19); /* DSI_CLOCK_DIV */
1078 l = FLD_MOD(l, cinfo->regm4 > 0 ? cinfo->regm4 - 1 : 0,
1079 26, 23); /* DSIPROTO_CLOCK_DIV */
1080 dsi_write_reg(DSI_PLL_CONFIGURATION1, l);
1081
1082 BUG_ON(cinfo->fint < 750000 || cinfo->fint > 2100000);
1083 if (cinfo->fint < 1000000)
1084 f = 0x3;
1085 else if (cinfo->fint < 1250000)
1086 f = 0x4;
1087 else if (cinfo->fint < 1500000)
1088 f = 0x5;
1089 else if (cinfo->fint < 1750000)
1090 f = 0x6;
1091 else
1092 f = 0x7;
1093
1094 l = dsi_read_reg(DSI_PLL_CONFIGURATION2);
1095 l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */
1096 l = FLD_MOD(l, cinfo->use_dss2_fck ? 0 : 1,
1097 11, 11); /* DSI_PLL_CLKSEL */
1098 l = FLD_MOD(l, cinfo->highfreq,
1099 12, 12); /* DSI_PLL_HIGHFREQ */
1100 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
1101 l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */
1102 l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */
1103 dsi_write_reg(DSI_PLL_CONFIGURATION2, l);
1104
1105 REG_FLD_MOD(DSI_PLL_GO, 1, 0, 0); /* DSI_PLL_GO */
1106
1107 if (wait_for_bit_change(DSI_PLL_GO, 0, 0) != 0) {
1108 DSSERR("dsi pll go bit not going down.\n");
1109 r = -EIO;
1110 goto err;
1111 }
1112
1113 if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1) {
1114 DSSERR("cannot lock PLL\n");
1115 r = -EIO;
1116 goto err;
1117 }
1118
1119 dsi.pll_locked = 1;
1120
1121 l = dsi_read_reg(DSI_PLL_CONFIGURATION2);
1122 l = FLD_MOD(l, 0, 0, 0); /* DSI_PLL_IDLE */
1123 l = FLD_MOD(l, 0, 5, 5); /* DSI_PLL_PLLLPMODE */
1124 l = FLD_MOD(l, 0, 6, 6); /* DSI_PLL_LOWCURRSTBY */
1125 l = FLD_MOD(l, 0, 7, 7); /* DSI_PLL_TIGHTPHASELOCK */
1126 l = FLD_MOD(l, 0, 8, 8); /* DSI_PLL_DRIFTGUARDEN */
1127 l = FLD_MOD(l, 0, 10, 9); /* DSI_PLL_LOCKSEL */
1128 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
1129 l = FLD_MOD(l, 1, 14, 14); /* DSIPHY_CLKINEN */
1130 l = FLD_MOD(l, 0, 15, 15); /* DSI_BYPASSEN */
1131 l = FLD_MOD(l, 1, 16, 16); /* DSS_CLOCK_EN */
1132 l = FLD_MOD(l, 0, 17, 17); /* DSS_CLOCK_PWDN */
1133 l = FLD_MOD(l, 1, 18, 18); /* DSI_PROTO_CLOCK_EN */
1134 l = FLD_MOD(l, 0, 19, 19); /* DSI_PROTO_CLOCK_PWDN */
1135 l = FLD_MOD(l, 0, 20, 20); /* DSI_HSDIVBYPASS */
1136 dsi_write_reg(DSI_PLL_CONFIGURATION2, l);
1137
1138 DSSDBG("PLL config done\n");
1139err:
1140 return r;
1141}
1142
1143int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk,
1144 bool enable_hsdiv)
1145{
1146 int r = 0;
1147 enum dsi_pll_power_state pwstate;
1148
1149 DSSDBG("PLL init\n");
1150
1151 enable_clocks(1);
1152 dsi_enable_pll_clock(1);
1153
1154 r = regulator_enable(dsi.vdds_dsi_reg);
1155 if (r)
1156 goto err0;
1157
1158 /* XXX PLL does not come out of reset without this... */
1159 dispc_pck_free_enable(1);
1160
1161 if (wait_for_bit_change(DSI_PLL_STATUS, 0, 1) != 1) {
1162 DSSERR("PLL not coming out of reset.\n");
1163 r = -ENODEV;
1164 goto err1;
1165 }
1166
1167 /* XXX ... but if left on, we get problems when planes do not
1168 * fill the whole display. No idea about this */
1169 dispc_pck_free_enable(0);
1170
1171 if (enable_hsclk && enable_hsdiv)
1172 pwstate = DSI_PLL_POWER_ON_ALL;
1173 else if (enable_hsclk)
1174 pwstate = DSI_PLL_POWER_ON_HSCLK;
1175 else if (enable_hsdiv)
1176 pwstate = DSI_PLL_POWER_ON_DIV;
1177 else
1178 pwstate = DSI_PLL_POWER_OFF;
1179
1180 r = dsi_pll_power(pwstate);
1181
1182 if (r)
1183 goto err1;
1184
1185 DSSDBG("PLL init done\n");
1186
1187 return 0;
1188err1:
1189 regulator_disable(dsi.vdds_dsi_reg);
1190err0:
1191 enable_clocks(0);
1192 dsi_enable_pll_clock(0);
1193 return r;
1194}
1195
1196void dsi_pll_uninit(void)
1197{
1198 enable_clocks(0);
1199 dsi_enable_pll_clock(0);
1200
1201 dsi.pll_locked = 0;
1202 dsi_pll_power(DSI_PLL_POWER_OFF);
1203 regulator_disable(dsi.vdds_dsi_reg);
1204 DSSDBG("PLL uninit done\n");
1205}
1206
1207void dsi_dump_clocks(struct seq_file *s)
1208{
1209 int clksel;
1210 struct dsi_clock_info *cinfo = &dsi.current_cinfo;
1211
1212 enable_clocks(1);
1213
1214 clksel = REG_GET(DSI_PLL_CONFIGURATION2, 11, 11);
1215
1216 seq_printf(s, "- DSI PLL -\n");
1217
1218 seq_printf(s, "dsi pll source = %s\n",
1219 clksel == 0 ?
1220 "dss2_alwon_fclk" : "pclkfree");
1221
1222 seq_printf(s, "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn);
1223
1224 seq_printf(s, "CLKIN4DDR\t%-16luregm %u\n",
1225 cinfo->clkin4ddr, cinfo->regm);
1226
1227 seq_printf(s, "dsi1_pll_fck\t%-16luregm3 %u\t(%s)\n",
1228 cinfo->dsi1_pll_fclk,
1229 cinfo->regm3,
1230 dss_get_dispc_clk_source() == 0 ? "off" : "on");
1231
1232 seq_printf(s, "dsi2_pll_fck\t%-16luregm4 %u\t(%s)\n",
1233 cinfo->dsi2_pll_fclk,
1234 cinfo->regm4,
1235 dss_get_dsi_clk_source() == 0 ? "off" : "on");
1236
1237 seq_printf(s, "- DSI -\n");
1238
1239 seq_printf(s, "dsi fclk source = %s\n",
1240 dss_get_dsi_clk_source() == 0 ?
1241 "dss1_alwon_fclk" : "dsi2_pll_fclk");
1242
1243 seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate());
1244
1245 seq_printf(s, "DDR_CLK\t\t%lu\n",
1246 cinfo->clkin4ddr / 4);
1247
1248 seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs());
1249
1250 seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk);
1251
1252 seq_printf(s, "VP_CLK\t\t%lu\n"
1253 "VP_PCLK\t\t%lu\n",
1254 dispc_lclk_rate(),
1255 dispc_pclk_rate());
1256
1257 enable_clocks(0);
1258}
1259
 1260#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
1261void dsi_dump_irqs(struct seq_file *s)
1262{
1263 unsigned long flags;
1264 struct dsi_irq_stats stats;
1265
1266 spin_lock_irqsave(&dsi.irq_stats_lock, flags);
1267
1268 stats = dsi.irq_stats;
1269 memset(&dsi.irq_stats, 0, sizeof(dsi.irq_stats));
1270 dsi.irq_stats.last_reset = jiffies;
1271
1272 spin_unlock_irqrestore(&dsi.irq_stats_lock, flags);
1273
1274 seq_printf(s, "period %u ms\n",
1275 jiffies_to_msecs(jiffies - stats.last_reset));
1276
1277 seq_printf(s, "irqs %d\n", stats.irq_count);
1278#define PIS(x) \
1279 seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);
1280
1281 seq_printf(s, "-- DSI interrupts --\n");
1282 PIS(VC0);
1283 PIS(VC1);
1284 PIS(VC2);
1285 PIS(VC3);
1286 PIS(WAKEUP);
1287 PIS(RESYNC);
1288 PIS(PLL_LOCK);
1289 PIS(PLL_UNLOCK);
1290 PIS(PLL_RECALL);
1291 PIS(COMPLEXIO_ERR);
1292 PIS(HS_TX_TIMEOUT);
1293 PIS(LP_RX_TIMEOUT);
1294 PIS(TE_TRIGGER);
1295 PIS(ACK_TRIGGER);
1296 PIS(SYNC_LOST);
1297 PIS(LDO_POWER_GOOD);
1298 PIS(TA_TIMEOUT);
1299#undef PIS
1300
1301#define PIS(x) \
1302 seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \
1303 stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
1304 stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
1305 stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
1306 stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
1307
1308 seq_printf(s, "-- VC interrupts --\n");
1309 PIS(CS);
1310 PIS(ECC_CORR);
1311 PIS(PACKET_SENT);
1312 PIS(FIFO_TX_OVF);
1313 PIS(FIFO_RX_OVF);
1314 PIS(BTA);
1315 PIS(ECC_NO_CORR);
1316 PIS(FIFO_TX_UDF);
1317 PIS(PP_BUSY_CHANGE);
1318#undef PIS
1319
1320#define PIS(x) \
1321 seq_printf(s, "%-20s %10d\n", #x, \
1322 stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
1323
1324 seq_printf(s, "-- CIO interrupts --\n");
1325 PIS(ERRSYNCESC1);
1326 PIS(ERRSYNCESC2);
1327 PIS(ERRSYNCESC3);
1328 PIS(ERRESC1);
1329 PIS(ERRESC2);
1330 PIS(ERRESC3);
1331 PIS(ERRCONTROL1);
1332 PIS(ERRCONTROL2);
1333 PIS(ERRCONTROL3);
1334 PIS(STATEULPS1);
1335 PIS(STATEULPS2);
1336 PIS(STATEULPS3);
1337 PIS(ERRCONTENTIONLP0_1);
1338 PIS(ERRCONTENTIONLP1_1);
1339 PIS(ERRCONTENTIONLP0_2);
1340 PIS(ERRCONTENTIONLP1_2);
1341 PIS(ERRCONTENTIONLP0_3);
1342 PIS(ERRCONTENTIONLP1_3);
1343 PIS(ULPSACTIVENOT_ALL0);
1344 PIS(ULPSACTIVENOT_ALL1);
1345#undef PIS
1346}
1347#endif
1348
 1349void dsi_dump_regs(struct seq_file *s)
1350{
1351#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(r))
1352
1353 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
1354
1355 DUMPREG(DSI_REVISION);
1356 DUMPREG(DSI_SYSCONFIG);
1357 DUMPREG(DSI_SYSSTATUS);
1358 DUMPREG(DSI_IRQSTATUS);
1359 DUMPREG(DSI_IRQENABLE);
1360 DUMPREG(DSI_CTRL);
1361 DUMPREG(DSI_COMPLEXIO_CFG1);
1362 DUMPREG(DSI_COMPLEXIO_IRQ_STATUS);
1363 DUMPREG(DSI_COMPLEXIO_IRQ_ENABLE);
1364 DUMPREG(DSI_CLK_CTRL);
1365 DUMPREG(DSI_TIMING1);
1366 DUMPREG(DSI_TIMING2);
1367 DUMPREG(DSI_VM_TIMING1);
1368 DUMPREG(DSI_VM_TIMING2);
1369 DUMPREG(DSI_VM_TIMING3);
1370 DUMPREG(DSI_CLK_TIMING);
1371 DUMPREG(DSI_TX_FIFO_VC_SIZE);
1372 DUMPREG(DSI_RX_FIFO_VC_SIZE);
1373 DUMPREG(DSI_COMPLEXIO_CFG2);
1374 DUMPREG(DSI_RX_FIFO_VC_FULLNESS);
1375 DUMPREG(DSI_VM_TIMING4);
1376 DUMPREG(DSI_TX_FIFO_VC_EMPTINESS);
1377 DUMPREG(DSI_VM_TIMING5);
1378 DUMPREG(DSI_VM_TIMING6);
1379 DUMPREG(DSI_VM_TIMING7);
1380 DUMPREG(DSI_STOPCLK_TIMING);
1381
1382 DUMPREG(DSI_VC_CTRL(0));
1383 DUMPREG(DSI_VC_TE(0));
1384 DUMPREG(DSI_VC_LONG_PACKET_HEADER(0));
1385 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(0));
1386 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(0));
1387 DUMPREG(DSI_VC_IRQSTATUS(0));
1388 DUMPREG(DSI_VC_IRQENABLE(0));
1389
1390 DUMPREG(DSI_VC_CTRL(1));
1391 DUMPREG(DSI_VC_TE(1));
1392 DUMPREG(DSI_VC_LONG_PACKET_HEADER(1));
1393 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(1));
1394 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(1));
1395 DUMPREG(DSI_VC_IRQSTATUS(1));
1396 DUMPREG(DSI_VC_IRQENABLE(1));
1397
1398 DUMPREG(DSI_VC_CTRL(2));
1399 DUMPREG(DSI_VC_TE(2));
1400 DUMPREG(DSI_VC_LONG_PACKET_HEADER(2));
1401 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(2));
1402 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(2));
1403 DUMPREG(DSI_VC_IRQSTATUS(2));
1404 DUMPREG(DSI_VC_IRQENABLE(2));
1405
1406 DUMPREG(DSI_VC_CTRL(3));
1407 DUMPREG(DSI_VC_TE(3));
1408 DUMPREG(DSI_VC_LONG_PACKET_HEADER(3));
1409 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(3));
1410 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(3));
1411 DUMPREG(DSI_VC_IRQSTATUS(3));
1412 DUMPREG(DSI_VC_IRQENABLE(3));
1413
1414 DUMPREG(DSI_DSIPHY_CFG0);
1415 DUMPREG(DSI_DSIPHY_CFG1);
1416 DUMPREG(DSI_DSIPHY_CFG2);
1417 DUMPREG(DSI_DSIPHY_CFG5);
1418
1419 DUMPREG(DSI_PLL_CONTROL);
1420 DUMPREG(DSI_PLL_STATUS);
1421 DUMPREG(DSI_PLL_GO);
1422 DUMPREG(DSI_PLL_CONFIGURATION1);
1423 DUMPREG(DSI_PLL_CONFIGURATION2);
1424
1425 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
1426#undef DUMPREG
1427}
1428
1429enum dsi_complexio_power_state {
1430 DSI_COMPLEXIO_POWER_OFF = 0x0,
1431 DSI_COMPLEXIO_POWER_ON = 0x1,
1432 DSI_COMPLEXIO_POWER_ULPS = 0x2,
1433};
1434
1435static int dsi_complexio_power(enum dsi_complexio_power_state state)
1436{
1437 int t = 0;
1438
1439 /* PWR_CMD */
1440 REG_FLD_MOD(DSI_COMPLEXIO_CFG1, state, 28, 27);
1441
1442 /* PWR_STATUS */
1443 while (FLD_GET(dsi_read_reg(DSI_COMPLEXIO_CFG1), 26, 25) != state) {
 1444 if (++t > 1000) {
 1445 DSSERR("failed to set complexio power state to "
1446 "%d\n", state);
1447 return -ENODEV;
1448 }
 1449 udelay(1);
 1450 }
1451
1452 return 0;
1453}
1454
1455static void dsi_complexio_config(struct omap_dss_device *dssdev)
1456{
1457 u32 r;
1458
1459 int clk_lane = dssdev->phy.dsi.clk_lane;
1460 int data1_lane = dssdev->phy.dsi.data1_lane;
1461 int data2_lane = dssdev->phy.dsi.data2_lane;
1462 int clk_pol = dssdev->phy.dsi.clk_pol;
1463 int data1_pol = dssdev->phy.dsi.data1_pol;
1464 int data2_pol = dssdev->phy.dsi.data2_pol;
1465
1466 r = dsi_read_reg(DSI_COMPLEXIO_CFG1);
1467 r = FLD_MOD(r, clk_lane, 2, 0);
1468 r = FLD_MOD(r, clk_pol, 3, 3);
1469 r = FLD_MOD(r, data1_lane, 6, 4);
1470 r = FLD_MOD(r, data1_pol, 7, 7);
1471 r = FLD_MOD(r, data2_lane, 10, 8);
1472 r = FLD_MOD(r, data2_pol, 11, 11);
1473 dsi_write_reg(DSI_COMPLEXIO_CFG1, r);
1474
1475 /* The configuration of the DSI complex I/O (number of data lanes,
1476 position, differential order) should not be changed while
1477 DSS.DSI_CLK_CRTRL[20] LP_CLK_ENABLE bit is set to 1. In order for
1478 the hardware to take into account a new configuration of the complex
1479 I/O (done in DSS.DSI_COMPLEXIO_CFG1 register), it is recommended to
1480 follow this sequence: First set the DSS.DSI_CTRL[0] IF_EN bit to 1,
1481 then reset the DSS.DSI_CTRL[0] IF_EN to 0, then set
1482 DSS.DSI_CLK_CTRL[20] LP_CLK_ENABLE to 1 and finally set again the
1483 DSS.DSI_CTRL[0] IF_EN bit to 1. If the sequence is not followed, the
1484 DSI complex I/O configuration is unknown. */
1485
1486 /*
1487 REG_FLD_MOD(DSI_CTRL, 1, 0, 0);
1488 REG_FLD_MOD(DSI_CTRL, 0, 0, 0);
1489 REG_FLD_MOD(DSI_CLK_CTRL, 1, 20, 20);
1490 REG_FLD_MOD(DSI_CTRL, 1, 0, 0);
1491 */
1492}
1493
1494static inline unsigned ns2ddr(unsigned ns)
1495{
1496 /* convert time in ns to ddr ticks, rounding up */
1497 unsigned long ddr_clk = dsi.current_cinfo.clkin4ddr / 4;
1498 return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000;
1499}
1500
1501static inline unsigned ddr2ns(unsigned ddr)
1502{
1503 unsigned long ddr_clk = dsi.current_cinfo.clkin4ddr / 4;
1504 return ddr * 1000 * 1000 / (ddr_clk / 1000);
1505}
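/* Example with a hypothetical 300 MHz clkin4ddr: the DDR clock is 75 MHz,
 * so ns2ddr(70) = (70 * 75 + 999) / 1000 = 6 ticks (rounded up) and
 * ddr2ns(6) = 6000000 / 75000 = 80 ns (truncated). */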
1506
1507static void dsi_complexio_timings(void)
1508{
1509 u32 r;
1510 u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
1511 u32 tlpx_half, tclk_trail, tclk_zero;
1512 u32 tclk_prepare;
1513
1514 /* calculate timings */
1515
1516 /* 1 * DDR_CLK = 2 * UI */
1517
1518 /* min 40ns + 4*UI max 85ns + 6*UI */
1519 ths_prepare = ns2ddr(70) + 2;
1520
1521 /* min 145ns + 10*UI */
1522 ths_prepare_ths_zero = ns2ddr(175) + 2;
1523
1524 /* min max(8*UI, 60ns+4*UI) */
1525 ths_trail = ns2ddr(60) + 5;
1526
1527 /* min 100ns */
1528 ths_exit = ns2ddr(145);
1529
 1530 /* tlpx min 50ns */
1531 tlpx_half = ns2ddr(25);
1532
1533 /* min 60ns */
1534 tclk_trail = ns2ddr(60) + 2;
1535
1536 /* min 38ns, max 95ns */
1537 tclk_prepare = ns2ddr(65);
1538
1539 /* min tclk-prepare + tclk-zero = 300ns */
1540 tclk_zero = ns2ddr(260);
1541
1542 DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n",
1543 ths_prepare, ddr2ns(ths_prepare),
1544 ths_prepare_ths_zero, ddr2ns(ths_prepare_ths_zero));
1545 DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n",
1546 ths_trail, ddr2ns(ths_trail),
1547 ths_exit, ddr2ns(ths_exit));
1548
1549 DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), "
1550 "tclk_zero %u (%uns)\n",
1551 tlpx_half, ddr2ns(tlpx_half),
1552 tclk_trail, ddr2ns(tclk_trail),
1553 tclk_zero, ddr2ns(tclk_zero));
1554 DSSDBG("tclk_prepare %u (%uns)\n",
1555 tclk_prepare, ddr2ns(tclk_prepare));
1556
1557 /* program timings */
1558
1559 r = dsi_read_reg(DSI_DSIPHY_CFG0);
1560 r = FLD_MOD(r, ths_prepare, 31, 24);
1561 r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16);
1562 r = FLD_MOD(r, ths_trail, 15, 8);
1563 r = FLD_MOD(r, ths_exit, 7, 0);
1564 dsi_write_reg(DSI_DSIPHY_CFG0, r);
1565
1566 r = dsi_read_reg(DSI_DSIPHY_CFG1);
1567 r = FLD_MOD(r, tlpx_half, 22, 16);
1568 r = FLD_MOD(r, tclk_trail, 15, 8);
1569 r = FLD_MOD(r, tclk_zero, 7, 0);
1570 dsi_write_reg(DSI_DSIPHY_CFG1, r);
1571
1572 r = dsi_read_reg(DSI_DSIPHY_CFG2);
1573 r = FLD_MOD(r, tclk_prepare, 7, 0);
1574 dsi_write_reg(DSI_DSIPHY_CFG2, r);
1575}
1576
1577
1578static int dsi_complexio_init(struct omap_dss_device *dssdev)
1579{
1580 int r = 0;
1581
1582 DSSDBG("dsi_complexio_init\n");
1583
1584 /* CIO_CLK_ICG, enable L3 clk to CIO */
1585 REG_FLD_MOD(DSI_CLK_CTRL, 1, 14, 14);
1586
1587 /* A dummy read using the SCP interface to any DSIPHY register is
1588 * required after DSIPHY reset to complete the reset of the DSI complex
1589 * I/O. */
1590 dsi_read_reg(DSI_DSIPHY_CFG5);
1591
1592 if (wait_for_bit_change(DSI_DSIPHY_CFG5, 30, 1) != 1) {
1593 DSSERR("ComplexIO PHY not coming out of reset.\n");
1594 r = -ENODEV;
1595 goto err;
1596 }
1597
1598 dsi_complexio_config(dssdev);
1599
1600 r = dsi_complexio_power(DSI_COMPLEXIO_POWER_ON);
1601
1602 if (r)
1603 goto err;
1604
1605 if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 29, 1) != 1) {
1606 DSSERR("ComplexIO not coming out of reset.\n");
1607 r = -ENODEV;
1608 goto err;
1609 }
1610
1611 if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 21, 1) != 1) {
1612 DSSERR("ComplexIO LDO power down.\n");
1613 r = -ENODEV;
1614 goto err;
1615 }
1616
1617 dsi_complexio_timings();
1618
1619 /*
1620 The configuration of the DSI complex I/O (number of data lanes,
1621 position, differential order) should not be changed while
1622 DSS.DSI_CLK_CRTRL[20] LP_CLK_ENABLE bit is set to 1. For the
1623 hardware to recognize a new configuration of the complex I/O (done
1624 in DSS.DSI_COMPLEXIO_CFG1 register), it is recommended to follow
1625 this sequence: First set the DSS.DSI_CTRL[0] IF_EN bit to 1, next
1626 reset the DSS.DSI_CTRL[0] IF_EN to 0, then set DSS.DSI_CLK_CTRL[20]
1627 LP_CLK_ENABLE to 1, and finally, set again the DSS.DSI_CTRL[0] IF_EN
 1628 bit to 1. If the sequence is not followed, the DSI complex I/O
1629 configuration is undetermined.
1630 */
1631 dsi_if_enable(1);
1632 dsi_if_enable(0);
1633 REG_FLD_MOD(DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */
1634 dsi_if_enable(1);
1635 dsi_if_enable(0);
1636
1637 DSSDBG("CIO init done\n");
1638err:
1639 return r;
1640}
1641
1642static void dsi_complexio_uninit(void)
1643{
1644 dsi_complexio_power(DSI_COMPLEXIO_POWER_OFF);
1645}
1646
1647static int _dsi_wait_reset(void)
1648{
 1649 int t = 0;
 1650
1651 while (REG_GET(DSI_SYSSTATUS, 0, 0) == 0) {
 1652 if (++t > 5) {
 1653 DSSERR("soft reset failed\n");
1654 return -ENODEV;
1655 }
1656 udelay(1);
1657 }
1658
1659 return 0;
1660}
1661
1662static int _dsi_reset(void)
1663{
1664 /* Soft reset */
1665 REG_FLD_MOD(DSI_SYSCONFIG, 1, 1, 1);
1666 return _dsi_wait_reset();
1667}
1668
1669static void dsi_reset_tx_fifo(int channel)
1670{
1671 u32 mask;
1672 u32 l;
1673
1674 /* set fifosize of the channel to 0, then return the old size */
1675 l = dsi_read_reg(DSI_TX_FIFO_VC_SIZE);
1676
1677 mask = FLD_MASK((8 * channel) + 7, (8 * channel) + 4);
1678 dsi_write_reg(DSI_TX_FIFO_VC_SIZE, l & ~mask);
1679
1680 dsi_write_reg(DSI_TX_FIFO_VC_SIZE, l);
1681}
1682
1683static void dsi_config_tx_fifo(enum fifo_size size1, enum fifo_size size2,
1684 enum fifo_size size3, enum fifo_size size4)
1685{
1686 u32 r = 0;
1687 int add = 0;
1688 int i;
1689
1690 dsi.vc[0].fifo_size = size1;
1691 dsi.vc[1].fifo_size = size2;
1692 dsi.vc[2].fifo_size = size3;
1693 dsi.vc[3].fifo_size = size4;
1694
1695 for (i = 0; i < 4; i++) {
1696 u8 v;
1697 int size = dsi.vc[i].fifo_size;
1698
1699 if (add + size > 4) {
1700 DSSERR("Illegal FIFO configuration\n");
1701 BUG();
1702 }
1703
1704 v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
1705 r |= v << (8 * i);
1706 /*DSSDBG("TX FIFO vc %d: size %d, add %d\n", i, size, add); */
1707 add += size;
1708 }
1709
1710 dsi_write_reg(DSI_TX_FIFO_VC_SIZE, r);
1711}
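/* The enum fifo_size values count 32-byte units drawn from a shared pool of
 * four, so e.g. dsi_config_tx_fifo(DSI_FIFO_SIZE_128, DSI_FIFO_SIZE_0,
 * DSI_FIFO_SIZE_0, DSI_FIFO_SIZE_0) hands the whole TX FIFO to VC0, while
 * any combination summing to more than four units hits the BUG() above. */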
1712
1713static void dsi_config_rx_fifo(enum fifo_size size1, enum fifo_size size2,
1714 enum fifo_size size3, enum fifo_size size4)
1715{
1716 u32 r = 0;
1717 int add = 0;
1718 int i;
1719
1720 dsi.vc[0].fifo_size = size1;
1721 dsi.vc[1].fifo_size = size2;
1722 dsi.vc[2].fifo_size = size3;
1723 dsi.vc[3].fifo_size = size4;
1724
1725 for (i = 0; i < 4; i++) {
1726 u8 v;
1727 int size = dsi.vc[i].fifo_size;
1728
1729 if (add + size > 4) {
1730 DSSERR("Illegal FIFO configuration\n");
1731 BUG();
1732 }
1733
1734 v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
1735 r |= v << (8 * i);
1736 /*DSSDBG("RX FIFO vc %d: size %d, add %d\n", i, size, add); */
1737 add += size;
1738 }
1739
1740 dsi_write_reg(DSI_RX_FIFO_VC_SIZE, r);
1741}
1742
1743static int dsi_force_tx_stop_mode_io(void)
1744{
1745 u32 r;
1746
1747 r = dsi_read_reg(DSI_TIMING1);
1748 r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
1749 dsi_write_reg(DSI_TIMING1, r);
1750
1751 if (wait_for_bit_change(DSI_TIMING1, 15, 0) != 0) {
1752 DSSERR("TX_STOP bit not going down\n");
1753 return -EIO;
1754 }
1755
1756 return 0;
1757}
1758
1759static void dsi_vc_print_status(int channel)
1760{
1761 u32 r;
1762
1763 r = dsi_read_reg(DSI_VC_CTRL(channel));
1764 DSSDBG("vc %d: TX_FIFO_NOT_EMPTY %d, BTA_EN %d, VC_BUSY %d, "
1765 "TX_FIFO_FULL %d, RX_FIFO_NOT_EMPTY %d, ",
1766 channel,
1767 FLD_GET(r, 5, 5),
1768 FLD_GET(r, 6, 6),
1769 FLD_GET(r, 15, 15),
1770 FLD_GET(r, 16, 16),
1771 FLD_GET(r, 20, 20));
1772
1773 r = dsi_read_reg(DSI_TX_FIFO_VC_EMPTINESS);
1774 DSSDBG("EMPTINESS %d\n", (r >> (8 * channel)) & 0xff);
1775}
1776
1777static int dsi_vc_enable(int channel, bool enable)
1778{
1779 if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO)
1780 DSSDBG("dsi_vc_enable channel %d, enable %d\n",
1781 channel, enable);
1782
1783 enable = enable ? 1 : 0;
1784
1785 REG_FLD_MOD(DSI_VC_CTRL(channel), enable, 0, 0);
1786
1787 if (wait_for_bit_change(DSI_VC_CTRL(channel), 0, enable) != enable) {
1788 DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
1789 return -EIO;
1790 }
1791
1792 return 0;
1793}
1794
1795static void dsi_vc_initial_config(int channel)
1796{
1797 u32 r;
1798
1799 DSSDBGF("%d", channel);
1800
1801 r = dsi_read_reg(DSI_VC_CTRL(channel));
1802
1803 if (FLD_GET(r, 15, 15)) /* VC_BUSY */
1804 DSSERR("VC(%d) busy when trying to configure it!\n",
1805 channel);
1806
1807 r = FLD_MOD(r, 0, 1, 1); /* SOURCE, 0 = L4 */
1808 r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN */
1809 r = FLD_MOD(r, 0, 3, 3); /* BTA_LONG_EN */
1810 r = FLD_MOD(r, 0, 4, 4); /* MODE, 0 = command */
1811 r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */
1812 r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */
1813 r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */
1814
1815 r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
1816 r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
1817
1818 dsi_write_reg(DSI_VC_CTRL(channel), r);
1819
1820 dsi.vc[channel].mode = DSI_VC_MODE_L4;
1821}
1822
1823static void dsi_vc_config_l4(int channel)
1824{
1825 if (dsi.vc[channel].mode == DSI_VC_MODE_L4)
1826 return;
1827
1828 DSSDBGF("%d", channel);
1829
1830 dsi_vc_enable(channel, 0);
1831
1832 if (REG_GET(DSI_VC_CTRL(channel), 15, 15)) /* VC_BUSY */
1833 DSSERR("vc(%d) busy when trying to config for L4\n", channel);
1834
1835 REG_FLD_MOD(DSI_VC_CTRL(channel), 0, 1, 1); /* SOURCE, 0 = L4 */
1836
1837 dsi_vc_enable(channel, 1);
1838
1839 dsi.vc[channel].mode = DSI_VC_MODE_L4;
1840}
1841
1842static void dsi_vc_config_vp(int channel)
1843{
1844 if (dsi.vc[channel].mode == DSI_VC_MODE_VP)
1845 return;
1846
1847 DSSDBGF("%d", channel);
1848
1849 dsi_vc_enable(channel, 0);
1850
1851 if (REG_GET(DSI_VC_CTRL(channel), 15, 15)) /* VC_BUSY */
1852 DSSERR("vc(%d) busy when trying to config for VP\n", channel);
1853
1854 REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 1, 1); /* SOURCE, 1 = video port */
1855
1856 dsi_vc_enable(channel, 1);
1857
1858 dsi.vc[channel].mode = DSI_VC_MODE_VP;
1859}
1860
1861
1862static void dsi_vc_enable_hs(int channel, bool enable)
1863{
1864 DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);
1865
1866 dsi_vc_enable(channel, 0);
1867 dsi_if_enable(0);
1868
1869 REG_FLD_MOD(DSI_VC_CTRL(channel), enable, 9, 9);
1870
1871 dsi_vc_enable(channel, 1);
1872 dsi_if_enable(1);
1873
1874 dsi_force_tx_stop_mode_io();
1875}
1876
1877static void dsi_vc_flush_long_data(int channel)
1878{
1879 while (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
1880 u32 val;
1881 val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
1882 DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
1883 (val >> 0) & 0xff,
1884 (val >> 8) & 0xff,
1885 (val >> 16) & 0xff,
1886 (val >> 24) & 0xff);
1887 }
1888}
1889
1890static void dsi_show_rx_ack_with_err(u16 err)
1891{
1892 DSSERR("\tACK with ERROR (%#x):\n", err);
1893 if (err & (1 << 0))
1894 DSSERR("\t\tSoT Error\n");
1895 if (err & (1 << 1))
1896 DSSERR("\t\tSoT Sync Error\n");
1897 if (err & (1 << 2))
1898 DSSERR("\t\tEoT Sync Error\n");
1899 if (err & (1 << 3))
1900 DSSERR("\t\tEscape Mode Entry Command Error\n");
1901 if (err & (1 << 4))
1902 DSSERR("\t\tLP Transmit Sync Error\n");
1903 if (err & (1 << 5))
1904 DSSERR("\t\tHS Receive Timeout Error\n");
1905 if (err & (1 << 6))
1906 DSSERR("\t\tFalse Control Error\n");
1907 if (err & (1 << 7))
1908 DSSERR("\t\t(reserved7)\n");
1909 if (err & (1 << 8))
1910 DSSERR("\t\tECC Error, single-bit (corrected)\n");
1911 if (err & (1 << 9))
1912 DSSERR("\t\tECC Error, multi-bit (not corrected)\n");
1913 if (err & (1 << 10))
1914 DSSERR("\t\tChecksum Error\n");
1915 if (err & (1 << 11))
1916 DSSERR("\t\tData type not recognized\n");
1917 if (err & (1 << 12))
1918 DSSERR("\t\tInvalid VC ID\n");
1919 if (err & (1 << 13))
1920 DSSERR("\t\tInvalid Transmission Length\n");
1921 if (err & (1 << 14))
1922 DSSERR("\t\t(reserved14)\n");
1923 if (err & (1 << 15))
1924 DSSERR("\t\tDSI Protocol Violation\n");
1925}
1926
1927static u16 dsi_vc_flush_receive_data(int channel)
1928{
1929 /* RX_FIFO_NOT_EMPTY */
1930 while (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
1931 u32 val;
1932 u8 dt;
1933 val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
1934 DSSDBG("\trawval %#08x\n", val);
1935 dt = FLD_GET(val, 5, 0);
1936 if (dt == DSI_DT_RX_ACK_WITH_ERR) {
1937 u16 err = FLD_GET(val, 23, 8);
1938 dsi_show_rx_ack_with_err(err);
1939 } else if (dt == DSI_DT_RX_SHORT_READ_1) {
1940 DSSDBG("\tDCS short response, 1 byte: %#x\n",
1941 FLD_GET(val, 23, 8));
1942 } else if (dt == DSI_DT_RX_SHORT_READ_2) {
1943 DSSDBG("\tDCS short response, 2 byte: %#x\n",
1944 FLD_GET(val, 23, 8));
1945 } else if (dt == DSI_DT_RX_DCS_LONG_READ) {
1946 DSSDBG("\tDCS long response, len %d\n",
1947 FLD_GET(val, 23, 8));
1948 dsi_vc_flush_long_data(channel);
1949 } else {
1950 DSSERR("\tunknown datatype 0x%02x\n", dt);
1951 }
1952 }
1953 return 0;
1954}
1955
1956static int dsi_vc_send_bta(int channel)
1957{
1958 if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO &&
1959 (dsi.debug_write || dsi.debug_read))
1960 DSSDBG("dsi_vc_send_bta %d\n", channel);
1961
1962 WARN_ON(!mutex_is_locked(&dsi.bus_lock));
1963
1964 if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { /* RX_FIFO_NOT_EMPTY */
1965 DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
1966 dsi_vc_flush_receive_data(channel);
1967 }
1968
1969 REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
1970
1971 return 0;
1972}
1973
1974int dsi_vc_send_bta_sync(int channel)
1975{
1976 int r = 0;
1977 u32 err;
1978
1979 INIT_COMPLETION(dsi.bta_completion);
1980
1981 dsi_vc_enable_bta_irq(channel);
1982
1983 r = dsi_vc_send_bta(channel);
1984 if (r)
1985 goto err;
1986
1987 if (wait_for_completion_timeout(&dsi.bta_completion,
1988 msecs_to_jiffies(500)) == 0) {
1989 DSSERR("Failed to receive BTA\n");
1990 r = -EIO;
1991 goto err;
1992 }
1993
1994 err = dsi_get_errors();
1995 if (err) {
1996 DSSERR("Error while sending BTA: %x\n", err);
1997 r = -EIO;
1998 goto err;
1999 }
2000err:
2001 dsi_vc_disable_bta_irq(channel);
2002
2003 return r;
2004}
2005EXPORT_SYMBOL(dsi_vc_send_bta_sync);
2006
2007static inline void dsi_vc_write_long_header(int channel, u8 data_type,
2008 u16 len, u8 ecc)
2009{
2010 u32 val;
2011 u8 data_id;
2012
2013 WARN_ON(!mutex_is_locked(&dsi.bus_lock));
2014
2015 /*data_id = data_type | channel << 6; */
2016 data_id = data_type | dsi.vc[channel].dest_per << 6;
2017
2018 val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
2019 FLD_VAL(ecc, 31, 24);
2020
2021 dsi_write_reg(DSI_VC_LONG_PACKET_HEADER(channel), val);
2022}
2023
2024static inline void dsi_vc_write_long_payload(int channel,
2025 u8 b1, u8 b2, u8 b3, u8 b4)
2026{
2027 u32 val;
2028
2029 val = b4 << 24 | b3 << 16 | b2 << 8 | b1 << 0;
2030
2031/* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
2032 b1, b2, b3, b4, val); */
2033
2034 dsi_write_reg(DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
2035}
2036
2037static int dsi_vc_send_long(int channel, u8 data_type, u8 *data, u16 len,
2038 u8 ecc)
2039{
2040 /*u32 val; */
2041 int i;
2042 u8 *p;
2043 int r = 0;
2044 u8 b1, b2, b3, b4;
2045
2046 if (dsi.debug_write)
2047 DSSDBG("dsi_vc_send_long, %d bytes\n", len);
2048
2049 /* len + header */
2050 if (dsi.vc[channel].fifo_size * 32 * 4 < len + 4) {
2051 DSSERR("unable to send long packet: packet too long.\n");
2052 return -EINVAL;
2053 }
2054
2055 dsi_vc_config_l4(channel);
2056
2057 dsi_vc_write_long_header(channel, data_type, len, ecc);
2058
2059 /*dsi_vc_print_status(0); */
2060
2061 p = data;
2062 for (i = 0; i < len >> 2; i++) {
2063 if (dsi.debug_write)
2064 DSSDBG("\tsending full packet %d\n", i);
2065 /*dsi_vc_print_status(0); */
2066
2067 b1 = *p++;
2068 b2 = *p++;
2069 b3 = *p++;
2070 b4 = *p++;
2071
2072 dsi_vc_write_long_payload(channel, b1, b2, b3, b4);
2073 }
2074
2075 i = len % 4;
2076 if (i) {
2077 b1 = 0; b2 = 0; b3 = 0;
2078
2079 if (dsi.debug_write)
2080 DSSDBG("\tsending remainder bytes %d\n", i);
2081
2082 switch (i) {
2083 case 3:
2084 b1 = *p++;
2085 b2 = *p++;
2086 b3 = *p++;
2087 break;
2088 case 2:
2089 b1 = *p++;
2090 b2 = *p++;
2091 break;
2092 case 1:
2093 b1 = *p++;
2094 break;
2095 }
2096
2097 dsi_vc_write_long_payload(channel, b1, b2, b3, 0);
2098 }
2099
2100 return r;
2101}
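
/*
 * Illustrative sketch of the packing above (not part of the driver): a
 * six-byte payload sent with dsi_vc_send_long() produces one full payload
 * word followed by a remainder word with the unused lanes zeroed, e.g. for
 * data = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 } the FIFO sees 0x04030201
 * and then 0x00000605.
 */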
2102
2103static int dsi_vc_send_short(int channel, u8 data_type, u16 data, u8 ecc)
2104{
2105 u32 r;
2106 u8 data_id;
2107
2108 WARN_ON(!mutex_is_locked(&dsi.bus_lock));
2109
2110 if (dsi.debug_write)
2111 DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
2112 channel,
2113 data_type, data & 0xff, (data >> 8) & 0xff);
2114
2115 dsi_vc_config_l4(channel);
2116
2117 if (FLD_GET(dsi_read_reg(DSI_VC_CTRL(channel)), 16, 16)) {
2118 DSSERR("ERROR FIFO FULL, aborting transfer\n");
2119 return -EINVAL;
2120 }
2121
2122	data_id = data_type | dsi.vc[channel].dest_per << 6;
2123
2124 r = (data_id << 0) | (data << 8) | (ecc << 24);
2125
2126 dsi_write_reg(DSI_VC_SHORT_PACKET_HEADER(channel), r);
2127
2128 return 0;
2129}
2130
2131int dsi_vc_send_null(int channel)
2132{
2133 u8 nullpkg[] = {0, 0, 0, 0};
2134	return dsi_vc_send_long(channel, DSI_DT_NULL_PACKET, nullpkg, 4, 0);
2135}
2136EXPORT_SYMBOL(dsi_vc_send_null);
2137
2138int dsi_vc_dcs_write_nosync(int channel, u8 *data, int len)
2139{
2140 int r;
2141
2142 BUG_ON(len == 0);
2143
2144 if (len == 1) {
2145 r = dsi_vc_send_short(channel, DSI_DT_DCS_SHORT_WRITE_0,
2146 data[0], 0);
2147 } else if (len == 2) {
2148 r = dsi_vc_send_short(channel, DSI_DT_DCS_SHORT_WRITE_1,
2149 data[0] | (data[1] << 8), 0);
2150 } else {
2151 /* 0x39 = DCS Long Write */
2152 r = dsi_vc_send_long(channel, DSI_DT_DCS_LONG_WRITE,
2153 data, len, 0);
2154 }
2155
2156 return r;
2157}
2158EXPORT_SYMBOL(dsi_vc_dcs_write_nosync);
2159
2160int dsi_vc_dcs_write(int channel, u8 *data, int len)
2161{
2162 int r;
2163
2164 r = dsi_vc_dcs_write_nosync(channel, data, len);
2165 if (r)
2166 return r;
2167
2168 r = dsi_vc_send_bta_sync(channel);
2169
2170 return r;
2171}
2172EXPORT_SYMBOL(dsi_vc_dcs_write);
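
/*
 * Usage sketch (illustrative only; a panel driver is assumed to hold the
 * DSI bus lock around these calls, the send helpers WARN otherwise): a
 * one-byte buffer becomes a DCS short write, longer buffers a DCS long
 * write, and the trailing BTA checks for errors from the peripheral.
 *
 *	u8 exit_sleep = 0x11;			(MIPI DCS exit_sleep_mode)
 *	u8 pixfmt[2] = { 0x3a, 0x77 };		(set_pixel_format, example value)
 *
 *	r = dsi_vc_dcs_write(0, &exit_sleep, 1);
 *	if (!r)
 *		r = dsi_vc_dcs_write(0, pixfmt, 2);
 */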
2173
2174int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
2175{
2176 u32 val;
2177 u8 dt;
2178 int r;
2179
2180 if (dsi.debug_read)
2181		DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %x)\n", channel, dcs_cmd);
2182
2183 r = dsi_vc_send_short(channel, DSI_DT_DCS_READ, dcs_cmd, 0);
2184 if (r)
2185 return r;
2186
2187 r = dsi_vc_send_bta_sync(channel);
2188 if (r)
2189 return r;
2190
2191 /* RX_FIFO_NOT_EMPTY */
2192 if (REG_GET(DSI_VC_CTRL(channel), 20, 20) == 0) {
2193 DSSERR("RX fifo empty when trying to read.\n");
2194 return -EIO;
2195 }
2196
2197 val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
2198 if (dsi.debug_read)
2199 DSSDBG("\theader: %08x\n", val);
2200 dt = FLD_GET(val, 5, 0);
2201 if (dt == DSI_DT_RX_ACK_WITH_ERR) {
2202 u16 err = FLD_GET(val, 23, 8);
2203 dsi_show_rx_ack_with_err(err);
2204 return -EIO;
2205
2206 } else if (dt == DSI_DT_RX_SHORT_READ_1) {
2207 u8 data = FLD_GET(val, 15, 8);
2208 if (dsi.debug_read)
2209 DSSDBG("\tDCS short response, 1 byte: %02x\n", data);
2210
2211 if (buflen < 1)
2212 return -EIO;
2213
2214 buf[0] = data;
2215
2216 return 1;
2217 } else if (dt == DSI_DT_RX_SHORT_READ_2) {
2218 u16 data = FLD_GET(val, 23, 8);
2219 if (dsi.debug_read)
2220 DSSDBG("\tDCS short response, 2 byte: %04x\n", data);
2221
2222 if (buflen < 2)
2223 return -EIO;
2224
2225 buf[0] = data & 0xff;
2226 buf[1] = (data >> 8) & 0xff;
2227
2228 return 2;
2229 } else if (dt == DSI_DT_RX_DCS_LONG_READ) {
2230 int w;
2231 int len = FLD_GET(val, 23, 8);
2232 if (dsi.debug_read)
2233 DSSDBG("\tDCS long response, len %d\n", len);
2234
2235 if (len > buflen)
2236 return -EIO;
2237
2238 /* two byte checksum ends the packet, not included in len */
2239 for (w = 0; w < len + 2;) {
2240 int b;
2241 val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
2242 if (dsi.debug_read)
2243 DSSDBG("\t\t%02x %02x %02x %02x\n",
2244 (val >> 0) & 0xff,
2245 (val >> 8) & 0xff,
2246 (val >> 16) & 0xff,
2247 (val >> 24) & 0xff);
2248
2249 for (b = 0; b < 4; ++b) {
2250 if (w < len)
2251 buf[w] = (val >> (b * 8)) & 0xff;
2252 /* we discard the 2 byte checksum */
2253 ++w;
2254 }
2255 }
2256
2257 return len;
2258
2259 } else {
2260 DSSERR("\tunknown datatype 0x%02x\n", dt);
2261 return -EIO;
2262 }
2263}
2264EXPORT_SYMBOL(dsi_vc_dcs_read);
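
/*
 * Usage sketch (illustrative only): reading a one-byte DCS register such
 * as get_power_mode (0x0a). The return value is the number of bytes
 * received, so callers check it against the expected length.
 *
 *	u8 mode;
 *
 *	r = dsi_vc_dcs_read(0, 0x0a, &mode, 1);
 *	if (r != 1)
 *		return r < 0 ? r : -EIO;
 */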
2265
2266
2267int dsi_vc_set_max_rx_packet_size(int channel, u16 len)
2268{
2269 int r;
2270 r = dsi_vc_send_short(channel, DSI_DT_SET_MAX_RET_PKG_SIZE,
2271 len, 0);
2272
2273 if (r)
2274 return r;
2275
2276 r = dsi_vc_send_bta_sync(channel);
2277
2278 return r;
2279}
2280EXPORT_SYMBOL(dsi_vc_set_max_rx_packet_size);
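
/*
 * Usage sketch (illustrative only): reads longer than two bytes need the
 * peripheral's maximum return packet size raised first, otherwise the
 * response is truncated (the MIPI default is a single byte). The command
 * 0x04 and the three-byte length below are example values only.
 *
 *	u8 id[3];
 *
 *	r = dsi_vc_set_max_rx_packet_size(0, 3);
 *	if (!r)
 *		r = dsi_vc_dcs_read(0, 0x04, id, 3);
 */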
2281
2282static void dsi_set_lp_rx_timeout(unsigned long ns)
2283{
2284 u32 r;
2285 unsigned x4, x16;
2286 unsigned long fck;
2287 unsigned long ticks;
2288
2289 /* ticks in DSI_FCK */
2290
2291 fck = dsi_fclk_rate();
2292 ticks = (fck / 1000 / 1000) * ns / 1000;
2293 x4 = 0;
2294 x16 = 0;
2295
2296 if (ticks > 0x1fff) {
2297 ticks = (fck / 1000 / 1000) * ns / 1000 / 4;
2298 x4 = 1;
2299 x16 = 0;
2300 }
2301
2302 if (ticks > 0x1fff) {
2303 ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
2304 x4 = 0;
2305 x16 = 1;
2306 }
2307
2308 if (ticks > 0x1fff) {
2309 ticks = (fck / 1000 / 1000) * ns / 1000 / (4 * 16);
2310 x4 = 1;
2311 x16 = 1;
2312 }
2313
2314 if (ticks > 0x1fff) {
2315 DSSWARN("LP_TX_TO over limit, setting it to max\n");
2316 ticks = 0x1fff;
2317 x4 = 1;
2318 x16 = 1;
2319 }
2320
2321 r = dsi_read_reg(DSI_TIMING2);
2322 r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */
2323 r = FLD_MOD(r, x16, 14, 14); /* LP_RX_TO_X16 */
2324 r = FLD_MOD(r, x4, 13, 13); /* LP_RX_TO_X4 */
2325 r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */
2326 dsi_write_reg(DSI_TIMING2, r);
2327
2328 DSSDBG("LP_RX_TO %lu ns (%#lx ticks%s%s)\n",
2329 (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
2330 (fck / 1000 / 1000),
2331 ticks, x4 ? " x4" : "", x16 ? " x16" : "");
2332}
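
/*
 * Worked example of the tick conversion above, with an assumed (purely
 * illustrative) DSI functional clock of 86 MHz: a 48000 ns timeout gives
 * ticks = 86 * 48000 / 1000 = 4128, which fits the 13-bit counter, so both
 * the x4 and x16 prescalers stay disabled.
 */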
2333
2334static void dsi_set_ta_timeout(unsigned long ns)
2335{
2336 u32 r;
2337 unsigned x8, x16;
2338 unsigned long fck;
2339 unsigned long ticks;
2340
2341 /* ticks in DSI_FCK */
2342 fck = dsi_fclk_rate();
2343 ticks = (fck / 1000 / 1000) * ns / 1000;
2344 x8 = 0;
2345 x16 = 0;
2346
2347 if (ticks > 0x1fff) {
2348 ticks = (fck / 1000 / 1000) * ns / 1000 / 8;
2349 x8 = 1;
2350 x16 = 0;
2351 }
2352
2353 if (ticks > 0x1fff) {
2354 ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
2355 x8 = 0;
2356 x16 = 1;
2357 }
2358
2359 if (ticks > 0x1fff) {
2360 ticks = (fck / 1000 / 1000) * ns / 1000 / (8 * 16);
2361 x8 = 1;
2362 x16 = 1;
2363 }
2364
2365 if (ticks > 0x1fff) {
2366 DSSWARN("TA_TO over limit, setting it to max\n");
2367 ticks = 0x1fff;
2368 x8 = 1;
2369 x16 = 1;
2370 }
2371
2372 r = dsi_read_reg(DSI_TIMING1);
2373 r = FLD_MOD(r, 1, 31, 31); /* TA_TO */
2374 r = FLD_MOD(r, x16, 30, 30); /* TA_TO_X16 */
2375 r = FLD_MOD(r, x8, 29, 29); /* TA_TO_X8 */
2376 r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */
2377 dsi_write_reg(DSI_TIMING1, r);
2378
2379 DSSDBG("TA_TO %lu ns (%#lx ticks%s%s)\n",
2380 (ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1) * 1000) /
2381 (fck / 1000 / 1000),
2382 ticks, x8 ? " x8" : "", x16 ? " x16" : "");
2383}
2384
2385static void dsi_set_stop_state_counter(unsigned long ns)
2386{
2387 u32 r;
2388 unsigned x4, x16;
2389 unsigned long fck;
2390 unsigned long ticks;
2391
2392 /* ticks in DSI_FCK */
2393
2394 fck = dsi_fclk_rate();
2395 ticks = (fck / 1000 / 1000) * ns / 1000;
2396 x4 = 0;
2397 x16 = 0;
2398
2399 if (ticks > 0x1fff) {
2400 ticks = (fck / 1000 / 1000) * ns / 1000 / 4;
2401 x4 = 1;
2402 x16 = 0;
2403 }
2404
2405 if (ticks > 0x1fff) {
2406 ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
2407 x4 = 0;
2408 x16 = 1;
2409 }
2410
2411 if (ticks > 0x1fff) {
2412 ticks = (fck / 1000 / 1000) * ns / 1000 / (4 * 16);
2413 x4 = 1;
2414 x16 = 1;
2415 }
2416
2417 if (ticks > 0x1fff) {
2418 DSSWARN("STOP_STATE_COUNTER_IO over limit, "
2419 "setting it to max\n");
2420 ticks = 0x1fff;
2421 x4 = 1;
2422 x16 = 1;
2423 }
2424
2425 r = dsi_read_reg(DSI_TIMING1);
2426 r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
2427 r = FLD_MOD(r, x16, 14, 14); /* STOP_STATE_X16_IO */
2428 r = FLD_MOD(r, x4, 13, 13); /* STOP_STATE_X4_IO */
2429 r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */
2430 dsi_write_reg(DSI_TIMING1, r);
2431
2432 DSSDBG("STOP_STATE_COUNTER %lu ns (%#lx ticks%s%s)\n",
2433 (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
2434 (fck / 1000 / 1000),
2435 ticks, x4 ? " x4" : "", x16 ? " x16" : "");
2436}
2437
2438static void dsi_set_hs_tx_timeout(unsigned long ns)
2439{
2440 u32 r;
2441 unsigned x4, x16;
2442 unsigned long fck;
2443 unsigned long ticks;
2444
2445 /* ticks in TxByteClkHS */
2446
2447 fck = dsi_get_txbyteclkhs();
2448 ticks = (fck / 1000 / 1000) * ns / 1000;
2449 x4 = 0;
2450 x16 = 0;
2451
2452 if (ticks > 0x1fff) {
2453 ticks = (fck / 1000 / 1000) * ns / 1000 / 4;
2454 x4 = 1;
2455 x16 = 0;
2456 }
2457
2458 if (ticks > 0x1fff) {
2459 ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
2460 x4 = 0;
2461 x16 = 1;
2462 }
2463
2464 if (ticks > 0x1fff) {
2465 ticks = (fck / 1000 / 1000) * ns / 1000 / (4 * 16);
2466 x4 = 1;
2467 x16 = 1;
2468 }
2469
2470 if (ticks > 0x1fff) {
2471 DSSWARN("HS_TX_TO over limit, setting it to max\n");
2472 ticks = 0x1fff;
2473 x4 = 1;
2474 x16 = 1;
2475 }
2476
2477 r = dsi_read_reg(DSI_TIMING2);
2478 r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */
2479 r = FLD_MOD(r, x16, 30, 30); /* HS_TX_TO_X16 */
2480 r = FLD_MOD(r, x4, 29, 29); /* HS_TX_TO_X8 (4 really) */
2481 r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */
2482 dsi_write_reg(DSI_TIMING2, r);
2483
2484 DSSDBG("HS_TX_TO %lu ns (%#lx ticks%s%s)\n",
2485 (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
2486 (fck / 1000 / 1000),
2487 ticks, x4 ? " x4" : "", x16 ? " x16" : "");
2488}
2489static int dsi_proto_config(struct omap_dss_device *dssdev)
2490{
2491 u32 r;
2492 int buswidth = 0;
2493
2494 dsi_config_tx_fifo(DSI_FIFO_SIZE_128,
2495 DSI_FIFO_SIZE_0,
2496 DSI_FIFO_SIZE_0,
2497 DSI_FIFO_SIZE_0);
2498
2499 dsi_config_rx_fifo(DSI_FIFO_SIZE_128,
2500 DSI_FIFO_SIZE_0,
2501 DSI_FIFO_SIZE_0,
2502 DSI_FIFO_SIZE_0);
2503
2504 /* XXX what values for the timeouts? */
2505 dsi_set_stop_state_counter(1000);
2506 dsi_set_ta_timeout(6400000);
2507 dsi_set_lp_rx_timeout(48000);
2508 dsi_set_hs_tx_timeout(1000000);
2509
2510 switch (dssdev->ctrl.pixel_size) {
2511 case 16:
2512 buswidth = 0;
2513 break;
2514 case 18:
2515 buswidth = 1;
2516 break;
2517 case 24:
2518 buswidth = 2;
2519 break;
2520 default:
2521 BUG();
2522 }
2523
2524 r = dsi_read_reg(DSI_CTRL);
2525 r = FLD_MOD(r, 1, 1, 1); /* CS_RX_EN */
2526 r = FLD_MOD(r, 1, 2, 2); /* ECC_RX_EN */
2527 r = FLD_MOD(r, 1, 3, 3); /* TX_FIFO_ARBITRATION */
2528 r = FLD_MOD(r, 1, 4, 4); /* VP_CLK_RATIO, always 1, see errata*/
2529 r = FLD_MOD(r, buswidth, 7, 6); /* VP_DATA_BUS_WIDTH */
2530 r = FLD_MOD(r, 0, 8, 8); /* VP_CLK_POL */
2531 r = FLD_MOD(r, 2, 13, 12); /* LINE_BUFFER, 2 lines */
2532 r = FLD_MOD(r, 1, 14, 14); /* TRIGGER_RESET_MODE */
2533 r = FLD_MOD(r, 1, 19, 19); /* EOT_ENABLE */
2534 r = FLD_MOD(r, 1, 24, 24); /* DCS_CMD_ENABLE */
2535 r = FLD_MOD(r, 0, 25, 25); /* DCS_CMD_CODE, 1=start, 0=continue */
2536
2537 dsi_write_reg(DSI_CTRL, r);
2538
2539 dsi_vc_initial_config(0);
2540
2541 /* set all vc targets to peripheral 0 */
2542 dsi.vc[0].dest_per = 0;
2543 dsi.vc[1].dest_per = 0;
2544 dsi.vc[2].dest_per = 0;
2545 dsi.vc[3].dest_per = 0;
2546
2547 return 0;
2548}
2549
2550static void dsi_proto_timings(struct omap_dss_device *dssdev)
2551{
2552 unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail;
2553 unsigned tclk_pre, tclk_post;
2554 unsigned ths_prepare, ths_prepare_ths_zero, ths_zero;
2555 unsigned ths_trail, ths_exit;
2556 unsigned ddr_clk_pre, ddr_clk_post;
2557 unsigned enter_hs_mode_lat, exit_hs_mode_lat;
2558 unsigned ths_eot;
2559 u32 r;
2560
2561 r = dsi_read_reg(DSI_DSIPHY_CFG0);
2562 ths_prepare = FLD_GET(r, 31, 24);
2563 ths_prepare_ths_zero = FLD_GET(r, 23, 16);
2564 ths_zero = ths_prepare_ths_zero - ths_prepare;
2565 ths_trail = FLD_GET(r, 15, 8);
2566 ths_exit = FLD_GET(r, 7, 0);
2567
2568 r = dsi_read_reg(DSI_DSIPHY_CFG1);
2569 tlpx = FLD_GET(r, 22, 16) * 2;
2570 tclk_trail = FLD_GET(r, 15, 8);
2571 tclk_zero = FLD_GET(r, 7, 0);
2572
2573 r = dsi_read_reg(DSI_DSIPHY_CFG2);
2574 tclk_prepare = FLD_GET(r, 7, 0);
2575
2576 /* min 8*UI */
2577 tclk_pre = 20;
2578 /* min 60ns + 52*UI */
2579 tclk_post = ns2ddr(60) + 26;
2580
2581 /* ths_eot is 2 for 2 datalanes and 4 for 1 datalane */
2582 if (dssdev->phy.dsi.data1_lane != 0 &&
2583 dssdev->phy.dsi.data2_lane != 0)
2584 ths_eot = 2;
2585 else
2586 ths_eot = 4;
2587
2588 ddr_clk_pre = DIV_ROUND_UP(tclk_pre + tlpx + tclk_zero + tclk_prepare,
2589 4);
2590 ddr_clk_post = DIV_ROUND_UP(tclk_post + ths_trail, 4) + ths_eot;
2591
2592 BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255);
2593 BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255);
2594
2595 r = dsi_read_reg(DSI_CLK_TIMING);
2596 r = FLD_MOD(r, ddr_clk_pre, 15, 8);
2597 r = FLD_MOD(r, ddr_clk_post, 7, 0);
2598 dsi_write_reg(DSI_CLK_TIMING, r);
2599
2600 DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n",
2601 ddr_clk_pre,
2602 ddr_clk_post);
2603
2604 enter_hs_mode_lat = 1 + DIV_ROUND_UP(tlpx, 4) +
2605 DIV_ROUND_UP(ths_prepare, 4) +
2606 DIV_ROUND_UP(ths_zero + 3, 4);
2607
2608 exit_hs_mode_lat = DIV_ROUND_UP(ths_trail + ths_exit, 4) + 1 + ths_eot;
2609
2610 r = FLD_VAL(enter_hs_mode_lat, 31, 16) |
2611 FLD_VAL(exit_hs_mode_lat, 15, 0);
2612 dsi_write_reg(DSI_VM_TIMING7, r);
2613
2614 DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n",
2615 enter_hs_mode_lat, exit_hs_mode_lat);
2616}
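
/*
 * Worked example of the computation above, with purely illustrative PHY
 * values (tlpx = 12, tclk_zero = 40, tclk_prepare = 13, tclk_post = 86,
 * ths_trail = 20, two data lanes so ths_eot = 2):
 *
 *	ddr_clk_pre  = DIV_ROUND_UP(20 + 12 + 40 + 13, 4)	= 22
 *	ddr_clk_post = DIV_ROUND_UP(86 + 20, 4) + 2		= 29
 *
 * Both values fit the 8-bit fields of DSI_CLK_TIMING.
 */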
2617
2618
2619#define DSI_DECL_VARS \
2620 int __dsi_cb = 0; u32 __dsi_cv = 0;
2621
2622#define DSI_FLUSH(ch) \
2623 if (__dsi_cb > 0) { \
2624 /*DSSDBG("sending long packet %#010x\n", __dsi_cv);*/ \
2625 dsi_write_reg(DSI_VC_LONG_PACKET_PAYLOAD(ch), __dsi_cv); \
2626 __dsi_cb = __dsi_cv = 0; \
2627 }
2628
2629#define DSI_PUSH(ch, data) \
2630 do { \
2631 __dsi_cv |= (data) << (__dsi_cb * 8); \
2632 /*DSSDBG("cv = %#010x, cb = %d\n", __dsi_cv, __dsi_cb);*/ \
2633 if (++__dsi_cb > 3) \
2634 DSI_FLUSH(ch); \
2635 } while (0)
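
/*
 * Illustrative sketch of the helpers above (not part of the driver):
 * pushing the DCS command byte and one RGB24 pixel packs four bytes into
 * a single 32-bit FIFO word.
 *
 *	DSI_DECL_VARS;
 *	DSI_PUSH(0, dcs_cmd);
 *	DSI_PUSH(0, r8); DSI_PUSH(0, g8); DSI_PUSH(0, b8);
 *	DSI_FLUSH(0);
 *
 * The fourth DSI_PUSH raises __dsi_cb past 3 and writes the word itself,
 * so the final DSI_FLUSH is a no-op here; it only matters when the byte
 * count is not a multiple of four.
 */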
2636
2637static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
2638 int x, int y, int w, int h)
2639{
2640 /* Note: supports only 24bit colors in 32bit container */
2641 int first = 1;
2642 int fifo_stalls = 0;
2643 int max_dsi_packet_size;
2644 int max_data_per_packet;
2645 int max_pixels_per_packet;
2646 int pixels_left;
2647 int bytespp = dssdev->ctrl.pixel_size / 8;
2648 int scr_width;
2649 u32 __iomem *data;
2650 int start_offset;
2651 int horiz_inc;
2652 int current_x;
2653 struct omap_overlay *ovl;
2654
2655 debug_irq = 0;
2656
2657 DSSDBG("dsi_update_screen_l4 (%d,%d %dx%d)\n",
2658 x, y, w, h);
2659
2660 ovl = dssdev->manager->overlays[0];
2661
2662 if (ovl->info.color_mode != OMAP_DSS_COLOR_RGB24U)
2663 return -EINVAL;
2664
2665 if (dssdev->ctrl.pixel_size != 24)
2666 return -EINVAL;
2667
2668 scr_width = ovl->info.screen_width;
2669 data = ovl->info.vaddr;
2670
2671 start_offset = scr_width * y + x;
2672 horiz_inc = scr_width - w;
2673 current_x = x;
2674
2675 /* We need header(4) + DCSCMD(1) + pixels(numpix*bytespp) bytes
2676 * in fifo */
2677
2678 /* When using CPU, max long packet size is TX buffer size */
2679 max_dsi_packet_size = dsi.vc[0].fifo_size * 32 * 4;
2680
2681	/* we seem to get better perf if we divide the tx fifo in half,
2682	   and fill one half while the other half is being sent
2683 max_dsi_packet_size /= 2; */
2684
2685 max_data_per_packet = max_dsi_packet_size - 4 - 1;
2686
2687 max_pixels_per_packet = max_data_per_packet / bytespp;
2688
2689 DSSDBG("max_pixels_per_packet %d\n", max_pixels_per_packet);
2690
2691 pixels_left = w * h;
2692
2693 DSSDBG("total pixels %d\n", pixels_left);
2694
2695 data += start_offset;
2696
2697 while (pixels_left > 0) {
2698 /* 0x2c = write_memory_start */
2699 /* 0x3c = write_memory_continue */
2700 u8 dcs_cmd = first ? 0x2c : 0x3c;
2701 int pixels;
2702 DSI_DECL_VARS;
2703 first = 0;
2704
2705#if 1
2706 /* using fifo not empty */
2707 /* TX_FIFO_NOT_EMPTY */
2708 while (FLD_GET(dsi_read_reg(DSI_VC_CTRL(0)), 5, 5)) {
2709			fifo_stalls++;
2710 if (fifo_stalls > 0xfffff) {
2711 DSSERR("fifo stalls overflow, pixels left %d\n",
2712 pixels_left);
2713 dsi_if_enable(0);
2714 return -EIO;
2715 }
2716			udelay(1);
2717		}
2718#elif 1
2719 /* using fifo emptiness */
2720 while ((REG_GET(DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 <
2721 max_dsi_packet_size) {
2722 fifo_stalls++;
2723 if (fifo_stalls > 0xfffff) {
2724 DSSERR("fifo stalls overflow, pixels left %d\n",
2725 pixels_left);
2726 dsi_if_enable(0);
2727 return -EIO;
2728 }
2729 }
2730#else
2731 while ((REG_GET(DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 == 0) {
2732 fifo_stalls++;
2733 if (fifo_stalls > 0xfffff) {
2734 DSSERR("fifo stalls overflow, pixels left %d\n",
2735 pixels_left);
2736 dsi_if_enable(0);
2737 return -EIO;
2738 }
2739 }
2740#endif
2741 pixels = min(max_pixels_per_packet, pixels_left);
2742
2743 pixels_left -= pixels;
2744
2745 dsi_vc_write_long_header(0, DSI_DT_DCS_LONG_WRITE,
2746 1 + pixels * bytespp, 0);
2747
2748 DSI_PUSH(0, dcs_cmd);
2749
2750 while (pixels-- > 0) {
2751 u32 pix = __raw_readl(data++);
2752
2753 DSI_PUSH(0, (pix >> 16) & 0xff);
2754 DSI_PUSH(0, (pix >> 8) & 0xff);
2755 DSI_PUSH(0, (pix >> 0) & 0xff);
2756
2757 current_x++;
2758 if (current_x == x+w) {
2759 current_x = x;
2760 data += horiz_inc;
2761 }
2762 }
2763
2764 DSI_FLUSH(0);
2765 }
2766
2767 return 0;
2768}
2769
2770static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
2771 u16 x, u16 y, u16 w, u16 h)
2772{
2773 unsigned bytespp;
2774 unsigned bytespl;
2775 unsigned bytespf;
2776 unsigned total_len;
2777 unsigned packet_payload;
2778 unsigned packet_len;
2779 u32 l;
2780 bool use_te_trigger;
2781 const unsigned channel = 0;
2782 /* line buffer is 1024 x 24bits */
2783	/* XXX: for some reason using the full buffer size causes considerable
2784	 * TX slowdown with update sizes that fill the whole buffer */
2785 const unsigned line_buf_size = 1023 * 3;
2786
2787 use_te_trigger = dsi.te_enabled && !dsi.use_ext_te;
2788
2789 if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO)
2790 DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n",
2791 x, y, w, h);
2792
2793 bytespp = dssdev->ctrl.pixel_size / 8;
2794 bytespl = w * bytespp;
2795 bytespf = bytespl * h;
2796
2797 /* NOTE: packet_payload has to be equal to N * bytespl, where N is
2798 * number of lines in a packet. See errata about VP_CLK_RATIO */
2799
2800 if (bytespf < line_buf_size)
2801 packet_payload = bytespf;
2802 else
2803 packet_payload = (line_buf_size) / bytespl * bytespl;
2804
2805 packet_len = packet_payload + 1; /* 1 byte for DCS cmd */
2806 total_len = (bytespf / packet_payload) * packet_len;
2807
2808 if (bytespf % packet_payload)
2809 total_len += (bytespf % packet_payload) + 1;
2810
2811 if (0)
2812 dsi_vc_print_status(1);
2813
2814 l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
2815 dsi_write_reg(DSI_VC_TE(channel), l);
2816
2817 dsi_vc_write_long_header(channel, DSI_DT_DCS_LONG_WRITE, packet_len, 0);
2818
2819 if (use_te_trigger)
2820 l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
2821 else
2822 l = FLD_MOD(l, 1, 31, 31); /* TE_START */
2823 dsi_write_reg(DSI_VC_TE(channel), l);
2824
2825 /* We put SIDLEMODE to no-idle for the duration of the transfer,
2826 * because DSS interrupts are not capable of waking up the CPU and the
2827 * framedone interrupt could be delayed for quite a long time. I think
2828 * the same goes for any DSS interrupts, but for some reason I have not
2829 * seen the problem anywhere else than here.
2830 */
2831 dispc_disable_sidle();
2832
2833 dss_start_update(dssdev);
2834
2835 if (use_te_trigger) {
2836 /* disable LP_RX_TO, so that we can receive TE. Time to wait
2837 * for TE is longer than the timer allows */
2838 REG_FLD_MOD(DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
2839
2840 dsi_vc_send_bta(channel);
2841
2842#ifdef DSI_CATCH_MISSING_TE
2843 mod_timer(&dsi.te_timer, jiffies + msecs_to_jiffies(250));
2844#endif
2845 }
2846}
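
/*
 * Worked example of the packet split above, with purely illustrative
 * numbers (w = 864, h = 480, 24 bpp): bytespl = 2592, bytespf = 1244160,
 * and the 3069-byte line buffer limit gives packet_payload =
 * 3069 / 2592 * 2592 = 2592, i.e. one line per packet. packet_len is then
 * 2593 and TE_SIZE becomes 480 * 2593 = 1244640, with no partial last
 * packet since bytespf divides evenly.
 */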
2847
2848#ifdef DSI_CATCH_MISSING_TE
2849static void dsi_te_timeout(unsigned long arg)
2850{
2851 DSSERR("TE not received for 250ms!\n");
2852}
2853#endif
2854
2855static void dsi_framedone_irq_callback(void *data, u32 mask)
2856{
2857 /* Note: We get FRAMEDONE when DISPC has finished sending pixels and
2858 * turns itself off. However, DSI still has the pixels in its buffers,
2859 * and is sending the data.
2860 */
2861
2862 /* SIDLEMODE back to smart-idle */
2863 dispc_enable_sidle();
2864
2865 dsi.framedone_received = true;
2866 wake_up(&dsi.waitqueue);
2867}
2868
2869static void dsi_set_update_region(struct omap_dss_device *dssdev,
2870 u16 x, u16 y, u16 w, u16 h)
2871{
2872 spin_lock(&dsi.update_lock);
2873 if (dsi.update_region.dirty) {
2874 dsi.update_region.x = min(x, dsi.update_region.x);
2875 dsi.update_region.y = min(y, dsi.update_region.y);
2876 dsi.update_region.w = max(w, dsi.update_region.w);
2877 dsi.update_region.h = max(h, dsi.update_region.h);
2878 } else {
2879 dsi.update_region.x = x;
2880 dsi.update_region.y = y;
2881 dsi.update_region.w = w;
2882 dsi.update_region.h = h;
2883 }
2884
2885 dsi.update_region.device = dssdev;
2886 dsi.update_region.dirty = true;
2887
2888 spin_unlock(&dsi.update_lock);
2889
2890}
2891
2892static int dsi_set_update_mode(struct omap_dss_device *dssdev,
2893 enum omap_dss_update_mode mode)
2894{
2895 int r = 0;
2896 int i;
2897
2898 WARN_ON(!mutex_is_locked(&dsi.bus_lock));
2899
2900 if (dsi.update_mode != mode) {
2901 dsi.update_mode = mode;
2902
2903 /* Mark the overlays dirty, and do apply(), so that we get the
2904 * overlays configured properly after update mode change. */
2905 for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
2906 struct omap_overlay *ovl;
2907 ovl = omap_dss_get_overlay(i);
2908 if (ovl->manager == dssdev->manager)
2909 ovl->info_dirty = true;
2910 }
2911
2912 r = dssdev->manager->apply(dssdev->manager);
2913
2914 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE &&
2915 mode == OMAP_DSS_UPDATE_AUTO) {
2916 u16 w, h;
2917
2918 DSSDBG("starting auto update\n");
2919
2920 dssdev->get_resolution(dssdev, &w, &h);
2921
2922 dsi_set_update_region(dssdev, 0, 0, w, h);
2923
2924 dsi_perf_mark_start_auto();
2925
2926 wake_up(&dsi.waitqueue);
2927 }
2928 }
2929
2930 return r;
2931}
2932
2933static int dsi_set_te(struct omap_dss_device *dssdev, bool enable)
2934{
2935	int r = 0;
2936
2937 if (dssdev->driver->enable_te) {
2938 r = dssdev->driver->enable_te(dssdev, enable);
2939 /* XXX for some reason, DSI TE breaks if we don't wait here.
2940 * Panel bug? Needs more studying */
2941 msleep(100);
2942 }
2943
2944	return r;
2945}
2946
2947static void dsi_handle_framedone(void)
2948{
2949 int r;
2950 const int channel = 0;
2951 bool use_te_trigger;
2952
2953 use_te_trigger = dsi.te_enabled && !dsi.use_ext_te;
2954
2955 if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO)
2956 DSSDBG("FRAMEDONE\n");
2957
2958 if (use_te_trigger) {
2959 /* enable LP_RX_TO again after the TE */
2960 REG_FLD_MOD(DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
2961 }
2962
2963	/* Send a BTA after the frame. We need this for TE to work, as the TE
2964	 * trigger is only sent for a BTA without a preceding packet. Thus we
2965	 * need to send a BTA after the pixel packets so that the next BTA will
2966	 * cause a TE trigger.
2967	 *
2968	 * This is not needed when TE is not in use, but we do it anyway to
2969	 * make sure that the transfer has been completed. It would be more
2970	 * efficient, but more complex, to wait only just before starting the
2971	 * next transfer. */
2972 r = dsi_vc_send_bta_sync(channel);
2973 if (r)
2974 DSSERR("BTA after framedone failed\n");
2975
2976 /* RX_FIFO_NOT_EMPTY */
2977 if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
2978 DSSERR("Received error during frame transfer:\n");
2979 dsi_vc_flush_receive_data(0);
2980 }
2981
2982#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
2983 dispc_fake_vsync_irq();
2984#endif
2985}
2986
2987static int dsi_update_thread(void *data)
2988{
2989 unsigned long timeout;
2990 struct omap_dss_device *device;
2991 u16 x, y, w, h;
2992
2993 while (1) {
2994 bool sched;
2995
2996 wait_event_interruptible(dsi.waitqueue,
2997 dsi.update_mode == OMAP_DSS_UPDATE_AUTO ||
2998 (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL &&
2999 dsi.update_region.dirty == true) ||
3000 kthread_should_stop());
3001
3002 if (kthread_should_stop())
3003 break;
3004
3005 dsi_bus_lock();
3006
3007 if (dsi.update_mode == OMAP_DSS_UPDATE_DISABLED ||
3008 kthread_should_stop()) {
3009 dsi_bus_unlock();
3010 break;
3011 }
3012
3013 dsi_perf_mark_setup();
3014
3015 if (dsi.update_region.dirty) {
3016 spin_lock(&dsi.update_lock);
3017 dsi.active_update_region = dsi.update_region;
3018 dsi.update_region.dirty = false;
3019 spin_unlock(&dsi.update_lock);
3020 }
3021
3022 device = dsi.active_update_region.device;
3023 x = dsi.active_update_region.x;
3024 y = dsi.active_update_region.y;
3025 w = dsi.active_update_region.w;
3026 h = dsi.active_update_region.h;
3027
3028 if (device->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
3029
3030 if (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL)
3031 dss_setup_partial_planes(device,
3032 &x, &y, &w, &h);
3033
3034 dispc_set_lcd_size(w, h);
3035 }
3036
3037 if (dsi.active_update_region.dirty) {
3038 dsi.active_update_region.dirty = false;
3039			/* XXX TODO we don't need to send the coords if they
3040			 * are the same as those already programmed into the
3041			 * panel. That should speed up manual update a bit */
3042 device->driver->setup_update(device, x, y, w, h);
3043 }
3044
3045 dsi_perf_mark_start();
3046
3047 if (device->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
3048 dsi_vc_config_vp(0);
3049
3050 if (dsi.te_enabled && dsi.use_ext_te)
3051 device->driver->wait_for_te(device);
3052
3053 dsi.framedone_received = false;
3054
3055 dsi_update_screen_dispc(device, x, y, w, h);
3056
3057 /* wait for framedone */
3058 timeout = msecs_to_jiffies(1000);
3059 wait_event_timeout(dsi.waitqueue,
3060 dsi.framedone_received == true,
3061 timeout);
3062
3063 if (!dsi.framedone_received) {
3064 DSSERR("framedone timeout\n");
3065 DSSERR("failed update %d,%d %dx%d\n",
3066 x, y, w, h);
3067
3068 dispc_enable_sidle();
3069 dispc_enable_lcd_out(0);
3070
3071 dsi_reset_tx_fifo(0);
3072 } else {
3073 dsi_handle_framedone();
3074 dsi_perf_show("DISPC");
3075 }
3076 } else {
3077 dsi_update_screen_l4(device, x, y, w, h);
3078 dsi_perf_show("L4");
3079 }
3080
3081 sched = atomic_read(&dsi.bus_lock.count) < 0;
3082
3083 complete_all(&dsi.update_completion);
3084
3085 dsi_bus_unlock();
3086
3087		/* XXX We need to give others a chance to get the bus lock. Is
3088		 * there a better way to do this? */
3089 if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO && sched)
3090 schedule_timeout_interruptible(1);
3091 }
3092
3093 DSSDBG("update thread exiting\n");
3094
3095 return 0;
3096}
3097
3098
3099
3100/* Display funcs */
3101
3102static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
3103{
3104 int r;
3105
3106 r = omap_dispc_register_isr(dsi_framedone_irq_callback, NULL,
3107 DISPC_IRQ_FRAMEDONE);
3108 if (r) {
3109 DSSERR("can't get FRAMEDONE irq\n");
3110 return r;
3111 }
3112
3113 dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT);
3114
3115 dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_DSI);
3116 dispc_enable_fifohandcheck(1);
3117
3118 dispc_set_tft_data_lines(dssdev->ctrl.pixel_size);
3119
3120 {
3121 struct omap_video_timings timings = {
3122 .hsw = 1,
3123 .hfp = 1,
3124 .hbp = 1,
3125 .vsw = 1,
3126 .vfp = 0,
3127 .vbp = 0,
3128 };
3129
3130 dispc_set_lcd_timings(&timings);
3131 }
3132
3133 return 0;
3134}
3135
3136static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev)
3137{
3138 omap_dispc_unregister_isr(dsi_framedone_irq_callback, NULL,
3139 DISPC_IRQ_FRAMEDONE);
3140}
3141
3142static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
3143{
3144 struct dsi_clock_info cinfo;
3145 int r;
3146
3147 /* we always use DSS2_FCK as input clock */
3148 cinfo.use_dss2_fck = true;
3149 cinfo.regn = dssdev->phy.dsi.div.regn;
3150 cinfo.regm = dssdev->phy.dsi.div.regm;
3151 cinfo.regm3 = dssdev->phy.dsi.div.regm3;
3152 cinfo.regm4 = dssdev->phy.dsi.div.regm4;
3153 r = dsi_calc_clock_rates(&cinfo);
3154 if (r)
3155 return r;
3156
3157 r = dsi_pll_set_clock_div(&cinfo);
3158 if (r) {
3159 DSSERR("Failed to set dsi clocks\n");
3160 return r;
3161 }
3162
3163 return 0;
3164}
3165
3166static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
3167{
3168 struct dispc_clock_info dispc_cinfo;
3169 int r;
3170 unsigned long long fck;
3171
3172 fck = dsi_get_dsi1_pll_rate();
3173
3174 dispc_cinfo.lck_div = dssdev->phy.dsi.div.lck_div;
3175 dispc_cinfo.pck_div = dssdev->phy.dsi.div.pck_div;
3176
3177 r = dispc_calc_clock_rates(fck, &dispc_cinfo);
3178 if (r) {
3179 DSSERR("Failed to calc dispc clocks\n");
3180 return r;
3181 }
3182
3183 r = dispc_set_clock_div(&dispc_cinfo);
3184 if (r) {
3185 DSSERR("Failed to set dispc clocks\n");
3186 return r;
3187 }
3188
3189 return 0;
3190}
3191
3192static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
3193{
3194 int r;
3195
3196 _dsi_print_reset_status();
3197
3198 r = dsi_pll_init(dssdev, true, true);
3199 if (r)
3200 goto err0;
3201
3202 r = dsi_configure_dsi_clocks(dssdev);
3203 if (r)
3204 goto err1;
3205
3206 dss_select_clk_source(true, true);
3207
3208 DSSDBG("PLL OK\n");
3209
3210 r = dsi_configure_dispc_clocks(dssdev);
3211 if (r)
3212 goto err2;
3213
3214 r = dsi_complexio_init(dssdev);
3215 if (r)
3216 goto err2;
3217
3218 _dsi_print_reset_status();
3219
3220 dsi_proto_timings(dssdev);
3221 dsi_set_lp_clk_divisor(dssdev);
3222
3223 if (1)
3224 _dsi_print_reset_status();
3225
3226 r = dsi_proto_config(dssdev);
3227 if (r)
3228 goto err3;
3229
3230 /* enable interface */
3231 dsi_vc_enable(0, 1);
3232 dsi_if_enable(1);
3233 dsi_force_tx_stop_mode_io();
3234
3235 if (dssdev->driver->enable) {
3236 r = dssdev->driver->enable(dssdev);
3237 if (r)
3238 goto err4;
3239 }
3240
3241 /* enable high-speed after initial config */
3242 dsi_vc_enable_hs(0, 1);
3243
3244 return 0;
3245err4:
3246 dsi_if_enable(0);
3247err3:
3248 dsi_complexio_uninit();
3249err2:
3250 dss_select_clk_source(false, false);
3251err1:
3252 dsi_pll_uninit();
3253err0:
3254 return r;
3255}
3256
3257static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev)
3258{
3259 if (dssdev->driver->disable)
3260 dssdev->driver->disable(dssdev);
3261
3262 dss_select_clk_source(false, false);
3263 dsi_complexio_uninit();
3264 dsi_pll_uninit();
3265}
3266
3267static int dsi_core_init(void)
3268{
3269 /* Autoidle */
3270 REG_FLD_MOD(DSI_SYSCONFIG, 1, 0, 0);
3271
3272 /* ENWAKEUP */
3273 REG_FLD_MOD(DSI_SYSCONFIG, 1, 2, 2);
3274
3275 /* SIDLEMODE smart-idle */
3276 REG_FLD_MOD(DSI_SYSCONFIG, 2, 4, 3);
3277
3278 _dsi_initialize_irq();
3279
3280 return 0;
3281}
3282
3283static int dsi_display_enable(struct omap_dss_device *dssdev)
3284{
3285 int r = 0;
3286
3287 DSSDBG("dsi_display_enable\n");
3288
3289 mutex_lock(&dsi.lock);
3290 dsi_bus_lock();
3291
3292 r = omap_dss_start_device(dssdev);
3293 if (r) {
3294 DSSERR("failed to start device\n");
3295 goto err0;
3296 }
3297
3298 if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
3299 DSSERR("dssdev already enabled\n");
3300 r = -EINVAL;
3301 goto err1;
3302 }
3303
3304 enable_clocks(1);
3305 dsi_enable_pll_clock(1);
3306
3307 r = _dsi_reset();
3308 if (r)
3309 goto err2;
3310
3311 dsi_core_init();
3312
3313 r = dsi_display_init_dispc(dssdev);
3314 if (r)
3315 goto err2;
3316
3317 r = dsi_display_init_dsi(dssdev);
3318 if (r)
3319 goto err3;
3320
3321 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
3322
3323 dsi.use_ext_te = dssdev->phy.dsi.ext_te;
3324 r = dsi_set_te(dssdev, dsi.te_enabled);
3325 if (r)
3326 goto err4;
3327
3328 dsi_set_update_mode(dssdev, dsi.user_update_mode);
3329
3330 dsi_bus_unlock();
3331 mutex_unlock(&dsi.lock);
3332
3333 return 0;
3334
3335err4:
3336
3337 dsi_display_uninit_dsi(dssdev);
3338err3:
3339 dsi_display_uninit_dispc(dssdev);
3340err2:
3341 enable_clocks(0);
3342 dsi_enable_pll_clock(0);
3343err1:
3344 omap_dss_stop_device(dssdev);
3345err0:
3346 dsi_bus_unlock();
3347 mutex_unlock(&dsi.lock);
3348 DSSDBG("dsi_display_enable FAILED\n");
3349 return r;
3350}
3351
3352static void dsi_display_disable(struct omap_dss_device *dssdev)
3353{
3354 DSSDBG("dsi_display_disable\n");
3355
3356 mutex_lock(&dsi.lock);
3357 dsi_bus_lock();
3358
3359 if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED ||
3360 dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
3361 goto end;
3362
3363 dsi.update_mode = OMAP_DSS_UPDATE_DISABLED;
3364 dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
3365
3366 dsi_display_uninit_dispc(dssdev);
3367
3368 dsi_display_uninit_dsi(dssdev);
3369
3370 enable_clocks(0);
3371 dsi_enable_pll_clock(0);
3372
3373 omap_dss_stop_device(dssdev);
3374end:
3375 dsi_bus_unlock();
3376 mutex_unlock(&dsi.lock);
3377}
3378
3379static int dsi_display_suspend(struct omap_dss_device *dssdev)
3380{
3381 DSSDBG("dsi_display_suspend\n");
3382
3383 mutex_lock(&dsi.lock);
3384 dsi_bus_lock();
3385
3386 if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED ||
3387 dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
3388 goto end;
3389
3390 dsi.update_mode = OMAP_DSS_UPDATE_DISABLED;
3391 dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
3392
3393 dsi_display_uninit_dispc(dssdev);
3394
3395 dsi_display_uninit_dsi(dssdev);
3396
3397 enable_clocks(0);
3398 dsi_enable_pll_clock(0);
3399end:
3400 dsi_bus_unlock();
3401 mutex_unlock(&dsi.lock);
3402
3403 return 0;
3404}
3405
3406static int dsi_display_resume(struct omap_dss_device *dssdev)
3407{
3408 int r;
3409
3410 DSSDBG("dsi_display_resume\n");
3411
3412 mutex_lock(&dsi.lock);
3413 dsi_bus_lock();
3414
3415 if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
3416 DSSERR("dssdev not suspended\n");
3417 r = -EINVAL;
3418 goto err0;
3419 }
3420
3421 enable_clocks(1);
3422 dsi_enable_pll_clock(1);
3423
3424 r = _dsi_reset();
3425 if (r)
3426 goto err1;
3427
3428 dsi_core_init();
3429
3430 r = dsi_display_init_dispc(dssdev);
3431 if (r)
3432 goto err1;
3433
3434 r = dsi_display_init_dsi(dssdev);
3435 if (r)
3436 goto err2;
3437
3438 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
3439
3440 r = dsi_set_te(dssdev, dsi.te_enabled);
3441 if (r)
3442 goto err2;
3443
3444 dsi_set_update_mode(dssdev, dsi.user_update_mode);
3445
3446 dsi_bus_unlock();
3447 mutex_unlock(&dsi.lock);
3448
3449 return 0;
3450
3451err2:
3452 dsi_display_uninit_dispc(dssdev);
3453err1:
3454 enable_clocks(0);
3455 dsi_enable_pll_clock(0);
3456err0:
3457 dsi_bus_unlock();
3458 mutex_unlock(&dsi.lock);
3459 DSSDBG("dsi_display_resume FAILED\n");
3460 return r;
3461}
3462
3463static int dsi_display_update(struct omap_dss_device *dssdev,
3464 u16 x, u16 y, u16 w, u16 h)
3465{
3466 int r = 0;
3467 u16 dw, dh;
3468
3469 DSSDBG("dsi_display_update(%d,%d %dx%d)\n", x, y, w, h);
3470
3471 mutex_lock(&dsi.lock);
3472
3473 if (dsi.update_mode != OMAP_DSS_UPDATE_MANUAL)
3474 goto end;
3475
3476 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
3477 goto end;
3478
3479 dssdev->get_resolution(dssdev, &dw, &dh);
3480
3481 if (x > dw || y > dh)
3482 goto end;
3483
3484 if (x + w > dw)
3485 w = dw - x;
3486
3487 if (y + h > dh)
3488 h = dh - y;
3489
3490 if (w == 0 || h == 0)
3491 goto end;
3492
3493 if (w == 1) {
3494 r = -EINVAL;
3495 goto end;
3496 }
3497
3498 dsi_set_update_region(dssdev, x, y, w, h);
3499
3500 wake_up(&dsi.waitqueue);
3501
3502end:
3503 mutex_unlock(&dsi.lock);
3504
3505 return r;
3506}
3507
3508static int dsi_display_sync(struct omap_dss_device *dssdev)
3509{
3510 bool wait;
3511
3512 DSSDBG("dsi_display_sync()\n");
3513
3514 mutex_lock(&dsi.lock);
3515 dsi_bus_lock();
3516
3517 if (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL &&
3518 dsi.update_region.dirty) {
3519 INIT_COMPLETION(dsi.update_completion);
3520 wait = true;
3521 } else {
3522 wait = false;
3523 }
3524
3525 dsi_bus_unlock();
3526 mutex_unlock(&dsi.lock);
3527
3528 if (wait)
3529 wait_for_completion_interruptible(&dsi.update_completion);
3530
3531 DSSDBG("dsi_display_sync() done\n");
3532 return 0;
3533}
3534
3535static int dsi_display_set_update_mode(struct omap_dss_device *dssdev,
3536 enum omap_dss_update_mode mode)
3537{
3538 int r = 0;
3539
3540 DSSDBGF("%d", mode);
3541
3542 mutex_lock(&dsi.lock);
3543 dsi_bus_lock();
3544
3545 dsi.user_update_mode = mode;
3546 r = dsi_set_update_mode(dssdev, mode);
3547
3548 dsi_bus_unlock();
3549 mutex_unlock(&dsi.lock);
3550
3551 return r;
3552}
3553
3554static enum omap_dss_update_mode dsi_display_get_update_mode(
3555 struct omap_dss_device *dssdev)
3556{
3557 return dsi.update_mode;
3558}
3559
3560
3561static int dsi_display_enable_te(struct omap_dss_device *dssdev, bool enable)
3562{
3563 int r = 0;
3564
3565 DSSDBGF("%d", enable);
3566
3567 if (!dssdev->driver->enable_te)
3568 return -ENOENT;
3569
3570 dsi_bus_lock();
3571
3572 dsi.te_enabled = enable;
3573
3574 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
3575 goto end;
3576
3577 r = dsi_set_te(dssdev, enable);
3578end:
3579 dsi_bus_unlock();
3580
3581 return r;
3582}
3583
3584static int dsi_display_get_te(struct omap_dss_device *dssdev)
3585{
3586 return dsi.te_enabled;
3587}
3588
3589static int dsi_display_set_rotate(struct omap_dss_device *dssdev, u8 rotate)
3590{
3591
3592 DSSDBGF("%d", rotate);
3593
3594 if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
3595 return -EINVAL;
3596
3597 dsi_bus_lock();
3598 dssdev->driver->set_rotate(dssdev, rotate);
3599 if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO) {
3600 u16 w, h;
3601 /* the display dimensions may have changed, so set a new
3602 * update region */
3603 dssdev->get_resolution(dssdev, &w, &h);
3604 dsi_set_update_region(dssdev, 0, 0, w, h);
3605 }
3606 dsi_bus_unlock();
3607
3608 return 0;
3609}
3610
3611static u8 dsi_display_get_rotate(struct omap_dss_device *dssdev)
3612{
3613 if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
3614 return 0;
3615
3616 return dssdev->driver->get_rotate(dssdev);
3617}
3618
3619static int dsi_display_set_mirror(struct omap_dss_device *dssdev, bool mirror)
3620{
3621 DSSDBGF("%d", mirror);
3622
3623 if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror)
3624 return -EINVAL;
3625
3626 dsi_bus_lock();
3627 dssdev->driver->set_mirror(dssdev, mirror);
3628 dsi_bus_unlock();
3629
3630 return 0;
3631}
3632
3633static bool dsi_display_get_mirror(struct omap_dss_device *dssdev)
3634{
3635 if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror)
3636 return 0;
3637
3638 return dssdev->driver->get_mirror(dssdev);
3639}
3640
3641static int dsi_display_run_test(struct omap_dss_device *dssdev, int test_num)
3642{
3643 int r;
3644
3645 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
3646 return -EIO;
3647
3648 DSSDBGF("%d", test_num);
3649
3650 dsi_bus_lock();
3651
3652 /* run test first in low speed mode */
3653 dsi_vc_enable_hs(0, 0);
3654
3655 if (dssdev->driver->run_test) {
3656 r = dssdev->driver->run_test(dssdev, test_num);
3657 if (r)
3658 goto end;
3659 }
3660
3661 /* then in high speed */
3662 dsi_vc_enable_hs(0, 1);
3663
3664 if (dssdev->driver->run_test) {
3665 r = dssdev->driver->run_test(dssdev, test_num);
3666 if (r)
3667 goto end;
3668 }
3669
3670end:
3671 dsi_vc_enable_hs(0, 1);
3672
3673 dsi_bus_unlock();
3674
3675 return r;
3676}
3677
3678static int dsi_display_memory_read(struct omap_dss_device *dssdev,
3679 void *buf, size_t size,
3680 u16 x, u16 y, u16 w, u16 h)
3681{
3682 int r;
3683
3684 DSSDBGF("");
3685
3686 if (!dssdev->driver->memory_read)
3687 return -EINVAL;
3688
3689 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
3690 return -EIO;
3691
3692 dsi_bus_lock();
3693
3694 r = dssdev->driver->memory_read(dssdev, buf, size,
3695 x, y, w, h);
3696
3697 /* Memory read usually changes the update area. This will
3698 * force the next update to re-set the update area */
3699 dsi.active_update_region.dirty = true;
3700
3701 dsi_bus_unlock();
3702
3703 return r;
3704}
3705
3706void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
3707 u32 fifo_size, enum omap_burst_size *burst_size,
3708 u32 *fifo_low, u32 *fifo_high)
3709{
3710 unsigned burst_size_bytes;
3711
3712 *burst_size = OMAP_DSS_BURST_16x32;
3713 burst_size_bytes = 16 * 32 / 8;
3714
3715 *fifo_high = fifo_size - burst_size_bytes;
3716 *fifo_low = fifo_size - burst_size_bytes * 8;
3717}
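
/*
 * Worked example for the thresholds above, assuming a purely illustrative
 * 1024-byte FIFO: the 16x32-bit burst is 64 bytes, so fifo_high becomes
 * 1024 - 64 = 960 and fifo_low becomes 1024 - 64 * 8 = 512.
 */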
3718
3719int dsi_init_display(struct omap_dss_device *dssdev)
3720{
3721 DSSDBG("DSI init\n");
3722
3723 dssdev->enable = dsi_display_enable;
3724 dssdev->disable = dsi_display_disable;
3725 dssdev->suspend = dsi_display_suspend;
3726 dssdev->resume = dsi_display_resume;
3727 dssdev->update = dsi_display_update;
3728 dssdev->sync = dsi_display_sync;
3729 dssdev->set_update_mode = dsi_display_set_update_mode;
3730 dssdev->get_update_mode = dsi_display_get_update_mode;
3731 dssdev->enable_te = dsi_display_enable_te;
3732 dssdev->get_te = dsi_display_get_te;
3733
3734 dssdev->get_rotate = dsi_display_get_rotate;
3735 dssdev->set_rotate = dsi_display_set_rotate;
3736
3737 dssdev->get_mirror = dsi_display_get_mirror;
3738 dssdev->set_mirror = dsi_display_set_mirror;
3739
3740 dssdev->run_test = dsi_display_run_test;
3741 dssdev->memory_read = dsi_display_memory_read;
3742
3743 /* XXX these should be figured out dynamically */
3744 dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
3745 OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
3746
3747 dsi.vc[0].dssdev = dssdev;
3748 dsi.vc[1].dssdev = dssdev;
3749
3750 return 0;
3751}
3752
3753int dsi_init(struct platform_device *pdev)
3754{
3755 u32 rev;
3756 int r;
3757 struct sched_param param = {
3758 .sched_priority = MAX_USER_RT_PRIO-1
3759 };
3760
3761 spin_lock_init(&dsi.errors_lock);
3762 dsi.errors = 0;
3763
3764#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
3765 spin_lock_init(&dsi.irq_stats_lock);
3766 dsi.irq_stats.last_reset = jiffies;
3767#endif
3768
3769	init_completion(&dsi.bta_completion);
3770 init_completion(&dsi.update_completion);
3771
3772 dsi.thread = kthread_create(dsi_update_thread, NULL, "dsi");
3773 if (IS_ERR(dsi.thread)) {
3774 DSSERR("cannot create kthread\n");
3775 r = PTR_ERR(dsi.thread);
3776 goto err0;
3777 }
3778 sched_setscheduler(dsi.thread, SCHED_FIFO, &param);
3779
3780 init_waitqueue_head(&dsi.waitqueue);
3781 spin_lock_init(&dsi.update_lock);
3782
3783 mutex_init(&dsi.lock);
3784 mutex_init(&dsi.bus_lock);
3785
3786#ifdef DSI_CATCH_MISSING_TE
3787 init_timer(&dsi.te_timer);
3788 dsi.te_timer.function = dsi_te_timeout;
3789 dsi.te_timer.data = 0;
3790#endif
3791
3792 dsi.update_mode = OMAP_DSS_UPDATE_DISABLED;
3793 dsi.user_update_mode = OMAP_DSS_UPDATE_DISABLED;
3794
3795 dsi.base = ioremap(DSI_BASE, DSI_SZ_REGS);
3796 if (!dsi.base) {
3797 DSSERR("can't ioremap DSI\n");
3798 r = -ENOMEM;
3799 goto err1;
3800 }
3801
3802 dsi.vdds_dsi_reg = regulator_get(&pdev->dev, "vdds_dsi");
3803 if (IS_ERR(dsi.vdds_dsi_reg)) {
3804 iounmap(dsi.base);
3805 DSSERR("can't get VDDS_DSI regulator\n");
3806 r = PTR_ERR(dsi.vdds_dsi_reg);
3807 goto err2;
3808 }
3809
3810 enable_clocks(1);
3811
3812 rev = dsi_read_reg(DSI_REVISION);
3813 printk(KERN_INFO "OMAP DSI rev %d.%d\n",
3814 FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
3815
3816 enable_clocks(0);
3817
3818 wake_up_process(dsi.thread);
3819
3820 return 0;
3821err2:
3822 iounmap(dsi.base);
3823err1:
3824 kthread_stop(dsi.thread);
3825err0:
3826 return r;
3827}
3828
3829void dsi_exit(void)
3830{
3831 kthread_stop(dsi.thread);
3832
3833 regulator_put(dsi.vdds_dsi_reg);
3834
3835 iounmap(dsi.base);
3836
3837 DSSDBG("omap_dsi_exit\n");
3838}
3839