/* drivers/video/msm_fb/mdp.c
2 *
3 * MSM MDP Interface (used by framebuffer core)
4 *
5 * Copyright (C) 2007 QUALCOMM Incorporated
6 * Copyright (C) 2007 Google Incorporated
7 *
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/kernel.h>
19#include <linux/fb.h>
20#include <linux/msm_mdp.h>
21#include <linux/interrupt.h>
22#include <linux/wait.h>
23#include <linux/clk.h>
24#include <linux/file.h>
25#ifdef CONFIG_ANDROID_PMEM
26#include <linux/android_pmem.h>
27#endif
28#include <linux/major.h>
29
30#include <mach/msm_iomap.h>
31#include <mach/msm_fb.h>
32#include <linux/platform_device.h>
33
34#include "mdp_hw.h"
35
/* sysfs class mdp devices register under; created in mdp_init() */
struct class *mdp_class;

/* base of the MDP "command debug access" register window */
#define MDP_CMD_DEBUG_ACCESS_BASE (0x10000)

/* Default colour-conversion coefficients programmed in mdp_probe():
 * nine matrix entries followed by three offset values.
 * NOTE(review): presumably a fixed-point YCbCr->RGB matrix -- confirm
 * against the MDP register documentation. */
static uint16_t mdp_default_ccs[] = {
	0x254, 0x000, 0x331, 0x254, 0xF38, 0xE61, 0x254, 0x409, 0x000,
	0x010, 0x080, 0x080
};

/* waiters for DMA2-done and PPP ROI-done interrupts */
static DECLARE_WAIT_QUEUE_HEAD(mdp_dma2_waitqueue);
static DECLARE_WAIT_QUEUE_HEAD(mdp_ppp_waitqueue);
/* completion callback for the in-flight DMA2 transfer (NULL if none) */
static struct msmfb_callback *dma_callback;
/* MDP core clock; code tolerates this being NULL (checked before use) */
static struct clk *clk;
/* software mask of currently enabled irq sources; guarded by mdp_lock */
static unsigned int mdp_irq_mask;
static DEFINE_SPINLOCK(mdp_lock);
/* serializes blit operations in mdp_blit() */
DEFINE_MUTEX(mdp_mutex);
52
53static int enable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
54{
55 unsigned long irq_flags;
56 int ret = 0;
57
58 BUG_ON(!mask);
59
60 spin_lock_irqsave(&mdp_lock, irq_flags);
61 /* if the mask bits are already set return an error, this interrupt
62 * is already enabled */
63 if (mdp_irq_mask & mask) {
64 printk(KERN_ERR "mdp irq already on already on %x %x\n",
65 mdp_irq_mask, mask);
66 ret = -1;
67 }
68 /* if the mdp irq is not already enabled enable it */
69 if (!mdp_irq_mask) {
70 if (clk)
71 clk_enable(clk);
72 enable_irq(mdp->irq);
73 }
74
75 /* update the irq mask to reflect the fact that the interrupt is
76 * enabled */
77 mdp_irq_mask |= mask;
78 spin_unlock_irqrestore(&mdp_lock, irq_flags);
79 return ret;
80}
81
82static int locked_disable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
83{
84 /* this interrupt is already disabled! */
85 if (!(mdp_irq_mask & mask)) {
86 printk(KERN_ERR "mdp irq already off %x %x\n",
87 mdp_irq_mask, mask);
88 return -1;
89 }
90 /* update the irq mask to reflect the fact that the interrupt is
91 * disabled */
92 mdp_irq_mask &= ~(mask);
93 /* if no one is waiting on the interrupt, disable it */
94 if (!mdp_irq_mask) {
95 disable_irq(mdp->irq);
96 if (clk)
97 clk_disable(clk);
98 }
99 return 0;
100}
101
102static int disable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
103{
104 unsigned long irq_flags;
105 int ret;
106
107 spin_lock_irqsave(&mdp_lock, irq_flags);
108 ret = locked_disable_mdp_irq(mdp, mask);
109 spin_unlock_irqrestore(&mdp_lock, irq_flags);
110 return ret;
111}
112
113static irqreturn_t mdp_isr(int irq, void *data)
114{
115 uint32_t status;
116 unsigned long irq_flags;
117 struct mdp_info *mdp = data;
118
119 spin_lock_irqsave(&mdp_lock, irq_flags);
120
121 status = mdp_readl(mdp, MDP_INTR_STATUS);
122 mdp_writel(mdp, status, MDP_INTR_CLEAR);
123
124 status &= mdp_irq_mask;
125 if (status & DL0_DMA2_TERM_DONE) {
126 if (dma_callback) {
127 dma_callback->func(dma_callback);
128 dma_callback = NULL;
129 }
130 wake_up(&mdp_dma2_waitqueue);
131 }
132
133 if (status & DL0_ROI_DONE)
134 wake_up(&mdp_ppp_waitqueue);
135
136 if (status)
137 locked_disable_mdp_irq(mdp, status);
138
139 spin_unlock_irqrestore(&mdp_lock, irq_flags);
140 return IRQ_HANDLED;
141}
142
143static uint32_t mdp_check_mask(uint32_t mask)
144{
145 uint32_t ret;
146 unsigned long irq_flags;
147
148 spin_lock_irqsave(&mdp_lock, irq_flags);
149 ret = mdp_irq_mask & mask;
150 spin_unlock_irqrestore(&mdp_lock, irq_flags);
151 return ret;
152}
153
154static int mdp_wait(struct mdp_info *mdp, uint32_t mask, wait_queue_head_t *wq)
155{
156 int ret = 0;
157 unsigned long irq_flags;
158
159 wait_event_timeout(*wq, !mdp_check_mask(mask), HZ);
160
161 spin_lock_irqsave(&mdp_lock, irq_flags);
162 if (mdp_irq_mask & mask) {
163 locked_disable_mdp_irq(mdp, mask);
164 printk(KERN_WARNING "timeout waiting for mdp to complete %x\n",
165 mask);
166 ret = -ETIMEDOUT;
167 }
168 spin_unlock_irqrestore(&mdp_lock, irq_flags);
169
170 return ret;
171}
172
173void mdp_dma_wait(struct mdp_device *mdp_dev)
174{
175#define MDP_MAX_TIMEOUTS 20
176 static int timeout_count;
177 struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
178
179 if (mdp_wait(mdp, DL0_DMA2_TERM_DONE, &mdp_dma2_waitqueue) == -ETIMEDOUT)
180 timeout_count++;
181 else
182 timeout_count = 0;
183
184 if (timeout_count > MDP_MAX_TIMEOUTS) {
185 printk(KERN_ERR "mdp: dma failed %d times, somethings wrong!\n",
186 MDP_MAX_TIMEOUTS);
187 BUG();
188 }
189}
190
191static int mdp_ppp_wait(struct mdp_info *mdp)
192{
193 return mdp_wait(mdp, DL0_ROI_DONE, &mdp_ppp_waitqueue);
194}
195
196void mdp_dma_to_mddi(struct mdp_info *mdp, uint32_t addr, uint32_t stride,
197 uint32_t width, uint32_t height, uint32_t x, uint32_t y,
198 struct msmfb_callback *callback)
199{
200 uint32_t dma2_cfg;
201 uint16_t ld_param = 0; /* 0=PRIM, 1=SECD, 2=EXT */
202
203 if (enable_mdp_irq(mdp, DL0_DMA2_TERM_DONE)) {
204 printk(KERN_ERR "mdp_dma_to_mddi: busy\n");
205 return;
206 }
207
208 dma_callback = callback;
209
210 dma2_cfg = DMA_PACK_TIGHT |
211 DMA_PACK_ALIGN_LSB |
212 DMA_PACK_PATTERN_RGB |
213 DMA_OUT_SEL_AHB |
214 DMA_IBUF_NONCONTIGUOUS;
215
216 dma2_cfg |= DMA_IBUF_FORMAT_RGB565;
217
218 dma2_cfg |= DMA_OUT_SEL_MDDI;
219
220 dma2_cfg |= DMA_MDDI_DMAOUT_LCD_SEL_PRIMARY;
221
222 dma2_cfg |= DMA_DITHER_EN;
223
224 /* setup size, address, and stride */
225 mdp_writel(mdp, (height << 16) | (width),
226 MDP_CMD_DEBUG_ACCESS_BASE + 0x0184);
227 mdp_writel(mdp, addr, MDP_CMD_DEBUG_ACCESS_BASE + 0x0188);
228 mdp_writel(mdp, stride, MDP_CMD_DEBUG_ACCESS_BASE + 0x018C);
229
230 /* 666 18BPP */
231 dma2_cfg |= DMA_DSTC0G_6BITS | DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
232
233 /* set y & x offset and MDDI transaction parameters */
234 mdp_writel(mdp, (y << 16) | (x), MDP_CMD_DEBUG_ACCESS_BASE + 0x0194);
235 mdp_writel(mdp, ld_param, MDP_CMD_DEBUG_ACCESS_BASE + 0x01a0);
236 mdp_writel(mdp, (MDDI_VDO_PACKET_DESC << 16) | MDDI_VDO_PACKET_PRIM,
237 MDP_CMD_DEBUG_ACCESS_BASE + 0x01a4);
238
239 mdp_writel(mdp, dma2_cfg, MDP_CMD_DEBUG_ACCESS_BASE + 0x0180);
240
241 /* start DMA2 */
242 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0044);
243}
244
245void mdp_dma(struct mdp_device *mdp_dev, uint32_t addr, uint32_t stride,
246 uint32_t width, uint32_t height, uint32_t x, uint32_t y,
247 struct msmfb_callback *callback, int interface)
248{
249 struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
250
251 if (interface == MSM_MDDI_PMDH_INTERFACE) {
252 mdp_dma_to_mddi(mdp, addr, stride, width, height, x, y,
253 callback);
254 }
255}
256
257int get_img(struct mdp_img *img, struct fb_info *info,
258 unsigned long *start, unsigned long *len,
259 struct file **filep)
260{
261 int put_needed, ret = 0;
262 struct file *file;
263 unsigned long vstart;
264
265#ifdef CONFIG_ANDROID_PMEM
266 if (!get_pmem_file(img->memory_id, start, &vstart, len, filep))
267 return 0;
268#endif
269
270 file = fget_light(img->memory_id, &put_needed);
271 if (file == NULL)
272 return -1;
273
274 if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
275 *start = info->fix.smem_start;
276 *len = info->fix.smem_len;
277 } else
278 ret = -1;
279 fput_light(file, put_needed);
280
281 return ret;
282}
283
/* Drop the PMEM pins taken by get_img().  Either argument may be NULL
 * (e.g. when the image came from the framebuffer rather than PMEM).
 * Without CONFIG_ANDROID_PMEM there is nothing to release. */
void put_img(struct file *src_file, struct file *dst_file)
{
#ifdef CONFIG_ANDROID_PMEM
	if (src_file)
		put_pmem_file(src_file);
	if (dst_file)
		put_pmem_file(dst_file);
#endif
}
293
294int mdp_blit(struct mdp_device *mdp_dev, struct fb_info *fb,
295 struct mdp_blit_req *req)
296{
297 int ret;
298 unsigned long src_start = 0, src_len = 0, dst_start = 0, dst_len = 0;
299 struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
300 struct file *src_file = 0, *dst_file = 0;
301
302 /* WORKAROUND FOR HARDWARE BUG IN BG TILE FETCH */
303 if (unlikely(req->src_rect.h == 0 ||
304 req->src_rect.w == 0)) {
305 printk(KERN_ERR "mpd_ppp: src img of zero size!\n");
306 return -EINVAL;
307 }
308 if (unlikely(req->dst_rect.h == 0 ||
309 req->dst_rect.w == 0))
310 return -EINVAL;
311
312 /* do this first so that if this fails, the caller can always
313 * safely call put_img */
314 if (unlikely(get_img(&req->src, fb, &src_start, &src_len, &src_file))) {
315 printk(KERN_ERR "mpd_ppp: could not retrieve src image from "
316 "memory\n");
317 return -EINVAL;
318 }
319
320 if (unlikely(get_img(&req->dst, fb, &dst_start, &dst_len, &dst_file))) {
321 printk(KERN_ERR "mpd_ppp: could not retrieve dst image from "
322 "memory\n");
323#ifdef CONFIG_ANDROID_PMEM
324 put_pmem_file(src_file);
325#endif
326 return -EINVAL;
327 }
328 mutex_lock(&mdp_mutex);
329
330 /* transp_masking unimplemented */
331 req->transp_mask = MDP_TRANSP_NOP;
332 if (unlikely((req->transp_mask != MDP_TRANSP_NOP ||
333 req->alpha != MDP_ALPHA_NOP ||
334 HAS_ALPHA(req->src.format)) &&
335 (req->flags & MDP_ROT_90 &&
336 req->dst_rect.w <= 16 && req->dst_rect.h >= 16))) {
337 int i;
338 unsigned int tiles = req->dst_rect.h / 16;
339 unsigned int remainder = req->dst_rect.h % 16;
340 req->src_rect.w = 16*req->src_rect.w / req->dst_rect.h;
341 req->dst_rect.h = 16;
342 for (i = 0; i < tiles; i++) {
343 enable_mdp_irq(mdp, DL0_ROI_DONE);
344 ret = mdp_ppp_blit(mdp, req, src_file, src_start,
345 src_len, dst_file, dst_start,
346 dst_len);
347 if (ret)
348 goto err_bad_blit;
349 ret = mdp_ppp_wait(mdp);
350 if (ret)
351 goto err_wait_failed;
352 req->dst_rect.y += 16;
353 req->src_rect.x += req->src_rect.w;
354 }
355 if (!remainder)
356 goto end;
357 req->src_rect.w = remainder*req->src_rect.w / req->dst_rect.h;
358 req->dst_rect.h = remainder;
359 }
360 enable_mdp_irq(mdp, DL0_ROI_DONE);
361 ret = mdp_ppp_blit(mdp, req, src_file, src_start, src_len, dst_file,
362 dst_start,
363 dst_len);
364 if (ret)
365 goto err_bad_blit;
366 ret = mdp_ppp_wait(mdp);
367 if (ret)
368 goto err_wait_failed;
369end:
370 put_img(src_file, dst_file);
371 mutex_unlock(&mdp_mutex);
372 return 0;
373err_bad_blit:
374 disable_mdp_irq(mdp, DL0_ROI_DONE);
375err_wait_failed:
376 put_img(src_file, dst_file);
377 mutex_unlock(&mdp_mutex);
378 return ret;
379}
380
381void mdp_set_grp_disp(struct mdp_device *mdp_dev, unsigned disp_id)
382{
383 struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
384
385 disp_id &= 0xf;
386 mdp_writel(mdp, disp_id, MDP_FULL_BYPASS_WORD43);
387}
388
389int register_mdp_client(struct class_interface *cint)
390{
391 if (!mdp_class) {
392 pr_err("mdp: no mdp_class when registering mdp client\n");
393 return -ENODEV;
394 }
395 cint->class = mdp_class;
396 return class_interface_register(cint);
397}
398
399#include "mdp_csc_table.h"
400#include "mdp_scale_tables.h"
401
402int mdp_probe(struct platform_device *pdev)
403{
404 struct resource *resource;
405 int ret;
406 int n;
407 struct mdp_info *mdp;
408
409 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
410 if (!resource) {
411 pr_err("mdp: can not get mdp mem resource!\n");
412 return -ENOMEM;
413 }
414
415 mdp = kzalloc(sizeof(struct mdp_info), GFP_KERNEL);
416 if (!mdp)
417 return -ENOMEM;
418
419 mdp->irq = platform_get_irq(pdev, 0);
420 if (mdp->irq < 0) {
421 pr_err("mdp: can not get mdp irq\n");
422 ret = mdp->irq;
423 goto error_get_irq;
424 }
425
426 mdp->base = ioremap(resource->start,
427 resource->end - resource->start);
428 if (mdp->base == 0) {
429 printk(KERN_ERR "msmfb: cannot allocate mdp regs!\n");
430 ret = -ENOMEM;
431 goto error_ioremap;
432 }
433
434 mdp->mdp_dev.dma = mdp_dma;
435 mdp->mdp_dev.dma_wait = mdp_dma_wait;
436 mdp->mdp_dev.blit = mdp_blit;
437 mdp->mdp_dev.set_grp_disp = mdp_set_grp_disp;
438
439 clk = clk_get(&pdev->dev, "mdp_clk");
440 if (IS_ERR(clk)) {
441 printk(KERN_INFO "mdp: failed to get mdp clk");
442 return PTR_ERR(clk);
443 }
444
445 ret = request_irq(mdp->irq, mdp_isr, IRQF_DISABLED, "msm_mdp", mdp);
446 if (ret)
447 goto error_request_irq;
448 disable_irq(mdp->irq);
449 mdp_irq_mask = 0;
450
451 /* debug interface write access */
452 mdp_writel(mdp, 1, 0x60);
453
454 mdp_writel(mdp, MDP_ANY_INTR_MASK, MDP_INTR_ENABLE);
455 mdp_writel(mdp, 1, MDP_EBI2_PORTMAP_MODE);
456
457 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01f8);
458 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01fc);
459
460 for (n = 0; n < ARRAY_SIZE(csc_table); n++)
461 mdp_writel(mdp, csc_table[n].val, csc_table[n].reg);
462
463 /* clear up unused fg/main registers */
464 /* comp.plane 2&3 ystride */
465 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0120);
466
467 /* unpacked pattern */
468 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x012c);
469 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0130);
470 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0134);
471 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0158);
472 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x015c);
473 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0160);
474 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0170);
475 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0174);
476 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x017c);
477
478 /* comp.plane 2 & 3 */
479 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0114);
480 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0118);
481
482 /* clear unused bg registers */
483 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01c8);
484 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01d0);
485 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01dc);
486 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e0);
487 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e4);
488
489 for (n = 0; n < ARRAY_SIZE(mdp_upscale_table); n++)
490 mdp_writel(mdp, mdp_upscale_table[n].val,
491 mdp_upscale_table[n].reg);
492
493 for (n = 0; n < 9; n++)
494 mdp_writel(mdp, mdp_default_ccs[n], 0x40440 + 4 * n);
495 mdp_writel(mdp, mdp_default_ccs[9], 0x40500 + 4 * 0);
496 mdp_writel(mdp, mdp_default_ccs[10], 0x40500 + 4 * 0);
497 mdp_writel(mdp, mdp_default_ccs[11], 0x40500 + 4 * 0);
498
499 /* register mdp device */
500 mdp->mdp_dev.dev.parent = &pdev->dev;
501 mdp->mdp_dev.dev.class = mdp_class;
502 snprintf(mdp->mdp_dev.dev.bus_id, BUS_ID_SIZE, "mdp%d", pdev->id);
503
504 /* if you can remove the platform device you'd have to implement
505 * this:
506 mdp_dev.release = mdp_class; */
507
508 ret = device_register(&mdp->mdp_dev.dev);
509 if (ret)
510 goto error_device_register;
511 return 0;
512
513error_device_register:
514 free_irq(mdp->irq, mdp);
515error_request_irq:
516 iounmap(mdp->base);
517error_get_irq:
518error_ioremap:
519 kfree(mdp);
520 return ret;
521}
522
/* Binds to the "msm_mdp" platform device registered by board code. */
static struct platform_driver msm_mdp_driver = {
	.probe = mdp_probe,
	.driver = {.name = "msm_mdp"},
};
527
528static int __init mdp_init(void)
529{
530 mdp_class = class_create(THIS_MODULE, "msm_mdp");
531 if (IS_ERR(mdp_class)) {
532 printk(KERN_ERR "Error creating mdp class\n");
533 return PTR_ERR(mdp_class);
534 }
535 return platform_driver_register(&msm_mdp_driver);
536}
537
538subsys_initcall(mdp_init);