blob: 68f0045f86b860d7ecd4b7477a98719a950a7f95 [file] [log] [blame]
Inki Dae1c248b72011-10-04 19:19:01 +09001/* exynos_drm_fbdev.c
2 *
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * Authors:
5 * Inki Dae <inki.dae@samsung.com>
6 * Joonyoung Shim <jy0922.shim@samsung.com>
7 * Seung-Woo Kim <sw0312.kim@samsung.com>
8 *
Inki Daed81aecb2012-12-18 02:30:17 +09009 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
Inki Dae1c248b72011-10-04 19:19:01 +090013 */
14
David Howells760285e2012-10-02 18:01:07 +010015#include <drm/drmP.h>
16#include <drm/drm_crtc.h>
17#include <drm/drm_fb_helper.h>
18#include <drm/drm_crtc_helper.h>
Inki Dae1c248b72011-10-04 19:19:01 +090019
20#include "exynos_drm_drv.h"
21#include "exynos_drm_fb.h"
Inki Dae2c871122011-11-12 15:23:32 +090022#include "exynos_drm_gem.h"
Inki Daec704f1b2012-12-21 17:59:20 +090023#include "exynos_drm_iommu.h"
Inki Dae1c248b72011-10-04 19:19:01 +090024
/* upper bound on connectors the fb helper will scan (see exynos_drm_fbdev_init) */
#define MAX_CONNECTOR		4
/* default bits-per-pixel requested from drm_fb_helper_initial_config() */
#define PREFERRED_BPP		32

/* recover the exynos fbdev wrapper from the embedded drm_fb_helper */
#define to_exynos_fbdev(x)	container_of(x, struct exynos_drm_fbdev,\
				drm_fb_helper)

/*
 * Per-device fbdev emulation state: the generic DRM fb helper plus the
 * GEM object that backs the fbdev framebuffer memory.
 */
struct exynos_drm_fbdev {
	struct drm_fb_helper		drm_fb_helper;	/* must stay first for to_exynos_fbdev() */
	struct exynos_drm_gem_obj	*exynos_gem_obj;	/* backing storage, owned by this fbdev */
};
35
Prathyush Kdd265852012-11-19 13:55:28 +053036static int exynos_drm_fb_mmap(struct fb_info *info,
37 struct vm_area_struct *vma)
38{
39 struct drm_fb_helper *helper = info->par;
40 struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
41 struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
42 struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
43 unsigned long vm_size;
44 int ret;
45
46 DRM_DEBUG_KMS("%s\n", __func__);
47
48 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
49
50 vm_size = vma->vm_end - vma->vm_start;
51
52 if (vm_size > buffer->size)
53 return -EINVAL;
54
Inki Dae4744ad22012-12-07 17:51:27 +090055 ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
Prathyush Kdd265852012-11-19 13:55:28 +053056 buffer->dma_addr, buffer->size, &buffer->dma_attrs);
57 if (ret < 0) {
58 DRM_ERROR("failed to mmap.\n");
59 return ret;
60 }
61
62 return 0;
63}
64
/*
 * fbdev file operations: mmap is driver-specific so userspace maps the
 * GEM-backed buffer directly; drawing ops use the generic cfb_* helpers
 * on the kernel mapping, and mode/blank/cmap handling is delegated to
 * the DRM fb helper layer.
 */
static struct fb_ops exynos_drm_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_mmap        = exynos_drm_fb_mmap,
	.fb_fillrect	= cfb_fillrect,
	.fb_copyarea	= cfb_copyarea,
	.fb_imageblit	= cfb_imageblit,
	.fb_check_var	= drm_fb_helper_check_var,
	.fb_set_par	= drm_fb_helper_set_par,
	.fb_blank	= drm_fb_helper_blank,
	.fb_pan_display	= drm_fb_helper_pan_display,
	.fb_setcmap	= drm_fb_helper_setcmap,
};
77
/*
 * exynos_drm_fbdev_update - fill the fb_info from the drm_framebuffer
 *
 * Publishes the framebuffer geometry to fbdev, creates a kernel virtual
 * mapping of the backing buffer if one does not exist yet, and points
 * the fb_info screen/smem fields at it.
 *
 * Returns 0 on success, -EFAULT if the fb has no buffer, -EIO if the
 * buffer cannot be mapped into kernel space.
 */
static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
				     struct drm_framebuffer *fb)
{
	struct fb_info *fbi = helper->fbdev;
	struct drm_device *dev = helper->dev;
	struct exynos_drm_gem_buf *buffer;
	unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
	unsigned long offset;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);

	/* RGB formats use only one buffer */
	buffer = exynos_drm_fb_buffer(fb, 0);
	if (!buffer) {
		DRM_LOG_KMS("buffer is null.\n");
		return -EFAULT;
	}

	/* map pages with kernel virtual space. */
	if (!buffer->kvaddr) {
		if (is_drm_iommu_supported(dev)) {
			/*
			 * With an IOMMU the pages need not be physically
			 * contiguous, so build a write-combined vmap over
			 * the page array.
			 */
			unsigned int nr_pages = buffer->size >> PAGE_SHIFT;

			buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
					pgprot_writecombine(PAGE_KERNEL));
		} else {
			/*
			 * Without an IOMMU the buffer is contiguous and
			 * dma_addr is treated as a physical address.
			 * NOTE(review): assumes dma_addr == phys here —
			 * only valid with a direct-mapped DMA setup.
			 */
			phys_addr_t dma_addr = buffer->dma_addr;
			if (dma_addr)
				buffer->kvaddr = phys_to_virt(dma_addr);
			else
				buffer->kvaddr = (void __iomem *)NULL;
		}
		if (!buffer->kvaddr) {
			DRM_ERROR("failed to map pages to kernel space.\n");
			return -EIO;
		}
	}

	/* buffer count to framebuffer always is 1 at booting time. */
	exynos_drm_fb_set_buf_cnt(fb, 1);

	/* byte offset of the (xoffset, yoffset) origin within the buffer */
	offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
	offset += fbi->var.yoffset * fb->pitches[0];

	dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
	fbi->screen_base = buffer->kvaddr + offset;
	/* smem_start must be a physical address for fbdev userspace */
	if (is_drm_iommu_supported(dev))
		fbi->fix.smem_start = (unsigned long)
			(page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
	else
		fbi->fix.smem_start = (unsigned long)buffer->dma_addr;

	fbi->screen_size = size;
	fbi->fix.smem_len = size;

	return 0;
}
138
139static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
140 struct drm_fb_helper_surface_size *sizes)
141{
142 struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
Joonyoung Shime1533c02011-12-13 14:46:57 +0900143 struct exynos_drm_gem_obj *exynos_gem_obj;
Inki Dae1c248b72011-10-04 19:19:01 +0900144 struct drm_device *dev = helper->dev;
145 struct fb_info *fbi;
Joonyoung Shima794d572011-12-08 15:05:19 +0900146 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
Inki Dae1c248b72011-10-04 19:19:01 +0900147 struct platform_device *pdev = dev->platformdev;
Joonyoung Shime1533c02011-12-13 14:46:57 +0900148 unsigned long size;
Inki Dae1c248b72011-10-04 19:19:01 +0900149 int ret;
150
151 DRM_DEBUG_KMS("%s\n", __FILE__);
152
153 DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n",
154 sizes->surface_width, sizes->surface_height,
155 sizes->surface_bpp);
156
157 mode_cmd.width = sizes->surface_width;
158 mode_cmd.height = sizes->surface_height;
Joonyoung Shima794d572011-12-08 15:05:19 +0900159 mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
160 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
161 sizes->surface_depth);
Inki Dae1c248b72011-10-04 19:19:01 +0900162
163 mutex_lock(&dev->struct_mutex);
164
165 fbi = framebuffer_alloc(0, &pdev->dev);
166 if (!fbi) {
167 DRM_ERROR("failed to allocate fb info.\n");
168 ret = -ENOMEM;
169 goto out;
170 }
171
Joonyoung Shime1533c02011-12-13 14:46:57 +0900172 size = mode_cmd.pitches[0] * mode_cmd.height;
Inki Dae2b358922012-03-16 18:47:05 +0900173
174 /* 0 means to allocate physically continuous memory */
175 exynos_gem_obj = exynos_drm_gem_create(dev, 0, size);
Joonyoung Shime1533c02011-12-13 14:46:57 +0900176 if (IS_ERR(exynos_gem_obj)) {
177 ret = PTR_ERR(exynos_gem_obj);
Inki Dae662aa6d2012-12-07 18:06:43 +0900178 goto err_release_framebuffer;
Inki Dae1c248b72011-10-04 19:19:01 +0900179 }
180
Joonyoung Shime1533c02011-12-13 14:46:57 +0900181 exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
182
183 helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
184 &exynos_gem_obj->base);
185 if (IS_ERR_OR_NULL(helper->fb)) {
186 DRM_ERROR("failed to create drm framebuffer.\n");
187 ret = PTR_ERR(helper->fb);
Inki Dae662aa6d2012-12-07 18:06:43 +0900188 goto err_destroy_gem;
Joonyoung Shime1533c02011-12-13 14:46:57 +0900189 }
190
Inki Dae1c248b72011-10-04 19:19:01 +0900191 helper->fbdev = fbi;
192
193 fbi->par = helper;
194 fbi->flags = FBINFO_FLAG_DEFAULT;
195 fbi->fbops = &exynos_drm_fb_ops;
196
197 ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
198 if (ret) {
199 DRM_ERROR("failed to allocate cmap.\n");
Inki Dae662aa6d2012-12-07 18:06:43 +0900200 goto err_destroy_framebuffer;
Inki Dae1c248b72011-10-04 19:19:01 +0900201 }
202
Seung-Woo Kimaa6b2b62011-11-04 13:44:38 +0900203 ret = exynos_drm_fbdev_update(helper, helper->fb);
Inki Dae662aa6d2012-12-07 18:06:43 +0900204 if (ret < 0)
205 goto err_dealloc_cmap;
206
207 mutex_unlock(&dev->struct_mutex);
208 return ret;
209
210err_dealloc_cmap:
211 fb_dealloc_cmap(&fbi->cmap);
212err_destroy_framebuffer:
213 drm_framebuffer_cleanup(helper->fb);
214err_destroy_gem:
215 exynos_drm_gem_destroy(exynos_gem_obj);
216err_release_framebuffer:
217 framebuffer_release(fbi);
Inki Dae1c248b72011-10-04 19:19:01 +0900218
219/*
220 * if failed, all resources allocated above would be released by
221 * drm_mode_config_cleanup() when drm_load() had been called prior
222 * to any specific driver such as fimd or hdmi driver.
223 */
224out:
225 mutex_unlock(&dev->struct_mutex);
226 return ret;
227}
228
/* fb helper callbacks: .fb_probe is invoked when a new fbdev surface is needed */
static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
	.fb_probe =	exynos_drm_fbdev_create,
};
232
233int exynos_drm_fbdev_init(struct drm_device *dev)
234{
235 struct exynos_drm_fbdev *fbdev;
236 struct exynos_drm_private *private = dev->dev_private;
237 struct drm_fb_helper *helper;
238 unsigned int num_crtc;
239 int ret;
240
241 DRM_DEBUG_KMS("%s\n", __FILE__);
242
243 if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
244 return 0;
245
246 fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
247 if (!fbdev) {
248 DRM_ERROR("failed to allocate drm fbdev.\n");
249 return -ENOMEM;
250 }
251
252 private->fb_helper = helper = &fbdev->drm_fb_helper;
253 helper->funcs = &exynos_drm_fb_helper_funcs;
254
255 num_crtc = dev->mode_config.num_crtc;
256
257 ret = drm_fb_helper_init(dev, helper, num_crtc, MAX_CONNECTOR);
258 if (ret < 0) {
259 DRM_ERROR("failed to initialize drm fb helper.\n");
260 goto err_init;
261 }
262
263 ret = drm_fb_helper_single_add_all_connectors(helper);
264 if (ret < 0) {
265 DRM_ERROR("failed to register drm_fb_helper_connector.\n");
266 goto err_setup;
267
268 }
269
Daniel Vetter76a39db2013-01-20 23:12:54 +0100270 /* disable all the possible outputs/crtcs before entering KMS mode */
271 drm_helper_disable_unused_functions(dev);
272
Inki Dae1c248b72011-10-04 19:19:01 +0900273 ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
274 if (ret < 0) {
275 DRM_ERROR("failed to set up hw configuration.\n");
276 goto err_setup;
277 }
278
279 return 0;
280
281err_setup:
282 drm_fb_helper_fini(helper);
283
284err_init:
285 private->fb_helper = NULL;
286 kfree(fbdev);
287
288 return ret;
289}
290
291static void exynos_drm_fbdev_destroy(struct drm_device *dev,
292 struct drm_fb_helper *fb_helper)
293{
Inki Dae4744ad22012-12-07 17:51:27 +0900294 struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
295 struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
Inki Dae1c248b72011-10-04 19:19:01 +0900296 struct drm_framebuffer *fb;
297
Inki Daec704f1b2012-12-21 17:59:20 +0900298 if (is_drm_iommu_supported(dev) && exynos_gem_obj->buffer->kvaddr)
Inki Dae4744ad22012-12-07 17:51:27 +0900299 vunmap(exynos_gem_obj->buffer->kvaddr);
300
Inki Dae1c248b72011-10-04 19:19:01 +0900301 /* release drm framebuffer and real buffer */
302 if (fb_helper->fb && fb_helper->fb->funcs) {
303 fb = fb_helper->fb;
Daniel Vetter36206362012-12-10 20:42:17 +0100304 if (fb) {
305 drm_framebuffer_unregister_private(fb);
Rob Clarkf7eff602012-09-05 21:48:38 +0000306 drm_framebuffer_remove(fb);
Daniel Vetter36206362012-12-10 20:42:17 +0100307 }
Inki Dae1c248b72011-10-04 19:19:01 +0900308 }
309
310 /* release linux framebuffer */
311 if (fb_helper->fbdev) {
312 struct fb_info *info;
313 int ret;
314
315 info = fb_helper->fbdev;
316 ret = unregister_framebuffer(info);
317 if (ret < 0)
318 DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
319
320 if (info->cmap.len)
321 fb_dealloc_cmap(&info->cmap);
322
323 framebuffer_release(info);
324 }
325
326 drm_fb_helper_fini(fb_helper);
327}
328
329void exynos_drm_fbdev_fini(struct drm_device *dev)
330{
331 struct exynos_drm_private *private = dev->dev_private;
332 struct exynos_drm_fbdev *fbdev;
333
334 if (!private || !private->fb_helper)
335 return;
336
337 fbdev = to_exynos_fbdev(private->fb_helper);
338
Joonyoung Shime1533c02011-12-13 14:46:57 +0900339 if (fbdev->exynos_gem_obj)
340 exynos_drm_gem_destroy(fbdev->exynos_gem_obj);
341
Inki Dae1c248b72011-10-04 19:19:01 +0900342 exynos_drm_fbdev_destroy(dev, private->fb_helper);
343 kfree(fbdev);
344 private->fb_helper = NULL;
345}
346
/*
 * exynos_drm_fbdev_restore_mode - switch the display back to fbdev
 *
 * Restores the fbdev mode configuration (e.g. on lastclose) under the
 * global modeset locks.  No-op if fbdev emulation was never set up.
 */
void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
{
	struct exynos_drm_private *private = dev->dev_private;

	if (!private || !private->fb_helper)
		return;

	/* all modeset locks must be held around the restore */
	drm_modeset_lock_all(dev);
	drm_fb_helper_restore_fbdev_mode(private->fb_helper);
	drm_modeset_unlock_all(dev);
}
Inki Dae1c248b72011-10-04 19:19:01 +0900357}