/*
 * Copyright 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License version 2. See the file COPYING in the main
 * directory of this archive for more details.
 *
 * Authors: Matthew Garrett
 *          Dave Airlie
 */
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>

#include <linux/fb.h>

#include "cirrus_drv.h"

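/*
 * Copy a damaged rectangle from the vmalloc'ed shadow framebuffer into
 * the VRAM bo that is scanned out. Damage is merged with any stored
 * rectangle under dirty_lock; if the bo cannot be reserved because it
 * is being moved, the merged damage is stored and flushed later.
 */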
static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
				int x, int y, int width, int height)
{
	int i;
	struct drm_gem_object *obj;
	struct cirrus_bo *bo;
	int src_offset;
	int bpp = (afbdev->gfb.base.bits_per_pixel + 7) / 8;
	int ret;
	bool unmap = false;
	bool store_for_later = false;
	int x2, y2;
	unsigned long flags;

	obj = afbdev->gfb.obj;
	bo = gem_to_cirrus_bo(obj);

	/*
	 * Try to reserve the bo; if that fails with -EBUSY the bo is
	 * being moved and we should store up the damage until later.
	 */
	ret = cirrus_bo_reserve(bo, true);
	if (ret) {
		if (ret != -EBUSY)
			return;
		store_for_later = true;
	}

	x2 = x + width - 1;
	y2 = y + height - 1;
	spin_lock_irqsave(&afbdev->dirty_lock, flags);

	/* merge with any damage stored from earlier, failed updates */
	if (afbdev->y1 < y)
		y = afbdev->y1;
	if (afbdev->y2 > y2)
		y2 = afbdev->y2;
	if (afbdev->x1 < x)
		x = afbdev->x1;
	if (afbdev->x2 > x2)
		x2 = afbdev->x2;

	if (store_for_later) {
		afbdev->x1 = x;
		afbdev->x2 = x2;
		afbdev->y1 = y;
		afbdev->y2 = y2;
		spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
		return;
	}

	afbdev->x1 = afbdev->y1 = INT_MAX;
	afbdev->x2 = afbdev->y2 = 0;
	spin_unlock_irqrestore(&afbdev->dirty_lock, flags);

	if (!bo->kmap.virtual) {
		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
		if (ret) {
			DRM_ERROR("failed to kmap fb updates\n");
			cirrus_bo_unreserve(bo);
			return;
		}
		unmap = true;
	}

	/* copy the merged rectangle, not just the one passed in */
	for (i = y; i <= y2; i++) {
		/* assume equal stride for now */
		src_offset = i * afbdev->gfb.base.pitches[0] + (x * bpp);
		memcpy_toio(bo->kmap.virtual + src_offset,
			    afbdev->sysram + src_offset,
			    (x2 - x + 1) * bpp);
	}

	if (unmap)
		ttm_bo_kunmap(&bo->kmap);

	cirrus_bo_unreserve(bo);
}

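/*
 * fbdev drawing entry points: render with the generic sys_* helpers
 * into the shadow buffer, then flush the touched rectangle to VRAM.
 */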
static void cirrus_fillrect(struct fb_info *info,
			    const struct fb_fillrect *rect)
{
	struct cirrus_fbdev *afbdev = info->par;

	sys_fillrect(info, rect);
	cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
			    rect->height);
}

static void cirrus_copyarea(struct fb_info *info,
			    const struct fb_copyarea *area)
{
	struct cirrus_fbdev *afbdev = info->par;

	sys_copyarea(info, area);
	cirrus_dirty_update(afbdev, area->dx, area->dy, area->width,
			    area->height);
}

static void cirrus_imageblit(struct fb_info *info,
			     const struct fb_image *image)
{
	struct cirrus_fbdev *afbdev = info->par;

	sys_imageblit(info, image);
	cirrus_dirty_update(afbdev, image->dx, image->dy, image->width,
			    image->height);
}

static struct fb_ops cirrusfb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_fillrect = cirrus_fillrect,
	.fb_copyarea = cirrus_copyarea,
	.fb_imageblit = cirrus_imageblit,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
};

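/*
 * Allocate the GEM object that will back the fbdev framebuffer;
 * formats deeper than 24bpp are rejected.
 */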
static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_gem_object **gobj_p)
{
	struct drm_device *dev = afbdev->helper.dev;
	u32 bpp, depth;
	u32 size;
	struct drm_gem_object *gobj;
	int ret = 0;

	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);

	if (bpp > 24)
		return -EINVAL;

	size = mode_cmd->pitches[0] * mode_cmd->height;
	ret = cirrus_gem_create(dev, size, true, &gobj);
	if (ret)
		return ret;

	*gobj_p = gobj;
	return ret;
}

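/*
 * fb_probe callback: create the scanout bo and a vmalloc'ed shadow
 * buffer for it, then register an fb_info tying the two together.
 */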
static int cirrusfb_create(struct drm_fb_helper *helper,
			   struct drm_fb_helper_surface_size *sizes)
{
	struct cirrus_fbdev *gfbdev = (struct cirrus_fbdev *)helper;
	struct drm_device *dev = gfbdev->helper.dev;
	struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd;
	struct device *device = &dev->pdev->dev;
	void *sysram;
	struct drm_gem_object *gobj = NULL;
	struct cirrus_bo *bo = NULL;
	int size, ret;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);
	size = mode_cmd.pitches[0] * mode_cmd.height;

	ret = cirrusfb_create_object(gfbdev, &mode_cmd, &gobj);
	if (ret) {
		DRM_ERROR("failed to create fbcon backing object %d\n", ret);
		return ret;
	}

	bo = gem_to_cirrus_bo(gobj);

	sysram = vmalloc(size);
	if (!sysram) {
		ret = -ENOMEM;
		goto out_unref;
	}

	info = framebuffer_alloc(0, device);
	if (info == NULL) {
		ret = -ENOMEM;
		goto out_vfree;
	}

	info->par = gfbdev;

	ret = cirrus_framebuffer_init(cdev->dev, &gfbdev->gfb, &mode_cmd, gobj);
	if (ret)
		goto out_release;

	gfbdev->sysram = sysram;
	gfbdev->size = size;

	fb = &gfbdev->gfb.base;

	/* setup helper */
	gfbdev->helper.fb = fb;
	gfbdev->helper.fbdev = info;

	strcpy(info->fix.id, "cirrusdrmfb");

	info->flags = FBINFO_DEFAULT;
	info->fbops = &cirrusfb_ops;

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &gfbdev->helper, sizes->fb_width,
			       sizes->fb_height);

	/* setup aperture base/size for vesafb takeover */
	info->apertures = alloc_apertures(1);
	if (!info->apertures)
		return -ENOMEM;

	info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
	info->apertures->ranges[0].size = cdev->mc.vram_size;

	info->screen_base = sysram;
	info->screen_size = size;

	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
		return -ENOMEM;
	}

	DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
	DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start);
	DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len);
	DRM_INFO("fb depth is %d\n", fb->depth);
	DRM_INFO("   pitch is %d\n", fb->pitches[0]);

	return 0;

out_release:
	framebuffer_release(info);
out_vfree:
	vfree(sysram);
out_unref:
	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

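/*
 * Unwind cirrusfb_create(): unregister and release the fb_info, drop
 * the GEM reference and free the shadow buffer.
 */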
static int cirrus_fbdev_destroy(struct drm_device *dev,
				struct cirrus_fbdev *gfbdev)
{
	struct fb_info *info;
	struct cirrus_framebuffer *gfb = &gfbdev->gfb;

	if (gfbdev->helper.fbdev) {
		info = gfbdev->helper.fbdev;

		unregister_framebuffer(info);
		if (info->cmap.len)
			fb_dealloc_cmap(&info->cmap);
		framebuffer_release(info);
	}

	if (gfb->obj) {
		drm_gem_object_unreference_unlocked(gfb->obj);
		gfb->obj = NULL;
	}

	vfree(gfbdev->sysram);
	drm_fb_helper_fini(&gfbdev->helper);
	drm_framebuffer_unregister_private(&gfb->base);
	drm_framebuffer_cleanup(&gfb->base);

	return 0;
}

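/* fb helper hooks: gamma pass-through to the CRTC and the initial probe */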
static struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
	.gamma_set = cirrus_crtc_fb_gamma_set,
	.gamma_get = cirrus_crtc_fb_gamma_get,
	.fb_probe = cirrusfb_create,
};

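/*
 * Set up fbdev emulation: allocate the cirrus_fbdev state, register it
 * with the fb helper and let the helper probe an initial configuration.
 */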
int cirrus_fbdev_init(struct cirrus_device *cdev)
{
	struct cirrus_fbdev *gfbdev;
	int ret;
	int bpp_sel = 24;

	gfbdev = kzalloc(sizeof(struct cirrus_fbdev), GFP_KERNEL);
	if (!gfbdev)
		return -ENOMEM;

	cdev->mode_info.gfbdev = gfbdev;
	gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
	spin_lock_init(&gfbdev->dirty_lock);

	ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
				 cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
	if (ret) {
		kfree(gfbdev);
		return ret;
	}
	drm_fb_helper_single_add_all_connectors(&gfbdev->helper);

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(cdev->dev);
	drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);

	return 0;
}

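/* Tear down the fbdev emulation set up by cirrus_fbdev_init(). */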
void cirrus_fbdev_fini(struct cirrus_device *cdev)
{
	if (!cdev->mode_info.gfbdev)
		return;

	cirrus_fbdev_destroy(cdev->dev, cdev->mode_info.gfbdev);
	kfree(cdev->mode_info.gfbdev);
	cdev->mode_info.gfbdev = NULL;
}