/*
 * Copyright 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License version 2. See the file COPYING in the main
 * directory of this archive for more details.
 *
 * Authors: Matthew Garrett
 *          Dave Airlie
 */
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>

#include <linux/fb.h>

#include "cirrus_drv.h"

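/*
 * Copy the dirty region of the system-memory shadow buffer into the
 * VRAM-backed BO.  If the BO cannot be reserved right now (it is busy
 * being moved, or we are in a context that may not sleep), the damage
 * rectangle is accumulated under dirty_lock and flushed by a later call.
 */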
static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
				int x, int y, int width, int height)
{
	int i;
	struct drm_gem_object *obj;
	struct cirrus_bo *bo;
	int src_offset, dst_offset;
	int bpp = (afbdev->gfb.base.bits_per_pixel + 7) / 8;
	int ret = -EBUSY;
	bool unmap = false;
	bool store_for_later = false;
	int x2, y2;
	unsigned long flags;

	obj = afbdev->gfb.obj;
	bo = gem_to_cirrus_bo(obj);

	/*
	 * try and reserve the BO, if we fail with busy
	 * then the BO is being moved and we should
	 * store up the damage until later.
	 */
	if (drm_can_sleep())
		ret = cirrus_bo_reserve(bo, true);
	if (ret) {
		if (ret != -EBUSY)
			return;
		store_for_later = true;
	}

	x2 = x + width - 1;
	y2 = y + height - 1;
	spin_lock_irqsave(&afbdev->dirty_lock, flags);

	if (afbdev->y1 < y)
		y = afbdev->y1;
	if (afbdev->y2 > y2)
		y2 = afbdev->y2;
	if (afbdev->x1 < x)
		x = afbdev->x1;
	if (afbdev->x2 > x2)
		x2 = afbdev->x2;

	if (store_for_later) {
		afbdev->x1 = x;
		afbdev->x2 = x2;
		afbdev->y1 = y;
		afbdev->y2 = y2;
		spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
		return;
	}

	afbdev->x1 = afbdev->y1 = INT_MAX;
	afbdev->x2 = afbdev->y2 = 0;
	spin_unlock_irqrestore(&afbdev->dirty_lock, flags);

	if (!bo->kmap.virtual) {
		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
		if (ret) {
			DRM_ERROR("failed to kmap fb updates\n");
			cirrus_bo_unreserve(bo);
			return;
		}
		unmap = true;
	}
	for (i = y; i <= y2; i++) {
		/* assume equal stride for now; copy the merged dirty rectangle */
		src_offset = dst_offset = i * afbdev->gfb.base.pitches[0] + (x * bpp);
		memcpy_toio(bo->kmap.virtual + src_offset,
			    afbdev->sysram + src_offset, (x2 - x + 1) * bpp);
	}
	if (unmap)
		ttm_bo_kunmap(&bo->kmap);

	cirrus_bo_unreserve(bo);
}

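/*
 * fbdev drawing entry points: render into the system-memory shadow buffer
 * with the generic sys_* helpers, then push the touched rectangle out to
 * the VRAM object.
 */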
static void cirrus_fillrect(struct fb_info *info,
			    const struct fb_fillrect *rect)
{
	struct cirrus_fbdev *afbdev = info->par;
	sys_fillrect(info, rect);
	cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
			    rect->height);
}

static void cirrus_copyarea(struct fb_info *info,
			    const struct fb_copyarea *area)
{
	struct cirrus_fbdev *afbdev = info->par;
	sys_copyarea(info, area);
	cirrus_dirty_update(afbdev, area->dx, area->dy, area->width,
			    area->height);
}

static void cirrus_imageblit(struct fb_info *info,
			     const struct fb_image *image)
{
	struct cirrus_fbdev *afbdev = info->par;
	sys_imageblit(info, image);
	cirrus_dirty_update(afbdev, image->dx, image->dy, image->width,
			    image->height);
}

static struct fb_ops cirrusfb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_fillrect = cirrus_fillrect,
	.fb_copyarea = cirrus_copyarea,
	.fb_imageblit = cirrus_imageblit,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
};

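/*
 * Allocate the GEM object that backs the fbcon framebuffer.  The driver
 * only handles depths up to 24 bpp, so anything larger is rejected.
 */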
static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_gem_object **gobj_p)
{
	struct drm_device *dev = afbdev->helper.dev;
	u32 bpp, depth;
	u32 size;
	struct drm_gem_object *gobj;
	int ret = 0;

	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);

	if (bpp > 24)
		return -EINVAL;

	size = mode_cmd->pitches[0] * mode_cmd->height;
	ret = cirrus_gem_create(dev, size, true, &gobj);
	if (ret)
		return ret;

	*gobj_p = gobj;
	return ret;
}

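/*
 * drm_fb_helper .fb_probe callback: create the GEM backing object, a
 * vmalloc'ed system-memory shadow buffer and the fbdev info structure,
 * and point the fb helper at the resulting framebuffer.
 */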
static int cirrusfb_create(struct drm_fb_helper *helper,
			   struct drm_fb_helper_surface_size *sizes)
{
	struct cirrus_fbdev *gfbdev =
		container_of(helper, struct cirrus_fbdev, helper);
	struct drm_device *dev = gfbdev->helper.dev;
	struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd;
	struct device *device = &dev->pdev->dev;
	void *sysram;
	struct drm_gem_object *gobj = NULL;
	struct cirrus_bo *bo = NULL;
	int size, ret;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);
	size = mode_cmd.pitches[0] * mode_cmd.height;

	ret = cirrusfb_create_object(gfbdev, &mode_cmd, &gobj);
	if (ret) {
		DRM_ERROR("failed to create fbcon backing object %d\n", ret);
		return ret;
	}

	bo = gem_to_cirrus_bo(gobj);

	sysram = vmalloc(size);
	if (!sysram)
		return -ENOMEM;

	info = framebuffer_alloc(0, device);
	if (info == NULL)
		return -ENOMEM;

	info->par = gfbdev;

	ret = cirrus_framebuffer_init(cdev->dev, &gfbdev->gfb, &mode_cmd, gobj);
	if (ret)
		return ret;

	gfbdev->sysram = sysram;
	gfbdev->size = size;

	fb = &gfbdev->gfb.base;
	if (!fb) {
		DRM_INFO("fb is NULL\n");
		return -EINVAL;
	}

	/* setup helper */
	gfbdev->helper.fb = fb;
	gfbdev->helper.fbdev = info;

	strcpy(info->fix.id, "cirrusdrmfb");

	info->flags = FBINFO_DEFAULT;
	info->fbops = &cirrusfb_ops;

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &gfbdev->helper, sizes->fb_width,
			       sizes->fb_height);

	/* setup aperture base/size for vesafb takeover */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto out_iounmap;
	}
	info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
	info->apertures->ranges[0].size = cdev->mc.vram_size;

	info->fix.smem_start = cdev->dev->mode_config.fb_base;
	info->fix.smem_len = cdev->mc.vram_size;

	info->screen_base = sysram;
	info->screen_size = size;

	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
		ret = -ENOMEM;
		goto out_iounmap;
	}

	DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
	DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start);
	DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len);
	DRM_INFO("fb depth is %d\n", fb->depth);
	DRM_INFO("   pitch is %d\n", fb->pitches[0]);

	return 0;
out_iounmap:
	return ret;
}

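/*
 * Tear down the fbdev emulation: unregister and release the fb_info,
 * drop the GEM backing object, free the shadow buffer and clean up the
 * fb helper and framebuffer.
 */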
static int cirrus_fbdev_destroy(struct drm_device *dev,
				struct cirrus_fbdev *gfbdev)
{
	struct fb_info *info;
	struct cirrus_framebuffer *gfb = &gfbdev->gfb;

	if (gfbdev->helper.fbdev) {
		info = gfbdev->helper.fbdev;

		unregister_framebuffer(info);
		if (info->cmap.len)
			fb_dealloc_cmap(&info->cmap);
		framebuffer_release(info);
	}

	if (gfb->obj) {
		drm_gem_object_unreference_unlocked(gfb->obj);
		gfb->obj = NULL;
	}

	vfree(gfbdev->sysram);
	drm_fb_helper_fini(&gfbdev->helper);
	drm_framebuffer_unregister_private(&gfb->base);
	drm_framebuffer_cleanup(&gfb->base);

	return 0;
}

static const struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
	.gamma_set = cirrus_crtc_fb_gamma_set,
	.gamma_get = cirrus_crtc_fb_gamma_get,
	.fb_probe = cirrusfb_create,
};

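/*
 * Set up fbdev emulation for the device: allocate the cirrus_fbdev state,
 * register it with the drm_fb_helper core and kick off the initial
 * configuration at 24 bpp.
 */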
int cirrus_fbdev_init(struct cirrus_device *cdev)
{
	struct cirrus_fbdev *gfbdev;
	int ret;
	int bpp_sel = 24;

	/*bpp_sel = 8;*/
	gfbdev = kzalloc(sizeof(struct cirrus_fbdev), GFP_KERNEL);
	if (!gfbdev)
		return -ENOMEM;

	cdev->mode_info.gfbdev = gfbdev;
	spin_lock_init(&gfbdev->dirty_lock);

	drm_fb_helper_prepare(cdev->dev, &gfbdev->helper,
			      &cirrus_fb_helper_funcs);

	ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
				 cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
	if (ret) {
		kfree(gfbdev);
		return ret;
	}
	drm_fb_helper_single_add_all_connectors(&gfbdev->helper);

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(cdev->dev);
	drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);

	return 0;
}

void cirrus_fbdev_fini(struct cirrus_device *cdev)
{
	if (!cdev->mode_info.gfbdev)
		return;

	cirrus_fbdev_destroy(cdev->dev, cdev->mode_info.gfbdev);
	kfree(cdev->mode_info.gfbdev);
	cdev->mode_info.gfbdev = NULL;
}