Lars-Peter Clausen | 2e3b3c4 | 2012-07-02 16:37:47 +0200 | [diff] [blame] | 1 | /* |
| 2 | * drm kms/fb cma (contiguous memory allocator) helper functions |
| 3 | * |
| 4 | * Copyright (C) 2012 Analog Device Inc. |
| 5 | * Author: Lars-Peter Clausen <lars@metafoo.de> |
| 6 | * |
| 7 | * Based on udl_fbdev.c |
| 8 | * Copyright (C) 2012 Red Hat |
| 9 | * |
| 10 | * This program is free software; you can redistribute it and/or |
| 11 | * modify it under the terms of the GNU General Public License |
| 12 | * as published by the Free Software Foundation; either version 2 |
| 13 | * of the License, or (at your option) any later version. |
| 14 | * This program is distributed in the hope that it will be useful, |
| 15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 17 | * GNU General Public License for more details. |
| 18 | */ |
| 19 | |
| 20 | #include <drm/drmP.h> |
Lars-Peter Clausen | 2e3b3c4 | 2012-07-02 16:37:47 +0200 | [diff] [blame] | 21 | #include <drm/drm_fb_helper.h> |
Noralf Trønnes | 5628648 | 2017-08-13 15:31:45 +0200 | [diff] [blame] | 22 | #include <drm/drm_framebuffer.h> |
Lars-Peter Clausen | 2e3b3c4 | 2012-07-02 16:37:47 +0200 | [diff] [blame] | 23 | #include <drm/drm_gem_cma_helper.h> |
Noralf Trønnes | 5628648 | 2017-08-13 15:31:45 +0200 | [diff] [blame] | 24 | #include <drm/drm_gem_framebuffer_helper.h> |
Lars-Peter Clausen | 2e3b3c4 | 2012-07-02 16:37:47 +0200 | [diff] [blame] | 25 | #include <drm/drm_fb_cma_helper.h> |
Noralf Trønnes | 41b676e | 2017-11-15 15:19:41 +0100 | [diff] [blame] | 26 | #include <drm/drm_print.h> |
Lars-Peter Clausen | 2e3b3c4 | 2012-07-02 16:37:47 +0200 | [diff] [blame] | 27 | #include <linux/module.h> |
| 28 | |
Noralf Trønnes | 199c771 | 2016-04-28 17:18:35 +0200 | [diff] [blame] | 29 | #define DEFAULT_FBDEFIO_DELAY_MS 50 |
| 30 | |
/* Per-device fbdev emulation state for CMA-backed framebuffers. */
struct drm_fbdev_cma {
	/* Embedded generic fb helper; the enclosing struct is recovered
	 * from it via container_of() in to_fbdev_cma(). */
	struct drm_fb_helper fb_helper;
	/* Framebuffer funcs used when creating the fbdev framebuffer;
	 * may carry a driver-specific ->dirty() callback. */
	const struct drm_framebuffer_funcs *fb_funcs;
};
| 35 | |
Noralf Trønnes | 199c771 | 2016-04-28 17:18:35 +0200 | [diff] [blame] | 36 | /** |
| 37 | * DOC: framebuffer cma helper functions |
| 38 | * |
| 39 | * Provides helper functions for creating a cma (contiguous memory allocator) |
| 40 | * backed framebuffer. |
| 41 | * |
Noralf Trønnes | c0f095f | 2017-09-24 14:26:25 +0200 | [diff] [blame] | 42 | * drm_gem_fb_create() is used in the &drm_mode_config_funcs.fb_create |
Noralf Trønnes | 02da16d | 2016-05-11 18:09:18 +0200 | [diff] [blame] | 43 | * callback function to create a cma backed framebuffer. |
Noralf Trønnes | 199c771 | 2016-04-28 17:18:35 +0200 | [diff] [blame] | 44 | * |
| 45 | * An fbdev framebuffer backed by cma is also available by calling |
Noralf Trønnes | 41b676e | 2017-11-15 15:19:41 +0100 | [diff] [blame] | 46 | * drm_fb_cma_fbdev_init(). drm_fb_cma_fbdev_fini() tears it down. |
Daniel Vetter | 421242a | 2016-12-29 21:48:34 +0100 | [diff] [blame] | 47 | * If the &drm_framebuffer_funcs.dirty callback is set, fb_deferred_io will be |
| 48 | * set up automatically. &drm_framebuffer_funcs.dirty is called by |
| 49 | * drm_fb_helper_deferred_io() in process context (&struct delayed_work). |
Noralf Trønnes | 199c771 | 2016-04-28 17:18:35 +0200 | [diff] [blame] | 50 | * |
Daniel Vetter | da5335b | 2016-05-31 22:55:13 +0200 | [diff] [blame] | 51 | * Example fbdev deferred io code:: |
Noralf Trønnes | 199c771 | 2016-04-28 17:18:35 +0200 | [diff] [blame] | 52 | * |
Daniel Vetter | b112481 | 2016-12-29 21:48:31 +0100 | [diff] [blame] | 53 | * static int driver_fb_dirty(struct drm_framebuffer *fb, |
| 54 | * struct drm_file *file_priv, |
| 55 | * unsigned flags, unsigned color, |
| 56 | * struct drm_clip_rect *clips, |
| 57 | * unsigned num_clips) |
Noralf Trønnes | 199c771 | 2016-04-28 17:18:35 +0200 | [diff] [blame] | 58 | * { |
| 59 | * struct drm_gem_cma_object *cma = drm_fb_cma_get_gem_obj(fb, 0); |
| 60 | * ... push changes ... |
| 61 | * return 0; |
| 62 | * } |
| 63 | * |
Daniel Vetter | b112481 | 2016-12-29 21:48:31 +0100 | [diff] [blame] | 64 | * static struct drm_framebuffer_funcs driver_fb_funcs = { |
Noralf Trønnes | c0f095f | 2017-09-24 14:26:25 +0200 | [diff] [blame] | 65 | * .destroy = drm_gem_fb_destroy, |
| 66 | * .create_handle = drm_gem_fb_create_handle, |
Daniel Vetter | b112481 | 2016-12-29 21:48:31 +0100 | [diff] [blame] | 67 | * .dirty = driver_fb_dirty, |
Noralf Trønnes | 199c771 | 2016-04-28 17:18:35 +0200 | [diff] [blame] | 68 | * }; |
| 69 | * |
Daniel Vetter | b112481 | 2016-12-29 21:48:31 +0100 | [diff] [blame] | 70 | * Initialize:: |
Noralf Trønnes | 199c771 | 2016-04-28 17:18:35 +0200 | [diff] [blame] | 71 | * |
 *     ret = drm_fb_cma_fbdev_init_with_funcs(dev, 16,
 *                                            dev->mode_config.num_connector,
 *                                            &driver_fb_funcs);
Noralf Trønnes | 199c771 | 2016-04-28 17:18:35 +0200 | [diff] [blame] | 76 | * |
| 77 | */ |
| 78 | |
/* Resolve the drm_fbdev_cma that embeds the given fb helper. */
static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
{
	return container_of(helper, struct drm_fbdev_cma, fb_helper);
}
| 83 | |
Lars-Peter Clausen | 2e3b3c4 | 2012-07-02 16:37:47 +0200 | [diff] [blame] | 84 | /** |
| 85 | * drm_fb_cma_get_gem_obj() - Get CMA GEM object for framebuffer |
| 86 | * @fb: The framebuffer |
| 87 | * @plane: Which plane |
| 88 | * |
| 89 | * Return the CMA GEM object for given framebuffer. |
| 90 | * |
| 91 | * This function will usually be called from the CRTC callback functions. |
| 92 | */ |
| 93 | struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb, |
Daniel Vetter | 890358a | 2016-05-31 23:11:12 +0200 | [diff] [blame] | 94 | unsigned int plane) |
Lars-Peter Clausen | 2e3b3c4 | 2012-07-02 16:37:47 +0200 | [diff] [blame] | 95 | { |
Noralf Trønnes | 5628648 | 2017-08-13 15:31:45 +0200 | [diff] [blame] | 96 | struct drm_gem_object *gem; |
Lars-Peter Clausen | 2e3b3c4 | 2012-07-02 16:37:47 +0200 | [diff] [blame] | 97 | |
Noralf Trønnes | 5628648 | 2017-08-13 15:31:45 +0200 | [diff] [blame] | 98 | gem = drm_gem_fb_get_obj(fb, plane); |
| 99 | if (!gem) |
Lars-Peter Clausen | 2e3b3c4 | 2012-07-02 16:37:47 +0200 | [diff] [blame] | 100 | return NULL; |
| 101 | |
Noralf Trønnes | 5628648 | 2017-08-13 15:31:45 +0200 | [diff] [blame] | 102 | return to_drm_gem_cma_obj(gem); |
Lars-Peter Clausen | 2e3b3c4 | 2012-07-02 16:37:47 +0200 | [diff] [blame] | 103 | } |
| 104 | EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj); |
| 105 | |
Marek Vasut | 14d7f96 | 2016-11-14 11:07:31 +0100 | [diff] [blame] | 106 | /** |
Yannick Fertre | 4636ce9 | 2017-04-14 12:13:32 +0200 | [diff] [blame] | 107 | * drm_fb_cma_get_gem_addr() - Get physical address for framebuffer |
| 108 | * @fb: The framebuffer |
| 109 | * @state: Which state of drm plane |
| 110 | * @plane: Which plane |
| 111 | * Return the CMA GEM address for given framebuffer. |
| 112 | * |
| 113 | * This function will usually be called from the PLANE callback functions. |
| 114 | */ |
| 115 | dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb, |
| 116 | struct drm_plane_state *state, |
| 117 | unsigned int plane) |
| 118 | { |
Noralf Trønnes | 5628648 | 2017-08-13 15:31:45 +0200 | [diff] [blame] | 119 | struct drm_gem_cma_object *obj; |
Yannick Fertre | 4636ce9 | 2017-04-14 12:13:32 +0200 | [diff] [blame] | 120 | dma_addr_t paddr; |
| 121 | |
Noralf Trønnes | 5628648 | 2017-08-13 15:31:45 +0200 | [diff] [blame] | 122 | obj = drm_fb_cma_get_gem_obj(fb, plane); |
| 123 | if (!obj) |
Yannick Fertre | 4636ce9 | 2017-04-14 12:13:32 +0200 | [diff] [blame] | 124 | return 0; |
| 125 | |
Noralf Trønnes | 5628648 | 2017-08-13 15:31:45 +0200 | [diff] [blame] | 126 | paddr = obj->paddr + fb->offsets[plane]; |
Yannick Fertre | 4636ce9 | 2017-04-14 12:13:32 +0200 | [diff] [blame] | 127 | paddr += fb->format->cpp[plane] * (state->src_x >> 16); |
| 128 | paddr += fb->pitches[plane] * (state->src_y >> 16); |
| 129 | |
| 130 | return paddr; |
| 131 | } |
| 132 | EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_addr); |
| 133 | |
/* fb_mmap implementation: map the CMA backing store write-combined. */
static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	return dma_mmap_writecombine(info->device, vma, info->screen_base,
				     info->fix.smem_start, info->fix.smem_len);
}
| 139 | |
/*
 * fbdev ops for the non-deferred-io path. The framebuffer lives in
 * CPU-addressable CMA memory, so the sys_* (system memory) drawing
 * helpers are used; mmap maps the backing memory write-combined.
 */
static struct fb_ops drm_fbdev_cma_ops = {
	.owner		= THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_fillrect	= drm_fb_helper_sys_fillrect,
	.fb_copyarea	= drm_fb_helper_sys_copyarea,
	.fb_imageblit	= drm_fb_helper_sys_imageblit,
	.fb_mmap	= drm_fb_cma_mmap,
};
| 148 | |
/*
 * fb_mmap used when deferred io is enabled: let the deferred-io core
 * set up the page-fault-tracking mapping first, then force the page
 * protection to write-combine on top of it.
 */
static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info,
					  struct vm_area_struct *vma)
{
	fb_deferred_io_mmap(info, vma);
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
| 157 | |
/*
 * Set up fbdev deferred io for a CMA-backed fb_info so that userspace
 * writes to the mmap'ed framebuffer eventually trigger the driver's
 * &drm_framebuffer_funcs.dirty callback (via drm_fb_helper_deferred_io()).
 *
 * Returns 0 on success or -ENOMEM.
 */
static int drm_fbdev_cma_defio_init(struct fb_info *fbi,
				    struct drm_gem_cma_object *cma_obj)
{
	struct fb_deferred_io *fbdefio;
	struct fb_ops *fbops;

	/*
	 * Per device structures are needed because:
	 * fbops: fb_deferred_io_cleanup() clears fbops.fb_mmap
	 * fbdefio: individual delays
	 */
	fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
	fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
	if (!fbdefio || !fbops) {
		/* kfree(NULL) is a no-op, so freeing both is safe here */
		kfree(fbdefio);
		kfree(fbops);
		return -ENOMEM;
	}

	/* can't be offset from vaddr since dirty() uses cma_obj */
	fbi->screen_buffer = cma_obj->vaddr;
	/* fb_deferred_io_fault() needs a physical address */
	fbi->fix.smem_start = page_to_phys(virt_to_page(fbi->screen_buffer));

	/* copy the shared ops into the per-device copy before patching it */
	*fbops = *fbi->fbops;
	fbi->fbops = fbops;

	fbdefio->delay = msecs_to_jiffies(DEFAULT_FBDEFIO_DELAY_MS);
	fbdefio->deferred_io = drm_fb_helper_deferred_io;
	fbi->fbdefio = fbdefio;
	fb_deferred_io_init(fbi);
	/* must come after fb_deferred_io_init(), which installs its own fb_mmap */
	fbi->fbops->fb_mmap = drm_fbdev_cma_deferred_io_mmap;

	return 0;
}
| 193 | |
| 194 | static void drm_fbdev_cma_defio_fini(struct fb_info *fbi) |
| 195 | { |
| 196 | if (!fbi->fbdefio) |
| 197 | return; |
| 198 | |
| 199 | fb_deferred_io_cleanup(fbi); |
| 200 | kfree(fbi->fbdefio); |
| 201 | kfree(fbi->fbops); |
| 202 | } |
| 203 | |
/*
 * &drm_fb_helper_funcs.fb_probe implementation: allocate a CMA buffer
 * sized for the requested surface, wrap it in a DRM framebuffer and an
 * fbdev fb_info, and (if the fb has a dirty() callback) enable deferred io.
 *
 * Returns 0 on success or a negative error code; on failure everything
 * acquired so far is unwound via the goto chain at the bottom.
 */
static int
drm_fbdev_cma_create(struct drm_fb_helper *helper,
	struct drm_fb_helper_surface_size *sizes)
{
	struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
	struct drm_device *dev = helper->dev;
	struct drm_gem_cma_object *obj;
	struct drm_framebuffer *fb;
	unsigned int bytes_per_pixel;
	unsigned long offset;
	struct fb_info *fbi;
	size_t size;
	int ret;

	DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
			sizes->surface_width, sizes->surface_height,
			sizes->surface_bpp);

	/* round bpp up to whole bytes, e.g. 16 bpp -> 2 bytes per pixel */
	bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
	size = sizes->surface_width * sizes->surface_height * bytes_per_pixel;
	obj = drm_gem_cma_create(dev, size);
	if (IS_ERR(obj))
		return -ENOMEM;

	fbi = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(fbi)) {
		ret = PTR_ERR(fbi);
		goto err_gem_free_object;
	}

	/* framebuffer takes a reference on the GEM object */
	fb = drm_gem_fbdev_fb_create(dev, sizes, 0, &obj->base,
				     fbdev_cma->fb_funcs);
	if (IS_ERR(fb)) {
		dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
		ret = PTR_ERR(fb);
		goto err_fb_info_destroy;
	}

	helper->fb = fb;

	fbi->par = helper;
	fbi->flags = FBINFO_FLAG_DEFAULT;
	fbi->fbops = &drm_fbdev_cma_ops;

	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);

	/* account for any pan offset the helper configured in var */
	offset = fbi->var.xoffset * bytes_per_pixel;
	offset += fbi->var.yoffset * fb->pitches[0];

	dev->mode_config.fb_base = (resource_size_t)obj->paddr;
	fbi->screen_base = obj->vaddr + offset;
	fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
	fbi->screen_size = size;
	fbi->fix.smem_len = size;

	/* a dirty() callback implies the driver wants deferred io updates */
	if (fb->funcs->dirty) {
		ret = drm_fbdev_cma_defio_init(fbi, obj);
		if (ret)
			goto err_cma_destroy;
	}

	return 0;

err_cma_destroy:
	/* drops the fb's reference on the GEM object as well */
	drm_framebuffer_remove(fb);
err_fb_info_destroy:
	drm_fb_helper_fini(helper);
err_gem_free_object:
	drm_gem_object_put_unlocked(&obj->base);
	return ret;
}
| 276 | |
/* fb helper callbacks; fb_probe creates the fbdev framebuffer on demand. */
static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
	.fb_probe = drm_fbdev_cma_create,
};
| 280 | |
| 281 | /** |
Noralf Trønnes | 41b676e | 2017-11-15 15:19:41 +0100 | [diff] [blame] | 282 | * drm_fb_cma_fbdev_init_with_funcs() - Allocate and initialize fbdev emulation |
| 283 | * @dev: DRM device |
| 284 | * @preferred_bpp: Preferred bits per pixel for the device. |
| 285 | * @dev->mode_config.preferred_depth is used if this is zero. |
| 286 | * @max_conn_count: Maximum number of connectors. |
| 287 | * @dev->mode_config.num_connector is used if this is zero. |
| 288 | * @funcs: Framebuffer functions, in particular a custom dirty() callback. |
| 289 | * Can be NULL. |
| 290 | * |
| 291 | * Returns: |
| 292 | * Zero on success or negative error code on failure. |
| 293 | */ |
| 294 | int drm_fb_cma_fbdev_init_with_funcs(struct drm_device *dev, |
| 295 | unsigned int preferred_bpp, unsigned int max_conn_count, |
| 296 | const struct drm_framebuffer_funcs *funcs) |
| 297 | { |
| 298 | struct drm_fbdev_cma *fbdev_cma; |
| 299 | struct drm_fb_helper *fb_helper; |
| 300 | int ret; |
| 301 | |
| 302 | if (!preferred_bpp) |
| 303 | preferred_bpp = dev->mode_config.preferred_depth; |
| 304 | if (!preferred_bpp) |
| 305 | preferred_bpp = 32; |
| 306 | |
| 307 | if (!max_conn_count) |
| 308 | max_conn_count = dev->mode_config.num_connector; |
| 309 | |
| 310 | fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL); |
| 311 | if (!fbdev_cma) |
| 312 | return -ENOMEM; |
| 313 | |
| 314 | fbdev_cma->fb_funcs = funcs; |
| 315 | fb_helper = &fbdev_cma->fb_helper; |
| 316 | |
| 317 | drm_fb_helper_prepare(dev, fb_helper, &drm_fb_cma_helper_funcs); |
| 318 | |
| 319 | ret = drm_fb_helper_init(dev, fb_helper, max_conn_count); |
| 320 | if (ret < 0) { |
| 321 | DRM_DEV_ERROR(dev->dev, "Failed to initialize fbdev helper.\n"); |
| 322 | goto err_free; |
| 323 | } |
| 324 | |
| 325 | ret = drm_fb_helper_single_add_all_connectors(fb_helper); |
| 326 | if (ret < 0) { |
| 327 | DRM_DEV_ERROR(dev->dev, "Failed to add connectors.\n"); |
| 328 | goto err_drm_fb_helper_fini; |
| 329 | } |
| 330 | |
| 331 | ret = drm_fb_helper_initial_config(fb_helper, preferred_bpp); |
| 332 | if (ret < 0) { |
| 333 | DRM_DEV_ERROR(dev->dev, "Failed to set fbdev configuration.\n"); |
| 334 | goto err_drm_fb_helper_fini; |
| 335 | } |
| 336 | |
| 337 | return 0; |
| 338 | |
| 339 | err_drm_fb_helper_fini: |
| 340 | drm_fb_helper_fini(fb_helper); |
| 341 | err_free: |
| 342 | kfree(fbdev_cma); |
| 343 | |
| 344 | return ret; |
| 345 | } |
| 346 | EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_init_with_funcs); |
| 347 | |
/**
 * drm_fb_cma_fbdev_init() - Allocate and initialize fbdev emulation
 * @dev: DRM device
 * @preferred_bpp: Preferred bits per pixel for the device.
 *                 @dev->mode_config.preferred_depth is used if this is zero.
 * @max_conn_count: Maximum number of connectors.
 *                  @dev->mode_config.num_connector is used if this is zero.
 *
 * Convenience wrapper around drm_fb_cma_fbdev_init_with_funcs() that passes
 * NULL framebuffer funcs, i.e. no custom dirty() callback.
 *
 * Returns:
 * Zero on success or negative error code on failure.
 */
int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp,
			  unsigned int max_conn_count)
{
	return drm_fb_cma_fbdev_init_with_funcs(dev, preferred_bpp,
						max_conn_count, NULL);
}
EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_init);
| 366 | |
/**
 * drm_fb_cma_fbdev_fini() - Teardown fbdev emulation
 * @dev: DRM device
 *
 * Unwinds everything set up by drm_fb_cma_fbdev_init_with_funcs():
 * unregisters the fbdev, tears down deferred io, removes the framebuffer
 * and frees the drm_fbdev_cma allocation. Safe to call when fbdev
 * emulation was never initialized (dev->fb_helper is NULL).
 */
void drm_fb_cma_fbdev_fini(struct drm_device *dev)
{
	struct drm_fb_helper *fb_helper = dev->fb_helper;

	if (!fb_helper)
		return;

	/* Unregister if it hasn't been done already */
	if (fb_helper->fbdev && fb_helper->fbdev->dev)
		drm_fb_helper_unregister_fbi(fb_helper);

	if (fb_helper->fbdev)
		drm_fbdev_cma_defio_fini(fb_helper->fbdev);

	if (fb_helper->fb)
		drm_framebuffer_remove(fb_helper->fb);

	drm_fb_helper_fini(fb_helper);
	/* frees the drm_fbdev_cma that embeds fb_helper */
	kfree(to_fbdev_cma(fb_helper));
}
EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_fini);
| 392 | |
| 393 | /** |
Noralf Trønnes | 199c771 | 2016-04-28 17:18:35 +0200 | [diff] [blame] | 394 | * drm_fbdev_cma_init_with_funcs() - Allocate and initializes a drm_fbdev_cma struct |
Lars-Peter Clausen | 2e3b3c4 | 2012-07-02 16:37:47 +0200 | [diff] [blame] | 395 | * @dev: DRM device |
| 396 | * @preferred_bpp: Preferred bits per pixel for the device |
Lars-Peter Clausen | 2e3b3c4 | 2012-07-02 16:37:47 +0200 | [diff] [blame] | 397 | * @max_conn_count: Maximum number of connectors |
Daniel Vetter | b112481 | 2016-12-29 21:48:31 +0100 | [diff] [blame] | 398 | * @funcs: fb helper functions, in particular a custom dirty() callback |
Lars-Peter Clausen | 2e3b3c4 | 2012-07-02 16:37:47 +0200 | [diff] [blame] | 399 | * |
| 400 | * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR. |
| 401 | */ |
Noralf Trønnes | 199c771 | 2016-04-28 17:18:35 +0200 | [diff] [blame] | 402 | struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev, |
Gabriel Krisman Bertazi | e4563f6 | 2017-02-02 14:26:40 -0200 | [diff] [blame] | 403 | unsigned int preferred_bpp, unsigned int max_conn_count, |
| 404 | const struct drm_framebuffer_funcs *funcs) |
Lars-Peter Clausen | 2e3b3c4 | 2012-07-02 16:37:47 +0200 | [diff] [blame] | 405 | { |
| 406 | struct drm_fbdev_cma *fbdev_cma; |
| 407 | struct drm_fb_helper *helper; |
| 408 | int ret; |
| 409 | |
| 410 | fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL); |
| 411 | if (!fbdev_cma) { |
| 412 | dev_err(dev->dev, "Failed to allocate drm fbdev.\n"); |
| 413 | return ERR_PTR(-ENOMEM); |
| 414 | } |
Daniel Vetter | b112481 | 2016-12-29 21:48:31 +0100 | [diff] [blame] | 415 | fbdev_cma->fb_funcs = funcs; |
Lars-Peter Clausen | 2e3b3c4 | 2012-07-02 16:37:47 +0200 | [diff] [blame] | 416 | |
Lars-Peter Clausen | 2e3b3c4 | 2012-07-02 16:37:47 +0200 | [diff] [blame] | 417 | helper = &fbdev_cma->fb_helper; |
| 418 | |
Daniel Vetter | b112481 | 2016-12-29 21:48:31 +0100 | [diff] [blame] | 419 | drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs); |
Thierry Reding | 10a2310 | 2014-06-27 17:19:24 +0200 | [diff] [blame] | 420 | |
Gabriel Krisman Bertazi | e4563f6 | 2017-02-02 14:26:40 -0200 | [diff] [blame] | 421 | ret = drm_fb_helper_init(dev, helper, max_conn_count); |
Lars-Peter Clausen | 2e3b3c4 | 2012-07-02 16:37:47 +0200 | [diff] [blame] | 422 | if (ret < 0) { |
| 423 | dev_err(dev->dev, "Failed to initialize drm fb helper.\n"); |
| 424 | goto err_free; |
| 425 | } |
| 426 | |
| 427 | ret = drm_fb_helper_single_add_all_connectors(helper); |
| 428 | if (ret < 0) { |
| 429 | dev_err(dev->dev, "Failed to add connectors.\n"); |
| 430 | goto err_drm_fb_helper_fini; |
| 431 | |
| 432 | } |
| 433 | |
| 434 | ret = drm_fb_helper_initial_config(helper, preferred_bpp); |
| 435 | if (ret < 0) { |
Masanari Iida | 8b513d0 | 2013-05-21 23:13:12 +0900 | [diff] [blame] | 436 | dev_err(dev->dev, "Failed to set initial hw configuration.\n"); |
Lars-Peter Clausen | 2e3b3c4 | 2012-07-02 16:37:47 +0200 | [diff] [blame] | 437 | goto err_drm_fb_helper_fini; |
| 438 | } |
| 439 | |
| 440 | return fbdev_cma; |
| 441 | |
| 442 | err_drm_fb_helper_fini: |
| 443 | drm_fb_helper_fini(helper); |
| 444 | err_free: |
| 445 | kfree(fbdev_cma); |
| 446 | |
| 447 | return ERR_PTR(ret); |
| 448 | } |
Noralf Trønnes | 199c771 | 2016-04-28 17:18:35 +0200 | [diff] [blame] | 449 | EXPORT_SYMBOL_GPL(drm_fbdev_cma_init_with_funcs); |
| 450 | |
/* Default framebuffer funcs used when the driver supplies no dirty() hook. */
static const struct drm_framebuffer_funcs drm_fb_cma_funcs = {
	.destroy	= drm_gem_fb_destroy,
	.create_handle	= drm_gem_fb_create_handle,
};
| 455 | |
/**
 * drm_fbdev_cma_init() - Allocate and initializes a drm_fbdev_cma struct
 * @dev: DRM device
 * @preferred_bpp: Preferred bits per pixel for the device
 * @max_conn_count: Maximum number of connectors
 *
 * Convenience wrapper around drm_fbdev_cma_init_with_funcs() using the
 * default drm_fb_cma_funcs (no dirty() callback, so no deferred io).
 *
 * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
 */
struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
	unsigned int preferred_bpp, unsigned int max_conn_count)
{
	return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp,
					     max_conn_count,
					     &drm_fb_cma_funcs);
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
| 472 | |
/**
 * drm_fbdev_cma_fini() - Free drm_fbdev_cma struct
 * @fbdev_cma: The drm_fbdev_cma struct
 *
 * Unregisters the fbdev, tears down deferred io (if it was enabled),
 * removes the framebuffer and frees @fbdev_cma. Counterpart to
 * drm_fbdev_cma_init() / drm_fbdev_cma_init_with_funcs().
 */
void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
{
	drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
	if (fbdev_cma->fb_helper.fbdev)
		drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);

	if (fbdev_cma->fb_helper.fb)
		drm_framebuffer_remove(fbdev_cma->fb_helper.fb);

	drm_fb_helper_fini(&fbdev_cma->fb_helper);
	kfree(fbdev_cma);
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
| 490 | |
| 491 | /** |
| 492 | * drm_fbdev_cma_restore_mode() - Restores initial framebuffer mode |
| 493 | * @fbdev_cma: The drm_fbdev_cma struct, may be NULL |
| 494 | * |
Daniel Vetter | 421242a | 2016-12-29 21:48:34 +0100 | [diff] [blame] | 495 | * This function is usually called from the &drm_driver.lastclose callback. |
Lars-Peter Clausen | 2e3b3c4 | 2012-07-02 16:37:47 +0200 | [diff] [blame] | 496 | */ |
| 497 | void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma) |
| 498 | { |
Rob Clark | 5ea1f75 | 2014-05-30 12:29:48 -0400 | [diff] [blame] | 499 | if (fbdev_cma) |
| 500 | drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev_cma->fb_helper); |
Lars-Peter Clausen | 2e3b3c4 | 2012-07-02 16:37:47 +0200 | [diff] [blame] | 501 | } |
| 502 | EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode); |
| 503 | |
| 504 | /** |
| 505 | * drm_fbdev_cma_hotplug_event() - Poll for hotpulug events |
| 506 | * @fbdev_cma: The drm_fbdev_cma struct, may be NULL |
| 507 | * |
Daniel Vetter | 421242a | 2016-12-29 21:48:34 +0100 | [diff] [blame] | 508 | * This function is usually called from the &drm_mode_config.output_poll_changed |
Lars-Peter Clausen | 2e3b3c4 | 2012-07-02 16:37:47 +0200 | [diff] [blame] | 509 | * callback. |
| 510 | */ |
| 511 | void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma) |
| 512 | { |
| 513 | if (fbdev_cma) |
| 514 | drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper); |
| 515 | } |
| 516 | EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event); |
Stefan Agner | 917f425 | 2016-02-11 17:30:14 -0800 | [diff] [blame] | 517 | |
| 518 | /** |
| 519 | * drm_fbdev_cma_set_suspend - wrapper around drm_fb_helper_set_suspend |
| 520 | * @fbdev_cma: The drm_fbdev_cma struct, may be NULL |
| 521 | * @state: desired state, zero to resume, non-zero to suspend |
| 522 | * |
| 523 | * Calls drm_fb_helper_set_suspend, which is a wrapper around |
| 524 | * fb_set_suspend implemented by fbdev core. |
| 525 | */ |
Liviu Dudau | d0a2987 | 2017-06-20 11:23:20 +0100 | [diff] [blame] | 526 | void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, bool state) |
Stefan Agner | 917f425 | 2016-02-11 17:30:14 -0800 | [diff] [blame] | 527 | { |
| 528 | if (fbdev_cma) |
| 529 | drm_fb_helper_set_suspend(&fbdev_cma->fb_helper, state); |
| 530 | } |
| 531 | EXPORT_SYMBOL(drm_fbdev_cma_set_suspend); |
Noralf Trønnes | a4405b5 | 2017-01-22 19:11:09 +0100 | [diff] [blame] | 532 | |
| 533 | /** |
| 534 | * drm_fbdev_cma_set_suspend_unlocked - wrapper around |
| 535 | * drm_fb_helper_set_suspend_unlocked |
| 536 | * @fbdev_cma: The drm_fbdev_cma struct, may be NULL |
| 537 | * @state: desired state, zero to resume, non-zero to suspend |
| 538 | * |
| 539 | * Calls drm_fb_helper_set_suspend, which is a wrapper around |
| 540 | * fb_set_suspend implemented by fbdev core. |
| 541 | */ |
| 542 | void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma, |
Liviu Dudau | d0a2987 | 2017-06-20 11:23:20 +0100 | [diff] [blame] | 543 | bool state) |
Noralf Trønnes | a4405b5 | 2017-01-22 19:11:09 +0100 | [diff] [blame] | 544 | { |
| 545 | if (fbdev_cma) |
| 546 | drm_fb_helper_set_suspend_unlocked(&fbdev_cma->fb_helper, |
| 547 | state); |
| 548 | } |
| 549 | EXPORT_SYMBOL(drm_fbdev_cma_set_suspend_unlocked); |