/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include "virtgpu_drv.h"

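/*
 * Deferred dirty-rectangle flushes run at most once per poll period;
 * HZ / 60 jiffies approximates a 60 Hz console refresh rate.
 */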
#define VIRTIO_GPU_FBCON_POLL_PERIOD (HZ / 60)

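/*
 * Per-device fbdev state: the DRM fb_helper core object, the emulated
 * framebuffer, and the delayed work that flushes accumulated dirty
 * rectangles to the host.
 */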
struct virtio_gpu_fbdev {
	struct drm_fb_helper           helper;
	struct virtio_gpu_framebuffer  vgfb;
	struct list_head               fbdev_list;
	struct virtio_gpu_device       *vgdev;
	struct delayed_work            work;
};

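/*
 * Merge the rectangle at (x, y) of size width x height into the
 * framebuffer's pending dirty region.  In atomic context, or when
 * @store is set, only record it; otherwise transfer the merged region
 * to the host and flush it to the scanout.
 */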
static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb,
				   bool store, int x, int y,
				   int width, int height)
{
	struct drm_device *dev = fb->base.dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	bool store_for_later = false;
	int bpp = fb->base.bits_per_pixel / 8;
	int x2, y2;
	unsigned long flags;
	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->obj);

	if ((width <= 0) || (height <= 0) ||
	    (x + width > fb->base.width) ||
	    (y + height > fb->base.height)) {
		DRM_DEBUG("values out of range %dx%d+%d+%d, fb %dx%d\n",
			  width, height, x, y,
			  fb->base.width, fb->base.height);
		return -EINVAL;
	}

	/*
	 * Can be called with pretty much any context (console output
	 * path).  If we are in atomic just store the dirty rect info
	 * to send out the update later.
	 *
	 * Can't test inside spin lock.
	 */
	if (in_atomic() || store)
		store_for_later = true;

	x2 = x + width - 1;
	y2 = y + height - 1;

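	/* Grow the new rect to cover any dirty region stored earlier. */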
	spin_lock_irqsave(&fb->dirty_lock, flags);

	if (fb->y1 < y)
		y = fb->y1;
	if (fb->y2 > y2)
		y2 = fb->y2;
	if (fb->x1 < x)
		x = fb->x1;
	if (fb->x2 > x2)
		x2 = fb->x2;

	if (store_for_later) {
		fb->x1 = x;
		fb->x2 = x2;
		fb->y1 = y;
		fb->y2 = y2;
		spin_unlock_irqrestore(&fb->dirty_lock, flags);
		return 0;
	}

	fb->x1 = fb->y1 = INT_MAX;
	fb->x2 = fb->y2 = 0;

	spin_unlock_irqrestore(&fb->dirty_lock, flags);

	{
		uint32_t offset;
		uint32_t w = x2 - x + 1;
		uint32_t h = y2 - y + 1;

		offset = (y * fb->base.pitches[0]) + x * bpp;

		virtio_gpu_cmd_transfer_to_host_2d(vgdev, obj->hw_res_handle,
						   offset,
						   cpu_to_le32(w),
						   cpu_to_le32(h),
						   cpu_to_le32(x),
						   cpu_to_le32(y),
						   NULL);
	}
	virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle,
				      x, y, x2 - x + 1, y2 - y + 1);
	return 0;
}

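/*
 * Flush user-space damage to the host: compute the bounding box of all
 * clip rectangles (the whole framebuffer when none are given), then
 * transfer and flush it.  Dumb buffers are backed by guest memory and
 * need the transfer step; other resources only need the resource flush.
 */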
int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *vgfb,
			     struct drm_clip_rect *clips,
			     unsigned num_clips)
{
	struct virtio_gpu_device *vgdev = vgfb->base.dev->dev_private;
	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(vgfb->obj);
	struct drm_clip_rect norect;
	struct drm_clip_rect *clips_ptr;
	int left, right, top, bottom;
	int i;
	int inc = 1;

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = vgfb->base.width;
		norect.y2 = vgfb->base.height;
	}
	left = clips->x1;
	right = clips->x2;
	top = clips->y1;
	bottom = clips->y2;

	/* the first clip rect seeded the bounding box above; fold in the rest */
	for (i = 1, clips_ptr = clips + inc;
	     i < num_clips; i++, clips_ptr += inc) {
		left = min_t(int, left, (int)clips_ptr->x1);
		right = max_t(int, right, (int)clips_ptr->x2);
		top = min_t(int, top, (int)clips_ptr->y1);
		bottom = max_t(int, bottom, (int)clips_ptr->y2);
	}

	if (obj->dumb)
		return virtio_gpu_dirty_update(vgfb, false, left, top,
					       right - left, bottom - top);

	virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle,
				      left, top, right - left, bottom - top);
	return 0;
}

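/*
 * Delayed-work handler: flush whatever dirty region accumulated while
 * fbcon was drawing from atomic context.
 */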
static void virtio_gpu_fb_dirty_work(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct virtio_gpu_fbdev *vfbdev =
		container_of(delayed_work, struct virtio_gpu_fbdev, work);
	struct virtio_gpu_framebuffer *vgfb = &vfbdev->vgfb;

	virtio_gpu_dirty_update(&vfbdev->vgfb, false, vgfb->x1, vgfb->y1,
				vgfb->x2 - vgfb->x1, vgfb->y2 - vgfb->y1);
}

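/*
 * fbcon drawing hooks: render with the generic sys_* helpers into the
 * vmapped framebuffer, mark the touched rectangle dirty for deferred
 * transfer, and kick the delayed flush.
 */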
static void virtio_gpu_3d_fillrect(struct fb_info *info,
				   const struct fb_fillrect *rect)
{
	struct virtio_gpu_fbdev *vfbdev = info->par;

	sys_fillrect(info, rect);
	virtio_gpu_dirty_update(&vfbdev->vgfb, true, rect->dx, rect->dy,
				rect->width, rect->height);
	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
}

static void virtio_gpu_3d_copyarea(struct fb_info *info,
				   const struct fb_copyarea *area)
{
	struct virtio_gpu_fbdev *vfbdev = info->par;

	sys_copyarea(info, area);
	virtio_gpu_dirty_update(&vfbdev->vgfb, true, area->dx, area->dy,
				area->width, area->height);
	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
}

static void virtio_gpu_3d_imageblit(struct fb_info *info,
				    const struct fb_image *image)
{
	struct virtio_gpu_fbdev *vfbdev = info->par;

	sys_imageblit(info, image);
	virtio_gpu_dirty_update(&vfbdev->vgfb, true, image->dx, image->dy,
				image->width, image->height);
	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
}

static struct fb_ops virtio_gpufb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
	.fb_fillrect = virtio_gpu_3d_fillrect,
	.fb_copyarea = virtio_gpu_3d_copyarea,
	.fb_imageblit = virtio_gpu_3d_imageblit,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
	.fb_debug_enter = drm_fb_helper_debug_enter,
	.fb_debug_leave = drm_fb_helper_debug_leave,
};

static int virtio_gpu_vmap_fb(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj)
{
	return virtio_gpu_object_kmap(obj, NULL);
}

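/*
 * fb_probe callback: allocate a host resource and a guest object for
 * the console framebuffer, attach the guest pages as backing store,
 * and register the resulting fb_info with the fbdev core.
 */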
static int virtio_gpufb_create(struct drm_fb_helper *helper,
			       struct drm_fb_helper_surface_size *sizes)
{
	struct virtio_gpu_fbdev *vfbdev =
		container_of(helper, struct virtio_gpu_fbdev, helper);
	struct drm_device *dev = helper->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd = {};
	struct virtio_gpu_object *obj;
	struct device *device = vgdev->dev;
	uint32_t resid, format, size;
	int ret;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = mode_cmd.width * 4;
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(32, 24);

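	/*
	 * Map the DRM fourcc to the virtio-gpu format with the matching
	 * byte layout; the pairing differs between little- and
	 * big-endian hosts.
	 */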
	switch (mode_cmd.pixel_format) {
#ifdef __BIG_ENDIAN
	case DRM_FORMAT_XRGB8888:
		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
		break;
	case DRM_FORMAT_ARGB8888:
		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
		break;
	case DRM_FORMAT_BGRX8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
		break;
	case DRM_FORMAT_BGRA8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
		break;
	case DRM_FORMAT_RGBX8888:
		format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
		break;
	case DRM_FORMAT_RGBA8888:
		format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
		break;
	case DRM_FORMAT_XBGR8888:
		format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
		break;
	case DRM_FORMAT_ABGR8888:
		format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
		break;
#else
	case DRM_FORMAT_XRGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
		break;
	case DRM_FORMAT_ARGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
		break;
	case DRM_FORMAT_BGRX8888:
		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
		break;
	case DRM_FORMAT_BGRA8888:
		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
		break;
	case DRM_FORMAT_RGBX8888:
		format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
		break;
	case DRM_FORMAT_RGBA8888:
		format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
		break;
	case DRM_FORMAT_XBGR8888:
		format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
		break;
	case DRM_FORMAT_ABGR8888:
		format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
		break;
#endif
	default:
		DRM_ERROR("failed to find virtio gpu format for %d\n",
			  mode_cmd.pixel_format);
		return -EINVAL;
	}

	size = mode_cmd.pitches[0] * mode_cmd.height;
	obj = virtio_gpu_alloc_object(dev, size, false, true);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	virtio_gpu_resource_id_get(vgdev, &resid);
	virtio_gpu_cmd_create_resource(vgdev, resid, format,
				       mode_cmd.width, mode_cmd.height);

	ret = virtio_gpu_vmap_fb(vgdev, obj);
	if (ret) {
		DRM_ERROR("failed to vmap fb %d\n", ret);
		goto err_obj_vmap;
	}

	/* attach the object to the resource */
	ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL);
	if (ret)
		goto err_obj_attach;

	info = framebuffer_alloc(0, device);
	if (!info) {
		ret = -ENOMEM;
		goto err_fb_alloc;
	}

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto err_fb_alloc_cmap;
	}

	info->par = helper;

	ret = virtio_gpu_framebuffer_init(dev, &vfbdev->vgfb,
					  &mode_cmd, &obj->gem_base);
	if (ret)
		goto err_fb_init;

	fb = &vfbdev->vgfb.base;

	vfbdev->helper.fb = fb;
	vfbdev->helper.fbdev = info;

	strcpy(info->fix.id, "virtiodrmfb");
	info->flags = FBINFO_DEFAULT;
	info->fbops = &virtio_gpufb_ops;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;

	info->screen_base = obj->vmap;
	info->screen_size = obj->gem_base.size;
	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &vfbdev->helper,
			       sizes->fb_width, sizes->fb_height);

	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;
	return 0;

err_fb_init:
	fb_dealloc_cmap(&info->cmap);
err_fb_alloc_cmap:
	framebuffer_release(info);
err_fb_alloc:
	virtio_gpu_cmd_resource_inval_backing(vgdev, resid);
err_obj_attach:
err_obj_vmap:
	virtio_gpu_gem_free_object(&obj->gem_base);
	return ret;
}

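/*
 * Tear down the fbdev emulation: unregister and release the fb_info,
 * then clean up the helper and framebuffer state.
 */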
static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
				    struct virtio_gpu_fbdev *vgfbdev)
{
	struct fb_info *info;
	struct virtio_gpu_framebuffer *vgfb = &vgfbdev->vgfb;

	if (vgfbdev->helper.fbdev) {
		info = vgfbdev->helper.fbdev;

		unregister_framebuffer(info);
		framebuffer_release(info);
	}
	vgfb->obj = NULL;
	drm_fb_helper_fini(&vgfbdev->helper);
	drm_framebuffer_cleanup(&vgfb->base);

	return 0;
}

static struct drm_fb_helper_funcs virtio_gpu_fb_helper_funcs = {
	.fb_probe = virtio_gpufb_create,
};

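/*
 * Set up fbdev emulation for all scanouts and register the initial
 * console configuration.
 */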
int virtio_gpu_fbdev_init(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_fbdev *vgfbdev;
	int bpp_sel = 32; /* TODO: parameter from somewhere? */
	int ret;

	vgfbdev = kzalloc(sizeof(struct virtio_gpu_fbdev), GFP_KERNEL);
	if (!vgfbdev)
		return -ENOMEM;

	vgfbdev->vgdev = vgdev;
	vgdev->vgfbdev = vgfbdev;
	INIT_DELAYED_WORK(&vgfbdev->work, virtio_gpu_fb_dirty_work);

	drm_fb_helper_prepare(vgdev->ddev, &vgfbdev->helper,
			      &virtio_gpu_fb_helper_funcs);
	ret = drm_fb_helper_init(vgdev->ddev, &vgfbdev->helper,
				 vgdev->num_scanouts,
				 VIRTIO_GPUFB_CONN_LIMIT);
	if (ret) {
		kfree(vgfbdev);
		return ret;
	}

	drm_fb_helper_single_add_all_connectors(&vgfbdev->helper);
	drm_fb_helper_initial_config(&vgfbdev->helper, bpp_sel);
	return 0;
}

void virtio_gpu_fbdev_fini(struct virtio_gpu_device *vgdev)
{
	if (!vgdev->vgfbdev)
		return;

	virtio_gpu_fbdev_destroy(vgdev->ddev, vgdev->vgfbdev);
	kfree(vgdev->vgfbdev);
	vgdev->vgfbdev = NULL;
}