/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include "virtgpu_drv.h"

#define VIRTIO_GPU_FBCON_POLL_PERIOD (HZ / 60)

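/*
 * Per-device fbdev state: the shared drm_fb_helper, the framebuffer
 * wrapping the console's backing object, and the delayed work used to
 * flush deferred dirty rectangles out to the host.
 */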
struct virtio_gpu_fbdev {
        struct drm_fb_helper helper;
        struct virtio_gpu_framebuffer vgfb;
        struct virtio_gpu_device *vgdev;
        struct delayed_work work;
};

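/*
 * Merge the rectangle at (x, y) into the framebuffer's pending dirty
 * region.  In atomic context, or when @store is set, the merged region
 * is only recorded for a later flush; otherwise the pending region is
 * reset, its contents transferred to the host resource, and the
 * resource flushed to the display.
 */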
static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb,
                                   bool store, int x, int y,
                                   int width, int height)
{
        struct drm_device *dev = fb->base.dev;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        bool store_for_later = false;
        int bpp = fb->base.bits_per_pixel / 8;
        int x2, y2;
        unsigned long flags;
        struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->obj);

        if ((width <= 0) || (height <= 0) ||
            (x + width > fb->base.width) ||
            (y + height > fb->base.height)) {
                DRM_DEBUG("values out of range %dx%d+%d+%d, fb %dx%d\n",
                          width, height, x, y,
                          fb->base.width, fb->base.height);
                return -EINVAL;
        }

        /*
         * Can be called with pretty much any context (console output
         * path).  If we are in atomic just store the dirty rect info
         * to send out the update later.
         *
         * Can't test inside spin lock.
         */
        if (in_atomic() || store)
                store_for_later = true;

        x2 = x + width - 1;
        y2 = y + height - 1;

        spin_lock_irqsave(&fb->dirty_lock, flags);

        if (fb->y1 < y)
                y = fb->y1;
        if (fb->y2 > y2)
                y2 = fb->y2;
        if (fb->x1 < x)
                x = fb->x1;
        if (fb->x2 > x2)
                x2 = fb->x2;

        if (store_for_later) {
                fb->x1 = x;
                fb->x2 = x2;
                fb->y1 = y;
                fb->y2 = y2;
                spin_unlock_irqrestore(&fb->dirty_lock, flags);
                return 0;
        }

        fb->x1 = fb->y1 = INT_MAX;
        fb->x2 = fb->y2 = 0;

        spin_unlock_irqrestore(&fb->dirty_lock, flags);

        {
                uint32_t offset;
                uint32_t w = x2 - x + 1;
                uint32_t h = y2 - y + 1;

                offset = (y * fb->base.pitches[0]) + x * bpp;

                virtio_gpu_cmd_transfer_to_host_2d(vgdev, obj->hw_res_handle,
                                                   offset,
                                                   cpu_to_le32(w),
                                                   cpu_to_le32(h),
                                                   cpu_to_le32(x),
                                                   cpu_to_le32(y),
                                                   NULL);
        }
        virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle,
                                      x, y, x2 - x + 1, y2 - y + 1);
        return 0;
}

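/*
 * Take the union of the supplied clip rectangles (or the whole surface
 * when none are given) and push it to the host.  Dumb buffers go
 * through virtio_gpu_dirty_update() so their contents are transferred
 * as well; other resources only need a resource flush.
 */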
int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *vgfb,
                             struct drm_clip_rect *clips,
                             unsigned num_clips)
{
        struct virtio_gpu_device *vgdev = vgfb->base.dev->dev_private;
        struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(vgfb->obj);
        struct drm_clip_rect norect;
        struct drm_clip_rect *clips_ptr;
        int left, right, top, bottom;
        int i;
        int inc = 1;

        if (!num_clips) {
                num_clips = 1;
                clips = &norect;
                norect.x1 = norect.y1 = 0;
                norect.x2 = vgfb->base.width;
                norect.y2 = vgfb->base.height;
        }
        left = clips->x1;
        right = clips->x2;
        top = clips->y1;
        bottom = clips->y2;

        /* skip the first clip rect */
        for (i = 1, clips_ptr = clips + inc;
             i < num_clips; i++, clips_ptr += inc) {
                left = min_t(int, left, (int)clips_ptr->x1);
                right = max_t(int, right, (int)clips_ptr->x2);
                top = min_t(int, top, (int)clips_ptr->y1);
                bottom = max_t(int, bottom, (int)clips_ptr->y2);
        }

        if (obj->dumb)
                return virtio_gpu_dirty_update(vgfb, false, left, top,
                                               right - left, bottom - top);

        virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle,
                                      left, top, right - left, bottom - top);
        return 0;
}

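/*
 * Delayed-work callback: flush whatever dirty region has accumulated
 * since the work was scheduled by the fillrect/copyarea/imageblit
 * wrappers below.
 */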
static void virtio_gpu_fb_dirty_work(struct work_struct *work)
{
        struct delayed_work *delayed_work = to_delayed_work(work);
        struct virtio_gpu_fbdev *vfbdev =
                container_of(delayed_work, struct virtio_gpu_fbdev, work);
        struct virtio_gpu_framebuffer *vgfb = &vfbdev->vgfb;

        virtio_gpu_dirty_update(&vfbdev->vgfb, false, vgfb->x1, vgfb->y1,
                                vgfb->x2 - vgfb->x1, vgfb->y2 - vgfb->y1);
}

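/*
 * fbcon drawing wrappers: render with the generic system-memory
 * helpers, record the touched rectangle, and schedule the delayed
 * work so the update reaches the host outside of atomic context.
 */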
static void virtio_gpu_3d_fillrect(struct fb_info *info,
                                   const struct fb_fillrect *rect)
{
        struct virtio_gpu_fbdev *vfbdev = info->par;

        drm_fb_helper_sys_fillrect(info, rect);
        virtio_gpu_dirty_update(&vfbdev->vgfb, true, rect->dx, rect->dy,
                                rect->width, rect->height);
        schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
}

static void virtio_gpu_3d_copyarea(struct fb_info *info,
                                   const struct fb_copyarea *area)
{
        struct virtio_gpu_fbdev *vfbdev = info->par;

        drm_fb_helper_sys_copyarea(info, area);
        virtio_gpu_dirty_update(&vfbdev->vgfb, true, area->dx, area->dy,
                                area->width, area->height);
        schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
}

static void virtio_gpu_3d_imageblit(struct fb_info *info,
                                    const struct fb_image *image)
{
        struct virtio_gpu_fbdev *vfbdev = info->par;

        drm_fb_helper_sys_imageblit(info, image);
        virtio_gpu_dirty_update(&vfbdev->vgfb, true, image->dx, image->dy,
                                image->width, image->height);
        schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
}

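/* fb_ops backing the virtio-gpu fbdev console. */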
static struct fb_ops virtio_gpufb_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = drm_fb_helper_check_var,
        .fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
        .fb_fillrect = virtio_gpu_3d_fillrect,
        .fb_copyarea = virtio_gpu_3d_copyarea,
        .fb_imageblit = virtio_gpu_3d_imageblit,
        .fb_pan_display = drm_fb_helper_pan_display,
        .fb_blank = drm_fb_helper_blank,
        .fb_setcmap = drm_fb_helper_setcmap,
        .fb_debug_enter = drm_fb_helper_debug_enter,
        .fb_debug_leave = drm_fb_helper_debug_leave,
};

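/* Map the backing object into the kernel so fbcon can draw into it. */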
static int virtio_gpu_vmap_fb(struct virtio_gpu_device *vgdev,
                              struct virtio_gpu_object *obj)
{
        return virtio_gpu_object_kmap(obj, NULL);
}

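/*
 * drm_fb_helper .fb_probe callback: allocate a GEM object for the
 * console, create the matching host resource, attach the object as
 * its backing store, and register the resulting fb_info with fbdev.
 */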
static int virtio_gpufb_create(struct drm_fb_helper *helper,
                               struct drm_fb_helper_surface_size *sizes)
{
        struct virtio_gpu_fbdev *vfbdev =
                container_of(helper, struct virtio_gpu_fbdev, helper);
        struct drm_device *dev = helper->dev;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct fb_info *info;
        struct drm_framebuffer *fb;
        struct drm_mode_fb_cmd2 mode_cmd = {};
        struct virtio_gpu_object *obj;
        uint32_t resid, format, size;
        int ret;

        mode_cmd.width = sizes->surface_width;
        mode_cmd.height = sizes->surface_height;
        mode_cmd.pitches[0] = mode_cmd.width * 4;
        mode_cmd.pixel_format = drm_mode_legacy_fb_format(32, 24);

        switch (mode_cmd.pixel_format) {
#ifdef __BIG_ENDIAN
        case DRM_FORMAT_XRGB8888:
                format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
                break;
        case DRM_FORMAT_ARGB8888:
                format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
                break;
        case DRM_FORMAT_BGRX8888:
                format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
                break;
        case DRM_FORMAT_BGRA8888:
                format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
                break;
        case DRM_FORMAT_RGBX8888:
                format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
                break;
        case DRM_FORMAT_RGBA8888:
                format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
                break;
        case DRM_FORMAT_XBGR8888:
                format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
                break;
        case DRM_FORMAT_ABGR8888:
                format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
                break;
#else
        case DRM_FORMAT_XRGB8888:
                format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
                break;
        case DRM_FORMAT_ARGB8888:
                format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
                break;
        case DRM_FORMAT_BGRX8888:
                format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
                break;
        case DRM_FORMAT_BGRA8888:
                format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
                break;
        case DRM_FORMAT_RGBX8888:
                format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
                break;
        case DRM_FORMAT_RGBA8888:
                format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
                break;
        case DRM_FORMAT_XBGR8888:
                format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
                break;
        case DRM_FORMAT_ABGR8888:
                format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
                break;
#endif
        default:
                DRM_ERROR("failed to find virtio gpu format for %d\n",
                          mode_cmd.pixel_format);
                return -EINVAL;
        }

        size = mode_cmd.pitches[0] * mode_cmd.height;
        obj = virtio_gpu_alloc_object(dev, size, false, true);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        virtio_gpu_resource_id_get(vgdev, &resid);
        virtio_gpu_cmd_create_resource(vgdev, resid, format,
                                       mode_cmd.width, mode_cmd.height);

        ret = virtio_gpu_vmap_fb(vgdev, obj);
        if (ret) {
                DRM_ERROR("failed to vmap fb %d\n", ret);
                goto err_obj_vmap;
        }

        /* attach the object to the resource */
        ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL);
        if (ret)
                goto err_obj_attach;

        info = drm_fb_helper_alloc_fbi(helper);
        if (IS_ERR(info)) {
                ret = PTR_ERR(info);
                goto err_fb_alloc;
        }

        info->par = helper;

        ret = virtio_gpu_framebuffer_init(dev, &vfbdev->vgfb,
                                          &mode_cmd, &obj->gem_base);
        if (ret)
                goto err_fb_init;

        fb = &vfbdev->vgfb.base;

        vfbdev->helper.fb = fb;

        strcpy(info->fix.id, "virtiodrmfb");
        info->flags = FBINFO_DEFAULT;
        info->fbops = &virtio_gpufb_ops;
        info->pixmap.flags = FB_PIXMAP_SYSTEM;

        info->screen_base = obj->vmap;
        info->screen_size = obj->gem_base.size;
        drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
        drm_fb_helper_fill_var(info, &vfbdev->helper,
                               sizes->fb_width, sizes->fb_height);

        info->fix.mmio_start = 0;
        info->fix.mmio_len = 0;
        return 0;

err_fb_init:
        drm_fb_helper_release_fbi(helper);
err_fb_alloc:
        virtio_gpu_cmd_resource_inval_backing(vgdev, resid);
err_obj_attach:
err_obj_vmap:
        virtio_gpu_gem_free_object(&obj->gem_base);
        return ret;
}

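/*
 * Unregister the fbdev console and release the helper state created
 * by virtio_gpufb_create().
 */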
static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
                                    struct virtio_gpu_fbdev *vgfbdev)
{
        struct virtio_gpu_framebuffer *vgfb = &vgfbdev->vgfb;

        drm_fb_helper_unregister_fbi(&vgfbdev->helper);
        drm_fb_helper_release_fbi(&vgfbdev->helper);

        if (vgfb->obj)
                vgfb->obj = NULL;
        drm_fb_helper_fini(&vgfbdev->helper);
        drm_framebuffer_cleanup(&vgfb->base);

        return 0;
}

static struct drm_fb_helper_funcs virtio_gpu_fb_helper_funcs = {
        .fb_probe = virtio_gpufb_create,
};

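/*
 * Allocate the fbdev state, wire it to the device, and let the DRM fb
 * helper pick an initial configuration for all connectors.
 */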
int virtio_gpu_fbdev_init(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_fbdev *vgfbdev;
        int bpp_sel = 32; /* TODO: parameter from somewhere? */
        int ret;

        vgfbdev = kzalloc(sizeof(struct virtio_gpu_fbdev), GFP_KERNEL);
        if (!vgfbdev)
                return -ENOMEM;

        vgfbdev->vgdev = vgdev;
        vgdev->vgfbdev = vgfbdev;
        INIT_DELAYED_WORK(&vgfbdev->work, virtio_gpu_fb_dirty_work);

        drm_fb_helper_prepare(vgdev->ddev, &vgfbdev->helper,
                              &virtio_gpu_fb_helper_funcs);
        ret = drm_fb_helper_init(vgdev->ddev, &vgfbdev->helper,
                                 vgdev->num_scanouts,
                                 VIRTIO_GPUFB_CONN_LIMIT);
        if (ret) {
                kfree(vgfbdev);
                return ret;
        }

        drm_fb_helper_single_add_all_connectors(&vgfbdev->helper);
        drm_fb_helper_initial_config(&vgfbdev->helper, bpp_sel);
        return 0;
}

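/* Tear down the fbdev console, if one was set up. */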
void virtio_gpu_fbdev_fini(struct virtio_gpu_device *vgdev)
{
        if (!vgdev->vgfbdev)
                return;

        virtio_gpu_fbdev_destroy(vgdev->ddev, vgdev->vgfbdev);
        kfree(vgdev->vgfbdev);
        vgdev->vgfbdev = NULL;
}