blob: f36c14729b5547b2a36f67b65255b73618063919 [file] [log] [blame]
Zach Reizner502e95c2015-03-04 16:33:41 -08001/*
2 * Copyright 2011 Red Hat, Inc.
3 * Copyright © 2014 The Chromium OS Authors
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software")
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
20 * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Adam Jackson <ajax@redhat.com>
25 * Ben Widawsky <ben@bwidawsk.net>
26 */
27
28/**
29 * This is vgem, a (non-hardware-backed) GEM service. This is used by Mesa's
30 * software renderer and the X server for efficient buffer sharing.
31 */
32
33#include <linux/module.h>
34#include <linux/ramfs.h>
35#include <linux/shmem_fs.h>
36#include <linux/dma-buf.h>
37#include "vgem_drv.h"
38
39#define DRIVER_NAME "vgem"
40#define DRIVER_DESC "Virtual GEM provider"
41#define DRIVER_DATE "20120112"
42#define DRIVER_MAJOR 1
43#define DRIVER_MINOR 0
44
/* Destructor for a vgem BO: tear down the generic GEM state (backing
 * shmem file, mmap offset) and then free the wrapping object itself.
 */
static void vgem_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	drm_gem_object_release(obj);
	kfree(bo);
}
52
Zach Reizner502e95c2015-03-04 16:33:41 -080053static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
54{
55 struct drm_vgem_gem_object *obj = vma->vm_private_data;
Zach Reizner502e95c2015-03-04 16:33:41 -080056 /* We don't use vmf->pgoff since that has the fake offset */
Chris Wilson5ba6c9f2016-06-23 15:35:32 +010057 unsigned long vaddr = (unsigned long)vmf->virtual_address;
58 struct page *page;
Zach Reizner502e95c2015-03-04 16:33:41 -080059
Chris Wilson5ba6c9f2016-06-23 15:35:32 +010060 page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping,
61 (vaddr - vma->vm_start) >> PAGE_SHIFT);
62 if (!IS_ERR(page)) {
63 vmf->page = page;
64 return 0;
65 } else switch (PTR_ERR(page)) {
66 case -ENOSPC:
67 case -ENOMEM:
68 return VM_FAULT_OOM;
69 case -EBUSY:
70 return VM_FAULT_RETRY;
71 case -EFAULT:
72 case -EINVAL:
73 return VM_FAULT_SIGBUS;
74 default:
75 WARN_ON_ONCE(PTR_ERR(page));
76 return VM_FAULT_SIGBUS;
Zach Reizner502e95c2015-03-04 16:33:41 -080077 }
78}
79
/* VM operations for userspace mappings of vgem BOs: pages are filled in
 * lazily by vgem_gem_fault(); open/close keep the GEM refcount balanced
 * across vma duplication and teardown.
 */
static const struct vm_operations_struct vgem_gem_vm_ops = {
	.fault = vgem_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
85
Chris Wilson40777982016-07-15 09:31:11 +010086static int vgem_open(struct drm_device *dev, struct drm_file *file)
87{
88 struct vgem_file *vfile;
89 int ret;
90
91 vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
92 if (!vfile)
93 return -ENOMEM;
94
95 file->driver_priv = vfile;
96
97 ret = vgem_fence_open(vfile);
98 if (ret) {
99 kfree(vfile);
100 return ret;
101 }
102
103 return 0;
104}
105
106static void vgem_preclose(struct drm_device *dev, struct drm_file *file)
107{
108 struct vgem_file *vfile = file->driver_priv;
109
110 vgem_fence_close(vfile);
111 kfree(vfile);
112}
113
Zach Reizner502e95c2015-03-04 16:33:41 -0800114/* ioctls */
115
116static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
117 struct drm_file *file,
118 unsigned int *handle,
119 unsigned long size)
120{
121 struct drm_vgem_gem_object *obj;
Chris Wilson5ba6c9f2016-06-23 15:35:32 +0100122 int ret;
Zach Reizner502e95c2015-03-04 16:33:41 -0800123
124 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
125 if (!obj)
126 return ERR_PTR(-ENOMEM);
127
Chris Wilson5ba6c9f2016-06-23 15:35:32 +0100128 ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
129 if (ret)
130 goto err_free;
Zach Reizner502e95c2015-03-04 16:33:41 -0800131
Chris Wilson5ba6c9f2016-06-23 15:35:32 +0100132 ret = drm_gem_handle_create(file, &obj->base, handle);
133 drm_gem_object_unreference_unlocked(&obj->base);
134 if (ret)
135 goto err;
Zach Reizner502e95c2015-03-04 16:33:41 -0800136
Chris Wilson5ba6c9f2016-06-23 15:35:32 +0100137 return &obj->base;
Daniel Vetter7f340a22016-03-30 11:40:50 +0200138
Chris Wilson5ba6c9f2016-06-23 15:35:32 +0100139err_free:
Zach Reizner502e95c2015-03-04 16:33:41 -0800140 kfree(obj);
Chris Wilson5ba6c9f2016-06-23 15:35:32 +0100141err:
142 return ERR_PTR(ret);
Zach Reizner502e95c2015-03-04 16:33:41 -0800143}
144
145static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
146 struct drm_mode_create_dumb *args)
147{
148 struct drm_gem_object *gem_object;
Chris Wilson5ba6c9f2016-06-23 15:35:32 +0100149 u64 pitch, size;
Zach Reizner502e95c2015-03-04 16:33:41 -0800150
Chris Wilson5ba6c9f2016-06-23 15:35:32 +0100151 pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
Zach Reizner502e95c2015-03-04 16:33:41 -0800152 size = args->height * pitch;
153 if (size == 0)
154 return -EINVAL;
155
156 gem_object = vgem_gem_create(dev, file, &args->handle, size);
Chris Wilson5ba6c9f2016-06-23 15:35:32 +0100157 if (IS_ERR(gem_object))
Zach Reizner502e95c2015-03-04 16:33:41 -0800158 return PTR_ERR(gem_object);
Zach Reizner502e95c2015-03-04 16:33:41 -0800159
160 args->size = gem_object->size;
161 args->pitch = pitch;
162
163 DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
164
165 return 0;
166}
167
Chris Wilson5ba6c9f2016-06-23 15:35:32 +0100168static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
169 uint32_t handle, uint64_t *offset)
Zach Reizner502e95c2015-03-04 16:33:41 -0800170{
Zach Reizner502e95c2015-03-04 16:33:41 -0800171 struct drm_gem_object *obj;
Chris Wilson5ba6c9f2016-06-23 15:35:32 +0100172 int ret;
Zach Reizner502e95c2015-03-04 16:33:41 -0800173
Chris Wilsona8ad0bd2016-05-09 11:04:54 +0100174 obj = drm_gem_object_lookup(file, handle);
Daniel Vetter0797ac62016-03-30 11:40:51 +0200175 if (!obj)
176 return -ENOENT;
Zach Reizner502e95c2015-03-04 16:33:41 -0800177
Chris Wilson5ba6c9f2016-06-23 15:35:32 +0100178 if (!obj->filp) {
179 ret = -EINVAL;
180 goto unref;
181 }
182
Daniel Vettere34274f2016-03-30 11:40:49 +0200183 ret = drm_gem_create_mmap_offset(obj);
184 if (ret)
185 goto unref;
Zach Reizner502e95c2015-03-04 16:33:41 -0800186
Zach Reizner502e95c2015-03-04 16:33:41 -0800187 *offset = drm_vma_node_offset_addr(&obj->vma_node);
Zach Reizner502e95c2015-03-04 16:33:41 -0800188unref:
Daniel Vetter0797ac62016-03-30 11:40:51 +0200189 drm_gem_object_unreference_unlocked(obj);
190
Zach Reizner502e95c2015-03-04 16:33:41 -0800191 return ret;
192}
193
/* Driver-private ioctls: fence attach/signal on vgem buffers.  Both
 * require an authenticated client and are allowed on render nodes.
 */
static struct drm_ioctl_desc vgem_ioctls[] = {
	DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
198
Chris Wilson5ba6c9f2016-06-23 15:35:32 +0100199static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
200{
201 unsigned long flags = vma->vm_flags;
202 int ret;
203
204 ret = drm_gem_mmap(filp, vma);
205 if (ret)
206 return ret;
207
208 /* Keep the WC mmaping set by drm_gem_mmap() but our pages
209 * are ordinary and not special.
210 */
211 vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
212 return 0;
213}
214
/* File operations: standard DRM entry points, except mmap which is
 * wrapped by vgem_mmap() to adjust the vma flags.
 */
static const struct file_operations vgem_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.mmap = vgem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.unlocked_ioctl = drm_ioctl,
	.release = drm_release,
};
224
Chris Wilsone6f15b72016-07-11 14:08:07 +0100225static int vgem_prime_pin(struct drm_gem_object *obj)
226{
227 long n_pages = obj->size >> PAGE_SHIFT;
228 struct page **pages;
229
230 /* Flush the object from the CPU cache so that importers can rely
231 * on coherent indirect access via the exported dma-address.
232 */
233 pages = drm_gem_get_pages(obj);
234 if (IS_ERR(pages))
235 return PTR_ERR(pages);
236
237 drm_clflush_pages(pages, n_pages);
238 drm_gem_put_pages(obj, pages, true, false);
239
240 return 0;
241}
242
243static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
244{
245 struct sg_table *st;
246 struct page **pages;
247
248 pages = drm_gem_get_pages(obj);
249 if (IS_ERR(pages))
250 return ERR_CAST(pages);
251
252 st = drm_prime_pages_to_sg(pages, obj->size >> PAGE_SHIFT);
253 drm_gem_put_pages(obj, pages, false, false);
254
255 return st;
256}
257
258static void *vgem_prime_vmap(struct drm_gem_object *obj)
259{
260 long n_pages = obj->size >> PAGE_SHIFT;
261 struct page **pages;
262 void *addr;
263
264 pages = drm_gem_get_pages(obj);
265 if (IS_ERR(pages))
266 return NULL;
267
Chris Wilsoncf47a072016-07-12 13:04:50 +0100268 addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
Chris Wilsone6f15b72016-07-11 14:08:07 +0100269 drm_gem_put_pages(obj, pages, false, false);
270
271 return addr;
272}
273
/* PRIME vunmap hook: release a mapping created by vgem_prime_vmap(). */
static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	vunmap(vaddr);
}
278
279static int vgem_prime_mmap(struct drm_gem_object *obj,
280 struct vm_area_struct *vma)
281{
282 int ret;
283
284 if (obj->size < vma->vm_end - vma->vm_start)
285 return -EINVAL;
286
287 if (!obj->filp)
288 return -ENODEV;
289
290 ret = obj->filp->f_op->mmap(obj->filp, vma);
291 if (ret)
292 return ret;
293
294 fput(vma->vm_file);
295 vma->vm_file = get_file(obj->filp);
296 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
297 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
298
299 return 0;
300}
301
/* Driver vtable tying together the GEM, dumb-buffer and PRIME hooks
 * defined above.  vgem has no modesetting: only DRIVER_GEM and
 * DRIVER_PRIME are advertised.
 */
static struct drm_driver vgem_driver = {
	.driver_features = DRIVER_GEM | DRIVER_PRIME,
	.open = vgem_open,
	.preclose = vgem_preclose,
	.gem_free_object_unlocked = vgem_gem_free_object,
	.gem_vm_ops = &vgem_gem_vm_ops,
	.ioctls = vgem_ioctls,
	.num_ioctls = ARRAY_SIZE(vgem_ioctls),
	.fops = &vgem_driver_fops,

	/* Dumb-buffer support. */
	.dumb_create = vgem_gem_dumb_create,
	.dumb_map_offset = vgem_gem_dumb_map,

	/* PRIME export (no import paths are provided). */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.gem_prime_pin = vgem_prime_pin,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_get_sg_table = vgem_prime_get_sg_table,
	.gem_prime_vmap = vgem_prime_vmap,
	.gem_prime_vunmap = vgem_prime_vunmap,
	.gem_prime_mmap = vgem_prime_mmap,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
};
329
/* The single virtual DRM device instance, created by vgem_init(). */
static struct drm_device *vgem_device;
Zach Reizner502e95c2015-03-04 16:33:41 -0800331
332static int __init vgem_init(void)
333{
334 int ret;
335
336 vgem_device = drm_dev_alloc(&vgem_driver, NULL);
Tom Gundersen0f288602016-09-21 16:59:19 +0200337 if (IS_ERR(vgem_device)) {
338 ret = PTR_ERR(vgem_device);
Zach Reizner502e95c2015-03-04 16:33:41 -0800339 goto out;
340 }
341
Zach Reizner502e95c2015-03-04 16:33:41 -0800342 ret = drm_dev_register(vgem_device, 0);
Zach Reizner502e95c2015-03-04 16:33:41 -0800343 if (ret)
344 goto out_unref;
345
346 return 0;
347
348out_unref:
349 drm_dev_unref(vgem_device);
350out:
351 return ret;
352}
353
/* Module unload: unregister the device and drop the final reference. */
static void __exit vgem_exit(void)
{
	drm_dev_unregister(vgem_device);
	drm_dev_unref(vgem_device);
}
359
/* Module entry/exit points and metadata. */
module_init(vgem_init);
module_exit(vgem_exit);

MODULE_AUTHOR("Red Hat, Inc.");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");