blob: b5fb968d2d5cddc6a3c8eaecc21e2c97fde9bb49 [file] [log] [blame]
Zach Reizner502e95c2015-03-04 16:33:41 -08001/*
2 * Copyright 2011 Red Hat, Inc.
3 * Copyright © 2014 The Chromium OS Authors
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software")
7 * to deal in the software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
20 * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Adam Jackson <ajax@redhat.com>
25 * Ben Widawsky <ben@bwidawsk.net>
26 */
27
28/**
29 * This is vgem, a (non-hardware-backed) GEM service. This is used by Mesa's
30 * software renderer and the X server for efficient buffer sharing.
31 */
32
33#include <linux/module.h>
34#include <linux/ramfs.h>
35#include <linux/shmem_fs.h>
36#include <linux/dma-buf.h>
37#include "vgem_drv.h"
38
39#define DRIVER_NAME "vgem"
40#define DRIVER_DESC "Virtual GEM provider"
41#define DRIVER_DATE "20120112"
42#define DRIVER_MAJOR 1
43#define DRIVER_MINOR 0
44
/* Final-unreference callback: tear down GEM core state, then free our bo. */
static void vgem_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	/* Release the GEM core bookkeeping first, then the wrapper itself. */
	drm_gem_object_release(obj);
	kfree(bo);
}
52
/*
 * Page-fault handler for mmap'ed vgem objects.
 *
 * Looks up (instantiating on demand) the shmem page that backs the faulting
 * user address and hands it to the core VM via vmf->page, or translates the
 * shmem error into a VM_FAULT_* code.
 */
static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_vgem_gem_object *obj = vma->vm_private_data;
	/* We don't use vmf->pgoff since that has the fake offset */
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	struct page *page;

	/* obj->base.filp is the shmem file backing this GEM object; index the
	 * mapping by the page offset of the fault within the VMA.
	 */
	page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping,
				       (vaddr - vma->vm_start) >> PAGE_SHIFT);
	if (!IS_ERR(page)) {
		/* Hand the page (with its reference) to the core VM. */
		vmf->page = page;
		return 0;
	} else switch (PTR_ERR(page)) {
		case -ENOSPC:
		case -ENOMEM:
			return VM_FAULT_OOM;
		case -EBUSY:
			return VM_FAULT_RETRY;
		case -EFAULT:
		case -EINVAL:
			return VM_FAULT_SIGBUS;
		default:
			/* Unexpected shmem error: warn once, report SIGBUS. */
			WARN_ON_ONCE(PTR_ERR(page));
			return VM_FAULT_SIGBUS;
	}
}
79
/* VM ops for vgem mmaps: our fault handler plus stock GEM open/close. */
static const struct vm_operations_struct vgem_gem_vm_ops = {
	.fault = vgem_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
85
86/* ioctls */
87
88static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
89 struct drm_file *file,
90 unsigned int *handle,
91 unsigned long size)
92{
93 struct drm_vgem_gem_object *obj;
Chris Wilson5ba6c9f2016-06-23 15:35:32 +010094 int ret;
Zach Reizner502e95c2015-03-04 16:33:41 -080095
96 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
97 if (!obj)
98 return ERR_PTR(-ENOMEM);
99
Chris Wilson5ba6c9f2016-06-23 15:35:32 +0100100 ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
101 if (ret)
102 goto err_free;
Zach Reizner502e95c2015-03-04 16:33:41 -0800103
Chris Wilson5ba6c9f2016-06-23 15:35:32 +0100104 ret = drm_gem_handle_create(file, &obj->base, handle);
105 drm_gem_object_unreference_unlocked(&obj->base);
106 if (ret)
107 goto err;
Zach Reizner502e95c2015-03-04 16:33:41 -0800108
Chris Wilson5ba6c9f2016-06-23 15:35:32 +0100109 return &obj->base;
Daniel Vetter7f340a22016-03-30 11:40:50 +0200110
Chris Wilson5ba6c9f2016-06-23 15:35:32 +0100111err_free:
Zach Reizner502e95c2015-03-04 16:33:41 -0800112 kfree(obj);
Chris Wilson5ba6c9f2016-06-23 15:35:32 +0100113err:
114 return ERR_PTR(ret);
Zach Reizner502e95c2015-03-04 16:33:41 -0800115}
116
117static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
118 struct drm_mode_create_dumb *args)
119{
120 struct drm_gem_object *gem_object;
Chris Wilson5ba6c9f2016-06-23 15:35:32 +0100121 u64 pitch, size;
Zach Reizner502e95c2015-03-04 16:33:41 -0800122
Chris Wilson5ba6c9f2016-06-23 15:35:32 +0100123 pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
Zach Reizner502e95c2015-03-04 16:33:41 -0800124 size = args->height * pitch;
125 if (size == 0)
126 return -EINVAL;
127
128 gem_object = vgem_gem_create(dev, file, &args->handle, size);
Chris Wilson5ba6c9f2016-06-23 15:35:32 +0100129 if (IS_ERR(gem_object))
Zach Reizner502e95c2015-03-04 16:33:41 -0800130 return PTR_ERR(gem_object);
Zach Reizner502e95c2015-03-04 16:33:41 -0800131
132 args->size = gem_object->size;
133 args->pitch = pitch;
134
135 DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
136
137 return 0;
138}
139
Chris Wilson5ba6c9f2016-06-23 15:35:32 +0100140static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
141 uint32_t handle, uint64_t *offset)
Zach Reizner502e95c2015-03-04 16:33:41 -0800142{
Zach Reizner502e95c2015-03-04 16:33:41 -0800143 struct drm_gem_object *obj;
Chris Wilson5ba6c9f2016-06-23 15:35:32 +0100144 int ret;
Zach Reizner502e95c2015-03-04 16:33:41 -0800145
Chris Wilsona8ad0bd2016-05-09 11:04:54 +0100146 obj = drm_gem_object_lookup(file, handle);
Daniel Vetter0797ac62016-03-30 11:40:51 +0200147 if (!obj)
148 return -ENOENT;
Zach Reizner502e95c2015-03-04 16:33:41 -0800149
Chris Wilson5ba6c9f2016-06-23 15:35:32 +0100150 if (!obj->filp) {
151 ret = -EINVAL;
152 goto unref;
153 }
154
Daniel Vettere34274f2016-03-30 11:40:49 +0200155 ret = drm_gem_create_mmap_offset(obj);
156 if (ret)
157 goto unref;
Zach Reizner502e95c2015-03-04 16:33:41 -0800158
Zach Reizner502e95c2015-03-04 16:33:41 -0800159 *offset = drm_vma_node_offset_addr(&obj->vma_node);
Zach Reizner502e95c2015-03-04 16:33:41 -0800160unref:
Daniel Vetter0797ac62016-03-30 11:40:51 +0200161 drm_gem_object_unreference_unlocked(obj);
162
Zach Reizner502e95c2015-03-04 16:33:41 -0800163 return ret;
164}
165
/* vgem has no driver-private ioctls; only core DRM/dumb-buffer ioctls. */
static struct drm_ioctl_desc vgem_ioctls[] = {
};
168
Chris Wilson5ba6c9f2016-06-23 15:35:32 +0100169static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
170{
171 unsigned long flags = vma->vm_flags;
172 int ret;
173
174 ret = drm_gem_mmap(filp, vma);
175 if (ret)
176 return ret;
177
178 /* Keep the WC mmaping set by drm_gem_mmap() but our pages
179 * are ordinary and not special.
180 */
181 vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
182 return 0;
183}
184
/* File operations for /dev/dri nodes: stock DRM helpers plus our mmap. */
static const struct file_operations vgem_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.mmap = vgem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.unlocked_ioctl = drm_ioctl,
	.release = drm_release,
};
194
Chris Wilsone6f15b72016-07-11 14:08:07 +0100195static int vgem_prime_pin(struct drm_gem_object *obj)
196{
197 long n_pages = obj->size >> PAGE_SHIFT;
198 struct page **pages;
199
200 /* Flush the object from the CPU cache so that importers can rely
201 * on coherent indirect access via the exported dma-address.
202 */
203 pages = drm_gem_get_pages(obj);
204 if (IS_ERR(pages))
205 return PTR_ERR(pages);
206
207 drm_clflush_pages(pages, n_pages);
208 drm_gem_put_pages(obj, pages, true, false);
209
210 return 0;
211}
212
213static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
214{
215 struct sg_table *st;
216 struct page **pages;
217
218 pages = drm_gem_get_pages(obj);
219 if (IS_ERR(pages))
220 return ERR_CAST(pages);
221
222 st = drm_prime_pages_to_sg(pages, obj->size >> PAGE_SHIFT);
223 drm_gem_put_pages(obj, pages, false, false);
224
225 return st;
226}
227
228static void *vgem_prime_vmap(struct drm_gem_object *obj)
229{
230 long n_pages = obj->size >> PAGE_SHIFT;
231 struct page **pages;
232 void *addr;
233
234 pages = drm_gem_get_pages(obj);
235 if (IS_ERR(pages))
236 return NULL;
237
238 addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL_IO));
239 drm_gem_put_pages(obj, pages, false, false);
240
241 return addr;
242}
243
/* PRIME vunmap: undo vgem_prime_vmap() by tearing down the kernel mapping. */
static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	vunmap(vaddr);
}
248
249static int vgem_prime_mmap(struct drm_gem_object *obj,
250 struct vm_area_struct *vma)
251{
252 int ret;
253
254 if (obj->size < vma->vm_end - vma->vm_start)
255 return -EINVAL;
256
257 if (!obj->filp)
258 return -ENODEV;
259
260 ret = obj->filp->f_op->mmap(obj->filp, vma);
261 if (ret)
262 return ret;
263
264 fput(vma->vm_file);
265 vma->vm_file = get_file(obj->filp);
266 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
267 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
268
269 return 0;
270}
271
/* Driver description: a GEM+PRIME-only device with no modeset hardware. */
static struct drm_driver vgem_driver = {
	.driver_features = DRIVER_GEM | DRIVER_PRIME,
	.gem_free_object_unlocked = vgem_gem_free_object,
	.gem_vm_ops = &vgem_gem_vm_ops,
	.ioctls = vgem_ioctls,
	.fops = &vgem_driver_fops,

	/* Dumb-buffer support is the primary allocation interface. */
	.dumb_create = vgem_gem_dumb_create,
	.dumb_map_offset = vgem_gem_dumb_map,

	/* Export-only PRIME: no handle_from_fd/import hooks are wired up. */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.gem_prime_pin = vgem_prime_pin,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_get_sg_table = vgem_prime_get_sg_table,
	.gem_prime_vmap = vgem_prime_vmap,
	.gem_prime_vunmap = vgem_prime_vunmap,
	.gem_prime_mmap = vgem_prime_mmap,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
};
296
/* The single virtual device instance, created at module load. */
static struct drm_device *vgem_device;
Zach Reizner502e95c2015-03-04 16:33:41 -0800298
299static int __init vgem_init(void)
300{
301 int ret;
302
303 vgem_device = drm_dev_alloc(&vgem_driver, NULL);
304 if (!vgem_device) {
305 ret = -ENOMEM;
306 goto out;
307 }
308
Zach Reizner502e95c2015-03-04 16:33:41 -0800309 ret = drm_dev_register(vgem_device, 0);
Zach Reizner502e95c2015-03-04 16:33:41 -0800310 if (ret)
311 goto out_unref;
312
313 return 0;
314
315out_unref:
316 drm_dev_unref(vgem_device);
317out:
318 return ret;
319}
320
/* Module exit: unregister the device and drop the final reference. */
static void __exit vgem_exit(void)
{
	drm_dev_unregister(vgem_device);
	drm_dev_unref(vgem_device);
}
326
/* Module entry/exit points and metadata. */
module_init(vgem_init);
module_exit(vgem_exit);

MODULE_AUTHOR("Red Hat, Inc.");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");