/*
 * Copyright (C) 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */

#include <drm/drmP.h>
#include "udl_drv.h"
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

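/*
 * Allocate a udl GEM object and initialize the embedded drm_gem_object.
 * The shmem backing pages are not allocated here; they are populated
 * lazily by udl_gem_get_pages() on first use (mmap fault or vmap).
 */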
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
					    size_t size)
{
	struct udl_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}

	obj->flags = UDL_BO_CACHEABLE;
	return obj;
}

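/*
 * Create a GEM object of @size bytes (rounded up to a page multiple)
 * and return a handle to it in @handle_p.  The handle holds the only
 * long-term reference; the local reference taken at allocation is
 * dropped before returning, so deleting the handle frees the object.
 */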
static int
udl_gem_create(struct drm_file *file,
	       struct drm_device *dev,
	       uint64_t size,
	       uint32_t *handle_p)
{
	struct udl_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);

	obj = udl_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		kfree(obj);
		return ret;
	}

	drm_gem_object_unreference_unlocked(&obj->base);
	*handle_p = handle;
	return 0;
}

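/*
 * Set the page protection for a userspace mapping according to the
 * object's cache flags: cacheable, write-combined, or (by default)
 * uncached.  Locally allocated objects are marked UDL_BO_CACHEABLE
 * at creation time.
 */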
static void update_vm_cache_attr(struct udl_gem_object *obj,
				 struct vm_area_struct *vma)
{
	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

	/* non-cacheable as default. */
	if (obj->flags & UDL_BO_CACHEABLE) {
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	} else if (obj->flags & UDL_BO_WC) {
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else {
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	}
}

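/*
 * DRM_IOCTL_MODE_CREATE_DUMB entry point: compute pitch and size from
 * the requested width/height/bpp, then create a GEM object to back the
 * dumb buffer.  E.g. a 1024x768 request at 32 bpp gives pitch = 1024 *
 * DIV_ROUND_UP(32, 8) = 4096 bytes and size = 4096 * 768 bytes.
 */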
int udl_dumb_create(struct drm_file *file,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	args->size = args->pitch * args->height;
	return udl_gem_create(file, dev,
			      args->size, &args->handle);
}

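/*
 * mmap() handler for the drm fd.  drm_gem_mmap() looks the object up
 * via its fake mmap offset and installs the driver's vm_ops; the VMA
 * is then switched from VM_PFNMAP to VM_MIXEDMAP because the fault
 * handler inserts real struct pages with vm_insert_page().
 */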
int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	update_vm_cache_attr(to_udl_bo(vma->vm_private_data), vma);

	return ret;
}

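/*
 * Page-fault handler for mapped objects: insert the already-populated
 * shmem page backing the faulting address.  A fault before the pages
 * exist (obj->pages == NULL) raises SIGBUS.  -EAGAIN and -ERESTARTSYS
 * map to VM_FAULT_NOPAGE so the fault is simply retried.
 */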
int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
	struct page *page;
	unsigned int page_offset;
	int ret = 0;

	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (!obj->pages)
		return VM_FAULT_SIGBUS;

	page = obj->pages[page_offset];
	ret = vm_insert_page(vma, vmf->address, page);
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

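/*
 * Populate obj->pages with the object's shmem backing pages.  Safe to
 * call repeatedly; it is a no-op once the page array exists.  The
 * array covers base.size / PAGE_SIZE pages and is released by
 * udl_gem_put_pages().
 */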
int udl_gem_get_pages(struct udl_gem_object *obj)
{
	struct page **pages;

	if (obj->pages)
		return 0;

	pages = drm_gem_get_pages(&obj->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	obj->pages = pages;

	return 0;
}

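/*
 * Release the page array.  For imported (dma-buf) objects the pages
 * belong to the exporter, so only the array itself is freed; for
 * locally allocated objects the shmem pages are dropped as well,
 * without being marked dirty or accessed.
 */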
void udl_gem_put_pages(struct udl_gem_object *obj)
{
	if (obj->base.import_attach) {
		drm_free_large(obj->pages);
		obj->pages = NULL;
		return;
	}

	drm_gem_put_pages(&obj->base, obj->pages, false, false);
	obj->pages = NULL;
}

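/*
 * Map the whole object into kernel address space at obj->vmapping.
 * Imported objects are mapped through the exporter's dma_buf_vmap();
 * local objects get their shmem pages and vmap() them.  The driver's
 * framebuffer damage handling reads pixels through this mapping.
 */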
int udl_gem_vmap(struct udl_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int ret;

	if (obj->base.import_attach) {
		obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
		if (!obj->vmapping)
			return -ENOMEM;
		return 0;
	}

	ret = udl_gem_get_pages(obj);
	if (ret)
		return ret;

	obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
	if (!obj->vmapping)
		return -ENOMEM;
	return 0;
}

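/*
 * Undo udl_gem_vmap(): imported objects go back through
 * dma_buf_vunmap(), local objects are vunmap()ed and their page
 * references dropped.
 */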
void udl_gem_vunmap(struct udl_gem_object *obj)
{
	if (obj->base.import_attach) {
		dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
		return;
	}

	vunmap(obj->vmapping);

	udl_gem_put_pages(obj);
}

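/*
 * Final unreference: tear down in reverse order of setup - the kernel
 * mapping, any prime import state (plus the device reference taken at
 * import time), the page array, and the fake mmap offset.
 */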
void udl_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct udl_gem_object *obj = to_udl_bo(gem_obj);

	if (obj->vmapping)
		udl_gem_vunmap(obj);

	if (gem_obj->import_attach) {
		drm_prime_gem_destroy(gem_obj, obj->sg);
		put_device(gem_obj->dev->dev);
	}

	if (obj->pages)
		udl_gem_put_pages(obj);

	drm_gem_free_mmap_offset(gem_obj);
}

/*
 * The dumb-buffer interface does not mmap() the GEM object directly;
 * as usual, userspace mmap()s the drm fd at a per-object fake offset.
 * This MODE_MAP_DUMB handler looks the object up, makes sure its pages
 * and mmap offset exist, and returns that offset.
 */
int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
		 uint32_t handle, uint64_t *offset)
{
	struct udl_gem_object *gobj;
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	gobj = to_udl_bo(obj);

	ret = udl_gem_get_pages(gobj);
	if (ret)
		goto out;
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&gobj->base.vma_node);

out:
	drm_gem_object_unreference(&gobj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}