/*
 * Copyright (C) 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */

#include <drm/drmP.h>
#include "udl_drv.h"
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

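/*
 * Allocate a udl GEM object of the given size and initialize the embedded
 * shmem-backed drm_gem_object. Returns NULL on failure.
 */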
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
					    size_t size)
{
	struct udl_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}

	return obj;
}

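/*
 * Create a GEM object of the requested size (rounded up to a whole number
 * of pages) and return a userspace handle for it. The handle holds the
 * only long-term reference; the creation reference is dropped here.
 */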
static int
udl_gem_create(struct drm_file *file,
	       struct drm_device *dev,
	       uint64_t size,
	       uint32_t *handle_p)
{
	struct udl_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);

	obj = udl_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		kfree(obj);
		return ret;
	}

	drm_gem_object_unreference(&obj->base);
	*handle_p = handle;
	return 0;
}

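/*
 * Dumb-buffer create: derive pitch and size from width/height/bpp and
 * create a GEM object plus handle to back the buffer.
 */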
int udl_dumb_create(struct drm_file *file,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	args->pitch = args->width * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	return udl_gem_create(file, dev,
			      args->size, &args->handle);
}

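/*
 * mmap on the drm fd: let drm_gem_mmap() set up the VMA, then switch it
 * from VM_PFNMAP to VM_MIXEDMAP so the fault handler can insert
 * individual struct pages with vm_insert_page().
 */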
int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	return ret;
}

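/*
 * Page fault handler: insert the backing page for the faulting address
 * into the VMA and translate the vm_insert_page() result into a
 * VM_FAULT_* code.
 */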
int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
	struct page *page;
	unsigned int page_offset;
	int ret = 0;

	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	if (!obj->pages)
		return VM_FAULT_SIGBUS;

	page = obj->pages[page_offset];
	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

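/* Pin the object's shmem backing pages (no-op if already pinned). */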
static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
{
	struct page **pages;

	if (obj->pages)
		return 0;

	pages = drm_gem_get_pages(&obj->base, gfpmask);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	obj->pages = pages;

	return 0;
}

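/* Release the backing pages without marking them dirty or accessed. */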
static void udl_gem_put_pages(struct udl_gem_object *obj)
{
	drm_gem_put_pages(&obj->base, obj->pages, false, false);
	obj->pages = NULL;
}

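/*
 * Map the whole object into kernel address space: imported dma-bufs go
 * through dma_buf_vmap(), native objects get their pages pinned and then
 * vmap()ed.
 */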
int udl_gem_vmap(struct udl_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int ret;

	if (obj->base.import_attach) {
		obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
		if (!obj->vmapping)
			return -ENOMEM;
		return 0;
	}

	ret = udl_gem_get_pages(obj, GFP_KERNEL);
	if (ret)
		return ret;

	obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
	if (!obj->vmapping)
		return -ENOMEM;
	return 0;
}

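/* Undo udl_gem_vmap(): drop the kernel mapping and unpin the pages. */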
void udl_gem_vunmap(struct udl_gem_object *obj)
{
	if (obj->base.import_attach) {
		dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
		return;
	}

	if (obj->vmapping)
		vunmap(obj->vmapping);

	udl_gem_put_pages(obj);
}

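/*
 * GEM free callback: drop any kernel mapping, tear down prime import
 * state, unpin the pages and release the mmap offset.
 */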
void udl_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct udl_gem_object *obj = to_udl_bo(gem_obj);

	if (obj->vmapping)
		udl_gem_vunmap(obj);

	if (gem_obj->import_attach)
		drm_prime_gem_destroy(gem_obj, obj->sg);

	if (obj->pages)
		udl_gem_put_pages(obj);

	drm_gem_free_mmap_offset(gem_obj);
}

/*
 * The dumb interface doesn't work with the GEM straight mmap interface;
 * it expects to do mmap on the drm fd, like normal. Look up the object,
 * pin its pages and return the offset userspace should pass to mmap()
 * on the drm fd.
 */
int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
		 uint32_t handle, uint64_t *offset)
{
	struct udl_gem_object *gobj;
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	gobj = to_udl_bo(obj);

	ret = udl_gem_get_pages(gobj, GFP_KERNEL);
	if (ret)
		goto out;
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&gobj->base.vma_node);

out:
	drm_gem_object_unreference(&gobj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

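/*
 * Wrap an imported sg_table in a udl GEM object and build the page array
 * used by the fault handler and vmap paths.
 */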
static int udl_prime_create(struct drm_device *dev,
			    size_t size,
			    struct sg_table *sg,
			    struct udl_gem_object **obj_p)
{
	struct udl_gem_object *obj;
	int npages;

	npages = size / PAGE_SIZE;

	*obj_p = NULL;
	obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
	if (!obj)
		return -ENOMEM;

	obj->sg = sg;
	obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (obj->pages == NULL) {
		DRM_ERROR("obj pages is NULL %d\n", npages);
		return -ENOMEM;
	}

	drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);

	*obj_p = obj;
	return 0;
}

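/*
 * PRIME import: attach to the dma-buf, map the attachment to get an
 * sg_table and wrap it in a udl GEM object. A reference on the dma-buf
 * is held for the lifetime of the import.
 */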
struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct udl_gem_object *uobj;
	int ret;

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
	if (ret)
		goto fail_unmap;

	uobj->base.import_attach = attach;

	return &uobj->base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}