/*
 * Copyright (C) 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */

#include <drm/drmP.h>
#include "udl_drv.h"
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

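/*
 * Allocate a udl GEM object and initialize its shmem-backed base GEM
 * object. New objects default to cacheable CPU mappings.
 */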
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
					    size_t size)
{
	struct udl_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}

	obj->flags = UDL_BO_CACHEABLE;
	return obj;
}

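/*
 * Round the requested size up to a whole number of pages, allocate a
 * GEM object of that size and return a userspace handle to it. The
 * handle holds the only reference once the local one is dropped.
 */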
static int
udl_gem_create(struct drm_file *file,
	       struct drm_device *dev,
	       uint64_t size,
	       uint32_t *handle_p)
{
	struct udl_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);

	obj = udl_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		kfree(obj);
		return ret;
	}

	drm_gem_object_unreference(&obj->base);
	*handle_p = handle;
	return 0;
}

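/*
 * Choose the page protection for a userspace mapping from the object's
 * caching flags: cacheable, write-combined or uncached.
 */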
static void update_vm_cache_attr(struct udl_gem_object *obj,
				 struct vm_area_struct *vma)
{
	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

	/* non-cacheable as default. */
	if (obj->flags & UDL_BO_CACHEABLE) {
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	} else if (obj->flags & UDL_BO_WC) {
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else {
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	}
}

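/*
 * Dumb-buffer create ioctl: derive the pitch and total size from the
 * requested width, height and bpp, then create a backing GEM object.
 */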
int udl_dumb_create(struct drm_file *file,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	args->size = args->pitch * args->height;
	return udl_gem_create(file, dev,
			      args->size, &args->handle);
}

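/*
 * mmap entry point: let the GEM core set up the VMA, then switch it to
 * VM_MIXEDMAP since the fault handler inserts individual pages, and
 * apply the object's caching attributes.
 */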
int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	update_vm_cache_attr(to_udl_bo(vma->vm_private_data), vma);

	return ret;
}

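/*
 * Page-fault handler: insert the backing page for the faulting address
 * into the VMA, translating vm_insert_page() errors to VM_FAULT_* codes.
 */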
int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
	struct page *page;
	unsigned int page_offset;
	int ret = 0;

	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	if (!obj->pages)
		return VM_FAULT_SIGBUS;

	page = obj->pages[page_offset];
	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

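/* Pin the object's shmem backing pages and cache the page array. */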
static int udl_gem_get_pages(struct udl_gem_object *obj)
{
	struct page **pages;

	if (obj->pages)
		return 0;

	pages = drm_gem_get_pages(&obj->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	obj->pages = pages;

	return 0;
}

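/*
 * Release the backing pages. For imported dma-bufs only the page array
 * is freed; the exporter still owns the pages themselves.
 */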
static void udl_gem_put_pages(struct udl_gem_object *obj)
{
	if (obj->base.import_attach) {
		drm_free_large(obj->pages);
		obj->pages = NULL;
		return;
	}

	drm_gem_put_pages(&obj->base, obj->pages, false, false);
	obj->pages = NULL;
}

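/*
 * Map the object into kernel address space. Imported dma-bufs are
 * mapped through the exporter via dma_buf_vmap(); native objects pin
 * and vmap their own backing pages.
 */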
int udl_gem_vmap(struct udl_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int ret;

	if (obj->base.import_attach) {
		obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
		if (!obj->vmapping)
			return -ENOMEM;
		return 0;
	}

	ret = udl_gem_get_pages(obj);
	if (ret)
		return ret;

	obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
	if (!obj->vmapping)
		return -ENOMEM;
	return 0;
}

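/* Undo udl_gem_vmap() and drop the backing pages. */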
void udl_gem_vunmap(struct udl_gem_object *obj)
{
	if (obj->base.import_attach) {
		dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
		return;
	}

	if (obj->vmapping)
		vunmap(obj->vmapping);

	udl_gem_put_pages(obj);
}

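/*
 * GEM free callback: tear down the kernel mapping, any PRIME import
 * state, the backing pages and the mmap offset.
 */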
void udl_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct udl_gem_object *obj = to_udl_bo(gem_obj);

	if (obj->vmapping)
		udl_gem_vunmap(obj);

	if (gem_obj->import_attach) {
		drm_prime_gem_destroy(gem_obj, obj->sg);
		put_device(gem_obj->dev->dev);
	}

	if (obj->pages)
		udl_gem_put_pages(obj);

	drm_gem_free_mmap_offset(gem_obj);
}

/*
 * The dumb interface doesn't work with the GEM straight mmap
 * interface; it expects to do mmap on the drm fd, like normal.
 */
int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
		 uint32_t handle, uint64_t *offset)
{
	struct udl_gem_object *gobj;
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	gobj = to_udl_bo(obj);

	ret = udl_gem_get_pages(gobj);
	if (ret)
		goto out;
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&gobj->base.vma_node);

out:
	drm_gem_object_unreference(&gobj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

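/*
 * Wrap an imported sg_table in a new udl GEM object and build a page
 * array from it for CPU access.
 */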
static int udl_prime_create(struct drm_device *dev,
			    size_t size,
			    struct sg_table *sg,
			    struct udl_gem_object **obj_p)
{
	struct udl_gem_object *obj;
	int npages;

	npages = size / PAGE_SIZE;

	*obj_p = NULL;
	obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
	if (!obj)
		return -ENOMEM;

	obj->sg = sg;
	obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (obj->pages == NULL) {
		DRM_ERROR("obj pages is NULL %d\n", npages);
		/* drop the object reference so it isn't leaked on error */
		drm_gem_object_unreference_unlocked(&obj->base);
		return -ENOMEM;
	}

	drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);

	*obj_p = obj;
	return 0;
}

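/*
 * PRIME import: attach to the dma-buf, map it and wrap the resulting
 * sg_table in a udl GEM object. Imported buffers get write-combined
 * CPU mappings; the device and dma-buf references taken here are
 * dropped when the object is freed.
 */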
struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct udl_gem_object *uobj;
	int ret;

	/* need to attach */
	get_device(dev->dev);
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach)) {
		put_device(dev->dev);
		return ERR_CAST(attach);
	}

	get_dma_buf(dma_buf);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
	if (ret)
		goto fail_unmap;

	uobj->base.import_attach = attach;
	uobj->flags = UDL_BO_WC;

	return &uobj->base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);
	put_device(dev->dev);
	return ERR_PTR(ret);
}