blob: fcdbe46914f72fe6f412a959e9a6f54e54856f3d [file] [log] [blame]
Inki Dae1c248b72011-10-04 19:19:01 +09001/* exynos_drm_gem.c
2 *
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */
25
David Howells760285e2012-10-02 18:01:07 +010026#include <drm/drmP.h>
Inki Dae1c248b72011-10-04 19:19:01 +090027
Inki Dae2b358922012-03-16 18:47:05 +090028#include <linux/shmem_fs.h>
Inki Dae1c248b72011-10-04 19:19:01 +090029#include <drm/exynos_drm.h>
30
31#include "exynos_drm_drv.h"
32#include "exynos_drm_gem.h"
33#include "exynos_drm_buf.h"
34
35static unsigned int convert_to_vm_err_msg(int msg)
36{
37 unsigned int out_msg;
38
39 switch (msg) {
40 case 0:
41 case -ERESTARTSYS:
42 case -EINTR:
43 out_msg = VM_FAULT_NOPAGE;
44 break;
45
46 case -ENOMEM:
47 out_msg = VM_FAULT_OOM;
48 break;
49
50 default:
51 out_msg = VM_FAULT_SIGBUS;
52 break;
53 }
54
55 return out_msg;
56}
57
Inki Daedcf9af82012-04-03 21:27:58 +090058static int check_gem_flags(unsigned int flags)
Inki Dae2b358922012-03-16 18:47:05 +090059{
Inki Daedcf9af82012-04-03 21:27:58 +090060 if (flags & ~(EXYNOS_BO_MASK)) {
61 DRM_ERROR("invalid flags.\n");
62 return -EINVAL;
63 }
64
65 return 0;
66}
67
Inki Daec01d73fa2012-04-23 19:26:34 +090068static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
69 struct vm_area_struct *vma)
70{
71 DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
72
73 /* non-cachable as default. */
74 if (obj->flags & EXYNOS_BO_CACHABLE)
75 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
76 else if (obj->flags & EXYNOS_BO_WC)
77 vma->vm_page_prot =
78 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
79 else
80 vma->vm_page_prot =
81 pgprot_noncached(vm_get_page_prot(vma->vm_flags));
82}
83
Inki Daedcf9af82012-04-03 21:27:58 +090084static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
85{
86 if (!IS_NONCONTIG_BUFFER(flags)) {
87 if (size >= SZ_1M)
88 return roundup(size, SECTION_SIZE);
89 else if (size >= SZ_64K)
90 return roundup(size, SZ_64K);
91 else
92 goto out;
93 }
94out:
95 return roundup(size, PAGE_SIZE);
Inki Dae2b358922012-03-16 18:47:05 +090096}
97
Inki Daeb2df26c2012-04-23 21:01:28 +090098struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
Inki Dae2b358922012-03-16 18:47:05 +090099 gfp_t gfpmask)
100{
Inki Dae2b358922012-03-16 18:47:05 +0900101 struct page *p, **pages;
102 int i, npages;
103
Inki Dae2b358922012-03-16 18:47:05 +0900104 npages = obj->size >> PAGE_SHIFT;
105
106 pages = drm_malloc_ab(npages, sizeof(struct page *));
107 if (pages == NULL)
108 return ERR_PTR(-ENOMEM);
109
Inki Dae2b358922012-03-16 18:47:05 +0900110 for (i = 0; i < npages; i++) {
Inki Daec62bc752012-06-07 15:59:48 +0900111 p = alloc_page(gfpmask);
Inki Dae2b358922012-03-16 18:47:05 +0900112 if (IS_ERR(p))
113 goto fail;
114 pages[i] = p;
115 }
116
117 return pages;
118
119fail:
Inki Daecb364e342012-07-09 15:42:16 +0900120 while (--i)
Inki Daec62bc752012-06-07 15:59:48 +0900121 __free_page(pages[i]);
Inki Dae2b358922012-03-16 18:47:05 +0900122
123 drm_free_large(pages);
Thomas Meyer4fbd9a42012-08-07 08:57:25 +0200124 return ERR_CAST(p);
Inki Dae2b358922012-03-16 18:47:05 +0900125}
126
127static void exynos_gem_put_pages(struct drm_gem_object *obj,
Inki Daed73c1c92012-07-09 14:35:38 +0900128 struct page **pages)
Inki Dae2b358922012-03-16 18:47:05 +0900129{
Inki Daed73c1c92012-07-09 14:35:38 +0900130 int npages;
Inki Dae2b358922012-03-16 18:47:05 +0900131
132 npages = obj->size >> PAGE_SHIFT;
133
Inki Daed73c1c92012-07-09 14:35:38 +0900134 while (--npages >= 0)
135 __free_page(pages[npages]);
Inki Dae2b358922012-03-16 18:47:05 +0900136
137 drm_free_large(pages);
138}
139
140static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
141 struct vm_area_struct *vma,
142 unsigned long f_vaddr,
143 pgoff_t page_offset)
144{
145 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
146 struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
147 unsigned long pfn;
148
149 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
Inki Dae2b358922012-03-16 18:47:05 +0900150 if (!buf->pages)
151 return -EINTR;
152
Inki Daef6ead8d2012-04-23 19:41:14 +0900153 pfn = page_to_pfn(buf->pages[page_offset++]);
154 } else
155 pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
Inki Dae2b358922012-03-16 18:47:05 +0900156
157 return vm_insert_mixed(vma, f_vaddr, pfn);
158}
159
/*
 * Back a non-contiguous GEM object with individually allocated pages
 * and build an sg table describing them.  Returns 0 on success or a
 * negative error code; on failure no pages or sg table remain attached.
 */
static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	struct scatterlist *sgl;
	struct page **pages;
	unsigned int npages, i = 0;
	int ret;

	/* a second call on the same object would leak the first set. */
	if (buf->pages) {
		DRM_DEBUG_KMS("already allocated.\n");
		return -EINVAL;
	}

	pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
	if (IS_ERR(pages)) {
		DRM_ERROR("failed to get pages.\n");
		return PTR_ERR(pages);
	}

	npages = obj->size >> PAGE_SHIFT;
	buf->page_size = PAGE_SIZE;

	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!buf->sgt) {
		DRM_ERROR("failed to allocate sg table.\n");
		ret = -ENOMEM;
		goto err;
	}

	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
	if (ret < 0) {
		DRM_ERROR("failed to initialize sg table.\n");
		/* NOTE(review): the original sg_alloc_table errno is discarded here. */
		ret = -EFAULT;
		goto err1;
	}

	sgl = buf->sgt->sgl;

	/* set all pages to sg list. */
	while (i < npages) {
		sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
		sg_dma_address(sgl) = page_to_phys(pages[i]);
		i++;
		sgl = sg_next(sgl);
	}

	/* add some codes for UNCACHED type here. TODO */

	buf->pages = pages;
	/* ret is 0 here (successful sg_alloc_table). */
	return ret;
err1:
	kfree(buf->sgt);
	buf->sgt = NULL;
err:
	/* releases every page allocated above plus the array itself. */
	exynos_gem_put_pages(obj, pages);
	return ret;

}
219
/*
 * Tear down the sg table and page array of a non-contiguous GEM
 * object.  Counterpart of exynos_drm_gem_get_pages().
 */
static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;

	/*
	 * if buffer type is EXYNOS_BO_NONCONTIG then release all pages
	 * allocated at gem fault handler.
	 */
	sg_free_table(buf->sgt);
	kfree(buf->sgt);
	buf->sgt = NULL;

	exynos_gem_put_pages(obj, buf->pages);
	buf->pages = NULL;

	/* add some codes for UNCACHED type here. TODO */
}
238
/*
 * Register @obj in the file's idr table and hand the resulting handle
 * back through @handle.  On success the handle owns the reference the
 * allocator took, so the extra reference is dropped here.
 */
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret = drm_gem_handle_create(file_priv, obj, handle);

	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}
Inki Dae1c248b72011-10-04 19:19:01 +0900260
/*
 * Final teardown of an exynos GEM object: release its backing storage
 * (unless imported), its mmap offset, the core GEM state, and the
 * wrapper itself.  Called when the last reference goes away.
 */
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
	struct drm_gem_object *obj;
	struct exynos_drm_gem_buf *buf;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	obj = &exynos_gem_obj->base;
	buf = exynos_gem_obj->buffer;

	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));

	/*
	 * NOTE(review): returning here skips drm_gem_object_release()
	 * and kfree() below — confirm an object without pages can never
	 * reach this point with core GEM state to release.
	 */
	if (!buf->pages)
		return;

	/*
	 * do not release memory region from exporter.
	 *
	 * the region will be released by exporter
	 * once dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		goto out;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
		exynos_drm_gem_put_pages(obj);
	else
		exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);

out:
	exynos_drm_fini_buf(obj->dev, buf);
	exynos_gem_obj->buffer = NULL;

	if (obj->map_list.map)
		drm_gem_free_mmap_offset(obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem_obj);
	/* dead store on a local; kept to leave code byte-identical. */
	exynos_gem_obj = NULL;
}
303
Inki Daeb2df26c2012-04-23 21:01:28 +0900304struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
Joonyoung Shim23648392011-12-13 14:39:13 +0900305 unsigned long size)
306{
307 struct exynos_drm_gem_obj *exynos_gem_obj;
308 struct drm_gem_object *obj;
309 int ret;
310
311 exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
312 if (!exynos_gem_obj) {
313 DRM_ERROR("failed to allocate exynos gem object\n");
314 return NULL;
315 }
316
Inki Dae2b358922012-03-16 18:47:05 +0900317 exynos_gem_obj->size = size;
Joonyoung Shim23648392011-12-13 14:39:13 +0900318 obj = &exynos_gem_obj->base;
319
320 ret = drm_gem_object_init(dev, obj, size);
321 if (ret < 0) {
322 DRM_ERROR("failed to initialize gem object\n");
323 kfree(exynos_gem_obj);
324 return NULL;
325 }
326
327 DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
328
329 return exynos_gem_obj;
Inki Dae1c248b72011-10-04 19:19:01 +0900330}
331
Inki Daef088d5a2011-11-12 14:51:23 +0900332struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
Inki Dae2b358922012-03-16 18:47:05 +0900333 unsigned int flags,
334 unsigned long size)
Inki Daef088d5a2011-11-12 14:51:23 +0900335{
Joonyoung Shim23648392011-12-13 14:39:13 +0900336 struct exynos_drm_gem_obj *exynos_gem_obj;
Inki Dae2b358922012-03-16 18:47:05 +0900337 struct exynos_drm_gem_buf *buf;
338 int ret;
Inki Daef088d5a2011-11-12 14:51:23 +0900339
Inki Daedcf9af82012-04-03 21:27:58 +0900340 if (!size) {
341 DRM_ERROR("invalid size.\n");
342 return ERR_PTR(-EINVAL);
343 }
Inki Daef088d5a2011-11-12 14:51:23 +0900344
Inki Daedcf9af82012-04-03 21:27:58 +0900345 size = roundup_gem_size(size, flags);
346 DRM_DEBUG_KMS("%s\n", __FILE__);
347
348 ret = check_gem_flags(flags);
349 if (ret)
350 return ERR_PTR(ret);
Inki Dae2b358922012-03-16 18:47:05 +0900351
352 buf = exynos_drm_init_buf(dev, size);
353 if (!buf)
Joonyoung Shimee5e7702011-12-13 14:20:23 +0900354 return ERR_PTR(-ENOMEM);
Inki Daef088d5a2011-11-12 14:51:23 +0900355
Joonyoung Shim23648392011-12-13 14:39:13 +0900356 exynos_gem_obj = exynos_drm_gem_init(dev, size);
357 if (!exynos_gem_obj) {
Inki Dae2b358922012-03-16 18:47:05 +0900358 ret = -ENOMEM;
Inki Daedcf9af82012-04-03 21:27:58 +0900359 goto err_fini_buf;
Inki Daef088d5a2011-11-12 14:51:23 +0900360 }
361
Inki Dae2b358922012-03-16 18:47:05 +0900362 exynos_gem_obj->buffer = buf;
363
364 /* set memory type and cache attribute from user side. */
365 exynos_gem_obj->flags = flags;
366
367 /*
368 * allocate all pages as desired size if user wants to allocate
369 * physically non-continuous memory.
370 */
371 if (flags & EXYNOS_BO_NONCONTIG) {
372 ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
373 if (ret < 0) {
374 drm_gem_object_release(&exynos_gem_obj->base);
Inki Daedcf9af82012-04-03 21:27:58 +0900375 goto err_fini_buf;
Inki Dae2b358922012-03-16 18:47:05 +0900376 }
377 } else {
378 ret = exynos_drm_alloc_buf(dev, buf, flags);
379 if (ret < 0) {
380 drm_gem_object_release(&exynos_gem_obj->base);
Inki Daedcf9af82012-04-03 21:27:58 +0900381 goto err_fini_buf;
Inki Dae2b358922012-03-16 18:47:05 +0900382 }
383 }
Inki Daef088d5a2011-11-12 14:51:23 +0900384
385 return exynos_gem_obj;
Inki Daedcf9af82012-04-03 21:27:58 +0900386
387err_fini_buf:
Inki Dae2b358922012-03-16 18:47:05 +0900388 exynos_drm_fini_buf(dev, buf);
389 return ERR_PTR(ret);
Inki Daef088d5a2011-11-12 14:51:23 +0900390}
391
Inki Dae1c248b72011-10-04 19:19:01 +0900392int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
Joonyoung Shimee5e7702011-12-13 14:20:23 +0900393 struct drm_file *file_priv)
Inki Dae1c248b72011-10-04 19:19:01 +0900394{
395 struct drm_exynos_gem_create *args = data;
Joonyoung Shimee5e7702011-12-13 14:20:23 +0900396 struct exynos_drm_gem_obj *exynos_gem_obj;
Joonyoung Shim23648392011-12-13 14:39:13 +0900397 int ret;
Inki Dae1c248b72011-10-04 19:19:01 +0900398
Inki Daef088d5a2011-11-12 14:51:23 +0900399 DRM_DEBUG_KMS("%s\n", __FILE__);
Inki Dae1c248b72011-10-04 19:19:01 +0900400
Inki Dae2b358922012-03-16 18:47:05 +0900401 exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
Inki Dae1c248b72011-10-04 19:19:01 +0900402 if (IS_ERR(exynos_gem_obj))
403 return PTR_ERR(exynos_gem_obj);
404
Joonyoung Shim23648392011-12-13 14:39:13 +0900405 ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
406 &args->handle);
407 if (ret) {
408 exynos_drm_gem_destroy(exynos_gem_obj);
409 return ret;
410 }
411
Inki Dae1c248b72011-10-04 19:19:01 +0900412 return 0;
413}
414
/*
 * Resolve a GEM handle to a pointer at its DMA address.  Only
 * physically contiguous objects are supported.
 *
 * On success the lookup reference on the object is deliberately kept;
 * the caller must balance it with exynos_drm_gem_put_dma_addr().
 */
void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		DRM_DEBUG_KMS("not support NONCONTIG type.\n");
		/* drop the lookup reference before failing. */
		drm_gem_object_unreference_unlocked(obj);

		/* TODO */
		return ERR_PTR(-EINVAL);
	}

	return &exynos_gem_obj->buffer->dma_addr;
}
440
/*
 * Release the reference taken by exynos_drm_gem_get_dma_addr() on the
 * object named by @gem_handle.
 */
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		DRM_DEBUG_KMS("not support NONCONTIG type.\n");
		/* get_dma_addr() kept no reference for this case. */
		drm_gem_object_unreference_unlocked(obj);

		/* TODO */
		return;
	}

	/* drop the reference this function's own lookup just took. */
	drm_gem_object_unreference_unlocked(obj);

	/*
	 * decrease obj->refcount one more time because it was already
	 * increased at exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}
472
/*
 * DRM_IOCTL_EXYNOS_GEM_MAP_OFFSET: thin wrapper that reports the fake
 * mmap offset for a handle via exynos_drm_gem_dumb_map_offset().
 */
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	struct drm_exynos_gem_map_off *args = data;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
			args->handle, (unsigned long)args->offset);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
			&args->offset);
}
491
/*
 * mmap implementation used via the EXYNOS_GEM_MMAP ioctl path: maps
 * the object's backing storage directly into the caller's vma.
 * Non-contiguous buffers are inserted page by page; contiguous buffers
 * are mapped as one pfn range.
 */
static int exynos_drm_gem_mmap_buffer(struct file *filp,
				      struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = filp->private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buffer;
	unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* NOTE(review): VM_RESERVED is a pre-3.7 kernel flag — confirm target tree. */
	vma->vm_flags |= (VM_IO | VM_RESERVED);

	update_vm_cache_attr(exynos_gem_obj, vma);

	vm_size = usize = vma->vm_end - vma->vm_start;

	/*
	 * a buffer contains information to physically continuous memory
	 * allocated by user request or at framebuffer creation.
	 */
	buffer = exynos_gem_obj->buffer;

	/* check if user-requested size is valid. */
	if (vm_size > buffer->size)
		return -EINVAL;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		int i = 0;

		if (!buffer->pages)
			return -EINVAL;

		vma->vm_flags |= VM_MIXEDMAP;

		/* insert each backing page at consecutive user addresses. */
		do {
			ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
			if (ret) {
				DRM_ERROR("failed to remap user space.\n");
				return ret;
			}

			uaddr += PAGE_SIZE;
			usize -= PAGE_SIZE;
		} while (usize > 0);
	} else {
		/*
		 * get page frame number to physical memory to be mapped
		 * to user space.
		 */
		pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
								PAGE_SHIFT;

		DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);

		if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
					vma->vm_page_prot)) {
			DRM_ERROR("failed to remap pfn range.\n");
			return -EAGAIN;
		}
	}

	return 0;
}
556
/* fops installed on obj->filp by the EXYNOS_GEM_MMAP ioctl below. */
static const struct file_operations exynos_drm_gem_fops = {
	.mmap = exynos_drm_gem_mmap_buffer,
};
560
561int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
Joonyoung Shimee5e7702011-12-13 14:20:23 +0900562 struct drm_file *file_priv)
Inki Dae1c248b72011-10-04 19:19:01 +0900563{
564 struct drm_exynos_gem_mmap *args = data;
565 struct drm_gem_object *obj;
566 unsigned int addr;
567
568 DRM_DEBUG_KMS("%s\n", __FILE__);
569
570 if (!(dev->driver->driver_features & DRIVER_GEM)) {
571 DRM_ERROR("does not support GEM.\n");
572 return -ENODEV;
573 }
574
575 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
576 if (!obj) {
577 DRM_ERROR("failed to lookup gem object.\n");
578 return -EINVAL;
579 }
580
581 obj->filp->f_op = &exynos_drm_gem_fops;
582 obj->filp->private_data = obj;
583
Linus Torvalds6be5ceb2012-04-20 17:13:58 -0700584 addr = vm_mmap(obj->filp, 0, args->size,
Inki Dae1c248b72011-10-04 19:19:01 +0900585 PROT_READ | PROT_WRITE, MAP_SHARED, 0);
Inki Dae1c248b72011-10-04 19:19:01 +0900586
587 drm_gem_object_unreference_unlocked(obj);
588
589 if (IS_ERR((void *)addr))
590 return PTR_ERR((void *)addr);
591
592 args->mapped = addr;
593
594 DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);
595
596 return 0;
597}
598
Inki Dae40cd7e02012-05-04 15:51:17 +0900599int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
600 struct drm_file *file_priv)
601{ struct exynos_drm_gem_obj *exynos_gem_obj;
602 struct drm_exynos_gem_info *args = data;
603 struct drm_gem_object *obj;
604
605 mutex_lock(&dev->struct_mutex);
606
607 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
608 if (!obj) {
609 DRM_ERROR("failed to lookup gem object.\n");
610 mutex_unlock(&dev->struct_mutex);
611 return -EINVAL;
612 }
613
614 exynos_gem_obj = to_exynos_gem_obj(obj);
615
616 args->flags = exynos_gem_obj->flags;
617 args->size = exynos_gem_obj->size;
618
619 drm_gem_object_unreference(obj);
620 mutex_unlock(&dev->struct_mutex);
621
622 return 0;
623}
624
/* Required core-GEM hook; no per-object setup is needed here. */
int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);
	return 0;
}
631
Joonyoung Shimee5e7702011-12-13 14:20:23 +0900632void exynos_drm_gem_free_object(struct drm_gem_object *obj)
Inki Dae1c248b72011-10-04 19:19:01 +0900633{
Inki Daeb2df26c2012-04-23 21:01:28 +0900634 struct exynos_drm_gem_obj *exynos_gem_obj;
635 struct exynos_drm_gem_buf *buf;
636
Inki Dae1c248b72011-10-04 19:19:01 +0900637 DRM_DEBUG_KMS("%s\n", __FILE__);
638
Inki Daeb2df26c2012-04-23 21:01:28 +0900639 exynos_gem_obj = to_exynos_gem_obj(obj);
640 buf = exynos_gem_obj->buffer;
641
642 if (obj->import_attach)
643 drm_prime_gem_destroy(obj, buf->sgt);
644
Joonyoung Shim23648392011-12-13 14:39:13 +0900645 exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
Inki Dae1c248b72011-10-04 19:19:01 +0900646}
647
648int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
Joonyoung Shimee5e7702011-12-13 14:20:23 +0900649 struct drm_device *dev,
650 struct drm_mode_create_dumb *args)
Inki Dae1c248b72011-10-04 19:19:01 +0900651{
652 struct exynos_drm_gem_obj *exynos_gem_obj;
Joonyoung Shim23648392011-12-13 14:39:13 +0900653 int ret;
Inki Dae1c248b72011-10-04 19:19:01 +0900654
655 DRM_DEBUG_KMS("%s\n", __FILE__);
656
657 /*
658 * alocate memory to be used for framebuffer.
659 * - this callback would be called by user application
660 * with DRM_IOCTL_MODE_CREATE_DUMB command.
661 */
662
Cooper Yuan3fd6b692012-06-29 11:49:45 +0900663 args->pitch = args->width * ((args->bpp + 7) / 8);
Inki Dae7da59072012-08-17 15:24:03 +0900664 args->size = args->pitch * args->height;
Inki Dae1c248b72011-10-04 19:19:01 +0900665
Inki Dae2b358922012-03-16 18:47:05 +0900666 exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
Inki Dae1c248b72011-10-04 19:19:01 +0900667 if (IS_ERR(exynos_gem_obj))
668 return PTR_ERR(exynos_gem_obj);
669
Joonyoung Shim23648392011-12-13 14:39:13 +0900670 ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
671 &args->handle);
672 if (ret) {
673 exynos_drm_gem_destroy(exynos_gem_obj);
674 return ret;
675 }
676
Inki Dae1c248b72011-10-04 19:19:01 +0900677 return 0;
678}
679
/*
 * DRM_IOCTL_MODE_MAP_DUMB callback: create (if needed) and report the
 * fake mmap offset of the object named by @handle.
 */
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	mutex_lock(&dev->struct_mutex);

	/*
	 * get offset of memory allocated for drm framebuffer.
	 * - this callback would be called by user application
	 *	with DRM_IOCTL_MODE_MAP_DUMB command.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	/* lazily create the mmap offset on first request. */
	if (!obj->map_list.map) {
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
719
/*
 * DRM_IOCTL_MODE_DESTROY_DUMB callback: drop the handle; when both
 * obj->handle_count and obj->refcount reach zero the core invokes
 * exynos_drm_gem_free_object() to release the resources.
 */
int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
				struct drm_device *dev,
				unsigned int handle)
{
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	ret = drm_gem_handle_delete(file_priv, handle);
	if (ret < 0) {
		DRM_ERROR("failed to delete drm_gem_handle.\n");
		return ret;
	}

	return 0;
}
741
Inki Dae1c248b72011-10-04 19:19:01 +0900742int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
743{
744 struct drm_gem_object *obj = vma->vm_private_data;
Inki Dae1c248b72011-10-04 19:19:01 +0900745 struct drm_device *dev = obj->dev;
Inki Dae2b358922012-03-16 18:47:05 +0900746 unsigned long f_vaddr;
Inki Dae1c248b72011-10-04 19:19:01 +0900747 pgoff_t page_offset;
748 int ret;
749
750 page_offset = ((unsigned long)vmf->virtual_address -
751 vma->vm_start) >> PAGE_SHIFT;
Inki Dae2b358922012-03-16 18:47:05 +0900752 f_vaddr = (unsigned long)vmf->virtual_address;
Inki Dae1c248b72011-10-04 19:19:01 +0900753
754 mutex_lock(&dev->struct_mutex);
755
Inki Dae2b358922012-03-16 18:47:05 +0900756 ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
757 if (ret < 0)
758 DRM_ERROR("failed to map pages.\n");
Inki Dae1c248b72011-10-04 19:19:01 +0900759
760 mutex_unlock(&dev->struct_mutex);
761
762 return convert_to_vm_err_msg(ret);
763}
764
/*
 * Standard GEM mmap entry point: let the DRM core set up the vma, then
 * validate the object's flags and apply its cache attributes.  Pages
 * are filled in lazily by exynos_drm_gem_fault().
 */
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
	exynos_gem_obj = to_exynos_gem_obj(obj);

	/* undo the core's vma setup if the stored flags are invalid. */
	ret = check_gem_flags(exynos_gem_obj->flags);
	if (ret) {
		drm_gem_vm_close(vma);
		drm_gem_free_mmap_offset(obj);
		return ret;
	}

	/* faults deliver mixed pages, not raw pfns. */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	update_vm_cache_attr(exynos_gem_obj, vma);

	return ret;
}