/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
#include "exynos_drm_iommu.h"

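/*
 * Choose the page protection for a userspace mapping according to the
 * buffer's EXYNOS_BO_* cache attribute: cached, write-combined, or
 * (by default) non-cached.
 */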
static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
					struct vm_area_struct *vma)
{
	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

	/* non-cacheable by default. */
	if (obj->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (obj->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}
38
Inki Daedcf9af82012-04-03 21:27:58 +090039static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
40{
Inki Dae0519f9a2012-10-20 07:53:42 -070041 /* TODO */
42
Inki Daedcf9af82012-04-03 21:27:58 +090043 return roundup(size, PAGE_SIZE);
Inki Dae2b358922012-03-16 18:47:05 +090044}
45
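/*
 * Register the object with the file's handle table and return the new
 * userspace-visible handle through @handle.
 */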
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id from the idr table where the obj is registered
	 * and let the handle hold the id visible to userspace.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

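/*
 * Release the backing storage and the GEM object itself. For imported
 * (dma-buf) buffers only the local bookkeeping is torn down; the exporter
 * frees the memory once the dma-buf refcount drops to zero.
 */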
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
	struct drm_gem_object *obj;
	struct exynos_drm_gem_buf *buf;

	obj = &exynos_gem_obj->base;
	buf = exynos_gem_obj->buffer;

	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

	/*
	 * do not release the memory region from the exporter.
	 *
	 * the region will be released by the exporter
	 * once dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		goto out;

	exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);

out:
	exynos_drm_fini_buf(obj->dev, buf);
	exynos_gem_obj->buffer = NULL;

	drm_gem_free_mmap_offset(obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem_obj);
	exynos_gem_obj = NULL;
}

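/*
 * Look up the object behind @gem_handle and return the size of its
 * backing buffer, or 0 if the handle is invalid. The lookup reference is
 * dropped before returning, so the caller is expected to keep the object
 * alive through its own handle.
 */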
unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
						unsigned int gem_handle,
						struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return 0;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	drm_gem_object_unreference_unlocked(obj);

	return exynos_gem_obj->buffer->size;
}

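/*
 * Allocate the driver-private GEM wrapper and initialize the embedded
 * struct drm_gem_object for @size bytes. The backing buffer is attached
 * by the caller afterwards.
 */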
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
	if (!exynos_gem_obj)
		return ERR_PTR(-ENOMEM);

	exynos_gem_obj->size = size;
	obj = &exynos_gem_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem_obj);
		return ERR_PTR(ret);
	}

	DRM_DEBUG_KMS("created file object = %p\n", obj->filp);

	return exynos_gem_obj;
}

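/*
 * Allocate a GEM object together with its backing storage. @flags selects
 * contiguous vs. non-contiguous memory and the cache attribute; @size is
 * rounded up to a page multiple before allocation.
 */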
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	size = roundup_gem_size(size, flags);

	buf = exynos_drm_init_buf(dev, size);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	exynos_gem_obj = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem_obj)) {
		ret = PTR_ERR(exynos_gem_obj);
		goto err_fini_buf;
	}

	exynos_gem_obj->buffer = buf;

	/* set memory type and cache attribute from user side. */
	exynos_gem_obj->flags = flags;

	ret = exynos_drm_alloc_buf(dev, buf, flags);
	if (ret < 0)
		goto err_gem_fini;

	return exynos_gem_obj;

err_gem_fini:
	drm_gem_object_release(&exynos_gem_obj->base);
	kfree(exynos_gem_obj);
err_fini_buf:
	exynos_drm_fini_buf(dev, buf);
	return ERR_PTR(ret);
}

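/*
 * ioctl handler for the GEM create request (struct drm_exynos_gem_create):
 * allocate a buffer with the user-supplied size and flags and return a
 * handle to it through @args.
 */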
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}

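/*
 * Resolve a GEM handle to the DMA address of its buffer. The object
 * reference taken by the lookup is intentionally kept until the matching
 * exynos_drm_gem_put_dma_addr() call, which drops it twice.
 */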
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	return &exynos_gem_obj->buffer->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * decrease obj->refcount one more time because we have already
	 * increased it at exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}

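/*
 * Map the buffer's pages into the vma prepared by exynos_drm_gem_mmap().
 * The whole buffer is mapped from offset 0, so the requested vma size
 * must not exceed the buffer size.
 */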
int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem_obj->base.dev;
	struct exynos_drm_gem_buf *buffer;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/*
	 * the buffer describes the physically contiguous memory
	 * allocated by user request or at framebuffer creation.
	 */
	buffer = exynos_gem_obj->buffer;

	/* check if user-requested size is valid. */
	if (vm_size > buffer->size)
		return -EINVAL;

	ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
				buffer->dma_addr, buffer->size,
				&buffer->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

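/*
 * ioctl handler for the GEM info request (struct drm_exynos_gem_info):
 * report the flags and size of the object behind @args->handle back to
 * userspace.
 */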
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	args->flags = exynos_gem_obj->flags;
	args->size = exynos_gem_obj->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

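/*
 * Take a private copy of a vm_area_struct: the original vma's open() hook
 * is invoked and its backing file pinned. exynos_gem_put_vma() releases
 * both and frees the copy.
 */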
struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *vma_copy;

	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
	if (!vma_copy)
		return NULL;

	if (vma->vm_ops && vma->vm_ops->open)
		vma->vm_ops->open(vma);

	if (vma->vm_file)
		get_file(vma->vm_file);

	memcpy(vma_copy, vma, sizeof(*vma));

	vma_copy->vm_mm = NULL;
	vma_copy->vm_next = NULL;
	vma_copy->vm_prev = NULL;

	return vma_copy;
}

void exynos_gem_put_vma(struct vm_area_struct *vma)
{
	if (!vma)
		return;

	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);

	if (vma->vm_file)
		fput(vma->vm_file);

	kfree(vma);
}

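/*
 * Pin the user pages backing [start, start + npages * PAGE_SIZE). For
 * VM_PFNMAP areas the pfns are resolved directly; otherwise
 * get_user_pages() takes a reference on each page, released later by
 * exynos_gem_put_pages_to_userptr().
 */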
int exynos_gem_get_pages_from_userptr(unsigned long start,
					unsigned int npages,
					struct page **pages,
					struct vm_area_struct *vma)
{
	int get_npages;

	/* the memory region was mapped with VM_PFNMAP. */
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);
			if (ret)
				return ret;

			pages[i] = pfn_to_page(pfn);
		}

		if (i != npages) {
			DRM_ERROR("failed to get user_pages.\n");
			return -EINVAL;
		}

		return 0;
	}

	get_npages = get_user_pages(current, current->mm, start,
					npages, 1, 1, pages, NULL);
	get_npages = max(get_npages, 0);
	if (get_npages != npages) {
		DRM_ERROR("failed to get user_pages.\n");
		while (get_npages)
			put_page(pages[--get_npages]);
		return -EFAULT;
	}

	return 0;
}

void exynos_gem_put_pages_to_userptr(struct page **pages,
					unsigned int npages,
					struct vm_area_struct *vma)
{
	if (!vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < npages; i++) {
			set_page_dirty_lock(pages[i]);

			/*
			 * undo the reference we took when populating
			 * the table.
			 */
			put_page(pages[i]);
		}
	}
}

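/*
 * DMA-map a scatter-gather table on the DRM device under struct_mutex;
 * exynos_gem_unmap_sgt_from_dma() is the matching unmap.
 */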
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	int nents;

	mutex_lock(&drm_dev->struct_mutex);

	nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
	if (!nents) {
		DRM_ERROR("failed to map sgl with dma.\n");
		mutex_unlock(&drm_dev->struct_mutex);
		return nents;
	}

	mutex_unlock(&drm_dev->struct_mutex);
	return 0;
}

void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
					struct sg_table *sgt,
					enum dma_data_direction dir)
{
	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}

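/*
 * GEM free callback invoked by the DRM core when the last reference to
 * the object is dropped; it delegates to exynos_drm_gem_destroy().
 */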
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}

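/*
 * Dumb-buffer allocation for the generic KMS framebuffer path: compute
 * pitch and size from width/height/bpp, pick non-contiguous memory when
 * an IOMMU is available (contiguous otherwise), and return a handle.
 */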
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	/*
	 * allocate memory to be used for framebuffer.
	 * - this callback is called by userspace through the
	 *	DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev)) {
		exynos_gem_obj = exynos_drm_gem_create(dev,
			EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
			args->size);
	} else {
		exynos_gem_obj = exynos_drm_gem_create(dev,
			EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
			args->size);
	}

	if (IS_ERR(exynos_gem_obj)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem_obj);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}

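/*
 * Report the fake mmap offset of a dumb buffer so userspace can mmap()
 * it through the DRM device node.
 */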
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	/*
	 * get offset of memory allocated for drm framebuffer.
	 * - this callback is called by userspace through the
	 *	DRM_IOCTL_MODE_MAP_DUMB command.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

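/*
 * Page-fault handler for mappings created through exynos_drm_gem_mmap():
 * insert the page that backs the faulting offset into the vma.
 */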
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	unsigned long pfn;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (page_offset >= (buf->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		ret = -EINVAL;
		goto out;
	}

	pfn = page_to_pfn(buf->pages[page_offset]);
	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out:
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

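/*
 * mmap() entry point for GEM objects: let drm_gem_mmap() set up the vma,
 * apply the buffer's cache attribute, then map the backing pages.
 */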
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
	exynos_gem_obj = to_exynos_gem_obj(obj);

	update_vm_cache_attr(exynos_gem_obj, vma);

	ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);
	drm_gem_free_mmap_offset(obj);

	return ret;
}