/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"

static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj)
{
	struct drm_device *dev = obj->base.dev;
	enum dma_attr attr;
	unsigned int nr_pages;

	if (obj->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	init_dma_attrs(&obj->dma_attrs);

	/*
	 * If EXYNOS_BO_CONTIG is requested, allocate a fully physically
	 * contiguous memory region; otherwise allocate memory that is as
	 * physically contiguous as possible.
	 */
	if (!(obj->flags & EXYNOS_BO_NONCONTIG))
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &obj->dma_attrs);

	/*
	 * Use a write-combined mapping for EXYNOS_BO_WC or
	 * EXYNOS_BO_NONCACHABLE buffers, and a cacheable mapping otherwise.
	 */
	if (obj->flags & EXYNOS_BO_WC || !(obj->flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	dma_set_attr(attr, &obj->dma_attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &obj->dma_attrs);
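
	/*
	 * Worked example (editorial note, not driver logic): a buffer
	 * created with EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC ends up with
	 * DMA_ATTR_WRITE_COMBINE and DMA_ATTR_NO_KERNEL_MAPPING set and
	 * DMA_ATTR_FORCE_CONTIGUOUS clear, so the DMA API may back it
	 * with scattered pages and will not create a kernel mapping.
	 */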

	nr_pages = obj->size >> PAGE_SHIFT;

	if (!is_drm_iommu_supported(dev)) {
		dma_addr_t start_addr;
		unsigned int i = 0;

		obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
		if (!obj->pages) {
			DRM_ERROR("failed to allocate pages.\n");
			return -ENOMEM;
		}

		obj->cookie = dma_alloc_attrs(dev->dev,
					obj->size,
					&obj->dma_addr, GFP_KERNEL,
					&obj->dma_attrs);
		if (!obj->cookie) {
			DRM_ERROR("failed to allocate buffer.\n");
			drm_free_large(obj->pages);
			return -ENOMEM;
		}

		start_addr = obj->dma_addr;
		while (i < nr_pages) {
			obj->pages[i] = phys_to_page(start_addr);
			start_addr += PAGE_SIZE;
			i++;
		}
	} else {
		obj->pages = dma_alloc_attrs(dev->dev, obj->size,
					&obj->dma_addr, GFP_KERNEL,
					&obj->dma_attrs);
		if (!obj->pages) {
			DRM_ERROR("failed to allocate buffer.\n");
			return -ENOMEM;
		}
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)obj->dma_addr,
			obj->size);

	return 0;
}

static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj)
{
	struct drm_device *dev = obj->base.dev;

	if (!obj->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)obj->dma_addr, obj->size);

	if (!is_drm_iommu_supported(dev)) {
		dma_free_attrs(dev->dev, obj->size, obj->cookie,
				(dma_addr_t)obj->dma_addr, &obj->dma_attrs);
		drm_free_large(obj->pages);
	} else
		dma_free_attrs(dev->dev, obj->size, obj->pages,
				(dma_addr_t)obj->dma_addr, &obj->dma_attrs);

	obj->dma_addr = (dma_addr_t)NULL;
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * Allocate an id from the idr table, where the obj is registered,
	 * and store it in *handle; this id is what userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
	struct drm_gem_object *obj = &exynos_gem_obj->base;

	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

	/*
	 * do not release the memory region here if it came from an
	 * exporter: the exporter releases the region once the dmabuf's
	 * refcount becomes 0.
	 */
	if (obj->import_attach)
		goto out;

	exynos_drm_free_buf(exynos_gem_obj);

out:
	drm_gem_free_mmap_offset(obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem_obj);
}

unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
						unsigned int gem_handle,
						struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return 0;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	drm_gem_object_unreference_unlocked(obj);

	return exynos_gem_obj->size;
}

struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
	if (!exynos_gem_obj)
		return ERR_PTR(-ENOMEM);

	exynos_gem_obj->size = size;
	obj = &exynos_gem_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem_obj);
		return ERR_PTR(ret);
	}

	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);

	return exynos_gem_obj;
}

struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem_obj = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem_obj))
		return exynos_gem_obj;

	/* set memory type and cache attribute from user side. */
	exynos_gem_obj->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem_obj);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem_obj->base);
		kfree(exynos_gem_obj);
		return ERR_PTR(ret);
	}

	return exynos_gem_obj;
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}
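
/*
 * Example (illustrative sketch, not part of this file): from userspace,
 * the ioctl above is typically reached through libdrm roughly like this;
 * "fd" is an open DRM device node and the size/flags are hypothetical.
 *
 *	struct drm_exynos_gem_create req = {
 *		.size = 65536,
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req))
 *		return -errno;
 *	(on success, req.handle names the new GEM object)
 */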

dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	return &exynos_gem_obj->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * decrease obj->refcount one more time because we have already
	 * increased it at exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}
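
/*
 * Example (illustrative sketch): in-kernel users such as the G2D driver
 * pair the two helpers above around device programming; the variable
 * names here are hypothetical.
 *
 *	dma_addr_t *addr;
 *
 *	addr = exynos_drm_gem_get_dma_addr(drm_dev, handle, file);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 *	... point the device at *addr and run the job ...
 *
 *	exynos_drm_gem_put_dma_addr(drm_dev, handle, file);
 */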

int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
				struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem_obj->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem_obj->size)
		return -EINVAL;

	ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem_obj->pages,
				exynos_gem_obj->dma_addr, exynos_gem_obj->size,
				&exynos_gem_obj->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	args->flags = exynos_gem_obj->flags;
	args->size = exynos_gem_obj->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
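
/*
 * Example (illustrative sketch): userspace can query the flags and size
 * recorded for a handle via DRM_IOCTL_EXYNOS_GEM_GET; "fd" and "handle"
 * are hypothetical.
 *
 *	struct drm_exynos_gem_info info = { .handle = handle };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_GET, &info))
 *		return -errno;
 *	(info.flags and info.size then describe the buffer)
 */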

struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *vma_copy;

	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
	if (!vma_copy)
		return NULL;

	if (vma->vm_ops && vma->vm_ops->open)
		vma->vm_ops->open(vma);

	if (vma->vm_file)
		get_file(vma->vm_file);

	memcpy(vma_copy, vma, sizeof(*vma));

	vma_copy->vm_mm = NULL;
	vma_copy->vm_next = NULL;
	vma_copy->vm_prev = NULL;

	return vma_copy;
}

void exynos_gem_put_vma(struct vm_area_struct *vma)
{
	if (!vma)
		return;

	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);

	if (vma->vm_file)
		fput(vma->vm_file);

	kfree(vma);
}

int exynos_gem_get_pages_from_userptr(unsigned long start,
						unsigned int npages,
						struct page **pages,
						struct vm_area_struct *vma)
{
	int get_npages;

	/* the memory region was mmapped with VM_PFNMAP. */
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);
			if (ret)
				return ret;

			pages[i] = pfn_to_page(pfn);
		}

		if (i != npages) {
			DRM_ERROR("failed to get user_pages.\n");
			return -EINVAL;
		}

		return 0;
	}

	get_npages = get_user_pages(current, current->mm, start,
					npages, 1, 1, pages, NULL);
	get_npages = max(get_npages, 0);
	if (get_npages != npages) {
		DRM_ERROR("failed to get user_pages.\n");
		while (get_npages)
			put_page(pages[--get_npages]);
		return -EFAULT;
	}

	return 0;
}

void exynos_gem_put_pages_to_userptr(struct page **pages,
					unsigned int npages,
					struct vm_area_struct *vma)
{
	if (!vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < npages; i++) {
			set_page_dirty_lock(pages[i]);

			/*
			 * undo the reference we took when populating
			 * the table.
			 */
			put_page(pages[i]);
		}
	}
}
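
/*
 * Example (illustrative sketch): in-kernel callers such as the G2D driver
 * bracket the two userptr helpers above, resolving the VMA under mmap_sem
 * and keeping a copy of it (exynos_gem_get_vma()) so it remains usable at
 * put time; names are hypothetical and locking/error paths are elided.
 *
 *	vma = exynos_gem_get_vma(find_vma(current->mm, userptr));
 *	ret = exynos_gem_get_pages_from_userptr(userptr, npages, pages, vma);
 *	if (ret < 0)
 *		return ret;
 *
 *	... the device works on the pinned pages ...
 *
 *	exynos_gem_put_pages_to_userptr(pages, npages, vma);
 *	exynos_gem_put_vma(vma);
 */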

int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	int nents;

	mutex_lock(&drm_dev->struct_mutex);

	nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
	if (!nents) {
		DRM_ERROR("failed to map sgl with dma.\n");
		mutex_unlock(&drm_dev->struct_mutex);
		return -EIO;
	}

	mutex_unlock(&drm_dev->struct_mutex);
	return 0;
}

void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}
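
/*
 * Example (illustrative sketch): a typical caller maps a scatter-gather
 * table for device access and unmaps it with the same direction once the
 * transfer completes; "sgt" is a hypothetical, already-built sg_table.
 *
 *	ret = exynos_gem_map_sgt_with_dma(drm_dev, sgt, DMA_BIDIRECTIONAL);
 *	if (ret < 0)
 *		return ret;
 *
 *	... device DMA to/from the buffer ...
 *
 *	exynos_gem_unmap_sgt_from_dma(drm_dev, sgt, DMA_BIDIRECTIONAL);
 */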

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	/*
	 * allocate memory to be used for the framebuffer.
	 * - this callback is called by a user application
	 *	with the DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev)) {
		exynos_gem_obj = exynos_drm_gem_create(dev,
			EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
			args->size);
	} else {
		exynos_gem_obj = exynos_drm_gem_create(dev,
			EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
			args->size);
	}

	if (IS_ERR(exynos_gem_obj)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem_obj);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	/*
	 * get the offset of the memory allocated for the drm framebuffer.
	 * - this callback is called by a user application
	 *	with the DRM_IOCTL_MODE_MAP_DUMB command.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
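
/*
 * Example (illustrative sketch): the usual userspace dumb-buffer flow
 * drives the two callbacks above and then mmaps the returned fake offset;
 * "fd" and the mode geometry are hypothetical.
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map;
 *	void *ptr;
 *
 *	drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map = (struct drm_mode_map_dumb) { .handle = create.handle };
 *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, fd, map.offset);
 */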

int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	unsigned long pfn;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (page_offset >= (exynos_gem_obj->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		ret = -EINVAL;
		goto out;
	}

	pfn = page_to_pfn(exynos_gem_obj->pages[page_offset]);
	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out:
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
	exynos_gem_obj = to_exynos_gem_obj(obj);

	DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem_obj->flags);

	/* non-cachable by default. */
	if (exynos_gem_obj->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem_obj->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);
	drm_gem_free_mmap_offset(obj);

	return ret;
}

/* low-level interface prime helpers */
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	int npages;

	npages = exynos_gem_obj->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(exynos_gem_obj->pages, npages);
}

struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int npages;
	int ret;

	exynos_gem_obj = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem_obj)) {
		ret = PTR_ERR(exynos_gem_obj);
		return ERR_PTR(ret);
	}

	exynos_gem_obj->dma_addr = sg_dma_address(sgt->sgl);

	npages = exynos_gem_obj->size >> PAGE_SHIFT;
	exynos_gem_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!exynos_gem_obj->pages) {
		ret = -ENOMEM;
		goto err;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem_obj->pages, NULL,
					       npages);
	if (ret < 0)
		goto err_free_large;

	if (sgt->nents == 1) {
		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * this case could be CONTIG or NONCONTIG type but for now
		 * sets NONCONTIG.
		 * TODO. we have to find a way that the exporter can notify
		 * the type of its own buffer to the importer.
		 */
		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
	}

	return &exynos_gem_obj->base;

err_free_large:
	drm_free_large(exynos_gem_obj->pages);
err:
	drm_gem_object_release(&exynos_gem_obj->base);
	kfree(exynos_gem_obj);
	return ERR_PTR(ret);
}
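
/*
 * Example (illustrative sketch): the import path above is reached when
 * userspace turns a dma-buf fd into a GEM handle via PRIME; "fd" and
 * "dmabuf_fd" are hypothetical.
 *
 *	uint32_t handle;
 *
 *	if (drmPrimeFDToHandle(fd, dmabuf_fd, &handle))
 *		return -errno;
 *	(the handle now refers to the imported buffer)
 */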

void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}