blob: 1ccde09d01c8bc1687b7eeadc4baa94b8c2d9651 [file] [log] [blame]
Arto Merilainende2ba662013-03-22 16:34:08 +02001/*
2 * NVIDIA Tegra DRM GEM helper functions
3 *
4 * Copyright (C) 2012 Sascha Hauer, Pengutronix
5 * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
6 *
7 * Based on the GEM/CMA helpers
8 *
9 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
10 *
Thierry Reding9a2ac2d2014-02-11 15:52:01 +010011 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
Arto Merilainende2ba662013-03-22 16:34:08 +020014 */
15
Thierry Reding38003912013-12-12 10:00:43 +010016#include <linux/dma-buf.h>
Thierry Redingdf06b752014-06-26 21:41:53 +020017#include <linux/iommu.h>
Thierry Reding773af772013-10-04 22:34:01 +020018#include <drm/tegra_drm.h>
19
Thierry Redingd1f3e1e2014-07-11 08:29:14 +020020#include "drm.h"
Arto Merilainende2ba662013-03-22 16:34:08 +020021#include "gem.h"
22
/* Convert a host1x_bo embedded in a tegra_bo back to the enclosing object. */
static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
{
	return container_of(bo, struct tegra_bo, base);
}
27
28static void tegra_bo_put(struct host1x_bo *bo)
29{
Thierry Reding3be82742013-09-24 16:34:05 +020030 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
Arto Merilainende2ba662013-03-22 16:34:08 +020031 struct drm_device *drm = obj->gem.dev;
32
33 mutex_lock(&drm->struct_mutex);
34 drm_gem_object_unreference(&obj->gem);
35 mutex_unlock(&drm->struct_mutex);
36}
37
38static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
39{
Thierry Reding3be82742013-09-24 16:34:05 +020040 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
Arto Merilainende2ba662013-03-22 16:34:08 +020041
42 return obj->paddr;
43}
44
/* host1x "unpin" callback: nothing to undo, see tegra_bo_pin(). */
static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}
48
49static void *tegra_bo_mmap(struct host1x_bo *bo)
50{
Thierry Reding3be82742013-09-24 16:34:05 +020051 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
Arto Merilainende2ba662013-03-22 16:34:08 +020052
53 return obj->vaddr;
54}
55
/* host1x "munmap" callback: the mapping is permanent, nothing to tear down. */
static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
}
59
60static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
61{
Thierry Reding3be82742013-09-24 16:34:05 +020062 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
Arto Merilainende2ba662013-03-22 16:34:08 +020063
64 return obj->vaddr + page * PAGE_SIZE;
65}
66
/* host1x "kunmap" callback: nothing to undo, see tegra_bo_kmap(). */
static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
			    void *addr)
{
}
71
72static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
73{
Thierry Reding3be82742013-09-24 16:34:05 +020074 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
Arto Merilainende2ba662013-03-22 16:34:08 +020075 struct drm_device *drm = obj->gem.dev;
76
77 mutex_lock(&drm->struct_mutex);
78 drm_gem_object_reference(&obj->gem);
79 mutex_unlock(&drm->struct_mutex);
80
81 return bo;
82}
83
/* host1x buffer object operations backed by Tegra DRM GEM objects. */
static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};
94
Thierry Redingdf06b752014-06-26 21:41:53 +020095static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
96{
97 int prot = IOMMU_READ | IOMMU_WRITE;
98 ssize_t err;
99
100 if (bo->mm)
101 return -EBUSY;
102
103 bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
104 if (!bo->mm)
105 return -ENOMEM;
106
107 err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
108 PAGE_SIZE, 0, 0, 0);
109 if (err < 0) {
110 dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
111 err);
112 goto free;
113 }
114
115 bo->paddr = bo->mm->start;
116
Thierry Reding8c8cb582014-12-17 16:46:37 +0100117 err = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
118 bo->sgt->nents, prot);
Thierry Redingdf06b752014-06-26 21:41:53 +0200119 if (err < 0) {
120 dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
121 goto remove;
122 }
123
124 bo->size = err;
125
126 return 0;
127
128remove:
129 drm_mm_remove_node(bo->mm);
130free:
131 kfree(bo->mm);
132 return err;
133}
134
135static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
136{
137 if (!bo->mm)
138 return 0;
139
140 iommu_unmap(tegra->domain, bo->paddr, bo->size);
141 drm_mm_remove_node(bo->mm);
142 kfree(bo->mm);
143
144 return 0;
145}
146
Thierry Redingc28d4a32014-10-16 14:18:50 +0200147static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
148 size_t size)
Arto Merilainende2ba662013-03-22 16:34:08 +0200149{
150 struct tegra_bo *bo;
151 int err;
152
153 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
154 if (!bo)
155 return ERR_PTR(-ENOMEM);
156
157 host1x_bo_init(&bo->base, &tegra_bo_ops);
158 size = round_up(size, PAGE_SIZE);
159
Thierry Redingc28d4a32014-10-16 14:18:50 +0200160 err = drm_gem_object_init(drm, &bo->gem, size);
161 if (err < 0)
162 goto free;
163
164 err = drm_gem_create_mmap_offset(&bo->gem);
165 if (err < 0)
166 goto release;
167
168 return bo;
169
170release:
171 drm_gem_object_release(&bo->gem);
172free:
173 kfree(bo);
174 return ERR_PTR(err);
175}
176
Thierry Redingdf06b752014-06-26 21:41:53 +0200177static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
Thierry Redingc28d4a32014-10-16 14:18:50 +0200178{
Thierry Redingdf06b752014-06-26 21:41:53 +0200179 if (bo->pages) {
180 drm_gem_put_pages(&bo->gem, bo->pages, true, true);
181 sg_free_table(bo->sgt);
182 kfree(bo->sgt);
Thierry Reding7e0180e2014-11-06 14:41:31 +0100183 } else if (bo->vaddr) {
Thierry Redingdf06b752014-06-26 21:41:53 +0200184 dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr,
185 bo->paddr);
186 }
187}
188
Thierry Reding73c42c72014-12-16 16:41:47 +0100189static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
Thierry Redingdf06b752014-06-26 21:41:53 +0200190{
Thierry Redinga04251f2014-12-16 16:35:26 +0100191 struct scatterlist *s;
192 struct sg_table *sgt;
193 unsigned int i;
194
Thierry Redingdf06b752014-06-26 21:41:53 +0200195 bo->pages = drm_gem_get_pages(&bo->gem);
196 if (IS_ERR(bo->pages))
197 return PTR_ERR(bo->pages);
198
Thierry Reding73c42c72014-12-16 16:41:47 +0100199 bo->num_pages = bo->gem.size >> PAGE_SHIFT;
Thierry Redingdf06b752014-06-26 21:41:53 +0200200
Thierry Redinga04251f2014-12-16 16:35:26 +0100201 sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
202 if (IS_ERR(sgt))
203 goto put_pages;
204
205 /*
206 * Fake up the SG table so that dma_map_sg() can be used to flush the
207 * pages associated with it. Note that this relies on the fact that
208 * the DMA API doesn't hook into IOMMU on Tegra, therefore mapping is
209 * only cache maintenance.
210 *
211 * TODO: Replace this by drm_clflash_sg() once it can be implemented
212 * without relying on symbols that are not exported.
213 */
214 for_each_sg(sgt->sgl, s, sgt->nents, i)
215 sg_dma_address(s) = sg_phys(s);
216
217 if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) {
218 sgt = ERR_PTR(-ENOMEM);
219 goto release_sgt;
Thierry Redingdf06b752014-06-26 21:41:53 +0200220 }
221
Thierry Redinga04251f2014-12-16 16:35:26 +0100222 bo->sgt = sgt;
223
Thierry Redingdf06b752014-06-26 21:41:53 +0200224 return 0;
Thierry Redinga04251f2014-12-16 16:35:26 +0100225
226release_sgt:
227 sg_free_table(sgt);
228 kfree(sgt);
229put_pages:
230 drm_gem_put_pages(&bo->gem, bo->pages, false, false);
231 return PTR_ERR(sgt);
Thierry Redingdf06b752014-06-26 21:41:53 +0200232}
233
Thierry Reding73c42c72014-12-16 16:41:47 +0100234static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
Thierry Redingdf06b752014-06-26 21:41:53 +0200235{
236 struct tegra_drm *tegra = drm->dev_private;
237 int err;
238
239 if (tegra->domain) {
Thierry Reding73c42c72014-12-16 16:41:47 +0100240 err = tegra_bo_get_pages(drm, bo);
Thierry Redingdf06b752014-06-26 21:41:53 +0200241 if (err < 0)
242 return err;
243
244 err = tegra_bo_iommu_map(tegra, bo);
245 if (err < 0) {
246 tegra_bo_free(drm, bo);
247 return err;
248 }
249 } else {
Thierry Reding73c42c72014-12-16 16:41:47 +0100250 size_t size = bo->gem.size;
251
Thierry Redingdf06b752014-06-26 21:41:53 +0200252 bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
253 GFP_KERNEL | __GFP_NOWARN);
254 if (!bo->vaddr) {
255 dev_err(drm->dev,
256 "failed to allocate buffer of size %zu\n",
257 size);
258 return -ENOMEM;
259 }
260 }
261
262 return 0;
Thierry Redingc28d4a32014-10-16 14:18:50 +0200263}
264
Thierry Reding71c38622014-11-03 13:23:02 +0100265struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
Thierry Redingc28d4a32014-10-16 14:18:50 +0200266 unsigned long flags)
267{
268 struct tegra_bo *bo;
269 int err;
270
271 bo = tegra_bo_alloc_object(drm, size);
272 if (IS_ERR(bo))
273 return bo;
274
Thierry Reding73c42c72014-12-16 16:41:47 +0100275 err = tegra_bo_alloc(drm, bo);
Thierry Redingdf06b752014-06-26 21:41:53 +0200276 if (err < 0)
277 goto release;
Arto Merilainende2ba662013-03-22 16:34:08 +0200278
Thierry Reding773af772013-10-04 22:34:01 +0200279 if (flags & DRM_TEGRA_GEM_CREATE_TILED)
Thierry Redingc134f012014-06-03 14:48:12 +0200280 bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;
Thierry Reding773af772013-10-04 22:34:01 +0200281
Thierry Redingdb7fbdf2013-10-07 09:47:58 +0200282 if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
283 bo->flags |= TEGRA_BO_BOTTOM_UP;
284
Arto Merilainende2ba662013-03-22 16:34:08 +0200285 return bo;
286
Thierry Redingdf06b752014-06-26 21:41:53 +0200287release:
288 drm_gem_object_release(&bo->gem);
Arto Merilainende2ba662013-03-22 16:34:08 +0200289 kfree(bo);
Arto Merilainende2ba662013-03-22 16:34:08 +0200290 return ERR_PTR(err);
Arto Merilainende2ba662013-03-22 16:34:08 +0200291}
292
293struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
Thierry Reding3be82742013-09-24 16:34:05 +0200294 struct drm_device *drm,
Thierry Reding71c38622014-11-03 13:23:02 +0100295 size_t size,
Thierry Reding773af772013-10-04 22:34:01 +0200296 unsigned long flags,
Thierry Reding71c38622014-11-03 13:23:02 +0100297 u32 *handle)
Arto Merilainende2ba662013-03-22 16:34:08 +0200298{
299 struct tegra_bo *bo;
Thierry Redinga8b48df2014-10-16 14:22:50 +0200300 int err;
Arto Merilainende2ba662013-03-22 16:34:08 +0200301
Thierry Reding773af772013-10-04 22:34:01 +0200302 bo = tegra_bo_create(drm, size, flags);
Arto Merilainende2ba662013-03-22 16:34:08 +0200303 if (IS_ERR(bo))
304 return bo;
305
Thierry Redinga8b48df2014-10-16 14:22:50 +0200306 err = drm_gem_handle_create(file, &bo->gem, handle);
307 if (err) {
308 tegra_bo_free_object(&bo->gem);
309 return ERR_PTR(err);
310 }
Arto Merilainende2ba662013-03-22 16:34:08 +0200311
312 drm_gem_object_unreference_unlocked(&bo->gem);
313
314 return bo;
Arto Merilainende2ba662013-03-22 16:34:08 +0200315}
316
/*
 * Import a foreign dma-buf as a tegra_bo.
 *
 * Attaches to the buffer, maps it to obtain an SG table, and either maps
 * it through the IOMMU (when a domain is available) or, lacking an IOMMU,
 * requires the buffer to be physically contiguous (a single SG entry) and
 * uses its DMA address directly.
 *
 * Returns the new object or an ERR_PTR-encoded error; on failure the
 * attachment, the dma-buf reference and the GEM object are all released
 * again.
 */
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	/* hold a reference on the dma-buf for the lifetime of the object */
	get_dma_buf(buf);

	/* dma_buf_map_attachment() may return NULL or an ERR_PTR */
	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	} else {
		/* without an IOMMU, only contiguous buffers can be used */
		if (bo->sgt->nents > 1) {
			err = -EINVAL;
			goto detach;
		}

		bo->paddr = sg_dma_address(bo->sgt->sgl);
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	/* bo->sgt may be NULL or an ERR_PTR here, only unmap a real table */
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}
376
/*
 * GEM free callback: tear down a buffer object.
 *
 * The IOMMU mapping is removed first, then either the dma-buf import
 * state or the locally-allocated backing storage is released, and
 * finally the GEM object itself.
 */
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		/* imported buffer: unmap and detach from the foreign dma-buf */
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}
396
397int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
398 struct drm_mode_create_dumb *args)
399{
Thierry Redingdc6057e2014-10-30 15:32:56 +0100400 unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
Thierry Redingd1f3e1e2014-07-11 08:29:14 +0200401 struct tegra_drm *tegra = drm->dev_private;
Arto Merilainende2ba662013-03-22 16:34:08 +0200402 struct tegra_bo *bo;
403
Thierry Redingdc6057e2014-10-30 15:32:56 +0100404 args->pitch = round_up(min_pitch, tegra->pitch_align);
405 args->size = args->pitch * args->height;
Arto Merilainende2ba662013-03-22 16:34:08 +0200406
Thierry Reding773af772013-10-04 22:34:01 +0200407 bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
Thierry Reding3be82742013-09-24 16:34:05 +0200408 &args->handle);
Arto Merilainende2ba662013-03-22 16:34:08 +0200409 if (IS_ERR(bo))
410 return PTR_ERR(bo);
411
412 return 0;
413}
414
415int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
Thierry Reding71c38622014-11-03 13:23:02 +0100416 u32 handle, u64 *offset)
Arto Merilainende2ba662013-03-22 16:34:08 +0200417{
418 struct drm_gem_object *gem;
419 struct tegra_bo *bo;
420
421 mutex_lock(&drm->struct_mutex);
422
423 gem = drm_gem_object_lookup(drm, file, handle);
424 if (!gem) {
425 dev_err(drm->dev, "failed to lookup GEM object\n");
426 mutex_unlock(&drm->struct_mutex);
427 return -EINVAL;
428 }
429
430 bo = to_tegra_bo(gem);
431
David Herrmann2bc7b0c2013-08-13 14:19:58 +0200432 *offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
Arto Merilainende2ba662013-03-22 16:34:08 +0200433
434 drm_gem_object_unreference(gem);
435
436 mutex_unlock(&drm->struct_mutex);
437
438 return 0;
439}
440
Thierry Redingdf06b752014-06-26 21:41:53 +0200441static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
442{
443 struct drm_gem_object *gem = vma->vm_private_data;
444 struct tegra_bo *bo = to_tegra_bo(gem);
445 struct page *page;
446 pgoff_t offset;
447 int err;
448
449 if (!bo->pages)
450 return VM_FAULT_SIGBUS;
451
452 offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
453 page = bo->pages[offset];
454
455 err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
456 switch (err) {
457 case -EAGAIN:
458 case 0:
459 case -ERESTARTSYS:
460 case -EINTR:
461 case -EBUSY:
462 return VM_FAULT_NOPAGE;
463
464 case -ENOMEM:
465 return VM_FAULT_OOM;
466 }
467
468 return VM_FAULT_SIGBUS;
469}
470
/* VM operations for user mappings of tegra_bo objects. */
const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
476
/*
 * mmap() file operation for Tegra DRM.
 *
 * After the generic drm_gem_mmap() has located the object, contiguous
 * buffers are mapped in full here via dma_mmap_writecombine(), while
 * page-backed (IOMMU) buffers are set up to be populated lazily through
 * tegra_bo_fault().
 */
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int ret;

	ret = drm_gem_mmap(file, vma);
	if (ret)
		return ret;

	gem = vma->vm_private_data;
	bo = to_tegra_bo(gem);

	if (!bo->pages) {
		/*
		 * dma_mmap_writecombine() interprets vm_pgoff as an offset
		 * into the buffer, so temporarily clear the fake GEM offset
		 * and restore it afterwards.
		 */
		unsigned long vm_pgoff = vma->vm_pgoff;

		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr,
					    bo->paddr, gem->size);
		if (ret) {
			drm_gem_vm_close(vma);
			return ret;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		/* individual struct pages are inserted by the fault handler */
		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}
Thierry Reding38003912013-12-12 10:00:43 +0100515
/*
 * dma-buf "map" callback: build and DMA-map an SG table describing the
 * buffer for the importing device.
 *
 * Page-backed buffers get one SG entry per page and are mapped through
 * the DMA API; contiguous buffers are described by a single entry using
 * the already-known device address. Returns NULL on failure (as the
 * dma-buf API of this era permits).
 */
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		struct scatterlist *sg;
		unsigned int i;

		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
			goto free;

		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free;
	} else {
		/* contiguous buffer: a single entry covers the whole object */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free;

		sg_dma_address(sgt->sgl) = bo->paddr;
		sg_dma_len(sgt->sgl) = gem->size;
	}

	return sgt;

free:
	/* safe even after a failed sg_alloc_table(), which cleans up itself */
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}
555
556static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
557 struct sg_table *sgt,
558 enum dma_data_direction dir)
559{
Thierry Redingdf06b752014-06-26 21:41:53 +0200560 struct drm_gem_object *gem = attach->dmabuf->priv;
561 struct tegra_bo *bo = to_tegra_bo(gem);
562
563 if (bo->pages)
564 dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
565
Thierry Reding38003912013-12-12 10:00:43 +0100566 sg_free_table(sgt);
567 kfree(sgt);
568}
569
/* dma-buf "release" callback: defer to the generic GEM dma-buf release. */
static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}
574
/* dma-buf atomic kmap: not supported, importers get NULL. */
static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
					 unsigned long page)
{
	return NULL;
}
580
/* dma-buf atomic kunmap: nothing to do, kmap_atomic never maps anything. */
static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
					  unsigned long page,
					  void *addr)
{
}
586
/* dma-buf kmap: not supported, importers get NULL. */
static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
	return NULL;
}
591
/* dma-buf kunmap: nothing to do, kmap never maps anything. */
static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
				   void *addr)
{
}
596
/* dma-buf mmap: not supported; userspace must mmap via the DRM device. */
static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}
601
Thierry Redingd40326f2014-01-29 20:32:33 +0100602static void *tegra_gem_prime_vmap(struct dma_buf *buf)
603{
604 struct drm_gem_object *gem = buf->priv;
605 struct tegra_bo *bo = to_tegra_bo(gem);
606
607 return bo->vaddr;
608}
609
/* dma-buf "vunmap" callback: mapping is permanent, nothing to undo. */
static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}
613
/* dma-buf operations for buffers exported by the Tegra DRM driver. */
static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.kmap_atomic = tegra_gem_prime_kmap_atomic,
	.kunmap_atomic = tegra_gem_prime_kunmap_atomic,
	.kmap = tegra_gem_prime_kmap,
	.kunmap = tegra_gem_prime_kunmap,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};
626
/*
 * PRIME export: wrap a GEM object in a dma-buf using the driver's
 * dma-buf operations. The GEM object itself serves as the dma-buf's
 * private data.
 */
struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
				       struct drm_gem_object *gem,
				       int flags)
{
	return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size,
			      flags, NULL);
}
634
635struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
636 struct dma_buf *buf)
637{
638 struct tegra_bo *bo;
639
640 if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
641 struct drm_gem_object *gem = buf->priv;
642
643 if (gem->dev == drm) {
644 drm_gem_object_reference(gem);
645 return gem;
646 }
647 }
648
649 bo = tegra_bo_import(drm, buf);
650 if (IS_ERR(bo))
651 return ERR_CAST(bo);
652
653 return &bo->gem;
654}