/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area. With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data. If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

static int etnaviv_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}
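
/*
 * The mapping type userspace gets simply follows the BO's allocation flags
 * as handled above: ETNA_BO_WC yields a write-combined mapping,
 * ETNA_BO_UNCACHED an uncached one, and cached objects keep the default
 * (cacheable) page protection while being redirected to the object's own
 * shmem file.
 */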

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	obj = to_etnaviv_bo(vma->vm_private_data);
	return etnaviv_gem_mmap_obj(vma->vm_private_data, vma);
}

int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int ret;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet. Note that vm_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	ret = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);

out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}
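
/*
 * Usage note: the offset returned here is the "fake" mmap offset managed by
 * the DRM VMA manager. Userspace passes it as the offset argument to mmap()
 * on the DRM device fd; the fault handler above therefore recomputes the
 * object-relative page offset from the faulting address instead of using
 * vmf->pgoff, which still carries the fake offset.
 */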

static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu *mmu)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->mmu == mmu)
			return mapping;
	}

	return NULL;
}

int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu,
	struct drm_gem_object *obj, u32 *iova)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us. If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&gpu->mmu->lock);
			if (mapping->mmu == gpu->mmu)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&gpu->mmu->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->mmu = gpu->mmu;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
				    mapping);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (!ret) {
		/* Take a reference on the object */
		drm_gem_object_reference(obj);
		*iova = mapping->iova;
	}

	return ret;
}

void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);

	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_unreference_unlocked(obj);
}
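
/*
 * Illustrative usage (a sketch, not a caller in this file): a submit path
 * pins the object into the GPU's address space and balances the pin once
 * the GPU mapping is no longer needed:
 *
 *	u32 iova;
 *	int ret = etnaviv_gem_get_iova(gpu, obj, &iova);
 *
 *	if (ret)
 *		return ret;
 *	... emit commands referencing iova ...
 *	etnaviv_gem_put_iova(gpu, obj);
 */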

void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&etnaviv_obj->lock);
	if (!etnaviv_obj->vaddr) {
		struct page **pages = etnaviv_gem_get_pages(etnaviv_obj);

		if (IS_ERR(pages)) {
			mutex_unlock(&etnaviv_obj->lock);
			return NULL;
		}

		etnaviv_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}
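
/*
 * Note: the kernel mapping created above is write-combined, is cached in
 * etnaviv_obj->vaddr and stays in place for the lifetime of the object;
 * for shmem objects it is only torn down by the vunmap() call in
 * etnaviv_gem_shmem_release() below.
 */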

static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (op & ETNA_PREP_NOSYNC) {
		if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
							  write))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
							  write, true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		if (!etnaviv_obj->sgt) {
			void *ret;

			mutex_lock(&etnaviv_obj->lock);
			ret = etnaviv_gem_get_pages(etnaviv_obj);
			mutex_unlock(&etnaviv_obj->lock);
			if (IS_ERR(ret))
				return PTR_ERR(ret);
		}

		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
				    etnaviv_obj->sgt->nents,
				    etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
			etnaviv_obj->sgt->nents,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}
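
/*
 * Illustrative usage (a sketch): CPU access to a cached BO is bracketed by
 * the two helpers above so that the CPU caches are synchronised against the
 * GPU in both directions:
 *
 *	ret = etnaviv_gem_cpu_prep(obj, ETNA_PREP_WRITE, &timeout);
 *	if (ret)
 *		return ret;
 *	... CPU writes via etnaviv_gem_vmap() or a userspace mmap ...
 *	etnaviv_gem_cpu_fini(obj);
 */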

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %u\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->vaddr)
		vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
};
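
/*
 * The ops vtable selects the backing-store backend for an object: shmem
 * objects use the ops above, userptr objects use etnaviv_gem_userptr_ops
 * further down, and other users (such as dma-buf imports) can supply their
 * own ops through etnaviv_gem_new_private().
 */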

void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	list_del(&etnaviv_obj->gem_node);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu *mmu = mapping->mmu;

		WARN_ON(mapping->use);

		if (mmu)
			etnaviv_iommu_unmap_gem(mmu, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	if (etnaviv_obj->resv == &etnaviv_obj->_resv)
		reservation_object_fini(&etnaviv_obj->_resv);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);

	return 0;
}

static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;
	if (robj) {
		etnaviv_obj->resv = robj;
	} else {
		etnaviv_obj->resv = &etnaviv_obj->_resv;
		reservation_object_init(&etnaviv_obj->_resv);
	}

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;

	return 0;
}

static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret == 0) {
		struct address_space *mapping;

		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping = file_inode(obj->filp)->i_mapping;
		mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
	}

	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		u32 size, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return obj;

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(ret);
	}

	return obj;
}
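
/*
 * Illustrative usage (a sketch): etnaviv_gem_new_handle() is used by the
 * GEM_NEW ioctl path and hands a handle back to userspace, while
 * etnaviv_gem_new() is for in-kernel users that want the object pointer
 * itself, e.g.:
 *
 *	struct drm_gem_object *obj;
 *
 *	obj = etnaviv_gem_new(dev, size, ETNA_BO_WC);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */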

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

struct get_pages_work {
	struct work_struct work;
	struct mm_struct *mm;
	struct task_struct *task;
	struct etnaviv_gem_object *etnaviv_obj;
};

static struct page **etnaviv_gem_userptr_do_get_pages(
	struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm, struct task_struct *task)
{
	int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	uintptr_t ptr;

	pvec = drm_malloc_ab(npages, sizeof(struct page *));
	if (!pvec)
		return ERR_PTR(-ENOMEM);

	pinned = 0;
	ptr = etnaviv_obj->userptr.ptr;

	down_read(&mm->mmap_sem);
	while (pinned < npages) {
		ret = get_user_pages(task, mm, ptr, npages - pinned,
				     !etnaviv_obj->userptr.ro, 0,
				     pvec + pinned, NULL);
		if (ret < 0)
			break;

		ptr += ret * PAGE_SIZE;
		pinned += ret;
	}
	up_read(&mm->mmap_sem);

	if (ret < 0) {
		release_pages(pvec, pinned, 0);
		drm_free_large(pvec);
		return ERR_PTR(ret);
	}

	return pvec;
}

static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
	struct page **pvec;

	pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task);

	mutex_lock(&etnaviv_obj->lock);
	if (IS_ERR(pvec)) {
		etnaviv_obj->userptr.work = ERR_CAST(pvec);
	} else {
		etnaviv_obj->userptr.work = NULL;
		etnaviv_obj->pages = pvec;
	}

	mutex_unlock(&etnaviv_obj->lock);
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	mmput(work->mm);
	put_task_struct(work->task);
	kfree(work);
}

static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct get_pages_work *work;
	struct mm_struct *mm;
	int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	if (etnaviv_obj->userptr.work) {
		if (IS_ERR(etnaviv_obj->userptr.work)) {
			ret = PTR_ERR(etnaviv_obj->userptr.work);
			etnaviv_obj->userptr.work = NULL;
		} else {
			ret = -EAGAIN;
		}
		return ret;
	}

	mm = get_task_mm(etnaviv_obj->userptr.task);
	pinned = 0;
	if (mm == current->mm) {
		pvec = drm_malloc_ab(npages, sizeof(struct page *));
		if (!pvec) {
			mmput(mm);
			return -ENOMEM;
		}

		pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
					       !etnaviv_obj->userptr.ro, pvec);
		if (pinned < 0) {
			drm_free_large(pvec);
			mmput(mm);
			return pinned;
		}

		if (pinned == npages) {
			etnaviv_obj->pages = pvec;
			mmput(mm);
			return 0;
		}
	}

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		mmput(mm);
		return -ENOMEM;
	}

	get_task_struct(current);
	drm_gem_object_reference(&etnaviv_obj->base);

	work->mm = mm;
	work->task = current;
	work->etnaviv_obj = etnaviv_obj;

	etnaviv_obj->userptr.work = &work->work;
	INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);

	etnaviv_queue_work(etnaviv_obj->base.dev, &work->work);

	return -EAGAIN;
}
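
/*
 * Note: when the fast path cannot pin all pages directly, the work item
 * above is queued and -EAGAIN is returned while userptr.work is pending.
 * A later ->get_pages() call either picks up the page array stored by the
 * worker, reports the worker's error, or keeps returning -EAGAIN until the
 * worker has finished, so the pin has to be retried.
 */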

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		release_pages(etnaviv_obj->pages, npages, 0);
		drm_free_large(etnaviv_obj->pages);
	}
	put_task_struct(etnaviv_obj->userptr.task);
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.task = current;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
	get_task_struct(current);

	ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
	if (ret) {
		drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
		return ret;
	}

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	return ret;
}