/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area.  With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data.  If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

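/*
 * Drop the DMA mapping and scatter/gather table, then return the backing
 * pages to shmem, marked dirty as the GPU may have written to them.
 */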
static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

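/*
 * Lazily populate the page array and scatter/gather table.  The pages are
 * provided by the per-object ops (shmem or userptr) and the sgt is built
 * and DMA-mapped on first use.  Must be called with etnaviv_obj->lock held;
 * returns the page array or an ERR_PTR.
 */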
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

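/*
 * Apply the object's caching mode to a userspace mapping: write-combined
 * and uncached BOs get their page protection adjusted, while cached BOs
 * are redirected to the underlying shmem file.
 */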
static int etnaviv_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return etnaviv_gem_mmap_obj(vma->vm_private_data, vma);
}

int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int ret;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet.  Note that vm_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	ret = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);

out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

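/*
 * Find this object's mapping in the given MMU context, if any.  Callers
 * pass mmu == NULL to look up a reaped mapping that no MMU owns anymore.
 */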
static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu *mmu)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->mmu == mmu)
			return mapping;
	}

	return NULL;
}

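/*
 * Pin the object into the GPU's address space and return its GPU virtual
 * address.  A successful call takes a use count on the mapping and a
 * reference on the GEM object, both dropped by etnaviv_gem_put_iova().
 */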
int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu,
	struct drm_gem_object *obj, u32 *iova)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us.  If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&gpu->mmu->lock);
			if (mapping->mmu == gpu->mmu)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&gpu->mmu->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->mmu = gpu->mmu;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
				    mapping);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (!ret) {
		/* Take a reference on the object */
		drm_gem_object_reference(obj);
		*iova = mapping->iova;
	}

	return ret;
}

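/*
 * Drop one use count on this GPU's mapping of the object and release the
 * object reference taken by etnaviv_gem_get_iova().
 */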
void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);

	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_unreference_unlocked(obj);
}

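/*
 * Return a kernel virtual address for the object, creating a write-combined
 * vmap of its pages on first use.
 */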
void *etnaviv_gem_vaddr(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&etnaviv_obj->lock);
	if (!etnaviv_obj->vaddr) {
		struct page **pages = etnaviv_gem_get_pages(etnaviv_obj);

		if (IS_ERR(pages)) {
			mutex_unlock(&etnaviv_obj->lock);
			return ERR_CAST(pages);
		}

		etnaviv_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

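/* Translate an ETNA_PREP_* access mask into a DMA synchronisation direction. */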
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

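/*
 * Prepare the object for CPU access: wait (or, with ETNA_PREP_NOSYNC, only
 * poll) for GPU work via the reservation object, then sync cached BOs for
 * the CPU and record the access mode for the later cpu_fini.
 */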
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (op & ETNA_PREP_NOSYNC) {
		if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
							  write))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
							  write, true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		if (!etnaviv_obj->sgt) {
			void *ret;

			mutex_lock(&etnaviv_obj->lock);
			ret = etnaviv_gem_get_pages(etnaviv_obj);
			mutex_unlock(&etnaviv_obj->lock);
			if (IS_ERR(ret))
				return PTR_ERR(ret);
		}

		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
				    etnaviv_obj->sgt->nents,
				    etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

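/*
 * End a CPU access window: for cached BOs, sync the pages back to the
 * device using the direction recorded at prep time.
 */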
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
			etnaviv_obj->sgt->nents,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %u\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->vaddr)
		vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
};

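/*
 * Final teardown: take the object off the device's list, unmap and free
 * all of its per-MMU mappings, release the backing store through the
 * per-object ops and free the object itself.
 */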
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	list_del(&etnaviv_obj->gem_node);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu *mmu = mapping->mmu;

		WARN_ON(mapping->use);

		if (mmu)
			etnaviv_iommu_unmap_gem(mmu, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	if (etnaviv_obj->resv == &etnaviv_obj->_resv)
		reservation_object_fini(&etnaviv_obj->_resv);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);

	return 0;
}

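/*
 * Common initialisation for new GEM objects: validate the cache flags and
 * set up the reservation object, object lock and mapping list.
 */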
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;
	if (robj) {
		etnaviv_obj->resv = robj;
	} else {
		etnaviv_obj->resv = &etnaviv_obj->_resv;
		reservation_object_init(&etnaviv_obj->_resv);
	}

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;

	return 0;
}

static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret == 0) {
		struct address_space *mapping;

		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA.  See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping = file_inode(obj->filp)->i_mapping;
		mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
	}

	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		u32 size, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return obj;

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(ret);
	}

	return obj;
}

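/*
 * Create a GEM object without shmem backing; the caller supplies the ops
 * that provide and release the pages (the userptr path below is one user).
 */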
int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

struct get_pages_work {
	struct work_struct work;
	struct mm_struct *mm;
	struct task_struct *task;
	struct etnaviv_gem_object *etnaviv_obj;
};

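/*
 * Pin all pages backing a userptr object with get_user_pages(), taking the
 * target mm's mmap_sem for read around the pinning loop.  On failure, any
 * pages pinned so far are released again.
 */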
static struct page **etnaviv_gem_userptr_do_get_pages(
	struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm, struct task_struct *task)
{
	int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	uintptr_t ptr;

	pvec = drm_malloc_ab(npages, sizeof(struct page *));
	if (!pvec)
		return ERR_PTR(-ENOMEM);

	pinned = 0;
	ptr = etnaviv_obj->userptr.ptr;

	down_read(&mm->mmap_sem);
	while (pinned < npages) {
		ret = get_user_pages(task, mm, ptr, npages - pinned,
				     !etnaviv_obj->userptr.ro, 0,
				     pvec + pinned, NULL);
		if (ret < 0)
			break;

		ptr += ret * PAGE_SIZE;
		pinned += ret;
	}
	up_read(&mm->mmap_sem);

	if (ret < 0) {
		release_pages(pvec, pinned, 0);
		drm_free_large(pvec);
		return ERR_PTR(ret);
	}

	return pvec;
}

static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
	struct page **pvec;

	pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task);

	mutex_lock(&etnaviv_obj->lock);
	if (IS_ERR(pvec)) {
		etnaviv_obj->userptr.work = ERR_CAST(pvec);
	} else {
		etnaviv_obj->userptr.work = NULL;
		etnaviv_obj->pages = pvec;
	}

	mutex_unlock(&etnaviv_obj->lock);
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	mmput(work->mm);
	put_task_struct(work->task);
	kfree(work);
}

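/*
 * If pinning work was queued earlier, report its result (-EAGAIN while it
 * is still running).  Otherwise try the lockless fast path for the current
 * mm; if that cannot pin every page, defer to a worker and return -EAGAIN
 * so the caller retries once the worker has finished.
 */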
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct get_pages_work *work;
	struct mm_struct *mm;
	int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	if (etnaviv_obj->userptr.work) {
		if (IS_ERR(etnaviv_obj->userptr.work)) {
			ret = PTR_ERR(etnaviv_obj->userptr.work);
			etnaviv_obj->userptr.work = NULL;
		} else {
			ret = -EAGAIN;
		}
		return ret;
	}

	mm = get_task_mm(etnaviv_obj->userptr.task);
	pinned = 0;
	if (mm == current->mm) {
		pvec = drm_malloc_ab(npages, sizeof(struct page *));
		if (!pvec) {
			mmput(mm);
			return -ENOMEM;
		}

		pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
					       !etnaviv_obj->userptr.ro, pvec);
		if (pinned < 0) {
			drm_free_large(pvec);
			mmput(mm);
			return pinned;
		}

		if (pinned == npages) {
			etnaviv_obj->pages = pvec;
			mmput(mm);
			return 0;
		}
	}

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		mmput(mm);
		return -ENOMEM;
	}

	get_task_struct(current);
	drm_gem_object_reference(&etnaviv_obj->base);

	work->mm = mm;
	work->task = current;
	work->etnaviv_obj = etnaviv_obj;

	etnaviv_obj->userptr.work = &work->work;
	INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);

	etnaviv_queue_work(etnaviv_obj->base.dev, &work->work);

	return -EAGAIN;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		release_pages(etnaviv_obj->pages, npages, 0);
		drm_free_large(etnaviv_obj->pages);
	}
	put_task_struct(etnaviv_obj->userptr.task);
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.task = current;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
	get_task_struct(current);

	ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
	if (ret) {
		drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
		return ret;
	}

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	return ret;
}