/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area.  With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data.  If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

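/*
 * Ensure the object's backing pages are allocated and mapped into a
 * scatterlist for device DMA.  Must be called with etnaviv_obj->lock
 * held; returns the page array or an ERR_PTR on failure.
 */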
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

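/*
 * Set up the userspace mapping according to the BO cache flags:
 * write-combined, uncached, or (for cached objects) backed by the shmem
 * file so unmap_mapping_range() operates on the right address_space.
 */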
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(etnaviv_obj->base.filp);
		vma->vm_pgoff = 0;
		vma->vm_file = etnaviv_obj->base.filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	obj = to_etnaviv_bo(vma->vm_private_data);
	return obj->ops->mmap(obj, vma);
}

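/*
 * Page fault handler: make sure the backing pages are attached, then
 * insert the faulting page into the userspace VMA.
 */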
int etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int ret;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet.  Note that vm_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	ret = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	ret = vm_insert_page(vma, vmf->address, page);

out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

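/*
 * Look up an existing GPU VM mapping of this object for the given MMU
 * context; returns NULL if none exists.
 */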
static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu *mmu)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->mmu == mmu)
			return mapping;
	}

	return NULL;
}

void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	drm_gem_object_reference(&etnaviv_obj->base);

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use += 1;
	mutex_unlock(&etnaviv_obj->lock);
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
}

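/*
 * Get (or create) a GPU VM mapping of this object for the given GPU and
 * take a use reference on it.  A reaped mapping is re-used when possible,
 * and an extra reference is held on the GEM object while the mapping is
 * handed out.
 */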
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us.  If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&gpu->mmu->lock);
			if (mapping->mmu == gpu->mmu)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&gpu->mmu->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->mmu = gpu->mmu;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
				    mapping);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_reference(obj);
	return mapping;
}

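/*
 * Map the object into kernel address space, creating the vmap lazily
 * under the object lock and caching it in the object.
 */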
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
			VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

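/* Translate a CPU prep operation into the matching DMA sync direction. */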
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

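/*
 * Prepare the object for CPU access: wait for outstanding GPU work
 * (a NOSYNC request returns -EBUSY instead of blocking) and, for cached
 * BOs, sync the pages for the CPU in the requested direction.
 */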
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	unsigned long remain =
		op & ETNA_PREP_NOSYNC ? 0 : etnaviv_timeout_to_jiffies(timeout);
	long lret;

	lret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
						   write, true, remain);
	if (lret < 0)
		return lret;
	else if (lret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		if (!etnaviv_obj->sgt) {
			void *ret;

			mutex_lock(&etnaviv_obj->lock);
			ret = etnaviv_gem_get_pages(etnaviv_obj);
			mutex_unlock(&etnaviv_obj->lock);
			if (IS_ERR(ret))
				return PTR_ERR(ret);
		}

		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
				    etnaviv_obj->sgt->nents,
				    etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
			etnaviv_obj->sgt->nents,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %u\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, " ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};

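/*
 * Final teardown of a GEM object: unmap any remaining GPU VM mappings,
 * release the backing storage and free the object.
 */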
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	list_del(&etnaviv_obj->gem_node);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu *mmu = mapping->mmu;

		WARN_ON(mapping->use);

		if (mmu)
			etnaviv_iommu_unmap_gem(mmu, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	if (etnaviv_obj->resv == &etnaviv_obj->_resv)
		reservation_object_fini(&etnaviv_obj->_resv);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);

	return 0;
}

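/*
 * Common allocation path: validate the cache flags, allocate the object
 * and set up the reservation object, lock and mapping list.
 */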
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;
	if (robj) {
		etnaviv_obj->resv = robj;
	} else {
		etnaviv_obj->resv = &etnaviv_obj->_resv;
		reservation_object_init(&etnaviv_obj->_resv);
	}

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;

	return 0;
}

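/* Allocate a shmem-backed GEM object of the given size and cache flags. */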
static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret == 0) {
		struct address_space *mapping;

		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA.  See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
	}

	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		u32 size, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return obj;

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(ret);
	}

	return obj;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

struct get_pages_work {
	struct work_struct work;
	struct mm_struct *mm;
	struct task_struct *task;
	struct etnaviv_gem_object *etnaviv_obj;
};

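/*
 * Pin the userspace pages backing a userptr BO with get_user_pages_remote(),
 * looping under mmap_sem until all pages are pinned or an error occurs.
 */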
static struct page **etnaviv_gem_userptr_do_get_pages(
	struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm, struct task_struct *task)
{
	int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	uintptr_t ptr;
	unsigned int flags = 0;

	pvec = drm_malloc_ab(npages, sizeof(struct page *));
	if (!pvec)
		return ERR_PTR(-ENOMEM);

	if (!etnaviv_obj->userptr.ro)
		flags |= FOLL_WRITE;

	pinned = 0;
	ptr = etnaviv_obj->userptr.ptr;

	down_read(&mm->mmap_sem);
	while (pinned < npages) {
		ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
					    flags, pvec + pinned, NULL, NULL);
		if (ret < 0)
			break;

		ptr += ret * PAGE_SIZE;
		pinned += ret;
	}
	up_read(&mm->mmap_sem);

	if (ret < 0) {
		release_pages(pvec, pinned, 0);
		drm_free_large(pvec);
		return ERR_PTR(ret);
	}

	return pvec;
}

static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
	struct page **pvec;

	pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task);

	mutex_lock(&etnaviv_obj->lock);
	if (IS_ERR(pvec)) {
		etnaviv_obj->userptr.work = ERR_CAST(pvec);
	} else {
		etnaviv_obj->userptr.work = NULL;
		etnaviv_obj->pages = pvec;
	}

	mutex_unlock(&etnaviv_obj->lock);
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	mmput(work->mm);
	put_task_struct(work->task);
	kfree(work);
}

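/*
 * Try to pin the userptr pages with the fast GUP path when the BO belongs
 * to the current mm; otherwise, or on partial success, defer the pinning
 * to a worker and return -EAGAIN so the caller retries later.
 */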
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct get_pages_work *work;
	struct mm_struct *mm;
	int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	if (etnaviv_obj->userptr.work) {
		if (IS_ERR(etnaviv_obj->userptr.work)) {
			ret = PTR_ERR(etnaviv_obj->userptr.work);
			etnaviv_obj->userptr.work = NULL;
		} else {
			ret = -EAGAIN;
		}
		return ret;
	}

	mm = get_task_mm(etnaviv_obj->userptr.task);
	pinned = 0;
	if (mm == current->mm) {
		pvec = drm_malloc_ab(npages, sizeof(struct page *));
		if (!pvec) {
			mmput(mm);
			return -ENOMEM;
		}

		pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
					       !etnaviv_obj->userptr.ro, pvec);
		if (pinned < 0) {
			drm_free_large(pvec);
			mmput(mm);
			return pinned;
		}

		if (pinned == npages) {
			etnaviv_obj->pages = pvec;
			mmput(mm);
			return 0;
		}
	}

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		mmput(mm);
		return -ENOMEM;
	}

	get_task_struct(current);
	drm_gem_object_reference(&etnaviv_obj->base);

	work->mm = mm;
	work->task = current;
	work->etnaviv_obj = etnaviv_obj;

	etnaviv_obj->userptr.work = &work->work;
	INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);

	etnaviv_queue_work(etnaviv_obj->base.dev, &work->work);

	return -EAGAIN;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		release_pages(etnaviv_obj->pages, npages, 0);
		drm_free_large(etnaviv_obj->pages);
	}
	put_task_struct(etnaviv_obj->userptr.task);
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.task = current;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
	get_task_struct(current);

	ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
	if (ret)
		goto unreference;

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
unreference:
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
	return ret;
}