/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = drm_malloc_ab(npages, sizeof(struct page *));
	if (!p)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
			npages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		drm_free_large(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			drm_free_large(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}
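
/*
 * Illustrative usage sketch (hypothetical caller, not taken from this file):
 * keep get/put paired around CPU-side use of the backing pages, even though
 * msm_gem_put_pages() is currently a no-op:
 *
 *	struct page **pages = msm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... use pages / msm_obj->sgt ...
 *	msm_gem_put_pages(obj);
 */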

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

static void
put_iova(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		struct msm_mmu *mmu = priv->mmus[id];
		if (mmu && msm_obj->domain[id].iova) {
			uint32_t offset = msm_obj->domain[id].iova;
			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
			msm_obj->domain[id].iova = 0;
		}
	}
}

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		if (iommu_present(&platform_bus_type)) {
			struct msm_mmu *mmu = priv->mmus[id];
			uint32_t offset;

			if (WARN_ON(!mmu))
				return -EINVAL;

			offset = (uint32_t)mmap_offset(obj);
			ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
					obj->size, IOMMU_READ | IOMMU_WRITE);
			msm_obj->domain[id].iova = offset;
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		*iova = msm_obj->domain[id].iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}
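
/*
 * Illustrative usage sketch (hypothetical caller; the "id" argument follows
 * the per-MMU index convention used elsewhere in the driver, e.g. gpu->id):
 *
 *	uint32_t iova;
 *	ret = msm_gem_get_iova(obj, gpu->id, &iova);
 *	if (ret)
 *		return ret;
 *	... program iova into GPU or display registers ...
 *	msm_gem_put_iova(obj, gpu->id);
 */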

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!msm_obj->domain[id].iova);
	return msm_obj->domain[id].iova;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}
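
/*
 * Userspace-side sketch (illustrative only, not part of this driver): the
 * fake offset returned above is what a client passes to mmap() on the DRM
 * fd after DRM_IOCTL_MODE_MAP_DUMB, roughly:
 *
 *	struct drm_mode_map_dumb map = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			fd, map.offset);
 */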

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL)
			return ERR_PTR(-ENOMEM);
	}
	msm_obj->vmap_count++;
	return msm_obj->vaddr;
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	mutex_lock(&obj->dev->struct_mutex);
	msm_gem_put_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
}
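
/*
 * Illustrative usage sketch (hypothetical caller): kernel CPU access through
 * the vmap'd address is bracketed by get/put so vmap_count stays balanced
 * and a later vunmap only happens once no one holds the mapping:
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, len);
 *	msm_gem_put_vaddr(obj);
 */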

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	return (msm_obj->madv != __MSM_MADV_PURGED);
}
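
/*
 * Illustrative caller sketch (an assumption, not code from this file): a
 * madvise ioctl handler would translate the return value into a "retained"
 * result for userspace, roughly:
 *
 *	mutex_lock(&dev->struct_mutex);
 *	ret = msm_gem_madvise(obj, requested_madv);
 *	if (ret >= 0) {
 *		// object still has backing pages?
 *		retained = ret;
 *		ret = 0;
 *	}
 *	mutex_unlock(&dev->struct_mutex);
 */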

void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	put_iova(obj);

	msm_gem_vunmap(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i, ret;

	if (!exclusive) {
		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
		 * which makes this a slightly strange place to call it.  OTOH this
		 * is a convenient can-fail point to hook it in.  (And similar to
		 * how etnaviv and nouveau handle this.)
		 */
		ret = reservation_object_reserve_shared(msm_obj->resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}
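
/*
 * Illustrative submit-time ordering sketch (hypothetical; the real flow
 * lives in the submit path, e.g. msm_gem_submit.c): sync against foreign
 * fences first, then attach the new fence and mark the object active:
 *
 *	ret = msm_gem_sync_object(obj, fctx, write);
 *	if (ret)
 *		return ret;
 *	...
 *	msm_gem_move_to_active(obj, gpu, write, submit_fence);
 */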

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);

	if (op & MSM_PREP_NOSYNC) {
		if (!reservation_object_test_signaled_rcu(msm_obj->resv, write))
			return -EBUSY;
	} else {
		int ret;

		ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
				true, timeout_to_jiffies(timeout));
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
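
/*
 * Illustrative CPU-access pattern (hypothetical caller, e.g. behind the
 * cpu_prep/cpu_fini ioctls): wait for pending GPU work, touch the buffer,
 * then signal that CPU access is done:
 *
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_READ | MSM_PREP_WRITE, &timeout);
 *	if (ret)
 *		return ret;
 *	... CPU reads/writes via mmap or msm_gem_get_vaddr() ...
 *	msm_gem_cpu_fini(obj);
 */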

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct fence *fence, const char *type,
		struct seq_file *m)
{
	if (!fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	unsigned sz;
	bool use_vram = false;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return -EINVAL;

	sz = sizeof(*msm_obj);
	if (use_vram)
		sz += sizeof(struct drm_mm_node);

	msm_obj = kzalloc(sz, GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	if (use_vram)
		msm_obj->vram_node = (void *)&msm_obj[1];

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
	if (ret)
		goto fail;

	if (use_pages(obj)) {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	} else {
		drm_gem_private_object_init(dev, obj, size);
	}

	return obj;

fail:
	drm_gem_object_unreference(obj);
	return ERR_PTR(ret);
}
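
/*
 * Illustrative kernel-internal allocation sketch (hypothetical caller, e.g.
 * setting up a scanout buffer): allocate under struct_mutex, then pin an
 * iova for whichever MMU will access it:
 *
 *	mutex_lock(&dev->struct_mutex);
 *	obj = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
 *	mutex_unlock(&dev->struct_mutex);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	ret = msm_gem_get_iova(obj, mmu_id, &iova);
 */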

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}