/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

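/* physical address helper, only meaningful for objects backed by the
 * VRAM carveout (ie. msm_obj->vram_node is set):
 */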
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = drm_malloc_ab(npages, sizeof(struct page *));
	if (!p)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
			npages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		drm_free_large(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

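/* counterpart to get_pages(): releases the sg table and the backing
 * pages (shmem pages or VRAM carveout node):
 */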
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, ensure the pages are clean
		 * before releasing, because display controller, GPU, etc.
		 * are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			drm_free_large(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

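/* set up vma caching attributes according to the buffer flags (WC,
 * uncached, or cached); for cached objects the mapping is redirected
 * to the shmem file so unmap_mapping_range() works as expected:
 */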
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

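/* fault handler: pins the backing pages (under struct_mutex) and
 * inserts the faulting page's pfn into the userspace mapping:
 */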
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning,
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		if (iommu_present(&platform_bus_type)) {
			struct msm_mmu *mmu = priv->mmus[id];
			uint32_t offset;

			if (WARN_ON(!mmu))
				return -EINVAL;

			offset = (uint32_t)mmap_offset(obj);
			ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
					obj->size, IOMMU_READ | IOMMU_WRITE);
			msm_obj->domain[id].iova = offset;
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

/* get iova, taking a reference. Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		*iova = msm_obj->domain[id].iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!msm_obj->domain[id].iova);
	return msm_obj->domain[id].iova;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

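/* dumb-buffer support for KMS: allocate a WC, scanout-capable buffer
 * sized from the requested width/height/bpp:
 */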
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

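/* get a kernel virtual address for the buffer, creating the vmap()
 * mapping on first use; caller must hold struct_mutex:
 */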
void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL)
			return ERR_PTR(-ENOMEM);
	}
	return msm_obj->vaddr;
}

void *msm_gem_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i, ret;

	if (!exclusive) {
		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
		 * which makes this a slightly strange place to call it.  OTOH this
		 * is a convenient can-fail point to hook it in.  (And similar to
		 * how etnaviv and nouveau handle this.)
		 */
		ret = reservation_object_reserve_shared(msm_obj->resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

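/* attach the fence (exclusive or shared) to the object's reservation
 * and move the object onto the gpu's active list:
 */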
void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

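/* wait for any GPU access to finish before CPU access, honouring the
 * MSM_PREP_NOSYNC (poll only) and read vs write semantics of 'op':
 */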
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);

	if (op & MSM_PREP_NOSYNC) {
		if (!reservation_object_test_signaled_rcu(msm_obj->resv, write))
			return -EBUSY;
	} else {
		int ret;

		ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
				true, timeout_to_jiffies(timeout));
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct fence *fence, const char *type,
		struct seq_file *m)
{
	if (!fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	uint64_t off = drm_vma_node_start(&obj->vma_node);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

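/* final teardown, called with struct_mutex held: unmaps any per-id
 * iommu mappings and releases the backing pages (or, for an imported
 * dma-buf, just the page array we allocated):
 */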
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		struct msm_mmu *mmu = priv->mmus[id];
		if (mmu && msm_obj->domain[id].iova) {
			uint32_t offset = msm_obj->domain[id].iova;
			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
		}
	}

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		vunmap(msm_obj->vaddr);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

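/* common object setup shared by msm_gem_new() and msm_gem_import():
 * validates cache flags, decides between shmem and VRAM carveout
 * backing, and initializes (or adopts) the reservation object:
 */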
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	unsigned sz;
	bool use_vram = false;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return -EINVAL;

	sz = sizeof(*msm_obj);
	if (use_vram)
		sz += sizeof(struct drm_mm_node);

	msm_obj = kzalloc(sz, GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	if (use_vram)
		msm_obj->vram_node = (void *)&msm_obj[1];

	msm_obj->flags = flags;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
	if (ret)
		goto fail;

	if (use_pages(obj)) {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	} else {
		drm_gem_private_object_init(dev, obj, size);
	}

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference(obj);

	return ERR_PTR(ret);
}

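/* import a dma-buf: wrap the imported sg_table in a GEM object,
 * reusing the exporter's reservation object:
 */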
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}