/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

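/* For VRAM carveout BOs (no IOMMU), translate the drm_mm node offset
 * into a physical/DMA address within the carveout:
 */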
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

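/* A BO is backed by shmem pages unless it was allocated from the
 * VRAM carveout (in which case vram_node is set):
 */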
static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = drm_malloc_ab(npages, sizeof(struct page *));
	if (!p)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
			npages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		drm_free_large(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

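/* Inverse of get_pages(): unmap and release the BO's backing pages and
 * sg table.  Also called with dev->struct_mutex held.
 */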
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, undo the dma_map_sg() done at
		 * get_pages() time, since display controller, GPU, etc.
		 * are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			drm_free_large(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

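/* Set up vm_flags and page protection for a userspace mapping,
 * based on the BO's caching flags:
 */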
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

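/* Fault handler for userspace mappings: pin the backing pages (if not
 * already attached) and insert the faulting page's pfn into the vma,
 * translating errors into VM_FAULT_* codes:
 */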
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* This should only happen if userspace tries to pass a mmap'd
	 * but unfaulted gem bo vaddr into submit ioctl, triggering
	 * a page fault while struct_mutex is already held.  This is
	 * not a valid use-case so just bail.
	 */
	if (priv->struct_mutex_task == current)
		return VM_FAULT_SIGBUS;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

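/* Tear down all per-MMU iova mappings of the object; called under
 * struct_mutex when the BO is purged or freed:
 */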
static void
put_iova(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		struct msm_mmu *mmu = priv->mmus[id];
		if (mmu && msm_obj->domain[id].iova) {
			uint32_t offset = msm_obj->domain[id].iova;
			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
			msm_obj->domain[id].iova = 0;
		}
	}
}

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		if (iommu_present(&platform_bus_type)) {
			struct msm_mmu *mmu = priv->mmus[id];
			uint32_t offset;

			if (WARN_ON(!mmu))
				return -EINVAL;

			offset = (uint32_t)mmap_offset(obj);
			ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
					obj->size, IOMMU_READ | IOMMU_WRITE);
			msm_obj->domain[id].iova = offset;
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		*iova = msm_obj->domain[id].iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!msm_obj->domain[id].iova);
	return msm_obj->domain[id].iova;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

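/* Dumb-buffer support for KMS: allocate a write-combined, scanout-capable
 * BO of the requested dimensions and return a userspace handle for it:
 */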
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

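/* Get a kernel virtual address for the BO, creating the vmap'ing on
 * first use and bumping vmap_count; caller holds struct_mutex:
 */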
void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL)
			return ERR_PTR(-ENOMEM);
	}
	msm_obj->vmap_count++;
	return msm_obj->vaddr;
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	mutex_lock(&obj->dev->struct_mutex);
	msm_gem_put_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	return (msm_obj->madv != __MSM_MADV_PURGED);
}

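/* Called from the shrinker to reclaim a purgeable BO: drop its iova
 * mappings, kernel vmap and pages, and truncate the shmem backing
 * file so the memory is returned to the system:
 */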
void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	put_iova(obj);

	msm_gem_vunmap(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i, ret;

	if (!exclusive) {
		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
		 * which makes this a slightly strange place to call it.  OTOH this
		 * is a convenient can-fail point to hook it in.  (And similar to
		 * how etnaviv and nouveau handle this.)
		 */
		ret = reservation_object_reserve_shared(msm_obj->resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
				reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

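/* Attach the submit fence to the BO's reservation object (exclusive
 * for writes, shared for reads) and move it to the GPU's active list:
 */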
void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

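/* Wait for any pending GPU access to the BO before CPU access;
 * MSM_PREP_NOSYNC turns this into a non-blocking check:
 */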
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
			true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct fence *fence, const char *type,
		struct seq_file *m)
{
	if (!fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

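/* Final teardown when the last reference to the BO is dropped:
 * unmap iovas, release the pages (or the imported dma-buf state),
 * and free the object.  Called with struct_mutex held.
 */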
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

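/* Common allocation path shared by msm_gem_new() and msm_gem_import():
 * validates the cache flags, decides between shmem and the VRAM
 * carveout, and sets up the msm_gem_object bookkeeping:
 */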
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	unsigned sz;
	bool use_vram = false;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return -EINVAL;

	sz = sizeof(*msm_obj);
	if (use_vram)
		sz += sizeof(struct drm_mm_node);

	msm_obj = kzalloc(sz, GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	if (use_vram)
		msm_obj->vram_node = (void *)&msm_obj[1];

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
	if (ret)
		goto fail;

	if (use_pages(obj)) {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	} else {
		drm_gem_private_object_init(dev, obj, size);
	}

	return obj;

fail:
	drm_gem_object_unreference(obj);
	return ERR_PTR(ret);
}

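/* Wrap an imported dma-buf in a GEM object.  The pages stay owned by
 * the exporter; we just build a page array from its sg_table for our
 * own mapping paths:
 */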
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}