/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

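/*
 * For an imported buffer, return the dma_buf backing the import
 * attachment; returns NULL for a natively allocated object:
 */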
static void *get_dmabuf_ptr(struct drm_gem_object *obj)
{
	return (obj && obj->import_attach) ? obj->import_attach->dmabuf : NULL;
}

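/* physical address of a VRAM-carveout backed object: */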
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

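/* shmem-backed objects use pages; VRAM-carveout objects do not: */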
static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = drm_malloc_ab(npages, sizeof(struct page *));
	if (!p)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
			npages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		drm_free_large(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/*
		 * Make sure to flush the CPU cache for newly allocated memory
		 * so we don't get ourselves into trouble with a dirty cache
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

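/* free the sgt and drop the backing pages; caller holds struct_mutex: */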
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			drm_free_large(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* This should only happen if userspace tries to pass a mmap'd
	 * but unfaulted gem bo vaddr into submit ioctl, triggering
	 * a page fault while struct_mutex is already held.  This is
	 * not a valid use-case so just bail.
	 */
	if (priv->struct_mutex_task == current)
		return VM_FAULT_SIGBUS;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

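/*
 * Return the dma address of the first sgl entry; only meaningful once
 * the object's sg_table has been created and mapped:
 */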
dma_addr_t msm_gem_get_dma_addr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;

	if (IS_ERR_OR_NULL(msm_obj->sgt)) {
		dev_err(dev->dev, "invalid scatter/gather table\n");
		return 0;
	}

	return sg_dma_address(msm_obj->sgt->sgl);
}

static void obj_remove_domain(struct msm_gem_vma *domain)
{
	if (domain) {
		list_del(&domain->list);
		kfree(domain);
	}
}

static void put_iova(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *domain, *tmp;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	list_for_each_entry_safe(domain, tmp, &msm_obj->domains, list) {
		if (iommu_present(&platform_bus_type)) {
			msm_gem_unmap_vma(domain->aspace, domain,
				msm_obj->sgt, get_dmabuf_ptr(obj));
		}

		/*
		 * put_iova removes the domain connected to the obj, which
		 * makes the aspace inaccessible. Store the aspace, as it is
		 * used to update the active_list during gem_free_obj and
		 * gem_purge.
		 */
		msm_obj->aspace = domain->aspace;
		obj_remove_domain(domain);
	}
}

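/* allocate a new vma (domain) for the given address space and track it: */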
static struct msm_gem_vma *obj_add_domain(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *domain = kzalloc(sizeof(*domain), GFP_KERNEL);

	if (!domain)
		return ERR_PTR(-ENOMEM);

	domain->aspace = aspace;

	list_add_tail(&domain->list, &msm_obj->domains);

	return domain;
}

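/* find the vma (domain) already associated with this address space, if any: */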
static struct msm_gem_vma *obj_get_domain(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *domain;

	list_for_each_entry(domain, &msm_obj->domains, list) {
		if (domain->aspace == aspace)
			return domain;
	}

	return NULL;
}

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	struct msm_gem_vma *domain;
	int ret = 0;

	if (!iommu_present(&platform_bus_type)) {
		pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		*iova = physaddr(obj);
		return 0;
	}

	domain = obj_get_domain(obj, aspace);

	if (!domain) {
		domain = obj_add_domain(obj, aspace);
		if (IS_ERR(domain))
			return PTR_ERR(domain);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			obj_remove_domain(domain);
			return PTR_ERR(pages);
		}

		ret = msm_gem_map_vma(aspace, domain, msm_obj->sgt,
			get_dmabuf_ptr(obj),
			msm_obj->flags);
	}

	if (!ret && domain) {
		*iova = domain->iova;
		if (aspace && aspace->domain_attached)
			msm_gem_add_obj_to_aspace_active_list(aspace, obj);
	} else {
		obj_remove_domain(domain);
	}

	return ret;
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint32_t *iova)
{
	struct msm_gem_vma *domain;
	int ret;

	domain = obj_get_domain(obj, aspace);
	if (domain) {
		*iova = domain->iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, aspace, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

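/* A minimal usage sketch for the iova helpers (error handling elided;
 * 'aspace' is assumed to be a valid msm_gem_address_space supplied by
 * the caller):
 *
 *	uint32_t iova;
 *
 *	ret = msm_gem_get_iova(obj, aspace, &iova);
 *	if (!ret)
 *		... program iova into the hw ...
 *	msm_gem_put_iova(obj, aspace);
 */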
/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint32_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *domain = obj_get_domain(obj, aspace);

	WARN_ON(!domain);

	return domain ? domain->iova : 0;
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

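/*
 * Notify aspace clients that the backing domain is being attached or
 * detached, and map/unmap the buffers on the aspace's active list
 * accordingly:
 */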
void msm_gem_aspace_domain_attach_detach_update(
		struct msm_gem_address_space *aspace,
		bool is_detach)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	struct aspace_client *aclient;
	int ret;
	uint32_t iova;

	if (!aspace)
		return;

	mutex_lock(&aspace->dev->struct_mutex);
	if (is_detach) {
		/* Indicate to clients domain is getting detached */
		list_for_each_entry(aclient, &aspace->clients, list) {
			if (aclient->cb)
				aclient->cb(aclient->cb_data,
						is_detach);
		}

		/*
		 * Unmap active buffers. Typically clients should do this
		 * when the callback is called, but it still needs to happen
		 * here for buffers that are not attached to any plane.
		 */
		list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
			obj = &msm_obj->base;
			if (obj->import_attach)
				put_iova(obj);
		}
	} else {
		/* map active buffers */
		list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
			obj = &msm_obj->base;
			ret = msm_gem_get_iova_locked(obj, aspace, &iova);
			if (ret) {
				mutex_unlock(&aspace->dev->struct_mutex);
				return;
			}
		}

		/* Indicate to clients domain is attached */
		list_for_each_entry(aclient, &aspace->clients, list) {
			if (aclient->cb)
				aclient->cb(aclient->cb_data,
						is_detach);
		}
	}
	mutex_unlock(&aspace->dev->struct_mutex);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL)
			return ERR_PTR(-ENOMEM);
	}
	msm_obj->vmap_count++;
	return msm_obj->vaddr;
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

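/* A minimal usage sketch for CPU access via the vmap helpers above
 * (assumes 'data' and 'len' are provided by the caller, with len no
 * larger than obj->size):
 *
 *	void *vaddr = msm_gem_get_vaddr(obj);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, len);
 *	msm_gem_put_vaddr(obj);
 */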
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	mutex_lock(&obj->dev->struct_mutex);
	msm_gem_put_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	return (msm_obj->madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	put_iova(obj);
	msm_gem_remove_obj_from_aspace_active_list(msm_obj->aspace, obj);

	msm_gem_vunmap(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i, ret;

	if (!exclusive) {
		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
		 * which makes this a slightly strange place to call it.  OTOH this
		 * is a convenient can-fail point to hook it in.  (And similar to
		 * how etnaviv and nouveau handle this.)
		 */
		ret = reservation_object_reserve_shared(msm_obj->resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
			true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct fence *fence, const char *type,
		struct seq_file *m)
{
	if (!fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *domain;
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	/* FIXME: we need to print the address space here too */
	list_for_each_entry(domain, &msm_obj->domains, list)
		seq_printf(m, " %08llx", domain->iova);

	seq_puts(m, "\n");
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	put_iova(obj);
	msm_gem_remove_obj_from_aspace_active_list(msm_obj->aspace, obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf,
				msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

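/*
 * Common setup shared by native allocation and dma-buf import: allocates
 * the msm_gem_object, validates caching flags, and sets up the reservation
 * object and bookkeeping lists:
 */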
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	bool use_vram = false;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return -EINVAL;

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	if (use_vram) {
		struct msm_gem_vma *domain = obj_add_domain(&msm_obj->base,
				NULL);

		if (!IS_ERR(domain))
			msm_obj->vram_node = &domain->node;
	}

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->domains);
	INIT_LIST_HEAD(&msm_obj->iova_list);
	msm_obj->aspace = NULL;

	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
	if (ret)
		goto fail;

	if (use_pages(obj)) {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	} else {
		drm_gem_private_object_init(dev, obj, size);
	}

	return obj;

fail:
	drm_gem_object_unreference(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ERR_PTR(ret);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
	mutex_unlock(&dev->struct_mutex);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}