/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

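/* For imported buffers, return the backing dma-buf; returns NULL for
 * natively allocated objects (or a NULL obj):
 */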
static void *get_dmabuf_ptr(struct drm_gem_object *obj)
{
	return (obj && obj->import_attach) ? obj->import_attach->dmabuf : NULL;
}

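/* Physical address of a VRAM-carveout backed object.  Only meaningful
 * when the object has a vram_node (i.e. no IOMMU):
 */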
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

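/* An object is shmem/page backed unless it was allocated from the
 * VRAM carveout:
 */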
static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = drm_malloc_ab(npages, sizeof(struct page *));
	if (!p)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
			npages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		drm_free_large(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, ensure the pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			drm_free_large(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

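/* Set up the vma's page protection (and backing file, for cached
 * buffers) according to the object's caching flags:
 */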
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

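/* Fault handler for userspace mmap: pin the backing pages under
 * struct_mutex and insert the faulting pfn into the vma:
 */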
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* This should only happen if userspace tries to pass a mmap'd
	 * but unfaulted gem bo vaddr into submit ioctl, triggering
	 * a page fault while struct_mutex is already held.  This is
	 * not a valid use-case so just bail.
	 */
	if (priv->struct_mutex_task == current)
		return VM_FAULT_SIGBUS;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/* get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

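/* Return the dma address of the first sg entry, or 0 if the object
 * does not have a valid scatter/gather table yet:
 */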
dma_addr_t msm_gem_get_dma_addr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;

	if (IS_ERR_OR_NULL(msm_obj->sgt)) {
		dev_err(dev->dev, "invalid scatter/gather table\n");
		return 0;
	}

	return sg_dma_address(msm_obj->sgt->sgl);
}

static void obj_remove_domain(struct msm_gem_vma *domain)
{
	if (domain) {
		list_del(&domain->list);
		kfree(domain);
	}
}

static void put_iova(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *domain, *tmp;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	list_for_each_entry_safe(domain, tmp, &msm_obj->domains, list) {
		if (iommu_present(&platform_bus_type)) {
			msm_gem_unmap_vma(domain->aspace, domain,
				msm_obj->sgt, get_dmabuf_ptr(obj));
		}

		/*
		 * put_iova removes the domain connected to the obj, which
		 * makes the aspace inaccessible.  Store the aspace, as it
		 * is used to update the active_list during gem_free_obj
		 * and gem_purge.
		 */
		msm_obj->aspace = domain->aspace;
		obj_remove_domain(domain);
	}
}

static struct msm_gem_vma *obj_add_domain(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *domain = kzalloc(sizeof(*domain), GFP_KERNEL);

	if (!domain)
		return ERR_PTR(-ENOMEM);

	domain->aspace = aspace;

	list_add_tail(&domain->list, &msm_obj->domains);

	return domain;
}

static struct msm_gem_vma *obj_get_domain(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *domain;

	list_for_each_entry(domain, &msm_obj->domains, list) {
		if (domain->aspace == aspace)
			return domain;
	}

	return NULL;
}

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	struct msm_gem_vma *domain;
	int ret = 0;

	if (!iommu_present(&platform_bus_type)) {
		pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		*iova = physaddr(obj);
		return 0;
	}

	domain = obj_get_domain(obj, aspace);

	if (!domain) {
		domain = obj_add_domain(obj, aspace);
		if (IS_ERR(domain))
			return PTR_ERR(domain);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			obj_remove_domain(domain);
			return PTR_ERR(pages);
		}

		ret = msm_gem_map_vma(aspace, domain, msm_obj->sgt,
			get_dmabuf_ptr(obj),
			msm_obj->flags);
	}

	if (!ret && domain) {
		*iova = domain->iova;
		if (aspace && aspace->domain_attached)
			msm_gem_add_obj_to_aspace_active_list(aspace, obj);
	} else {
		obj_remove_domain(domain);
	}

	return ret;
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint32_t *iova)
{
	struct msm_gem_vma *domain;
	int ret;

	domain = obj_get_domain(obj, aspace);
	if (domain) {
		*iova = domain->iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, aspace, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

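/* A minimal sketch of the expected calling pattern (illustrative
 * caller, not from this file):
 *
 *	uint32_t iova;
 *	int ret = msm_gem_get_iova(obj, aspace, &iova);
 *
 *	if (ret)
 *		return ret;
 *	... program iova into the hw ...
 *	msm_gem_put_iova(obj, aspace);
 *
 * Note that msm_gem_put_iova() is currently a no-op (see below), so
 * the "reference" is not actually dropped yet.
 */
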
/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint32_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *domain = obj_get_domain(obj, aspace);

	WARN_ON(!domain);

	return domain ? domain->iova : 0;
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

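/* Called when the aspace's backing domain is attached or detached:
 * notify registered clients, and unmap (detach) or remap (attach) the
 * buffers on the aspace's active list as needed:
 */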
void msm_gem_aspace_domain_attach_detach_update(
		struct msm_gem_address_space *aspace,
		bool is_detach)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	struct aspace_client *aclient;
	int ret;
	uint32_t iova;

	if (!aspace)
		return;

	mutex_lock(&aspace->dev->struct_mutex);
	if (is_detach) {
		/* Indicate to clients that the domain is getting detached */
		list_for_each_entry(aclient, &aspace->clients, list) {
			if (aclient->cb)
				aclient->cb(aclient->cb_data,
						is_detach);
		}

		/*
		 * Unmap active buffers.  Typically clients should do this
		 * when the callback is called, but it also needs to be done
		 * for the buffers which are not attached to any planes.
		 */
		list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
			obj = &msm_obj->base;
			if (obj->import_attach)
				put_iova(obj);
		}
	} else {
		/* map active buffers */
		list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
			obj = &msm_obj->base;
			ret = msm_gem_get_iova_locked(obj, aspace, &iova);
			if (ret) {
				mutex_unlock(&aspace->dev->struct_mutex);
				return;
			}
		}

		/* Indicate to clients that the domain is attached */
		list_for_each_entry(aclient, &aspace->clients, list) {
			if (aclient->cb)
				aclient->cb(aclient->cb_data,
						is_detach);
		}
	}
	mutex_unlock(&aspace->dev->struct_mutex);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

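/* Get a kernel virtual mapping of the buffer, creating the
 * writecombine vmap on first use.  Balanced by msm_gem_put_vaddr():
 */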
void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL)
			return ERR_PTR(-ENOMEM);
	}
	msm_obj->vmap_count++;
	return msm_obj->vaddr;
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	mutex_lock(&obj->dev->struct_mutex);
	msm_gem_put_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
}

/* Update madvise status; returns true if the object's backing pages
 * have not been purged, else false.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	return (msm_obj->madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	put_iova(obj);
	msm_gem_remove_obj_from_aspace_active_list(msm_obj->aspace, obj);

	msm_gem_vunmap(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i, ret;

	if (!exclusive) {
		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
		 * which makes this a slightly strange place to call it.  OTOH this
		 * is a convenient can-fail point to hook it in.  (And similar to
		 * how etnaviv and nouveau handle this.)
		 */
		ret = reservation_object_reserve_shared(msm_obj->resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

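/* Wait (up to the given timeout) for pending work tracked in the
 * object's reservation before CPU access; with MSM_PREP_NOSYNC the
 * wait degenerates to a poll:
 */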
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
						  true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct fence *fence, const char *type,
		struct seq_file *m)
{
	if (!fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *domain;
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	/* FIXME: we need to print the address space here too */
	list_for_each_entry(domain, &msm_obj->domains, list)
		seq_printf(m, " %08llx", domain->iova);

	seq_puts(m, "\n");
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

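/* Called with struct_mutex held; tear down iova mappings, the vmap,
 * and the backing pages (or the imported dma-buf state), then free
 * the object:
 */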
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	put_iova(obj);
	msm_gem_remove_obj_from_aspace_active_list(msm_obj->aspace, obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf,
				msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

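/* Common setup shared by msm_gem_new() and msm_gem_import().  Callers
 * must hold struct_mutex:
 */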
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	bool use_vram = false;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return -EINVAL;

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	if (use_vram) {
		struct msm_gem_vma *domain = obj_add_domain(&msm_obj->base,
			NULL);

		if (!IS_ERR(domain))
			msm_obj->vram_node = &domain->node;
	}

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->domains);
	INIT_LIST_HEAD(&msm_obj->iova_list);
	msm_obj->aspace = NULL;

	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
	if (ret)
		goto fail;

	if (use_pages(obj)) {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	} else {
		drm_gem_private_object_init(dev, obj, size);
	}

	return obj;

fail:
	drm_gem_object_unreference(obj);
	return ERR_PTR(ret);
}

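/* Import a dma-buf's sg_table as a (WC) GEM object.  The pages array
 * is only a CPU-side convenience; the backing pages belong to the
 * exporter:
 */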
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ERR_PTR(ret);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
	mutex_unlock(&dev->struct_mutex);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}