/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);


static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

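/* Lazily attach backing pages (shmem or VRAM carveout), build the
 * scatter/gather table, and clean the pages for non-coherent devices
 * when the buffer is uncached or write-combined:
 */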
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

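/* Set up a userspace mapping: pick the page protection from the object's
 * cache flags, and route cached objects through the shmem file so that
 * unmap_mapping_range() works on their own address_space:
 */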
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

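/* Page fault handler: pins the backing pages on first access and inserts
 * the faulting page's pfn into the userspace mapping:
 */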
int msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	ret = mutex_lock_interruptible(&msm_obj->lock);
	if (ret)
		goto out;

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}

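/* A msm_gem_vma tracks the object's mapping in one address space; add_vma()
 * and lookup_vma() expect msm_obj->lock to be held by the caller:
 */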
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
		del_vma(vma);
	}
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return -EBUSY;
	}

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		struct page **pages;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto unlock;
		}

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
				obj->size >> PAGE_SHIFT);
		if (ret)
			goto fail;
	}

	*iova = vma->iova;

	mutex_unlock(&msm_obj->lock);
	return 0;

fail:
	del_vma(vma);
unlock:
	mutex_unlock(&msm_obj->lock);
	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

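/* Create a dumb (scanout-capable, write-combined) buffer for KMS: */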
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

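/* Map the object into the kernel's address space (vmap), allocating backing
 * pages if needed; the vmap_count reference is dropped with
 * msm_gem_put_vaddr():
 */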
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}

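/* Release the backing pages, iova mappings and kernel vmap of a purgeable
 * object so the memory goes back to the system under memory pressure:
 */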
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

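/* Move the object to the GPU's active list and attach the retire fence to
 * its reservation object:
 */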
void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

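/* Wait for any pending GPU access (fences) before CPU access; MSM_PREP_NOSYNC
 * turns this into a non-blocking check:
 */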
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
						  true,  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	/* FIXME: we need to print the address space here too */
	list_for_each_entry(vma, &msm_obj->vmas, list)
		seq_printf(m, " %08llx", vma->iova);

	seq_printf(m, " %zu%s\n", obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

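/* Common allocation path: validate cache flags, set up the reservation
 * object and lists, and put the new object on the inactive list (taking
 * struct_mutex unless the caller already holds it):
 */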
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

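/* Allocate a GEM object, backed either by shmem pages or by the VRAM
 * carveout when no IOMMU is present (or MSM_BO_STOLEN is requested):
 */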
static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	}

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

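/* Import a dma-buf: wrap the caller-provided sg_table in a GEM object
 * without allocating new backing pages:
 */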
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}

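/* Allocate a kernel-use buffer: create the object, optionally map it into
 * the given address space, and return the kernel vaddr:
 */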
static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_iova(obj, aspace, iova);
		if (ret) {
			drm_gem_object_unreference(obj);
			return ERR_PTR(ret);
		}
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_put_iova(obj, aspace);
		drm_gem_object_unreference(obj);
		return ERR_CAST(vaddr);
	}

	if (bo)
		*bo = obj;

	return vaddr;
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}