/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);


static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					     msm_obj->sgt->nents,
					     DMA_BIDIRECTIONAL);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}
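
/*
 * Usage sketch (illustrative, not taken from this driver): callers outside
 * this file that need the backing pages are expected to balance the two
 * helpers above, even though msm_gem_put_pages() is currently a no-op:
 *
 *	struct page **pages = msm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... use pages ...
 *	msm_gem_put_pages(obj);
 */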

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
		del_vma(vma);
	}
}

/* get iova, taking a reference. Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return -EBUSY;
	}

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		struct page **pages;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto unlock;
		}

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
				obj->size >> PAGE_SHIFT);
		if (ret)
			goto fail;
	}

	*iova = vma->iova;

	mutex_unlock(&msm_obj->lock);
	return 0;

fail:
	del_vma(vma);
unlock:
	mutex_unlock(&msm_obj->lock);
	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}
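
/*
 * Usage sketch (illustrative, not part of the driver): a caller that needs a
 * GPU/display address for a buffer pairs the helpers above, e.g.:
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	... program the hardware with 'iova' ...
 *	msm_gem_put_iova(obj, aspace);
 *
 * msm_gem_iova() may then be used on hot paths that know a mapping already
 * exists, since it only looks up the existing vma and never creates one.
 */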

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this! It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}
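
/*
 * Usage sketch (illustrative): kernel-side CPU access to a buffer's contents
 * brackets the mapping helpers above:
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, len);
 *	msm_gem_put_vaddr(obj);
 *
 * put_vaddr() only drops vmap_count; the actual vunmap() happens later via
 * msm_gem_vunmap()/msm_gem_vunmap_locked() once is_vunmapable() holds.
 */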

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}
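
/*
 * Illustrative only: per the comment on msm_gem_madvise() above, a caller
 * treats the return value as "backing pages still retained":
 *
 *	ret = msm_gem_madvise(obj, MSM_MADV_DONTNEED);
 *	if (ret < 0)
 *		... error ...
 *	else if (!ret)
 *		... object was already purged, contents are gone ...
 */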

void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
			true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
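
/*
 * Usage sketch (illustrative): CPU access on behalf of userspace is expected
 * to be bracketed by the prep/fini pair above, with 'op' built from
 * MSM_PREP_READ/MSM_PREP_WRITE and optionally MSM_PREP_NOSYNC to poll
 * rather than wait:
 *
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_WRITE, &timeout);
 *	if (ret)
 *		return ret;	/* -EBUSY / -ETIMEDOUT if fences did not signal */
 *	... CPU writes to the buffer ...
 *	msm_gem_cpu_fini(obj);
 */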

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	/* FIXME: we need to print the address space here too */
	list_for_each_entry(vma, &msm_obj->vmas, list)
		seq_printf(m, " %08llx", vma->iova);

	seq_printf(m, " %zu%s\n", obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly! Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	}

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_iova(obj, aspace, iova);
		if (ret) {
			drm_gem_object_put(obj);
			return ERR_PTR(ret);
		}
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_put_iova(obj, aspace);
		drm_gem_object_put(obj);
		return ERR_CAST(vaddr);
	}

	if (bo)
		*bo = obj;

	return vaddr;
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}
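
/*
 * Usage sketch (illustrative): a GPU sub-module that needs a kernel-mapped
 * buffer with a GPU address (e.g. a ringbuffer) would typically allocate it
 * in one shot with the helper above:
 *
 *	uint64_t iova;
 *	struct drm_gem_object *bo;
 *	void *ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	... use 'ptr' for CPU access and 'iova' for the GPU ...
 */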