/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);


static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			dev_err(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers the pages were mapped for device
		 * access in get_pages(); unmap them before they are freed:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);

		if (msm_obj->sgt)
			sg_free_table(msm_obj->sgt);

		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}
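
/*
 * Illustrative pairing (a sketch, not code from this driver): a caller
 * that needs the backing pages for a short CPU-side walk would bracket
 * the access like so:
 *
 *	struct page **pages = msm_gem_get_pages(obj);
 *
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	// ... use pages[0] .. pages[(obj->size >> PAGE_SHIFT) - 1] ...
 *	msm_gem_put_pages(obj);
 *
 * msm_gem_put_pages() is currently a no-op (no pin count is tracked
 * yet), but callers should still pair the calls so they keep working
 * once it isn't.
 */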

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
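
/*
 * For reference, the userspace side of this path looks roughly like the
 * sketch below ('fd' and 'handle' are hypothetical; the offset is the
 * fake offset returned by DRM_IOCTL_MODE_MAP_DUMB):
 *
 *	struct drm_mode_map_dumb req = { .handle = handle };
 *
 *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &req);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, req.offset);
 *
 * drm_gem_mmap() resolves the fake offset back to the object,
 * msm_gem_mmap_obj() fixes up the caching attributes, and the actual
 * pages are inserted on demand by msm_gem_fault().
 */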

int msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	ret = mutex_lock_interruptible(&msm_obj->lock);
	if (ret)
		goto out;

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock held */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
		del_vma(vma);
	}
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return -EBUSY;
	}

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		struct page **pages;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto unlock;
		}

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
				obj->size >> PAGE_SHIFT);
		if (ret)
			goto fail;
	}

	*iova = vma->iova;

	mutex_unlock(&msm_obj->lock);
	return 0;

fail:
	del_vma(vma);
unlock:
	mutex_unlock(&msm_obj->lock);
	return ret;
}
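
/*
 * Typical caller pattern (a sketch; 'gpu->aspace' is one plausible
 * address space to resolve against):
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_iova(obj, gpu->aspace, &iova);
 *
 *	if (ret)
 *		return ret;
 *	// ... emit 'iova' into the command stream ...
 *	msm_gem_put_iova(obj, gpu->aspace);
 *
 * The vma (and thus the iova) stays around until the object is purged
 * or freed, so the value remains valid even across the put.
 */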

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
				msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}
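
/*
 * Sketch of the vmap contract: get_vaddr() pins the pages and bumps
 * vmap_count; put_vaddr() only drops the count, and the mapping itself
 * is torn down later by msm_gem_vunmap() (e.g. from the shrinker):
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, len);
 *	msm_gem_put_vaddr(obj);
 */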

/* Update madvise status: returns true if the object has not been
 * purged, false if it has, or -errno on error.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}
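
/*
 * The MSM_GEM_MADVISE ioctl path boils down to this (simplified sketch
 * of the caller in msm_drv.c): the return value tells userspace whether
 * a buffer marked DONTNEED was still retained when asked for it back:
 *
 *	ret = msm_gem_madvise(obj, args->madv);
 *	if (ret >= 0) {
 *		args->retained = ret;
 *		ret = 0;
 *	}
 */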

void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}
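
/*
 * For context, the shrinker's scan path (simplified sketch of
 * msm_gem_shrinker.c) is the main caller of msm_gem_purge(), using the
 * OBJ_LOCK_SHRINKER subclass to avoid lockdep false positives:
 *
 *	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
 *		if (freed >= sc->nr_to_scan)
 *			break;
 *		if (is_purgeable(msm_obj)) {
 *			msm_gem_purge(&msm_obj->base, OBJ_LOCK_SHRINKER);
 *			freed += msm_obj->base.size >> PAGE_SHIFT;
 *		}
 *	}
 */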

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
			true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	/* FIXME: we need to print the address space here too */
	list_for_each_entry(vma, &msm_obj->vmas, list)
		seq_printf(m, " %08llx", vma->iova);

	seq_printf(m, " %zu%s\n", obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly!  Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	}

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_iova(obj, aspace, iova);
		if (ret) {
			drm_gem_object_put(obj);
			return ERR_PTR(ret);
		}
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_put_iova(obj, aspace);
		drm_gem_object_put(obj);
		return ERR_CAST(vaddr);
	}

	if (bo)
		*bo = obj;

	return vaddr;
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}
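
/*
 * Illustrative use of the helpers above (a sketch along the lines of
 * the ringbuffer and memptrs allocations elsewhere in msm):
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *vaddr = msm_gem_kernel_new(dev, size, MSM_BO_WC,
 *			gpu->aspace, &bo, &iova);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 * On success the caller gets, in one call, a kernel mapping, a pinned
 * iova in the given address space, and a reference on the new object.
 */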