/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"


/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p = drm_gem_get_pages(obj, 0);
		int npages = obj->size >> PAGE_SHIFT;

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

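/* Release the object's backing pages: undo the dma mapping done for
 * non-cached buffers, free the sg table, and hand the pages back to shmem.
 */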
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		drm_gem_put_pages(obj, msm_obj->pages, true, false);
		msm_obj->pages = NULL;
	}
}

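/* Set up a userspace mapping of the object: pick the page protection from
 * the object's caching flags, and for cached objects redirect the vma to
 * the backing shmem file so it gets its own address_space.
 */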
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

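/* Fault handler for userspace mappings: pin the backing pages under
 * struct_mutex and insert the pfn of the faulting page into the vma.
 */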
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(msm_obj->pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
		set_need_resched();
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/* helpers for dealing w/ iommu: */
static int map_range(struct iommu_domain *domain, unsigned int iova,
		struct sg_table *sgt, unsigned int len, int prot)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa = sg_phys(sg) - sg->offset;
		size_t bytes = sg->length + sg->offset;

		VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);

		ret = iommu_map(domain, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg->length + sg->offset;
		iommu_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}

static void unmap_range(struct iommu_domain *domain, unsigned int iova,
		struct sg_table *sgt, unsigned int len)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg->length + sg->offset;
		size_t unmapped;

		unmapped = iommu_unmap(domain, da, bytes);
		if (unmapped < bytes)
			break;

		VERB("unmap[%d]: %08x(%x)", i, iova, bytes);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		da += bytes;
	}
}

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		uint32_t offset = (uint32_t)mmap_offset(obj);
		struct page **pages;
		pages = get_pages(obj);
		if (IS_ERR(pages))
			return PTR_ERR(pages);
		// XXX ideally we would not map buffers writable when not needed...
		ret = map_range(priv->iommus[id], offset, msm_obj->sgt,
				obj->size, IOMMU_READ | IOMMU_WRITE);
		msm_obj->domain[id].iova = offset;
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
	int ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

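/* Dumb-buffer interface: allocate a scanout-capable, write-combined buffer
 * sized for the requested width/height/bpp and return a handle for it.
 */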
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
		uint32_t handle)
{
	/* No special work needed, drop the reference and see what falls out */
	return drm_gem_handle_delete(file, handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

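/* Return a kernel virtual address for the whole object, creating a
 * write-combined vmap on first use.  Caller must hold struct_mutex.
 */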
void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return msm_obj->vaddr;
}

void *msm_gem_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

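/* Queue work to run once the object becomes inactive; if the object is
 * already inactive the work is queued immediately.  Returns -EINVAL if
 * the work is already pending.
 */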
int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
		struct work_struct *work)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	if (!list_empty(&work->entry)) {
		ret = -EINVAL;
	} else if (is_active(msm_obj)) {
		list_add_tail(&work->entry, &msm_obj->inactive_work);
	} else {
		queue_work(priv->wq, work);
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

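/* Mark the object busy on the given GPU, record the fence it is busy
 * until (write_fence for writers, read_fence for readers), and move it
 * onto the GPU's active list.
 */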
void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool write, uint32_t fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	msm_obj->gpu = gpu;
	if (write)
		msm_obj->write_fence = fence;
	else
		msm_obj->read_fence = fence;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	msm_obj->read_fence = 0;
	msm_obj->write_fence = 0;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	while (!list_empty(&msm_obj->inactive_work)) {
		struct work_struct *work;

		work = list_first_entry(&msm_obj->inactive_work,
				struct work_struct, entry);

		list_del_init(&work->entry);
		queue_work(priv->wq, work);
	}
}

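/* Prepare the object for CPU access: if it is still active on the GPU,
 * wait on the write fence (for CPU reads) and additionally the read
 * fence (for CPU writes), unless MSM_PREP_NOSYNC is requested.
 */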
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
		struct timespec *timeout)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (is_active(msm_obj)) {
		uint32_t fence = 0;

		if (op & MSM_PREP_READ)
			fence = msm_obj->write_fence;
		if (op & MSM_PREP_WRITE)
			fence = max(fence, msm_obj->read_fence);
		if (op & MSM_PREP_NOSYNC)
			timeout = NULL;

		ret = msm_wait_fence_interruptable(dev, fence, timeout);
	}

	/* TODO cache maintenance */

	return ret;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

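/* debugfs helpers: describe a single object, or every object on a list */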
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	uint64_t off = drm_vma_node_start(&obj->vma_node);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			msm_obj->read_fence, msm_obj->write_fence,
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, " ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

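/* Tear the object down: unmap it from every iommu domain it was mapped
 * into, release the mmap offset, kernel vmap and backing pages, and
 * finally free the GEM object.  Called with struct_mutex held.
 */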
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		if (msm_obj->domain[id].iova) {
			struct msm_drm_private *priv = obj->dev->dev_private;
			uint32_t offset = (uint32_t)mmap_offset(obj);
			unmap_range(priv->iommus[id], offset,
					msm_obj->sgt, obj->size);
		}
	}

	drm_gem_free_mmap_offset(obj);

	if (msm_obj->vaddr)
		vunmap(msm_obj->vaddr);

	put_pages(obj);

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

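/* Allocate a new GEM buffer object (no userspace handle): validate the
 * cache flags, init the reservation object, and put the object on the
 * inactive list.  Called with struct_mutex held.
 */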
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		ret = -EINVAL;
		goto fail;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj) {
		ret = -ENOMEM;
		goto fail;
	}

	obj = &msm_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;

	msm_obj->flags = flags;

	msm_obj->resv = &msm_obj->_resv;
	reservation_object_init(msm_obj->resv);

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->inactive_work);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}