/*
 * Copyright © 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 GEM BO management support
 *
 * The VC4 GPU architecture (both scanout and rendering) has direct
 * access to system memory with no MMU in between.  To support it, we
 * use the GEM CMA helper functions to allocate contiguous ranges of
 * physical memory for our BOs.
 *
 * Since the CMA allocator is very slow, we keep a cache of recently
 * freed BOs around so that the kernel's allocation of objects for 3D
 * rendering can return quickly.
 */

#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"

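/* Dumps the BO allocation/cache counters to the kernel log; used when
 * a CMA allocation fails and when the cache is torn down with BOs
 * still outstanding.
 */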
static void vc4_bo_stats_dump(struct vc4_dev *vc4)
{
        DRM_INFO("num bos allocated: %d\n",
                 vc4->bo_stats.num_allocated);
        DRM_INFO("size bos allocated: %dkb\n",
                 vc4->bo_stats.size_allocated / 1024);
        DRM_INFO("num bos used: %d\n",
                 vc4->bo_stats.num_allocated - vc4->bo_stats.num_cached);
        DRM_INFO("size bos used: %dkb\n",
                 (vc4->bo_stats.size_allocated -
                  vc4->bo_stats.size_cached) / 1024);
        DRM_INFO("num bos cached: %d\n",
                 vc4->bo_stats.num_cached);
        DRM_INFO("size bos cached: %dkb\n",
                 vc4->bo_stats.size_cached / 1024);
}

#ifdef CONFIG_DEBUG_FS
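/* Reports the same statistics through debugfs, working from a snapshot
 * of the counters so bo_lock is held only briefly.
 */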
int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_bo_stats stats;

        /* Take a snapshot of the current stats with the lock held. */
        mutex_lock(&vc4->bo_lock);
        stats = vc4->bo_stats;
        mutex_unlock(&vc4->bo_lock);

        seq_printf(m, "num bos allocated: %d\n",
                   stats.num_allocated);
        seq_printf(m, "size bos allocated: %dkb\n",
                   stats.size_allocated / 1024);
        seq_printf(m, "num bos used: %d\n",
                   stats.num_allocated - stats.num_cached);
        seq_printf(m, "size bos used: %dkb\n",
                   (stats.size_allocated - stats.size_cached) / 1024);
        seq_printf(m, "num bos cached: %d\n",
                   stats.num_cached);
        seq_printf(m, "size bos cached: %dkb\n",
                   stats.size_cached / 1024);

        return 0;
}
#endif

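/* The BO cache buckets by size: a BO of N pages lives on
 * size_list[N - 1].
 */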
static uint32_t bo_page_index(size_t size)
{
        return (size / PAGE_SIZE) - 1;
}

/* Must be called with bo_lock held. */
static void vc4_bo_destroy(struct vc4_bo *bo)
{
        struct drm_gem_object *obj = &bo->base.base;
        struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

        if (bo->validated_shader) {
                kfree(bo->validated_shader->texture_samples);
                kfree(bo->validated_shader);
                bo->validated_shader = NULL;
        }

        vc4->bo_stats.num_allocated--;
        vc4->bo_stats.size_allocated -= obj->size;
        drm_gem_cma_free_object(obj);
}

/* Must be called with bo_lock held. */
static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
{
        struct drm_gem_object *obj = &bo->base.base;
        struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

        vc4->bo_stats.num_cached--;
        vc4->bo_stats.size_cached -= obj->size;

        list_del(&bo->unref_head);
        list_del(&bo->size_head);
}

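/* Returns the cache list head for BOs of the given size, growing the
 * size_list array on demand.  Returns NULL on allocation failure.
 */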
static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
                                                     size_t size)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        uint32_t page_index = bo_page_index(size);

        if (vc4->bo_cache.size_list_size <= page_index) {
                uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
                                        page_index + 1);
                struct list_head *new_list;
                uint32_t i;

                new_list = kmalloc_array(new_size, sizeof(struct list_head),
                                         GFP_KERNEL);
                if (!new_list)
                        return NULL;

                /* Rebase the old cached BO lists to their new list
                 * head locations.
                 */
                for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
                        struct list_head *old_list =
                                &vc4->bo_cache.size_list[i];

                        if (list_empty(old_list))
                                INIT_LIST_HEAD(&new_list[i]);
                        else
                                list_replace(old_list, &new_list[i]);
                }
                /* And initialize the brand new BO list heads. */
                for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
                        INIT_LIST_HEAD(&new_list[i]);

                kfree(vc4->bo_cache.size_list);
                vc4->bo_cache.size_list = new_list;
                vc4->bo_cache.size_list_size = new_size;
        }

        return &vc4->bo_cache.size_list[page_index];
}

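/* Frees every BO currently sitting in the cache.  Used when CMA runs
 * out of space and at driver teardown.
 */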
static void vc4_bo_cache_purge(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        mutex_lock(&vc4->bo_lock);
        while (!list_empty(&vc4->bo_cache.time_list)) {
                struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
                                                    struct vc4_bo, unref_head);
                vc4_bo_remove_from_cache(bo);
                vc4_bo_destroy(bo);
        }
        mutex_unlock(&vc4->bo_lock);
}

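/* Tries to satisfy an allocation from the size bucket matching @size.
 * On a hit, the BO's refcount is reinitialized so it behaves like a
 * fresh allocation.  Returns NULL on a cache miss.
 */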
static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
                                            uint32_t size)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        uint32_t page_index = bo_page_index(size);
        struct vc4_bo *bo = NULL;

        size = roundup(size, PAGE_SIZE);

        mutex_lock(&vc4->bo_lock);
        if (page_index >= vc4->bo_cache.size_list_size)
                goto out;

        if (list_empty(&vc4->bo_cache.size_list[page_index]))
                goto out;

        bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
                              struct vc4_bo, size_head);
        vc4_bo_remove_from_cache(bo);
        kref_init(&bo->base.base.refcount);

out:
        mutex_unlock(&vc4->bo_lock);
        return bo;
}

/**
 * vc4_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the CMA helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_bo *bo;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return ERR_PTR(-ENOMEM);

        mutex_lock(&vc4->bo_lock);
        vc4->bo_stats.num_allocated++;
        vc4->bo_stats.size_allocated += size;
        mutex_unlock(&vc4->bo_lock);

        return &bo->base.base;
}

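/* Allocates a BO, preferring a cache hit over a fresh (and slow) CMA
 * allocation.  Cached BOs are zeroed on reuse unless @allow_unzeroed
 * says the caller will overwrite the contents anyway.
 */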
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
                             bool allow_unzeroed)
{
        size_t size = roundup(unaligned_size, PAGE_SIZE);
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct drm_gem_cma_object *cma_obj;
        struct vc4_bo *bo;

        if (size == 0)
                return ERR_PTR(-EINVAL);

        /* First, try to get a vc4_bo from the kernel BO cache. */
        bo = vc4_bo_get_from_cache(dev, size);
        if (bo) {
                if (!allow_unzeroed)
                        memset(bo->base.vaddr, 0, bo->base.base.size);
                return bo;
        }

        cma_obj = drm_gem_cma_create(dev, size);
        if (IS_ERR(cma_obj)) {
                /*
                 * If we've run out of CMA memory, kill the cache of
                 * CMA allocations we've got lying around and try again.
                 */
                vc4_bo_cache_purge(dev);

                cma_obj = drm_gem_cma_create(dev, size);
                if (IS_ERR(cma_obj)) {
                        DRM_ERROR("Failed to allocate from CMA:\n");
                        vc4_bo_stats_dump(vc4);
                        return ERR_PTR(-ENOMEM);
                }
        }

        return to_vc4_bo(&cma_obj->base);
}

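/* Implements DRM_IOCTL_MODE_CREATE_DUMB for scanout buffers, rounding
 * the requested pitch and size up to the driver's minimums.
 */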
int vc4_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
{
        int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        struct vc4_bo *bo = NULL;
        int ret;

        if (args->pitch < min_pitch)
                args->pitch = min_pitch;

        if (args->size < args->pitch * args->height)
                args->size = args->pitch * args->height;

        bo = vc4_bo_create(dev, args->size, false);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
        drm_gem_object_unreference_unlocked(&bo->base.base);

        return ret;
}

/* Must be called with bo_lock held. */
static void vc4_bo_cache_free_old(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        unsigned long expire_time = jiffies - msecs_to_jiffies(1000);

        while (!list_empty(&vc4->bo_cache.time_list)) {
                struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
                                                    struct vc4_bo, unref_head);
                if (time_before(expire_time, bo->free_time)) {
                        mod_timer(&vc4->bo_cache.time_timer,
                                  round_jiffies_up(jiffies +
                                                   msecs_to_jiffies(1000)));
                        return;
                }

                vc4_bo_remove_from_cache(bo);
                vc4_bo_destroy(bo);
        }
}

/* Called on the last userspace/kernel unreference of the BO.  Returns
 * it to the BO cache if possible, otherwise frees it.
 */
void vc4_free_object(struct drm_gem_object *gem_bo)
{
        struct drm_device *dev = gem_bo->dev;
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_bo *bo = to_vc4_bo(gem_bo);
        struct list_head *cache_list;

        mutex_lock(&vc4->bo_lock);
        /* If the object references someone else's memory, we can't cache it.
         */
        if (gem_bo->import_attach) {
                vc4_bo_destroy(bo);
                goto out;
        }

        /* Don't cache if it was publicly named. */
        if (gem_bo->name) {
                vc4_bo_destroy(bo);
                goto out;
        }

        /* If this object was partially constructed but CMA allocation
         * failed, just free it.
         */
        if (!bo->base.vaddr) {
                vc4_bo_destroy(bo);
                goto out;
        }

        cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
        if (!cache_list) {
                vc4_bo_destroy(bo);
                goto out;
        }

        if (bo->validated_shader) {
                kfree(bo->validated_shader->texture_samples);
                kfree(bo->validated_shader);
                bo->validated_shader = NULL;
        }

        bo->free_time = jiffies;
        list_add(&bo->size_head, cache_list);
        list_add(&bo->unref_head, &vc4->bo_cache.time_list);

        vc4->bo_stats.num_cached++;
        vc4->bo_stats.size_cached += gem_bo->size;

        vc4_bo_cache_free_old(dev);

out:
        mutex_unlock(&vc4->bo_lock);
}

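/* Workqueue handler scheduled by the cache timer: bo_lock is a mutex,
 * which can't be taken from (softirq) timer context, so expiry is
 * deferred here.
 */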
static void vc4_bo_cache_time_work(struct work_struct *work)
{
        struct vc4_dev *vc4 =
                container_of(work, struct vc4_dev, bo_cache.time_work);
        struct drm_device *dev = vc4->dev;

        mutex_lock(&vc4->bo_lock);
        vc4_bo_cache_free_old(dev);
        mutex_unlock(&vc4->bo_lock);
}

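/* Timer callback; just kicks the workqueue item above. */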
static void vc4_bo_cache_time_timer(unsigned long data)
{
        struct drm_device *dev = (struct drm_device *)data;
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        schedule_work(&vc4->bo_cache.time_work);
}

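/* Blocks dma-buf export of validated shader BOs, so their contents
 * can't be changed behind the validator's back.
 */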
struct dma_buf *
vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
{
        struct vc4_bo *bo = to_vc4_bo(obj);

        if (bo->validated_shader) {
                DRM_ERROR("Attempting to export shader BO\n");
                return ERR_PTR(-EINVAL);
        }

        return drm_gem_prime_export(dev, obj, flags);
}

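/* Maps the BO's CMA backing store into userspace as write-combined
 * memory, rejecting writable mappings of validated shader BOs.
 */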
int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_gem_object *gem_obj;
        struct vc4_bo *bo;
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret)
                return ret;

        gem_obj = vma->vm_private_data;
        bo = to_vc4_bo(gem_obj);

        if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
                DRM_ERROR("mmapping of shader BOs for writing not allowed.\n");
                return -EINVAL;
        }

        /*
         * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
         * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
         * the whole buffer.
         */
        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_pgoff = 0;

        ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
                          bo->base.paddr, vma->vm_end - vma->vm_start);
        if (ret)
                drm_gem_vm_close(vma);

        return ret;
}

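/* Applies the same shader-BO write protection to mappings requested
 * through PRIME, then defers to the CMA helper.
 */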
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
        struct vc4_bo *bo = to_vc4_bo(obj);

        if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
                DRM_ERROR("mmapping of shader BOs for writing not allowed.\n");
                return -EINVAL;
        }

        return drm_gem_cma_prime_mmap(obj, vma);
}

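/* Refuses kernel vmaps of validated shader BOs entirely, since a vmap
 * can't be restricted to read-only access.
 */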
void *vc4_prime_vmap(struct drm_gem_object *obj)
{
        struct vc4_bo *bo = to_vc4_bo(obj);

        if (bo->validated_shader) {
                DRM_ERROR("vmapping of shader BOs not allowed.\n");
                return ERR_PTR(-EINVAL);
        }

        return drm_gem_cma_prime_vmap(obj);
}

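/* Userspace entry point for allocating a plain BO and getting a GEM
 * handle back.
 */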
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        struct drm_vc4_create_bo *args = data;
        struct vc4_bo *bo = NULL;
        int ret;

        /*
         * Don't allow an unzeroed BO: a cached BO may still hold
         * another user's data, so vc4_bo_create() must clear it
         * before reuse.
         */
        bo = vc4_bo_create(dev, args->size, false);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
        drm_gem_object_unreference_unlocked(&bo->base.base);

        return ret;
}

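/* Returns the fake mmap offset userspace should pass to mmap() for a
 * given BO handle.
 */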
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_vc4_mmap_bo *args = data;
        struct drm_gem_object *gem_obj;

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
                DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
                return -EINVAL;
        }

        /* The mmap offset was set up at BO allocation time. */
        args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

        drm_gem_object_unreference_unlocked(gem_obj);
        return 0;
}

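/* Copies shader code from userspace into a new BO and runs it through
 * the shader validator before any handle to it exists.
 */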
int
vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_vc4_create_shader_bo *args = data;
        struct vc4_bo *bo = NULL;
        int ret;

        if (args->size == 0)
                return -EINVAL;

        if (args->size % sizeof(u64) != 0)
                return -EINVAL;

        if (args->flags != 0) {
                DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
                return -EINVAL;
        }

        if (args->pad != 0) {
                DRM_INFO("Pad set: 0x%08x\n", args->pad);
                return -EINVAL;
        }

        bo = vc4_bo_create(dev, args->size, true);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        if (copy_from_user(bo->base.vaddr,
                           (void __user *)(uintptr_t)args->data,
                           args->size)) {
                ret = -EFAULT;
                goto fail;
        }
        /* Clear the rest of the memory, since the BO may have come
         * from the BO cache with stale contents past args->size.
         */
        memset(bo->base.vaddr + args->size, 0,
               bo->base.base.size - args->size);

        bo->validated_shader = vc4_validate_shader(&bo->base);
        if (!bo->validated_shader) {
                ret = -EINVAL;
                goto fail;
        }

        /* We have to create the handle after validation, to avoid
         * races where userspace could mmap the shader BO before
         * validation finishes.
         */
        ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);

fail:
        drm_gem_object_unreference_unlocked(&bo->base.base);

        return ret;
}

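/* Sets up the BO cache's lock, time list, and the deferred-free work
 * item and timer at driver initialization.
 */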
void vc4_bo_cache_init(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        mutex_init(&vc4->bo_lock);

        INIT_LIST_HEAD(&vc4->bo_cache.time_list);

        INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
        setup_timer(&vc4->bo_cache.time_timer,
                    vc4_bo_cache_time_timer,
                    (unsigned long)dev);
}

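/* Tears down the cache, complaining if any BOs are still allocated,
 * which would indicate a leak.
 */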
void vc4_bo_cache_destroy(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        del_timer(&vc4->bo_cache.time_timer);
        cancel_work_sync(&vc4->bo_cache.time_work);

        vc4_bo_cache_purge(dev);

        if (vc4->bo_stats.num_allocated) {
                DRM_ERROR("Destroying BO cache while BOs still allocated:\n");
                vc4_bo_stats_dump(vc4);
        }
}