/*
 * Copyright © 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 GEM BO management support
 *
 * The VC4 GPU architecture (both scanout and rendering) has direct
 * access to system memory with no MMU in between. To support it, we
 * use the GEM CMA helper functions to allocate contiguous ranges of
 * physical memory for our BOs.
 *
 * Since the CMA allocator is very slow, we keep a cache of recently
 * freed BOs around so that the kernel's allocation of objects for 3D
 * rendering can return quickly.
 */
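
/*
 * Example (an illustrative sketch, not code from this file): this is
 * how the rest of the driver allocates a BO through the cache, using
 * vc4_bo_create() and the label types defined below.  Error handling
 * is abbreviated.
 *
 *	struct vc4_bo *bo;
 *
 *	bo = vc4_bo_create(dev, size, false, VC4_BO_TYPE_KERNEL);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 *	(use bo->base.vaddr / bo->base.paddr, then drop the reference:)
 *	drm_gem_object_put_unlocked(&bo->base.base);
 */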

#include <linux/dma-buf.h>

#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"

static const char * const bo_type_names[] = {
        "kernel",
        "V3D",
        "V3D shader",
        "dumb",
        "binner",
        "RCL",
        "BCL",
        "kernel BO cache",
};

static bool is_user_label(int label)
{
        return label >= VC4_BO_TYPE_COUNT;
}

static void vc4_bo_stats_dump(struct vc4_dev *vc4)
{
        int i;

        for (i = 0; i < vc4->num_labels; i++) {
                if (!vc4->bo_labels[i].num_allocated)
                        continue;

                DRM_INFO("%30s: %6dkb BOs (%d)\n",
                         vc4->bo_labels[i].name,
                         vc4->bo_labels[i].size_allocated / 1024,
                         vc4->bo_labels[i].num_allocated);
        }
}

#ifdef CONFIG_DEBUG_FS
int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        int i;

        mutex_lock(&vc4->bo_lock);
        for (i = 0; i < vc4->num_labels; i++) {
                if (!vc4->bo_labels[i].num_allocated)
                        continue;

                seq_printf(m, "%30s: %6dkb BOs (%d)\n",
                           vc4->bo_labels[i].name,
                           vc4->bo_labels[i].size_allocated / 1024,
                           vc4->bo_labels[i].num_allocated);
        }
        mutex_unlock(&vc4->bo_lock);

        return 0;
}
#endif

/* Takes ownership of *name and returns the appropriate slot for it in
 * the bo_labels[] array, extending it as necessary.
 *
 * This is inefficient and could use a hash table instead of walking
 * an array and strcmp()ing.  However, the assumption is that user
 * labeling will be infrequent (scanout buffers and other long-lived
 * objects, or debug driver builds), so we can live with it for now.
 */
static int vc4_get_user_label(struct vc4_dev *vc4, const char *name)
{
        int i;
        int free_slot = -1;

        for (i = 0; i < vc4->num_labels; i++) {
                if (!vc4->bo_labels[i].name) {
                        free_slot = i;
                } else if (strcmp(vc4->bo_labels[i].name, name) == 0) {
                        kfree(name);
                        return i;
                }
        }

        if (free_slot != -1) {
                WARN_ON(vc4->bo_labels[free_slot].num_allocated != 0);
                vc4->bo_labels[free_slot].name = name;
                return free_slot;
        } else {
                u32 new_label_count = vc4->num_labels + 1;
                struct vc4_label *new_labels =
                        krealloc(vc4->bo_labels,
                                 new_label_count * sizeof(*new_labels),
                                 GFP_KERNEL);

                if (!new_labels) {
                        kfree(name);
                        return -1;
                }

                free_slot = vc4->num_labels;
                vc4->bo_labels = new_labels;
                vc4->num_labels = new_label_count;

                vc4->bo_labels[free_slot].name = name;
                vc4->bo_labels[free_slot].num_allocated = 0;
                vc4->bo_labels[free_slot].size_allocated = 0;

                return free_slot;
        }
}

static void vc4_bo_set_label(struct drm_gem_object *gem_obj, int label)
{
        struct vc4_bo *bo = to_vc4_bo(gem_obj);
        struct vc4_dev *vc4 = to_vc4_dev(gem_obj->dev);

        lockdep_assert_held(&vc4->bo_lock);

        if (label != -1) {
                vc4->bo_labels[label].num_allocated++;
                vc4->bo_labels[label].size_allocated += gem_obj->size;
        }

        vc4->bo_labels[bo->label].num_allocated--;
        vc4->bo_labels[bo->label].size_allocated -= gem_obj->size;

        if (vc4->bo_labels[bo->label].num_allocated == 0 &&
            is_user_label(bo->label)) {
                /* Free user BO label slots on last unreference.
                 * Slots are just where we track the stats for a given
                 * name, and once a name is unused we can reuse that
                 * slot.
                 */
                kfree(vc4->bo_labels[bo->label].name);
                vc4->bo_labels[bo->label].name = NULL;
        }

        bo->label = label;
}

static uint32_t bo_page_index(size_t size)
{
        return (size / PAGE_SIZE) - 1;
}

static void vc4_bo_destroy(struct vc4_bo *bo)
{
        struct drm_gem_object *obj = &bo->base.base;
        struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

        lockdep_assert_held(&vc4->bo_lock);

        vc4_bo_set_label(obj, -1);

        if (bo->validated_shader) {
                kfree(bo->validated_shader->texture_samples);
                kfree(bo->validated_shader);
                bo->validated_shader = NULL;
        }

        reservation_object_fini(&bo->_resv);

        drm_gem_cma_free_object(obj);
}

static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
{
        struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

        lockdep_assert_held(&vc4->bo_lock);
        list_del(&bo->unref_head);
        list_del(&bo->size_head);
}

static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
                                                     size_t size)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        uint32_t page_index = bo_page_index(size);

        if (vc4->bo_cache.size_list_size <= page_index) {
                uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
                                        page_index + 1);
                struct list_head *new_list;
                uint32_t i;

                new_list = kmalloc_array(new_size, sizeof(struct list_head),
                                         GFP_KERNEL);
                if (!new_list)
                        return NULL;

                /* Rebase the old cached BO lists to their new list
                 * head locations.
                 */
                for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
                        struct list_head *old_list =
                                &vc4->bo_cache.size_list[i];

                        if (list_empty(old_list))
                                INIT_LIST_HEAD(&new_list[i]);
                        else
                                list_replace(old_list, &new_list[i]);
                }
                /* And initialize the brand new BO list heads. */
                for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
                        INIT_LIST_HEAD(&new_list[i]);

                kfree(vc4->bo_cache.size_list);
                vc4->bo_cache.size_list = new_list;
                vc4->bo_cache.size_list_size = new_size;
        }

        return &vc4->bo_cache.size_list[page_index];
}

static void vc4_bo_cache_purge(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        mutex_lock(&vc4->bo_lock);
        while (!list_empty(&vc4->bo_cache.time_list)) {
                struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
                                                    struct vc4_bo, unref_head);
                vc4_bo_remove_from_cache(bo);
                vc4_bo_destroy(bo);
        }
        mutex_unlock(&vc4->bo_lock);
}

static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
                                            uint32_t size,
                                            enum vc4_kernel_bo_type type)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        uint32_t page_index = bo_page_index(size);
        struct vc4_bo *bo = NULL;

        size = roundup(size, PAGE_SIZE);

        mutex_lock(&vc4->bo_lock);
        if (page_index >= vc4->bo_cache.size_list_size)
                goto out;

        if (list_empty(&vc4->bo_cache.size_list[page_index]))
                goto out;

        bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
                              struct vc4_bo, size_head);
        vc4_bo_remove_from_cache(bo);
        kref_init(&bo->base.base.refcount);

out:
        if (bo)
                vc4_bo_set_label(&bo->base.base, type);
        mutex_unlock(&vc4->bo_lock);
        return bo;
}

/**
 * vc4_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the CMA helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_bo *bo;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return ERR_PTR(-ENOMEM);

        mutex_lock(&vc4->bo_lock);
        bo->label = VC4_BO_TYPE_KERNEL;
        vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
        vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
        mutex_unlock(&vc4->bo_lock);
        bo->resv = &bo->_resv;
        reservation_object_init(bo->resv);

        return &bo->base.base;
}
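
/*
 * For reference, a sketch (assuming the usual setup in vc4_drv.c, which
 * is not shown in this file) of how this hook and vc4_free_object()
 * below are wired into the DRM core:
 *
 *	static struct drm_driver vc4_drm_driver = {
 *		...
 *		.gem_create_object = vc4_create_object,
 *		.gem_free_object_unlocked = vc4_free_object,
 *		...
 *	};
 */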

struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
                             bool allow_unzeroed, enum vc4_kernel_bo_type type)
{
        size_t size = roundup(unaligned_size, PAGE_SIZE);
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct drm_gem_cma_object *cma_obj;
        struct vc4_bo *bo;

        if (size == 0)
                return ERR_PTR(-EINVAL);

        /* First, try to get a vc4_bo from the kernel BO cache. */
        bo = vc4_bo_get_from_cache(dev, size, type);
        if (bo) {
                if (!allow_unzeroed)
                        memset(bo->base.vaddr, 0, bo->base.base.size);
                return bo;
        }

        cma_obj = drm_gem_cma_create(dev, size);
        if (IS_ERR(cma_obj)) {
                /*
                 * If we've run out of CMA memory, kill the cache of
                 * CMA allocations we've got lying around and try again.
                 */
                vc4_bo_cache_purge(dev);

                cma_obj = drm_gem_cma_create(dev, size);
                if (IS_ERR(cma_obj)) {
                        DRM_ERROR("Failed to allocate from CMA:\n");
                        vc4_bo_stats_dump(vc4);
                        return ERR_PTR(-ENOMEM);
                }
        }
        bo = to_vc4_bo(&cma_obj->base);

        mutex_lock(&vc4->bo_lock);
        vc4_bo_set_label(&cma_obj->base, type);
        mutex_unlock(&vc4->bo_lock);

        return bo;
}

int vc4_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
{
        int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        struct vc4_bo *bo = NULL;
        int ret;

        if (args->pitch < min_pitch)
                args->pitch = min_pitch;

        if (args->size < args->pitch * args->height)
                args->size = args->pitch * args->height;

        bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
        drm_gem_object_put_unlocked(&bo->base.base);

        return ret;
}
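
/*
 * Userspace view (an illustrative sketch using the generic DRM dumb
 * buffer uapi, not code from this driver): the handler above rounds
 * the pitch and size up and fills in the returned fields.
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create) == 0) {
 *		(create.pitch, create.size and create.handle are now set)
 *	}
 */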

static void vc4_bo_cache_free_old(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        unsigned long expire_time = jiffies - msecs_to_jiffies(1000);

        lockdep_assert_held(&vc4->bo_lock);

        while (!list_empty(&vc4->bo_cache.time_list)) {
                struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
                                                    struct vc4_bo, unref_head);
                if (time_before(expire_time, bo->free_time)) {
                        mod_timer(&vc4->bo_cache.time_timer,
                                  round_jiffies_up(jiffies +
                                                   msecs_to_jiffies(1000)));
                        return;
                }

                vc4_bo_remove_from_cache(bo);
                vc4_bo_destroy(bo);
        }
}

/* Called on the last userspace/kernel unreference of the BO.  Returns
 * it to the BO cache if possible, otherwise frees it.
 */
void vc4_free_object(struct drm_gem_object *gem_bo)
{
        struct drm_device *dev = gem_bo->dev;
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_bo *bo = to_vc4_bo(gem_bo);
        struct list_head *cache_list;

        mutex_lock(&vc4->bo_lock);
        /* If the object references someone else's memory, we can't cache it.
         */
        if (gem_bo->import_attach) {
                vc4_bo_destroy(bo);
                goto out;
        }

        /* Don't cache if it was publicly named. */
        if (gem_bo->name) {
                vc4_bo_destroy(bo);
                goto out;
        }

        /* If this object was partially constructed but its CMA
         * allocation failed, just free it.
         */
        if (!bo->base.vaddr) {
                vc4_bo_destroy(bo);
                goto out;
        }

        cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
        if (!cache_list) {
                vc4_bo_destroy(bo);
                goto out;
        }

        if (bo->validated_shader) {
                kfree(bo->validated_shader->texture_samples);
                kfree(bo->validated_shader);
                bo->validated_shader = NULL;
        }

        bo->t_format = false;
        bo->free_time = jiffies;
        list_add(&bo->size_head, cache_list);
        list_add(&bo->unref_head, &vc4->bo_cache.time_list);

        vc4_bo_set_label(&bo->base.base, VC4_BO_TYPE_KERNEL_CACHE);

        vc4_bo_cache_free_old(dev);

out:
        mutex_unlock(&vc4->bo_lock);
}

static void vc4_bo_cache_time_work(struct work_struct *work)
{
        struct vc4_dev *vc4 =
                container_of(work, struct vc4_dev, bo_cache.time_work);
        struct drm_device *dev = vc4->dev;

        mutex_lock(&vc4->bo_lock);
        vc4_bo_cache_free_old(dev);
        mutex_unlock(&vc4->bo_lock);
}

static void vc4_bo_cache_time_timer(unsigned long data)
{
        struct drm_device *dev = (struct drm_device *)data;
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        schedule_work(&vc4->bo_cache.time_work);
}

struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj)
{
        struct vc4_bo *bo = to_vc4_bo(obj);

        return bo->resv;
}

struct dma_buf *
vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
{
        struct vc4_bo *bo = to_vc4_bo(obj);

        if (bo->validated_shader) {
                DRM_DEBUG("Attempting to export shader BO\n");
                return ERR_PTR(-EINVAL);
        }

        return drm_gem_prime_export(dev, obj, flags);
}

int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_gem_object *gem_obj;
        struct vc4_bo *bo;
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret)
                return ret;

        gem_obj = vma->vm_private_data;
        bo = to_vc4_bo(gem_obj);

        if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
                DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
                return -EINVAL;
        }

        /*
         * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
         * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
         * the whole buffer.
         */
        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_pgoff = 0;

        ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
                          bo->base.paddr, vma->vm_end - vma->vm_start);
        if (ret)
                drm_gem_vm_close(vma);

        return ret;
}

int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
        struct vc4_bo *bo = to_vc4_bo(obj);

        if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
                DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
                return -EINVAL;
        }

        return drm_gem_cma_prime_mmap(obj, vma);
}

void *vc4_prime_vmap(struct drm_gem_object *obj)
{
        struct vc4_bo *bo = to_vc4_bo(obj);

        if (bo->validated_shader) {
                DRM_DEBUG("mmapping of shader BOs not allowed.\n");
                return ERR_PTR(-EINVAL);
        }

        return drm_gem_cma_prime_vmap(obj);
}

struct drm_gem_object *
vc4_prime_import_sg_table(struct drm_device *dev,
                          struct dma_buf_attachment *attach,
                          struct sg_table *sgt)
{
        struct drm_gem_object *obj;
        struct vc4_bo *bo;

        obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
        if (IS_ERR(obj))
                return obj;

        bo = to_vc4_bo(obj);
        bo->resv = attach->dmabuf->resv;

        return obj;
}

int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        struct drm_vc4_create_bo *args = data;
        struct vc4_bo *bo = NULL;
        int ret;

        /*
         * We can't allocate from the BO cache, because the BOs don't
         * get zeroed, and that might leak data between users.
         */
        bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_V3D);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
        drm_gem_object_put_unlocked(&bo->base.base);

        return ret;
}

int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_vc4_mmap_bo *args = data;
        struct drm_gem_object *gem_obj;

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
                return -EINVAL;
        }

        /* The mmap offset was set up at BO allocation time. */
        args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

        drm_gem_object_put_unlocked(gem_obj);
        return 0;
}
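
/*
 * Userspace view (an illustrative sketch, assuming the vc4 uapi from
 * uapi/drm/vc4_drm.h): create a V3D BO, look up its fake mmap offset,
 * then map it through the DRM fd.  Error handling is omitted.
 *
 *	struct drm_vc4_create_bo create = { .size = 65536 };
 *	struct drm_vc4_mmap_bo map = { 0 };
 *	void *ptr;
 *
 *	drmIoctl(fd, DRM_IOCTL_VC4_CREATE_BO, &create);
 *
 *	map.handle = create.handle;
 *	drmIoctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map);
 *
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, fd, map.offset);
 */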

int
vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_vc4_create_shader_bo *args = data;
        struct vc4_bo *bo = NULL;
        int ret;

        if (args->size == 0)
                return -EINVAL;

        if (args->size % sizeof(u64) != 0)
                return -EINVAL;

        if (args->flags != 0) {
                DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
                return -EINVAL;
        }

        if (args->pad != 0) {
                DRM_INFO("Pad set: 0x%08x\n", args->pad);
                return -EINVAL;
        }

        bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        if (copy_from_user(bo->base.vaddr,
                           (void __user *)(uintptr_t)args->data,
                           args->size)) {
                ret = -EFAULT;
                goto fail;
        }
        /* Clear the rest of the memory, so we don't leak stale data
         * from a previous BO-cache allocation.
         */
        memset(bo->base.vaddr + args->size, 0,
               bo->base.base.size - args->size);

        bo->validated_shader = vc4_validate_shader(&bo->base);
        if (!bo->validated_shader) {
                ret = -EINVAL;
                goto fail;
        }

        /* We have to create the handle after validation, to avoid
         * races where users could do things like mmap the shader BO.
         */
        ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);

fail:
        drm_gem_object_put_unlocked(&bo->base.base);

        return ret;
}
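
/*
 * Userspace view (an illustrative sketch, assuming the vc4 uapi):
 * uploading QPU shader code for validation.  The code must be a
 * multiple of 8 bytes, matching the sizeof(u64) check above.
 *
 *	struct drm_vc4_create_shader_bo shader = {
 *		.size = code_size,
 *		.data = (uintptr_t)code,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_VC4_CREATE_SHADER_BO, &shader);
 *	(shader.handle now names the validated shader BO)
 */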

/**
 * vc4_set_tiling_ioctl() - Sets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * The tiling state of the BO decides the default modifier of an fb if
 * no specific modifier was set by userspace, as well as the return
 * value of vc4_get_tiling_ioctl() (so that userspace can treat a BO
 * received from dmabuf as having the same tiling format the producer
 * used).
 */
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_vc4_set_tiling *args = data;
        struct drm_gem_object *gem_obj;
        struct vc4_bo *bo;
        bool t_format;

        if (args->flags != 0)
                return -EINVAL;

        switch (args->modifier) {
        case DRM_FORMAT_MOD_NONE:
                t_format = false;
                break;
        case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
                t_format = true;
                break;
        default:
                return -EINVAL;
        }

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
                return -ENOENT;
        }
        bo = to_vc4_bo(gem_obj);
        bo->t_format = t_format;

        drm_gem_object_put_unlocked(gem_obj);

        return 0;
}

/**
 * vc4_get_tiling_ioctl() - Gets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Returns the tiling modifier for a BO as set by vc4_set_tiling_ioctl().
 */
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_vc4_get_tiling *args = data;
        struct drm_gem_object *gem_obj;
        struct vc4_bo *bo;

        if (args->flags != 0 || args->modifier != 0)
                return -EINVAL;

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
                return -ENOENT;
        }
        bo = to_vc4_bo(gem_obj);

        if (bo->t_format)
                args->modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
        else
                args->modifier = DRM_FORMAT_MOD_NONE;

        drm_gem_object_put_unlocked(gem_obj);

        return 0;
}
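
/*
 * Userspace view (an illustrative sketch, assuming the vc4 uapi):
 * marking a BO as T-format tiled and reading the modifier back.
 *
 *	struct drm_vc4_set_tiling set = {
 *		.handle = handle,
 *		.modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
 *	};
 *	struct drm_vc4_get_tiling get = { .handle = handle };
 *
 *	drmIoctl(fd, DRM_IOCTL_VC4_SET_TILING, &set);
 *	drmIoctl(fd, DRM_IOCTL_VC4_GET_TILING, &get);
 *	(get.modifier is now DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED)
 */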

int vc4_bo_cache_init(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        int i;

        /* Create the initial set of BO labels that the kernel will
         * use.  This lets us avoid a bunch of string reallocation in
         * the kernel's draw and BO allocation paths.
         */
        vc4->bo_labels = kcalloc(VC4_BO_TYPE_COUNT, sizeof(*vc4->bo_labels),
                                 GFP_KERNEL);
        if (!vc4->bo_labels)
                return -ENOMEM;
        vc4->num_labels = VC4_BO_TYPE_COUNT;

        BUILD_BUG_ON(ARRAY_SIZE(bo_type_names) != VC4_BO_TYPE_COUNT);
        for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
                vc4->bo_labels[i].name = bo_type_names[i];

        mutex_init(&vc4->bo_lock);

        INIT_LIST_HEAD(&vc4->bo_cache.time_list);

        INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
        setup_timer(&vc4->bo_cache.time_timer,
                    vc4_bo_cache_time_timer,
                    (unsigned long)dev);

        return 0;
}

void vc4_bo_cache_destroy(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        int i;

        del_timer(&vc4->bo_cache.time_timer);
        cancel_work_sync(&vc4->bo_cache.time_work);

        vc4_bo_cache_purge(dev);

        for (i = 0; i < vc4->num_labels; i++) {
                if (vc4->bo_labels[i].num_allocated) {
                        DRM_ERROR("Destroying BO cache with %d %s "
                                  "BOs still allocated\n",
                                  vc4->bo_labels[i].num_allocated,
                                  vc4->bo_labels[i].name);
                }

                if (is_user_label(i))
                        kfree(vc4->bo_labels[i].name);
        }
        kfree(vc4->bo_labels);
}

int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct drm_vc4_label_bo *args = data;
        char *name;
        struct drm_gem_object *gem_obj;
        int ret = 0, label;

        if (!args->len)
                return -EINVAL;

        name = strndup_user(u64_to_user_ptr(args->name), args->len + 1);
        if (IS_ERR(name))
                return PTR_ERR(name);

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
                DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
                kfree(name);
                return -ENOENT;
        }

        mutex_lock(&vc4->bo_lock);
        label = vc4_get_user_label(vc4, name);
        if (label != -1)
                vc4_bo_set_label(gem_obj, label);
        else
                ret = -ENOMEM;
        mutex_unlock(&vc4->bo_lock);

        drm_gem_object_put_unlocked(gem_obj);

        return ret;
}
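
/*
 * Userspace view (an illustrative sketch, assuming the vc4 uapi): give
 * a BO a debug name so it shows up in the per-label stats above.
 *
 *	const char *bo_name = "my scanout buffer";
 *	struct drm_vc4_label_bo label = {
 *		.handle = handle,
 *		.len = strlen(bo_name),
 *		.name = (uintptr_t)bo_name,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_VC4_LABEL_BO, &label);
 */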