/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

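/**
 * amdgpu_gem_object_free - free the GEM object backing an amdgpu BO
 *
 * @gobj: GEM object to free
 *
 * Tears down the prime import attachment (if any), unregisters the MMU
 * notifier and drops the driver reference on the buffer object.
 */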
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

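/**
 * amdgpu_gem_object_create - allocate a BO and wrap it in a GEM object
 *
 * @adev: amdgpu device
 * @size: requested size in bytes
 * @alignment: requested alignment, raised to at least one page
 * @initial_domain: initial placement domain
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @kernel: whether this is a kernel internal allocation
 * @obj: returned GEM object
 *
 * If placing the BO in VRAM fails, the domain is widened to include GTT
 * and the allocation is retried. Returns 0 on success or a negative
 * error code.
 */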
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *robj;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

retry:
	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, 0, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;

	return 0;
}

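/**
 * amdgpu_gem_force_release - drop all GEM handles still held by clients
 *
 * @adev: amdgpu device
 *
 * Walks the file list of the DRM device and releases every GEM object
 * still referenced from a client handle table. Only expected to run on
 * teardown while user space clients are still active, hence the warnings.
 */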
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put_unlocked(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create(), which appears in both the new and
 * the open ioctl case.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	int r;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(abo);
	return 0;
}

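/**
 * amdgpu_gem_object_close - clean up per-VM state when a handle is closed
 *
 * @obj: GEM object being closed
 * @file_priv: DRM file private of the closing client
 *
 * Drops the client's bo_va reference and, once the last reference is
 * gone, removes the mapping and clears the freed page table entries.
 */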
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (bo_va && --bo_va->ref_count == 0) {
		amdgpu_vm_bo_rmv(adev, bo_va);

		if (amdgpu_vm_ready(vm)) {
			struct dma_fence *fence = NULL;

			r = amdgpu_vm_clear_freed(adev, vm, &fence);
			if (unlikely(r)) {
				dev_err(adev->dev, "failed to clear page "
					"tables on GEM object close (%d)\n", r);
			}

			if (fence) {
				amdgpu_bo_fence(bo, fence, true);
				dma_fence_put(fence);
			}
		}
	}
	ttm_eu_backoff_reservation(&ticket, &list);
}

/*
 * GEM ioctls.
 */
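/**
 * amdgpu_gem_create_ioctl - create a GEM object for user space
 *
 * Validates the requested flags and domains, applies the special size
 * shifts for the GDS/GWS/OA domains, rounds the size up to page
 * granularity and returns a handle to the new object.
 */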
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_create *args = data;
	uint64_t flags = args->in.domain_flags;
	uint64_t size = args->in.bo_size;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* reject invalid gem flags */
	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		      AMDGPU_GEM_CREATE_VRAM_CLEARED))
		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
				 AMDGPU_GEM_DOMAIN_GTT |
				 AMDGPU_GEM_DOMAIN_VRAM |
				 AMDGPU_GEM_DOMAIN_GDS |
				 AMDGPU_GEM_DOMAIN_GWS |
				 AMDGPU_GEM_DOMAIN_OA))
		return -EINVAL;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
			size = size << AMDGPU_GDS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
			size = size << AMDGPU_GWS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
			size = size << AMDGPU_OA_SHIFT;
		else
			return -EINVAL;
	}
	size = roundup(size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     flags, false, &gobj);
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}

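/**
 * amdgpu_gem_userptr_ioctl - create a GEM object backed by user memory
 *
 * Wraps an anonymous user space address range in a GTT buffer object.
 * Writable mappings require an MMU notifier to be registered; with
 * AMDGPU_GEM_USERPTR_VALIDATE the user pages are fetched and the BO is
 * validated in GTT right away.
 */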
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install a MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_CPU, 0,
				     0, &gobj);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);

		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
						 bo->tbo.ttm->pages);
		if (r)
			goto unlock_mmap_sem;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto free_pages;

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto free_pages;

		up_read(&current->mm->mmap_sem);
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;

free_pages:
	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);

unlock_mmap_sem:
	up_read(&current->mm->mmap_sem);

release_object:
	drm_gem_object_put_unlocked(gobj);

	return r;
}

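/**
 * amdgpu_mode_dumb_mmap - look up the fake mmap offset of a BO
 *
 * @filp: DRM file
 * @dev: DRM device
 * @handle: GEM handle of the object
 * @offset_p: returned mmap offset
 *
 * Rejects userptr BOs and BOs created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
 * otherwise returns the TTM mmap offset of the object.
 */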
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

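/* Thin wrapper around amdgpu_mode_dumb_mmap() for the GEM_MMAP ioctl. */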
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

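/**
 * amdgpu_gem_wait_idle_ioctl - wait for a GEM object to become idle
 *
 * Waits on all fences attached to the BO's reservation object, up to the
 * user supplied timeout, and reports back whether the object signaled.
 */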
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
						  timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put_unlocked(gobj);
	return r;
}

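/**
 * amdgpu_gem_metadata_ioctl - get or set buffer metadata and tiling flags
 *
 * Reserves the BO and either copies its tiling info and opaque metadata
 * out to user space or updates them from the supplied arguments.
 */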
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @list: validation list
 * @operation: map, unmap, clear or replace
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    struct list_head *list,
				    uint32_t operation)
{
	int r;

	if (!amdgpu_vm_ready(vm))
		return;

	r = amdgpu_vm_update_directories(adev, vm);
	if (r)
		goto error;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE)
		r = amdgpu_vm_bo_update(adev, bo_va, false);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

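/**
 * amdgpu_gem_va_ioctl - map, unmap, clear or replace a GPU VA range
 *
 * Validates the requested VA, flags and operation, reserves the BO and
 * the page directory, performs the VM operation and, unless a delayed
 * update was requested, writes the page tables right away.
 */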
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	uint64_t va_flags;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"va_address 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->va_address,
			AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}
	if ((args->operation == AMDGPU_VA_OP_MAP) ||
	    (args->operation == AMDGPU_VA_OP_REPLACE)) {
		if (amdgpu_kms_vram_lost(adev, fpriv))
			return -ENODEV;
	}

	INIT_LIST_HEAD(&list);
	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
		tv.bo = &abo->tbo;
		tv.shared = false;
		list_add(&tv.head, &list);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_unref;

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error_backoff;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
					args->map_size);
		if (r)
			goto error_backoff;

		va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;

	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
					args->map_size);
		if (r)
			goto error_backoff;

		va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, &list,
					args->operation);

error_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

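/**
 * amdgpu_gem_op_ioctl - query creation info or change the placement of a BO
 *
 * AMDGPU_GEM_OP_GET_GEM_CREATE_INFO copies size, alignment, domains and
 * flags back to user space; AMDGPU_GEM_OP_SET_PLACEMENT updates the
 * preferred and allowed domains of the buffer.
 */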
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

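/**
 * amdgpu_mode_dumb_create - create a dumb buffer suitable for scanout
 *
 * Computes pitch and size from the requested geometry, allocates a CPU
 * accessible VRAM BO and returns a handle to it.
 */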
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = amdgpu_align_pitch(adev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
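/* Print placement, size, offset and pin info for one BO handle. */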
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *gobj = ptr;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct seq_file *m = data;

	unsigned domain;
	const char *placement;
	unsigned pin_count;
	uint64_t offset;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}
	seq_printf(m, "\t0x%08x: %12ld byte %s",
		   id, amdgpu_bo_size(bo), placement);

	offset = ACCESS_ONCE(bo->tbo.mem.start);
	if (offset != AMDGPU_BO_INVALID_OFFSET)
		seq_printf(m, " @ 0x%010Lx", offset);

	pin_count = ACCESS_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);
	seq_printf(m, "\n");

	return 0;
}

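/* Dump the GEM allocations of every client to the amdgpu_gem_info file. */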
static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}