/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

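/**
 * amdgpu_gem_object_free - free a GEM object
 *
 * @gobj: GEM object to free
 *
 * Destroys the PRIME attachment if the object was imported, unregisters
 * the MMU notifier and drops the reference on the backing amdgpu_bo.
 */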
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

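/**
 * amdgpu_gem_object_create - create a GEM object backed by an amdgpu_bo
 *
 * @adev: amdgpu device
 * @size: requested size in bytes
 * @alignment: requested alignment in bytes, at least PAGE_SIZE
 * @initial_domain: initial placement domain
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @kernel: whether the buffer is for internal kernel use
 * @resv: optional reservation object to share with a VM
 * @obj: resulting GEM object
 *
 * VRAM allocations fall back to GTT if VRAM placement fails.
 * Returns 0 on success, negative error code on failure.
 */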
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct reservation_object *resv,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *bo;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

retry:
	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
			     flags, NULL, resv, 0, &bo);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &bo->gem_base;

	return 0;
}

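/**
 * amdgpu_gem_force_release - release all GEM object handles
 *
 * @adev: amdgpu device
 *
 * Used during driver teardown to drop handles that user space clients
 * still hold; warns because well behaved clients should already have
 * closed them.
 */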
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put_unlocked(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	struct mm_struct *mm;
	int r;

	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
	if (mm && mm != current->mm)
		return -EPERM;

	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
	    abo->tbo.resv != vm->root.base.bo->tbo.resv)
		return -EPERM;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(abo);
	return 0;
}

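/**
 * amdgpu_gem_object_close - clean up VM mappings when a handle is closed
 *
 * @obj: GEM object being closed
 * @file_priv: DRM file the handle belonged to
 *
 * Drops the bo_va reference for this file's VM and, once the last
 * reference is gone, removes the mapping and clears the freed page
 * table entries.
 */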
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (bo_va && --bo_va->ref_count == 0) {
		amdgpu_vm_bo_rmv(adev, bo_va);

		if (amdgpu_vm_ready(vm)) {
			struct dma_fence *fence = NULL;

			r = amdgpu_vm_clear_freed(adev, vm, &fence);
			if (unlikely(r)) {
				dev_err(adev->dev, "failed to clear page "
					"tables on GEM object close (%d)\n", r);
			}

			if (fence) {
				amdgpu_bo_fence(bo, fence, true);
				dma_fence_put(fence);
			}
		}
	}
	ttm_eu_backoff_reservation(&ticket, &list);
}

/*
 * GEM ioctls.
 */
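/**
 * amdgpu_gem_create_ioctl - create a GEM object from user space
 *
 * @dev: DRM device
 * @data: ioctl arguments (union drm_amdgpu_gem_create)
 * @filp: DRM file the request came from
 *
 * Validates the requested flags and domains, creates the buffer object
 * and returns a handle to user space.
 */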
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_gem_create *args = data;
	uint64_t flags = args->in.domain_flags;
	uint64_t size = args->in.bo_size;
	struct reservation_object *resv = NULL;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* reject invalid gem flags */
	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID))
		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
				 AMDGPU_GEM_DOMAIN_GTT |
				 AMDGPU_GEM_DOMAIN_VRAM |
				 AMDGPU_GEM_DOMAIN_GDS |
				 AMDGPU_GEM_DOMAIN_GWS |
				 AMDGPU_GEM_DOMAIN_OA))
		return -EINVAL;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
			size = size << AMDGPU_GDS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
			size = size << AMDGPU_GWS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
			size = size << AMDGPU_OA_SHIFT;
		else
			return -EINVAL;
	}
	size = roundup(size, PAGE_SIZE);

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		r = amdgpu_bo_reserve(vm->root.base.bo, false);
		if (r)
			return r;

		resv = vm->root.base.bo->tbo.resv;
	}

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     flags, false, resv, &gobj);
	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		if (!r) {
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			abo->parent = amdgpu_bo_ref(vm->root.base.bo);
		}
		amdgpu_bo_unreserve(vm->root.base.bo);
	}
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}

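/**
 * amdgpu_gem_userptr_ioctl - create a GEM object backed by user memory
 *
 * @dev: DRM device
 * @data: ioctl arguments (struct drm_amdgpu_gem_userptr)
 * @filp: DRM file the request came from
 *
 * Wraps a user space address range into a GTT buffer object, optionally
 * registering an MMU notifier and pre-validating the pages.
 */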
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install an MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
				     0, 0, NULL, &gobj);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);

		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
						 bo->tbo.ttm->pages);
		if (r)
			goto unlock_mmap_sem;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto free_pages;

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto free_pages;

		up_read(&current->mm->mmap_sem);
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;

free_pages:
	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);

unlock_mmap_sem:
	up_read(&current->mm->mmap_sem);

release_object:
	drm_gem_object_put_unlocked(gobj);

	return r;
}

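/**
 * amdgpu_mode_dumb_mmap - look up the fake mmap offset for a GEM handle
 *
 * @filp: DRM file
 * @dev: DRM device
 * @handle: GEM handle
 * @offset_p: returned mmap offset
 *
 * Userptr buffers and buffers created without CPU access are rejected.
 */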
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

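/**
 * amdgpu_gem_mmap_ioctl - return the mmap offset of a GEM object
 *
 * @dev: DRM device
 * @data: ioctl arguments (union drm_amdgpu_gem_mmap)
 * @filp: DRM file the request came from
 */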
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;
	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

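/**
 * amdgpu_gem_wait_idle_ioctl - wait for a GEM object to become idle
 *
 * @dev: DRM device
 * @data: ioctl arguments (union drm_amdgpu_gem_wait_idle)
 * @filp: DRM file the request came from
 *
 * Waits on the fences in the buffer's reservation object up to the
 * requested timeout and reports the signaled state back to user space.
 */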
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
						  timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put_unlocked(gobj);
	return r;
}

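/**
 * amdgpu_gem_metadata_ioctl - get or set buffer metadata and tiling flags
 *
 * @dev: DRM device
 * @data: ioctl arguments (struct drm_amdgpu_gem_metadata)
 * @filp: DRM file the request came from
 */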
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @list: validation list
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    struct list_head *list,
				    uint32_t operation)
{
	int r;

	if (!amdgpu_vm_ready(vm))
		return;

	r = amdgpu_vm_update_directories(adev, vm);
	if (r)
		goto error;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE)
		r = amdgpu_vm_bo_update(adev, bo_va, false);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

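/**
 * amdgpu_gem_va_ioctl - map, unmap, replace or clear GPU VM mappings
 *
 * @dev: DRM device
 * @data: ioctl arguments (struct drm_amdgpu_gem_va)
 * @filp: DRM file the request came from
 *
 * Validates the requested operation and flags, reserves the buffer and
 * the page directory, performs the VA operation and, unless the update
 * is delayed, writes the page tables right away.
 */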
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint64_t va_flags;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"va_address 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->va_address,
			AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}
	if ((args->operation == AMDGPU_VA_OP_MAP) ||
	    (args->operation == AMDGPU_VA_OP_REPLACE)) {
		if (amdgpu_kms_vram_lost(adev, fpriv))
			return -ENODEV;
	}

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
		tv.bo = &abo->tbo;
		tv.shared = false;
		list_add(&tv.head, &list);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_unref;

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error_backoff;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
					args->map_size);
		if (r)
			goto error_backoff;

		va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;

	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
					args->map_size);
		if (r)
			goto error_backoff;

		va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, &list,
					args->operation);

error_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

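/**
 * amdgpu_gem_op_ioctl - query creation info or change placement of a buffer
 *
 * @dev: DRM device
 * @data: ioctl arguments (struct drm_amdgpu_gem_op)
 * @filp: DRM file the request came from
 */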
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			amdgpu_vm_bo_invalidate(adev, robj, true);

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

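/**
 * amdgpu_mode_dumb_create - create a dumb buffer for scanout
 *
 * @file_priv: DRM file
 * @dev: DRM device
 * @args: dumb buffer parameters
 *
 * Allocates a CPU accessible VRAM buffer sized for the requested width,
 * height and bpp and returns a handle to it.
 */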
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = amdgpu_align_pitch(adev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     false, NULL, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *gobj = ptr;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct seq_file *m = data;

	unsigned domain;
	const char *placement;
	unsigned pin_count;
	uint64_t offset;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}
	seq_printf(m, "\t0x%08x: %12ld byte %s",
		   id, amdgpu_bo_size(bo), placement);

	offset = ACCESS_ONCE(bo->tbo.mem.start);
	if (offset != AMDGPU_BO_INVALID_OFFSET)
		seq_printf(m, " @ 0x%010Lx", offset);

	pin_count = ACCESS_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);
	seq_printf(m, "\n");

	return 0;
}

static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}