/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

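/*
 * GEM object free callback: tear down any prime import attachment,
 * unregister the MMU notifier and drop the last reference on the
 * backing amdgpu_bo.
 */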
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

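/*
 * Common helper to allocate a GEM-wrapped amdgpu_bo. On allocation failure
 * the placement is progressively relaxed: first CPU_ACCESS_REQUIRED is
 * dropped, then a VRAM-only request is allowed to fall back to GTT.
 */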
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct reservation_object *resv,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *bo;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

retry:
	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
			     flags, NULL, resv, 0, &bo);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
				flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
				goto retry;
			}

			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &bo->gem_base;

	return 0;
}

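/*
 * Walk all open DRM files and forcefully destroy any GEM handles user
 * space still holds, warning about each leaked client and allocation.
 */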
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put_unlocked(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create(), which appears in both the new and
 * open ioctl cases.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	struct mm_struct *mm;
	int r;

	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
	if (mm && mm != current->mm)
		return -EPERM;

	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
	    abo->tbo.resv != vm->root.base.bo->tbo.resv)
		return -EPERM;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(abo);
	return 0;
}

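/*
 * GEM object close callback: drop this file's bo_va reference on the BO
 * and, if the VM is ready, clear the freed mappings and fence the BO
 * against the resulting page table update.
 */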
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (bo_va && --bo_va->ref_count == 0) {
		amdgpu_vm_bo_rmv(adev, bo_va);

		if (amdgpu_vm_ready(vm)) {
			struct dma_fence *fence = NULL;

			r = amdgpu_vm_clear_freed(adev, vm, &fence);
			if (unlikely(r)) {
				dev_err(adev->dev, "failed to clear page "
					"tables on GEM object close (%d)\n", r);
			}

			if (fence) {
				amdgpu_bo_fence(bo, fence, true);
				dma_fence_put(fence);
			}
		}
	}
	ttm_eu_backoff_reservation(&ticket, &list);
}

/*
 * GEM ioctls.
 */
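/*
 * DRM_AMDGPU_GEM_CREATE: validate the requested flags and domains, allocate
 * a buffer object and return a handle to user space.
 */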
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_gem_create *args = data;
	uint64_t flags = args->in.domain_flags;
	uint64_t size = args->in.bo_size;
	struct reservation_object *resv = NULL;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* reject invalid gem flags */
	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC))

		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
				 AMDGPU_GEM_DOMAIN_GTT |
				 AMDGPU_GEM_DOMAIN_VRAM |
				 AMDGPU_GEM_DOMAIN_GDS |
				 AMDGPU_GEM_DOMAIN_GWS |
				 AMDGPU_GEM_DOMAIN_OA))
		return -EINVAL;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
			size = size << AMDGPU_GDS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
			size = size << AMDGPU_GWS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
			size = size << AMDGPU_OA_SHIFT;
		else
			return -EINVAL;
	}
	size = roundup(size, PAGE_SIZE);

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		r = amdgpu_bo_reserve(vm->root.base.bo, false);
		if (r)
			return r;

		resv = vm->root.base.bo->tbo.resv;
	}

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     flags, false, resv, &gobj);
	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		if (!r) {
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			abo->parent = amdgpu_bo_ref(vm->root.base.bo);
		}
		amdgpu_bo_unreserve(vm->root.base.bo);
	}
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}

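/*
 * DRM_AMDGPU_GEM_USERPTR: wrap an anonymous user memory range in a GEM
 * object, optionally registering an MMU notifier and pre-validating the
 * pages into GTT.
 */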
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install a MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
				     0, 0, NULL, &gobj);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
						 bo->tbo.ttm->pages);
		if (r)
			goto unlock_mmap_sem;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto free_pages;

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto free_pages;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;

free_pages:
	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);

unlock_mmap_sem:
	up_read(&current->mm->mmap_sem);

release_object:
	drm_gem_object_put_unlocked(gobj);

	return r;
}

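/*
 * Look up a GEM handle and return the fake mmap offset user space needs
 * to map the buffer; userptr and NO_CPU_ACCESS BOs are rejected.
 */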
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

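/* DRM_AMDGPU_GEM_MMAP: thin wrapper around amdgpu_mode_dumb_mmap(). */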
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

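/*
 * DRM_AMDGPU_GEM_WAIT_IDLE: wait on all fences attached to a BO's
 * reservation object and report whether it signaled within the timeout.
 */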
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
						  timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put_unlocked(gobj);
	return r;
}

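/* DRM_AMDGPU_GEM_METADATA: get or set a BO's tiling flags and opaque metadata. */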
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @list: validation list
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    struct list_head *list,
				    uint32_t operation)
{
	int r;

	if (!amdgpu_vm_ready(vm))
		return;

	r = amdgpu_vm_update_directories(adev, vm);
	if (r)
		goto error;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE)
		r = amdgpu_vm_bo_update(adev, bo_va, false);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

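/*
 * DRM_AMDGPU_GEM_VA: map, unmap, clear or replace a mapping in the GPU
 * virtual address space of the calling process.
 */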
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint64_t va_flags;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"va_address 0x%LX is in reserved area 0x%LX\n",
			args->va_address, AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
		tv.bo = &abo->tbo;
		tv.shared = false;
		list_add(&tv.head, &list);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_unref;

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error_backoff;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
					args->map_size);
		if (r)
			goto error_backoff;

		va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;

	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
					args->map_size);
		if (r)
			goto error_backoff;

		va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, &list,
					args->operation);

error_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

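/* DRM_AMDGPU_GEM_OP: query a BO's creation info or change its preferred placement. */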
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			amdgpu_vm_bo_invalidate(adev, robj, true);

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

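/*
 * KMS dumb-buffer support: allocate a CPU-accessible VRAM buffer with the
 * pitch and size derived from the requested width, height and bpp.
 */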
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = amdgpu_align_pitch(adev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     false, NULL, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *gobj = ptr;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct seq_file *m = data;

	unsigned domain;
	const char *placement;
	unsigned pin_count;
	uint64_t offset;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}
	seq_printf(m, "\t0x%08x: %12ld byte %s",
		   id, amdgpu_bo_size(bo), placement);

	offset = ACCESS_ONCE(bo->tbo.mem.start);
	if (offset != AMDGPU_BO_INVALID_OFFSET)
		seq_printf(m, " @ 0x%010Lx", offset);

	pin_count = ACCESS_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);
	seq_printf(m, "\n");

	return 0;
}

static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}