/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

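/**
 * amdgpu_gem_object_free - free a GEM object
 *
 * @gobj: GEM object to free
 *
 * Tears the object down: detaches a PRIME import if there is one,
 * unregisters the MMU notifier and drops the buffer object reference.
 */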
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

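/**
 * amdgpu_gem_object_create - create a buffer object wrapped in a GEM object
 *
 * @adev: amdgpu device
 * @size: requested size in bytes
 * @alignment: requested alignment, raised to at least PAGE_SIZE
 * @initial_domain: initial placement domain(s)
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @kernel: whether this is a kernel internal allocation
 * @obj: resulting GEM object
 *
 * Checks the requested size against the unpinned GTT limit, creates the
 * buffer object and falls back from VRAM to VRAM|GTT placement when the
 * initial VRAM allocation fails.
 */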
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
		/* Maximum bo size is the unpinned gtt size since we use the gtt to
		 * handle vram to system pool migrations.
		 */
		max_size = adev->mc.gtt_size - adev->gart_pin_size;
		if (size > max_size) {
			DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
				  size >> 20, max_size >> 20);
			return -ENOMEM;
		}
	}
retry:
	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;

	return 0;
}

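/**
 * amdgpu_gem_force_release - release the GEM objects of all clients
 *
 * @adev: amdgpu device
 *
 * Warns about and releases any GEM handles user space clients still
 * have open; used to clean up on teardown.
 */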
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_file *file;

	mutex_lock(&ddev->struct_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_unreference(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->struct_mutex);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = rbo->adev;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	int r;

	r = amdgpu_bo_reserve(rbo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(rbo);
	return 0;
}

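/**
 * amdgpu_gem_object_close - handle a GEM handle being closed
 *
 * @obj: GEM object the handle referred to
 * @file_priv: DRM file of the client that closed the handle
 *
 * Drops the bo_va reference held through this handle and removes the
 * mapping from the client's VM once the last reference is gone.
 */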
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = rbo->adev;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	int r;

	r = amdgpu_bo_reserve(rbo, true);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			amdgpu_vm_bo_rmv(adev, bo_va);
		}
	}
	amdgpu_bo_unreserve(rbo);
}

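/**
 * amdgpu_gem_handle_lockup - turn a GPU lockup into a reset and retry
 *
 * @adev: amdgpu device
 * @r: error code returned by the failed operation
 *
 * -EDEADLK signals a hung GPU: trigger a reset and, if it succeeds,
 * return -EAGAIN so the operation can be retried.
 */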
static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
{
	if (r == -EDEADLK) {
		r = amdgpu_gpu_reset(adev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_create *args = data;
	uint64_t size = args->in.bo_size;
	struct drm_gem_object *gobj;
	uint32_t handle;
	bool kernel = false;
	int r;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		kernel = true;
		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
			size = size << AMDGPU_GDS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
			size = size << AMDGPU_GWS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
			size = size << AMDGPU_OA_SHIFT;
		else {
			r = -EINVAL;
			goto error_unlock;
		}
	}
	size = roundup(size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     args->in.domain_flags,
				     kernel, &gobj);
	if (r)
		goto error_unlock;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto error_unlock;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;

error_unlock:
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}

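/**
 * amdgpu_gem_userptr_ioctl - create a GEM object backed by user memory
 *
 * @dev: DRM device
 * @data: drm_amdgpu_gem_userptr arguments from user space
 * @filp: DRM file of the caller
 *
 * Wraps a page aligned range of user memory in a GTT buffer object.
 * Writable mappings additionally require anonymous memory and a
 * registered MMU notifier so the pages can be invalidated safely.
 */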
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && (
	     !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) {

		/* if we want to write to it we must require anonymous
		 * memory and install an MMU notifier
		 */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_CPU, 0,
				     0, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_amdgpu_bo(gobj);
	bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);

		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
						 bo->tbo.ttm->pages);
		if (r)
			goto unlock_mmap_sem;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto free_pages;

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto free_pages;

		up_read(&current->mm->mmap_sem);
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	return 0;

free_pages:
	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);

unlock_mmap_sem:
	up_read(&current->mm->mmap_sem);

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	r = amdgpu_gem_handle_lockup(adev, r);

	return r;
}

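/**
 * amdgpu_mode_dumb_mmap - look up the mmap offset for a GEM handle
 *
 * @filp: DRM file of the caller
 * @dev: DRM device
 * @handle: GEM handle to look up
 * @offset_p: returned fake mmap offset
 *
 * Userptr objects and buffers created with NO_CPU_ACCESS cannot be
 * mapped and yield -EPERM.
 */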
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

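/**
 * amdgpu_gem_wait_idle_ioctl - wait for a buffer object to become idle
 *
 * @dev: DRM device
 * @data: drm_amdgpu_gem_wait_idle arguments from user space
 * @filp: DRM file of the caller
 *
 * With a zero timeout the reservation object is only polled, otherwise
 * the call sleeps until the buffer is idle or the timeout expires.
 */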
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (timeout == 0)
		ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
	else
		ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_unreference_unlocked(gobj);
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}

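/**
 * amdgpu_gem_metadata_ioctl - get or set buffer metadata and tiling flags
 *
 * @dev: DRM device
 * @data: drm_amdgpu_gem_metadata arguments from user space
 * @filp: DRM file of the caller
 */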
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to update
 * @operation: map or unmap operation being performed
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_bo_va *bo_va, uint32_t operation)
{
	struct ttm_validate_buffer tv, *entry;
	struct amdgpu_bo_list_entry vm_pd;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);

	/* Provide duplicates to avoid -EALREADY */
	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_print;

	amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates);
	list_for_each_entry(entry, &list, head) {
		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		 * just abort and wait for the next CS
		 */
		if (domain == AMDGPU_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}
	list_for_each_entry(entry, &duplicates, head) {
		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		 * just abort and wait for the next CS
		 */
		if (domain == AMDGPU_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
	if (r)
		goto error_unreserve;

	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
	if (r)
		goto error_unreserve;

	if (operation == AMDGPU_VA_OP_MAP)
		r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_print:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

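/**
 * amdgpu_gem_va_ioctl - map or unmap a buffer object in a process VM
 *
 * @dev: DRM device
 * @data: drm_amdgpu_gem_va arguments from user space
 * @filp: DRM file of the caller
 *
 * Validates the requested virtual address and flags, reserves the buffer
 * together with the page directory and performs the map or unmap
 * operation on the caller's VM.
 */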
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *rbo;
	struct amdgpu_bo_va *bo_va;
	struct ttm_validate_buffer tv, tv_pd;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint32_t invalid_flags, va_flags = 0;
	int r = 0;

	if (!adev->vm_manager.enabled)
		return -ENOTTY;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"va_address 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->va_address,
			AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	invalid_flags = ~(AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_READABLE |
			AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE);
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_amdgpu_bo(gobj);
	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	tv.bo = &rbo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	if (args->operation == AMDGPU_VA_OP_MAP) {
		tv_pd.bo = &fpriv->vm.page_directory->tbo;
		tv_pd.shared = true;
		list_add(&tv_pd.head, &list);
	}
	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r) {
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}

	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		if (args->flags & AMDGPU_VM_PAGE_READABLE)
			va_flags |= AMDGPU_PTE_READABLE;
		if (args->flags & AMDGPU_VM_PAGE_WRITEABLE)
			va_flags |= AMDGPU_PTE_WRITEABLE;
		if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE)
			va_flags |= AMDGPU_PTE_EXECUTABLE;
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;
	default:
		break;
	}
	ttm_eu_backoff_reservation(&ticket, &list);
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);

	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

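/**
 * amdgpu_gem_op_ioctl - query buffer information or update its placement
 *
 * @dev: DRM device
 * @data: drm_amdgpu_gem_op arguments from user space
 * @filp: DRM file of the caller
 */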
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = (void __user *)(long)args->value;

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->prefered_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->prefered_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

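/**
 * amdgpu_mode_dumb_create - create a dumb buffer object
 *
 * @file_priv: DRM file of the caller
 * @dev: DRM device
 * @args: dumb buffer parameters; pitch and size are filled in here
 *
 * Allocates a CPU accessible VRAM buffer with the pitch aligned for
 * display use.
 */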
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
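/* print one line of information about a buffer object for debugfs */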
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *gobj = ptr;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct seq_file *m = data;

	unsigned domain;
	const char *placement;
	unsigned pin_count;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}
	seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx",
		   id, amdgpu_bo_size(bo), placement,
		   amdgpu_bo_gpu_offset(bo));

	pin_count = ACCESS_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);
	seq_printf(m, "\n");

	return 0;
}

static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->struct_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}