/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

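/**
 * amdgpu_gem_object_free - free a GEM object
 *
 * @gobj: GEM object to free
 *
 * Tears down the PRIME import attachment and MMU notifier registration
 * if present, then drops the driver reference to the backing BO.
 */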
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
        struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

        if (robj) {
                if (robj->gem_base.import_attach)
                        drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
                amdgpu_mn_unregister(robj);
                amdgpu_bo_unref(&robj);
        }
}

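/**
 * amdgpu_gem_object_create - allocate a BO and wrap it in a GEM object
 *
 * @adev: amdgpu device
 * @size: requested size in bytes
 * @alignment: requested alignment, raised to at least PAGE_SIZE
 * @initial_domain: initial placement domain(s)
 * @flags: BO creation flags
 * @kernel: whether this is a kernel allocation
 * @obj: resulting GEM object
 *
 * Normal allocations are capped at the unpinned GTT size; if VRAM is
 * exhausted the allocation transparently retries with GTT as an
 * additional placement.
 */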
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
                             int alignment, u32 initial_domain,
                             u64 flags, bool kernel,
                             struct drm_gem_object **obj)
{
        struct amdgpu_bo *robj;
        unsigned long max_size;
        int r;

        *obj = NULL;
        /* At least align on page size */
        if (alignment < PAGE_SIZE) {
                alignment = PAGE_SIZE;
        }

        if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
                /* Maximum bo size is the unpinned gtt size since we use the gtt to
                 * handle vram to system pool migrations.
                 */
                max_size = adev->mc.gtt_size - adev->gart_pin_size;
                if (size > max_size) {
                        DRM_DEBUG("Allocation size %ldMB bigger than %ldMB limit\n",
                                  size >> 20, max_size >> 20);
                        return -ENOMEM;
                }
        }
retry:
        r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, flags, NULL, &robj);
        if (r) {
                if (r != -ERESTARTSYS) {
                        if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
                                initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
                                goto retry;
                        }
                        DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
                                  size, initial_domain, alignment, r);
                }
                return r;
        }
        *obj = &robj->gem_base;
        robj->pid = task_pid_nr(current);

        mutex_lock(&adev->gem.mutex);
        list_add_tail(&robj->list, &adev->gem.objects);
        mutex_unlock(&adev->gem.mutex);

        return 0;
}

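/**
 * amdgpu_gem_init - initialize GEM state
 *
 * @adev: amdgpu device
 *
 * Sets up the per-device list of GEM objects used by the debugfs
 * GEM info file.
 */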
int amdgpu_gem_init(struct amdgpu_device *adev)
{
        INIT_LIST_HEAD(&adev->gem.objects);
        return 0;
}

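/**
 * amdgpu_gem_fini - tear down GEM state
 *
 * @adev: amdgpu device
 *
 * Force-deletes any BOs still alive when the driver is unloaded.
 */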
void amdgpu_gem_fini(struct amdgpu_device *adev)
{
        amdgpu_bo_force_delete(adev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * the open ioctl case.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
        struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = rbo->adev;
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va *bo_va;
        int r;

        r = amdgpu_bo_reserve(rbo, false);
        if (r) {
                return r;
        }

        bo_va = amdgpu_vm_bo_find(vm, rbo);
        if (!bo_va) {
                bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
        } else {
                ++bo_va->ref_count;
        }
        amdgpu_bo_unreserve(rbo);

        return 0;
}

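/**
 * amdgpu_gem_object_close - clean up a client's VM state for a BO
 *
 * @obj: GEM object being closed
 * @file_priv: DRM file of the client dropping its handle
 *
 * Drops one bo_va reference in the client's VM and removes the
 * mapping entirely once the last reference is gone.
 */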
void amdgpu_gem_object_close(struct drm_gem_object *obj,
                             struct drm_file *file_priv)
{
        struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = rbo->adev;
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va *bo_va;
        int r;

        r = amdgpu_bo_reserve(rbo, true);
        if (r) {
                dev_err(adev->dev, "leaking bo va because "
                        "we fail to reserve bo (%d)\n", r);
                return;
        }
        bo_va = amdgpu_vm_bo_find(vm, rbo);
        if (bo_va) {
                if (--bo_va->ref_count == 0) {
                        amdgpu_vm_bo_rmv(adev, bo_va);
                }
        }
        amdgpu_bo_unreserve(rbo);
}

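/**
 * amdgpu_gem_handle_lockup - translate a lockup error into a GPU reset
 *
 * @adev: amdgpu device
 * @r: error code from a previous operation
 *
 * On -EDEADLK attempts a GPU reset and, if it succeeds, returns
 * -EAGAIN so the caller retries. All other codes pass through.
 */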
static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
{
        if (r == -EDEADLK) {
                r = amdgpu_gpu_reset(adev);
                if (!r)
                        r = -EAGAIN;
        }
        return r;
}

/*
 * GEM ioctls.
 */
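/**
 * amdgpu_gem_create_ioctl - create a GEM object from userspace
 *
 * @dev: DRM device
 * @data: ioctl argument (union drm_amdgpu_gem_create)
 * @filp: DRM file to create the handle for
 *
 * GDS, GWS and OA requests are kernel objects whose size is given in
 * hardware blocks and scaled to bytes here; all sizes are then rounded
 * up to a whole number of pages before allocation.
 */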
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        union drm_amdgpu_gem_create *args = data;
        uint64_t size = args->in.bo_size;
        struct drm_gem_object *gobj;
        uint32_t handle;
        bool kernel = false;
        int r;

        down_read(&adev->exclusive_lock);
        /* create a gem object to contain this object in */
        if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
            AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
                kernel = true;
                if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
                        size = size << AMDGPU_GDS_SHIFT;
                else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
                        size = size << AMDGPU_GWS_SHIFT;
                else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
                        size = size << AMDGPU_OA_SHIFT;
                else {
                        r = -EINVAL;
                        goto error_unlock;
                }
        }
        size = roundup(size, PAGE_SIZE);

        r = amdgpu_gem_object_create(adev, size, args->in.alignment,
                                     (u32)(0xffffffff & args->in.domains),
                                     args->in.domain_flags,
                                     kernel, &gobj);
        if (r)
                goto error_unlock;

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r)
                goto error_unlock;

        memset(args, 0, sizeof(*args));
        args->out.handle = handle;
        up_read(&adev->exclusive_lock);
        return 0;

error_unlock:
        up_read(&adev->exclusive_lock);
        r = amdgpu_gem_handle_lockup(adev, r);
        return r;
}

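/**
 * amdgpu_gem_userptr_ioctl - create a GEM object backed by user memory
 *
 * @dev: DRM device
 * @data: ioctl argument (struct drm_amdgpu_gem_userptr)
 * @filp: DRM file to create the handle for
 *
 * Wraps a page-aligned range of user memory in a GEM object. Writable
 * ranges must be anonymous memory registered with an MMU notifier;
 * AMDGPU_GEM_USERPTR_VALIDATE additionally binds the pages to GTT
 * right away.
 */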
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_amdgpu_gem_userptr *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *bo;
        uint32_t handle;
        int r;

        if (offset_in_page(args->addr | args->size))
                return -EINVAL;

        /* reject unknown flag values */
        if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
            AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
            AMDGPU_GEM_USERPTR_REGISTER))
                return -EINVAL;

        if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && (
             !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
             !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) {

                /* if we want to write to it we must require anonymous
                   memory and install an MMU notifier */
                return -EACCES;
        }

        down_read(&adev->exclusive_lock);

        /* create a gem object to contain this object in */
        r = amdgpu_gem_object_create(adev, args->size, 0,
                                     AMDGPU_GEM_DOMAIN_CPU, 0,
                                     0, &gobj);
        if (r)
                goto handle_lockup;

        bo = gem_to_amdgpu_bo(gobj);
        r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
        if (r)
                goto release_object;

        if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
                r = amdgpu_mn_register(bo, args->addr);
                if (r)
                        goto release_object;
        }

        if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
                down_read(&current->mm->mmap_sem);
                r = amdgpu_bo_reserve(bo, true);
                if (r) {
                        up_read(&current->mm->mmap_sem);
                        goto release_object;
                }

                amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
                amdgpu_bo_unreserve(bo);
                up_read(&current->mm->mmap_sem);
                if (r)
                        goto release_object;
        }

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r)
                goto handle_lockup;

        args->handle = handle;
        up_read(&adev->exclusive_lock);
        return 0;

release_object:
        drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
        up_read(&adev->exclusive_lock);
        r = amdgpu_gem_handle_lockup(adev, r);

        return r;
}

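/**
 * amdgpu_mode_dumb_mmap - look up the fake mmap offset for a BO
 *
 * @filp: DRM file the handle belongs to
 * @dev: DRM device
 * @handle: GEM handle of the BO
 * @offset_p: returned mmap offset
 *
 * Userptr BOs and BOs created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS
 * must not be CPU-mapped, so they return -EPERM.
 */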
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p)
{
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;

        gobj = drm_gem_object_lookup(dev, filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);
        if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm) ||
            (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
                drm_gem_object_unreference_unlocked(gobj);
                return -EPERM;
        }
        *offset_p = amdgpu_bo_mmap_offset(robj);
        drm_gem_object_unreference_unlocked(gobj);
        return 0;
}

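/**
 * amdgpu_gem_mmap_ioctl - GEM mmap offset ioctl
 *
 * Thin wrapper around amdgpu_mode_dumb_mmap() that unpacks the ioctl
 * argument and returns the offset in args->out.addr_ptr.
 */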
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        union drm_amdgpu_gem_mmap *args = data;
        uint32_t handle = args->in.handle;

        memset(args, 0, sizeof(*args));
        return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
        unsigned long timeout_jiffies;
        ktime_t timeout;

        /* clamp timeout if it's too large */
        if (((int64_t)timeout_ns) < 0)
                return MAX_SCHEDULE_TIMEOUT;

        /* remaining time is the absolute deadline minus the current time */
        timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
        if (ktime_to_ns(timeout) < 0)
                return 0;

        timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
        /* clamp timeout to avoid unsigned -> signed overflow */
        if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
                return MAX_SCHEDULE_TIMEOUT - 1;

        return timeout_jiffies;
}

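/**
 * amdgpu_gem_wait_idle_ioctl - wait for a BO to become idle
 *
 * @dev: DRM device
 * @data: ioctl argument (union drm_amdgpu_gem_wait_idle)
 * @filp: DRM file the handle belongs to
 *
 * With a zero timeout this is a non-blocking poll of the reservation
 * object; otherwise it waits up to the given absolute deadline.
 * args->out.status is 0 when the BO is idle and 1 when it is busy.
 */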
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        union drm_amdgpu_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        uint32_t handle = args->in.handle;
        unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
        int r = 0;
        long ret;

        gobj = drm_gem_object_lookup(dev, filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);
        if (timeout == 0)
                ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
        else
                ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);

        /* ret == 0 means not signaled,
         * ret > 0 means signaled
         * ret < 0 means interrupted before timeout
         */
        if (ret >= 0) {
                memset(args, 0, sizeof(*args));
                args->out.status = (ret == 0);
        } else
                r = ret;

        drm_gem_object_unreference_unlocked(gobj);
        r = amdgpu_gem_handle_lockup(adev, r);
        return r;
}

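/**
 * amdgpu_gem_metadata_ioctl - get or set BO metadata and tiling flags
 *
 * @dev: DRM device
 * @data: ioctl argument (struct drm_amdgpu_gem_metadata)
 * @filp: DRM file the handle belongs to
 *
 * GET_METADATA returns the tiling flags together with the opaque
 * metadata blob; SET_METADATA stores both on the BO.
 */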
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp)
{
        struct drm_amdgpu_gem_metadata *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        int r = -1;

        DRM_DEBUG("%d\n", args->handle);
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_amdgpu_bo(gobj);

        r = amdgpu_bo_reserve(robj, false);
        if (unlikely(r != 0))
                goto out;

        if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
                amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
                r = amdgpu_bo_get_metadata(robj, args->data.data,
                                           sizeof(args->data.data),
                                           &args->data.data_size_bytes,
                                           &args->data.flags);
        } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
                r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
                if (!r)
                        r = amdgpu_bo_set_metadata(robj, args->data.data,
                                                   args->data.data_size_bytes,
                                                   args->data.flags);
        }

        amdgpu_bo_unreserve(robj);
out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
                                    struct amdgpu_bo_va *bo_va)
{
        struct ttm_validate_buffer tv, *entry;
        struct amdgpu_bo_list_entry *vm_bos;
        struct ww_acquire_ctx ticket;
        struct list_head list;
        unsigned domain;
        int r;

        INIT_LIST_HEAD(&list);

        tv.bo = &bo_va->bo->tbo;
        tv.shared = true;
        list_add(&tv.head, &list);

        vm_bos = amdgpu_vm_get_bos(adev, bo_va->vm, &list);
        if (!vm_bos)
                return;

        r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
        if (r)
                goto error_free;

        list_for_each_entry(entry, &list, head) {
                domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
                /* if anything is swapped out don't swap it in here,
                   just abort and wait for the next CS */
                if (domain == AMDGPU_GEM_DOMAIN_CPU)
                        goto error_unreserve;
        }

        mutex_lock(&bo_va->vm->mutex);
        r = amdgpu_vm_clear_freed(adev, bo_va->vm);
        if (r)
                goto error_unlock;

        r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
        mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
        ttm_eu_backoff_reservation(&ticket, &list);

error_free:
        drm_free_large(vm_bos);

        if (r && r != -ERESTARTSYS)
                DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

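/**
 * amdgpu_gem_va_ioctl - map or unmap a BO in the process GPU VM
 *
 * @dev: DRM device
 * @data: ioctl argument (struct drm_amdgpu_gem_va)
 * @filp: DRM file whose VM is modified
 *
 * Validates the requested VA and flags, performs the map or unmap on
 * the BO's bo_va and immediately flushes the change into the page
 * tables via amdgpu_gem_va_update_vm().
 */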
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_amdgpu_gem_va *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        struct amdgpu_bo *rbo;
        struct amdgpu_bo_va *bo_va;
        uint32_t invalid_flags, va_flags = 0;
        int r = 0;

        if (!adev->vm_manager.enabled)
                return -ENOTTY;

        if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
                dev_err(&dev->pdev->dev,
                        "va_address 0x%lX is in reserved area 0x%X\n",
                        (unsigned long)args->va_address,
                        AMDGPU_VA_RESERVED_SIZE);
                return -EINVAL;
        }

        invalid_flags = ~(AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
                        AMDGPU_VM_PAGE_EXECUTABLE);
        if ((args->flags & invalid_flags)) {
                dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
                        args->flags, invalid_flags);
                return -EINVAL;
        }

        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
        case AMDGPU_VA_OP_UNMAP:
                break;
        default:
                dev_err(&dev->pdev->dev, "unsupported operation %d\n",
                        args->operation);
                return -EINVAL;
        }

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;

        rbo = gem_to_amdgpu_bo(gobj);
        r = amdgpu_bo_reserve(rbo, false);
        if (r) {
                drm_gem_object_unreference_unlocked(gobj);
                return r;
        }

        bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
        if (!bo_va) {
                amdgpu_bo_unreserve(rbo);
                return -ENOENT;
        }

        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
                if (args->flags & AMDGPU_VM_PAGE_READABLE)
                        va_flags |= AMDGPU_PTE_READABLE;
                if (args->flags & AMDGPU_VM_PAGE_WRITEABLE)
                        va_flags |= AMDGPU_PTE_WRITEABLE;
                if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE)
                        va_flags |= AMDGPU_PTE_EXECUTABLE;
                r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
                                     args->offset_in_bo, args->map_size,
                                     va_flags);
                break;
        case AMDGPU_VA_OP_UNMAP:
                r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
                break;
        default:
                break;
        }

        if (!r)
                amdgpu_gem_va_update_vm(adev, bo_va);

        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

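/**
 * amdgpu_gem_op_ioctl - query or change GEM object properties
 *
 * @dev: DRM device
 * @data: ioctl argument (struct drm_amdgpu_gem_op)
 * @filp: DRM file the handle belongs to
 *
 * GET_GEM_CREATE_INFO copies the creation parameters back to
 * userspace; SET_PLACEMENT rewrites the initial domain, which is
 * not allowed for userptr BOs.
 */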
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_amdgpu_gem_op *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        int r;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);

        r = amdgpu_bo_reserve(robj, false);
        if (unlikely(r))
                goto out;

        switch (args->op) {
        case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
                struct drm_amdgpu_gem_create_in info;
                void __user *out = (void __user *)(long)args->value;

                info.bo_size = robj->gem_base.size;
                info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
                info.domains = robj->initial_domain;
                info.domain_flags = robj->flags;
                if (copy_to_user(out, &info, sizeof(info)))
                        r = -EFAULT;
                break;
        }
        case AMDGPU_GEM_OP_SET_PLACEMENT:
                if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) {
                        r = -EPERM;
                        break;
                }
                robj->initial_domain = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
                                                      AMDGPU_GEM_DOMAIN_GTT |
                                                      AMDGPU_GEM_DOMAIN_CPU);
                break;
        default:
                r = -EINVAL;
        }

        amdgpu_bo_unreserve(robj);
out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

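/**
 * amdgpu_mode_dumb_create - create a dumb buffer for scanout
 *
 * @file_priv: DRM file to create the handle for
 * @dev: DRM device
 * @args: width, height and bpp in; pitch, size and handle out
 *
 * Computes an aligned pitch and a page-aligned size, then allocates
 * the buffer in VRAM.
 */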
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
{
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
        args->size = args->pitch * args->height;
        args->size = ALIGN(args->size, PAGE_SIZE);

        r = amdgpu_gem_object_create(adev, args->size, 0,
                                     AMDGPU_GEM_DOMAIN_VRAM,
                                     0, ttm_bo_type_device,
                                     &gobj);
        if (r)
                return -ENOMEM;

        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
                return r;
        }
        args->handle = handle;
        return 0;
}

#if defined(CONFIG_DEBUG_FS)
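/* Dump every GEM object on the device's list with its size, current
 * placement and the pid that created it. */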
static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_bo *rbo;
        unsigned i = 0;

        mutex_lock(&adev->gem.mutex);
        list_for_each_entry(rbo, &adev->gem.objects, list) {
                unsigned domain;
                const char *placement;

                domain = amdgpu_mem_type_to_domain(rbo->tbo.mem.mem_type);
                switch (domain) {
                case AMDGPU_GEM_DOMAIN_VRAM:
                        placement = "VRAM";
                        break;
                case AMDGPU_GEM_DOMAIN_GTT:
                        placement = " GTT";
                        break;
                case AMDGPU_GEM_DOMAIN_CPU:
                default:
                        placement = " CPU";
                        break;
                }
                seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
                           i, amdgpu_bo_size(rbo) >> 10, amdgpu_bo_size(rbo) >> 20,
                           placement, (unsigned long)rbo->pid);
                i++;
        }
        mutex_unlock(&adev->gem.mutex);
        return 0;
}

static struct drm_info_list amdgpu_debugfs_gem_list[] = {
        {"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

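/**
 * amdgpu_gem_debugfs_init - register the GEM debugfs file
 *
 * @adev: amdgpu device
 *
 * Registers amdgpu_gem_info when debugfs support is built in; a no-op
 * otherwise.
 */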
int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
        return 0;
}